| text (string, 12–1.05M chars) | repo_name (string, 5–86 chars) | path (string, 4–191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12–1.05M) | keyword (list, 1–23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
"""
Test the Studio help links.
"""
from unittest import skip
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.studio.asset_index import AssetIndexPageStudioFrontend
from common.test.acceptance.pages.studio.course_info import CourseUpdatesPage
from common.test.acceptance.pages.studio.edit_tabs import PagesPage
from common.test.acceptance.pages.studio.import_export import (
ExportCoursePage,
ExportLibraryPage,
ImportCoursePage,
ImportLibraryPage
)
from common.test.acceptance.pages.studio.index import DashboardPage, HomePage, IndexPage
from common.test.acceptance.pages.studio.library import LibraryPage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage
from common.test.acceptance.pages.studio.settings import SettingsPage
from common.test.acceptance.pages.studio.settings_advanced import AdvancedSettingsPage
from common.test.acceptance.pages.studio.settings_certificates import CertificatesPage
from common.test.acceptance.pages.studio.settings_graders import GradingPage
from common.test.acceptance.pages.studio.settings_group_configurations import GroupConfigurationsPage
from common.test.acceptance.pages.studio.textbook_upload import TextbookUploadPage
from common.test.acceptance.pages.studio.users import CourseTeamPage, LibraryUsersPage
from common.test.acceptance.pages.studio.utils import click_css, click_studio_help, studio_help_links
from common.test.acceptance.tests.helpers import (
AcceptanceTest,
assert_nav_help_link,
assert_side_bar_help_link,
url_for_help
)
from common.test.acceptance.tests.studio.base_studio_test import ContainerBase, StudioCourseTest, StudioLibraryTest
from openedx.core.lib.tests import attr
def _get_expected_documentation_url(path):
"""
Returns the expected URL within the 'Building and Running a Course' documentation.
"""
return url_for_help('course_author', path)
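# For example (hypothetical values; the exact host and version segment come
# from the `url_for_help` helper's configuration):
#   _get_expected_documentation_url('/index.html')
#   -> something like 'http://edx.readthedocs.io/projects/edx-partner-course-staff/en/latest/index.html'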
@attr(shard=20)
class StudioHelpTest(StudioCourseTest):
"""Tests for Studio help."""
def test_studio_help_links(self):
"""Test that the help links are present and have the correct content."""
page = DashboardPage(self.browser)
page.visit()
click_studio_help(page)
links = studio_help_links(page)
expected_links = [{
'href': u'http://docs.edx.org/',
'text': u'edX Documentation',
'sr_text': u'Access documentation on http://docs.edx.org'
}, {
'href': u'https://open.edx.org/',
'text': u'Open edX Portal',
'sr_text': u'Access the Open edX Portal'
}, {
'href': u'https://www.edx.org/course/overview-creating-edx-course-edx-edx101#.VO4eaLPF-n1',
'text': u'Enroll in edX101',
'sr_text': u'Enroll in edX101: Overview of Creating an edX Course'
}, {
'href': u'https://www.edx.org/course/creating-course-edx-studio-edx-studiox',
'text': u'Enroll in StudioX',
'sr_text': u'Enroll in StudioX: Creating a Course with edX Studio'
}, {
'href': u'mailto:partner-support@example.com',
'text': u'Contact Us',
'sr_text': 'Send an email to partner-support@example.com'
}]
for expected, actual in zip(expected_links, links):
self.assertEqual(expected['href'], actual.get_attribute('href'))
self.assertEqual(expected['text'], actual.text)
self.assertEqual(
expected['sr_text'],
actual.find_element_by_xpath('following-sibling::span').text
)
@attr(shard=20)
class SignInHelpTest(AcceptanceTest):
"""
Tests help links on 'Sign In' page
"""
def setUp(self):
super(SignInHelpTest, self).setUp()
self.index_page = IndexPage(self.browser)
self.index_page.visit()
def test_sign_in_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Sign In' page.
Given that I am on the 'Sign In' page.
And I want help about the sign in
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
sign_in_page = self.index_page.click_sign_in()
expected_url = _get_expected_documentation_url('/getting_started/index.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=sign_in_page,
href=expected_url,
signed_in=False
)
@attr(shard=20)
class SignUpHelpTest(AcceptanceTest):
"""
Tests help links on 'Sign Up' page.
"""
def setUp(self):
super(SignUpHelpTest, self).setUp()
self.index_page = IndexPage(self.browser)
self.index_page.visit()
def test_sign_up_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Sign Up' page.
Given that I am on the 'Sign Up' page.
And I want help about the sign up
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
sign_up_page = self.index_page.click_sign_up()
expected_url = _get_expected_documentation_url('/getting_started/index.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=sign_up_page,
href=expected_url,
signed_in=False
)
@attr(shard=20)
class HomeHelpTest(StudioCourseTest):
"""
Tests help links on 'Home' (Courses tab) page.
"""
def setUp(self): # pylint: disable=arguments-differ
super(HomeHelpTest, self).setUp()
self.home_page = HomePage(self.browser)
self.home_page.visit()
def test_course_home_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Home' (Courses tab) page.
Given that I am on the 'Home' (Courses tab) page.
And I want help about the courses
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/getting_started/CA_get_started_Studio.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.home_page,
href=expected_url
)
def test_course_home_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on 'Home' (Courses tab) page.
Given that I am on the 'Home' (Courses tab) page.
And I want help about the courses
And I click the 'Getting Started with Your Platform Studio' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/getting_started/CA_get_started_Studio.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.home_page,
href=expected_url,
help_text='Getting Started with Your Platform Studio',
as_list_item=True
)
@attr(shard=20)
class NewCourseHelpTest(AcceptanceTest):
"""
Test help links while creating a new course.
"""
def setUp(self):
super(NewCourseHelpTest, self).setUp()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPage(self.browser)
self.auth_page.visit()
self.dashboard_page.visit()
self.assertTrue(self.dashboard_page.new_course_button.present)
self.dashboard_page.click_new_course_button()
def test_course_create_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Create a New Course' page in the dashboard.
Given that I am on the 'Create a New Course' page in the dashboard.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/getting_started/CA_get_started_Studio.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.dashboard_page,
href=expected_url
)
def test_course_create_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on 'Create a New Course' page in the dashboard.
Given that I am on the 'Create a New Course' page in the dashboard.
And I want help about the process
And I click the 'Getting Started with Your Platform Studio' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/getting_started/CA_get_started_Studio.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.dashboard_page,
href=expected_url,
help_text='Getting Started with Your Platform Studio',
as_list_item=True
)
@attr(shard=20)
class NewLibraryHelpTest(AcceptanceTest):
"""
Test help links while creating a new library
"""
def setUp(self):
super(NewLibraryHelpTest, self).setUp()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPage(self.browser)
self.auth_page.visit()
self.dashboard_page.visit()
self.assertTrue(self.dashboard_page.has_new_library_button)
self.dashboard_page.click_new_library()
def test_library_create_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Create a New Library' page in the dashboard.
Given that I am on the 'Create a New Library' page in the dashboard.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/getting_started/CA_get_started_Studio.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.dashboard_page,
href=expected_url
)
def test_library_create_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on 'Create a New Library' page in the dashboard.
Given that I am on the 'Create a New Library' page in the dashboard.
And I want help about the process
And I click the 'Getting Started with Your Platform Studio' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/getting_started/CA_get_started_Studio.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.dashboard_page,
href=expected_url,
help_text='Getting Started with Your Platform Studio',
as_list_item=True
)
@attr(shard=20)
class LibraryTabHelpTest(AcceptanceTest):
"""
Test help links on the Libraries tab of the dashboard.
"""
def setUp(self):
super(LibraryTabHelpTest, self).setUp()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPage(self.browser)
self.auth_page.visit()
self.dashboard_page.visit()
def test_library_tab_nav_help(self):
"""
Scenario: Help link in navigation bar is working on the 'Home' (Libraries tab) page.
Given that I am on the 'Home' (Libraries tab) page.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
self.assertTrue(self.dashboard_page.has_new_library_button)
click_css(self.dashboard_page, '#course-index-tabs .libraries-tab', 0, False)
expected_url = _get_expected_documentation_url('/getting_started/CA_get_started_Studio.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.dashboard_page,
href=expected_url
)
@attr(shard=20)
class LibraryHelpTest(StudioLibraryTest):
"""
Test help links on a Library page.
"""
def setUp(self):
super(LibraryHelpTest, self).setUp()
self.library_page = LibraryPage(self.browser, self.library_key)
self.library_user_page = LibraryUsersPage(self.browser, self.library_key)
def test_library_user_access_setting_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'User Access'
settings page of library.
Given that I am on the 'User Access' settings page of library.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct.
"""
self.library_user_page.visit()
expected_url = _get_expected_documentation_url(
'/course_components/libraries.html#give-other-users-access-to-your-library'
)
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.library_user_page,
href=expected_url,
)
@attr(shard=20)
class LibraryImportHelpTest(StudioLibraryTest):
"""
Test help links on the Library import page.
"""
def setUp(self):
super(LibraryImportHelpTest, self).setUp()
self.library_import_page = ImportLibraryPage(self.browser, self.library_key)
self.library_import_page.visit()
def test_library_import_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Library import page.
Given that I am on the Library import page.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_components/libraries.html#import-a-library')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.library_import_page,
href=expected_url
)
def test_library_import_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on Library import page.
Given that I am on the Library import page.
And I want help about the process
And I click the 'Learn more about importing a library' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_components/libraries.html#import-a-library')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.library_import_page,
href=expected_url,
help_text='Learn more about importing a library'
)
@attr(shard=20)
class LibraryExportHelpTest(StudioLibraryTest):
"""
Test help links on the Library export page.
"""
def setUp(self):
super(LibraryExportHelpTest, self).setUp()
self.library_export_page = ExportLibraryPage(self.browser, self.library_key)
self.library_export_page.visit()
def test_library_export_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Library export page.
Given that I am on the Library export page.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_components/libraries.html#export-a-library')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.library_export_page,
href=expected_url
)
def test_library_export_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on Library export page.
Given that I am on the Library export page.
And I want help about the process
And I click the 'Learn more about exporting a library' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_components/libraries.html#export-a-library')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.library_export_page,
href=expected_url,
help_text='Learn more about exporting a library'
)
@attr(shard=20)
class CourseOutlineHelpTest(StudioCourseTest):
"""
Tests help links on course outline page.
"""
def setUp(self): # pylint: disable=arguments-differ
super(CourseOutlineHelpTest, self).setUp()
self.course_outline_page = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_outline_page.visit()
@skip("This scenario depends upon TNL-5460")
def test_course_outline_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Course Outline page
Given that I am on the Course Outline page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/developing_course/course_outline.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.course_outline_page,
href=expected_url
)
def test_course_outline_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on Course Outline page
Given that I am on the Course Outline page.
And I want help about the process
And I click the 'Learn more about the course outline' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/developing_course/course_outline.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.course_outline_page,
href=expected_url,
help_text='Learn more about the course outline',
index=0
)
@attr(shard=20)
class CourseUpdateHelpTest(StudioCourseTest):
"""
Test help links on Course Update page
"""
def setUp(self): # pylint: disable=arguments-differ
super(CourseUpdateHelpTest, self).setUp()
self.course_update_page = CourseUpdatesPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_update_page.visit()
def test_course_update_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Course Update' page
Given that I am on the 'Course Update' page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_assets/handouts_updates.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.course_update_page,
href=expected_url,
)
@attr(shard=20)
class AssetIndexHelpTest(StudioCourseTest):
"""
Test help links on Course 'Files & Uploads' page
"""
def setUp(self): # pylint: disable=arguments-differ
super(AssetIndexHelpTest, self).setUp()
self.course_asset_index_page = AssetIndexPageStudioFrontend(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_asset_index_page.visit()
def test_asset_index_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Files & Uploads' page
Given that I am on the 'Files & Uploads' page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_assets/course_files.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.course_asset_index_page,
href=expected_url,
)
@attr(shard=20)
class CoursePagesHelpTest(StudioCourseTest):
"""
Test help links on Course 'Pages' page
"""
def setUp(self): # pylint: disable=arguments-differ
super(CoursePagesHelpTest, self).setUp()
self.course_pages_page = PagesPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_pages_page.visit()
def test_course_page_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Pages' page
Given that I am on the 'Pages' page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_assets/pages.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.course_pages_page,
href=expected_url,
)
@attr(shard=20)
class UploadTextbookHelpTest(StudioCourseTest):
"""
Test help links on Course 'Textbooks' page
"""
def setUp(self): # pylint: disable=arguments-differ
super(UploadTextbookHelpTest, self).setUp()
self.course_textbook_upload_page = TextbookUploadPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_textbook_upload_page.visit()
def test_course_textbook_upload_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Textbooks' page
Given that I am on the 'Textbooks' page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_assets/textbooks.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.course_textbook_upload_page,
href=expected_url,
)
def test_course_textbook_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on 'Textbooks' page
Given that I am on the 'Textbooks' page
And I want help about the process
And I click the 'Learn more about textbooks' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_assets/textbooks.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.course_textbook_upload_page,
href=expected_url,
help_text='Learn more about textbooks'
)
@attr(shard=20)
class StudioUnitHelpTest(ContainerBase):
"""
Tests help links on Unit page.
"""
def setUp(self, is_staff=True):
super(StudioUnitHelpTest, self).setUp(is_staff=is_staff)
def populate_course_fixture(self, course_fixture):
"""
Populates the course fixture.
We are modifying the 'advanced_modules' setting of the course
and adding a section with a subsection and a unit.
"""
course_fixture.add_advanced_settings(
{u"advanced_modules": {"value": ["split_test"]}}
)
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit')
)
)
)
def test_unit_page_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Unit page.
Given that I am on the Unit page.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
unit_page = self.go_to_unit_page()
expected_url = _get_expected_documentation_url('/developing_course/course_units.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=unit_page,
href=expected_url,
)
@attr(shard=20)
class SettingsHelpTest(StudioCourseTest):
"""
Tests help links on Schedule and Details Settings page
"""
def setUp(self, is_staff=False, test_xss=True):
super(SettingsHelpTest, self).setUp()
self.settings_page = SettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.settings_page.visit()
def test_settings_page_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Settings page.
Given that I am on the Settings page.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/set_up_course/studio_add_course_information/index.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.settings_page,
href=expected_url,
)
@attr(shard=20)
class GradingPageHelpTest(StudioCourseTest):
"""
Tests help links on Grading page
"""
def setUp(self, is_staff=False, test_xss=True):
super(GradingPageHelpTest, self).setUp()
self.grading_page = GradingPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.grading_page.visit()
def test_grading_page_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Grading page.
Given that I am on the Grading page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/grading/index.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.grading_page,
href=expected_url,
)
@attr(shard=20)
class CourseTeamSettingsHelpTest(StudioCourseTest):
"""
Tests help links on Course Team settings page
"""
def setUp(self, is_staff=False, test_xss=True):
super(CourseTeamSettingsHelpTest, self).setUp()
self.course_team_settings_page = CourseTeamPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_team_settings_page.visit()
def test_course_course_team_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Course Team settings page
Given that I am on the Course Team settings page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/set_up_course/studio_add_course_information/studio_course_staffing.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.course_team_settings_page,
href=expected_url,
)
@attr(shard=20)
class CourseGroupConfigurationHelpTest(StudioCourseTest):
"""
Tests help links on course Group Configurations settings page
"""
def setUp(self, is_staff=False, test_xss=True):
super(CourseGroupConfigurationHelpTest, self).setUp()
self.course_group_configuration_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_group_configuration_page.visit()
def test_course_group_conf_nav_help(self):
"""
Scenario: Help link in navigation bar is working on
Group Configurations settings page
Given that I am on the Group Configurations settings page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/index.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.course_group_configuration_page,
href=expected_url,
)
def test_course_group_conf_content_group_side_bar_help(self):
"""
Scenario: Help link in side bar under the 'content group' is working
on Group Configurations settings page
Given that I am on the Group Configurations settings page
And I want help about the process
And I click the 'Learn More' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_features/cohorts/cohorted_courseware.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.course_group_configuration_page,
href=expected_url,
help_text='Learn More'
)
@attr(shard=20)
class AdvancedSettingHelpTest(StudioCourseTest):
"""
Tests help links on course Advanced Settings page.
"""
def setUp(self, is_staff=False, test_xss=True):
super(AdvancedSettingHelpTest, self).setUp()
self.advanced_settings = AdvancedSettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.advanced_settings.visit()
def test_advanced_settings_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Advanced Settings page.
Given that I am on the Advanced Settings page.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/index.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.advanced_settings,
href=expected_url,
)
@attr(shard=20)
class CertificatePageHelpTest(StudioCourseTest):
"""
Tests help links on course Certificate settings page.
"""
def setUp(self, is_staff=False, test_xss=True):
super(CertificatePageHelpTest, self).setUp()
self.certificates_page = CertificatesPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.certificates_page.visit()
def test_certificate_page_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Certificate settings page
Given that I am on the Certificate settings page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/set_up_course/studio_add_course_information/studio_creating_certificates.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.certificates_page,
href=expected_url,
)
def test_certificate_page_side_bar_help(self):
"""
Scenario: Help link in side bar is working on Certificate settings page
Given that I am on the Certificate settings page
And I want help about the process
And I click the 'Learn more about certificates' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/set_up_course/studio_add_course_information/studio_creating_certificates.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.certificates_page,
href=expected_url,
help_text='Learn more about certificates',
)
@attr(shard=20)
class GroupExperimentConfigurationHelpTest(ContainerBase):
"""
Tests help links on course Group Configurations settings page
These tests relate to the Experiment Group Configurations section of the page.
"""
def setUp(self): # pylint: disable=arguments-differ
super(GroupExperimentConfigurationHelpTest, self).setUp()
self.group_configuration_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.group_configuration_page.visit()
def populate_course_fixture(self, course_fixture):
"""
Populates the course fixture.
We are modifying the 'advanced_modules' setting of the course.
"""
course_fixture.add_advanced_settings(
{u"advanced_modules": {"value": ["split_test"]}}
)
def test_course_group_configuration_experiment_side_bar_help(self):
"""
Scenario: Help link in side bar under the 'Experiment Group Configurations'
is working on Group Configurations settings page
Given that I am on the Group Configurations settings page
And I want help about the process
And I click the 'Learn More' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url(
'/course_features/content_experiments/content_experiments_configure.html'
'#set-up-group-configurations-in-edx-studio'
)
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.group_configuration_page,
href=expected_url,
help_text='Learn More',
)
@attr(shard=20)
class ToolsImportHelpTest(StudioCourseTest):
"""
Tests help links on the tools course import page.
"""
def setUp(self, is_staff=False, test_xss=True):
super(ToolsImportHelpTest, self).setUp()
self.import_page = ImportCoursePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.import_page.visit()
def test_tools_import_nav_help(self):
"""
Scenario: Help link in navigation bar is working on the tools course import page
Given that I am on the tools course import page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/releasing_course/export_import_course.html#import-a-course')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.import_page,
href=expected_url,
)
def test_tools_import_side_bar_help(self):
"""
Scenario: Help link in side bar is working on the tools course import page
Given that I am on the tools course import page
And I want help about the process
And I click the 'Learn more about importing a course' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/releasing_course/export_import_course.html#import-a-course')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.import_page,
href=expected_url,
help_text='Learn more about importing a course',
)
@attr(shard=20)
class ToolsExportHelpTest(StudioCourseTest):
"""
Tests help links on the tools course export page.
"""
def setUp(self, is_staff=False, test_xss=True):
super(ToolsExportHelpTest, self).setUp()
self.export_page = ExportCoursePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.export_page.visit()
def test_tools_export_nav_help(self):
"""
Scenario: Help link in navigation bar is working on the tools course export page
Given that I am on the tools course export page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/releasing_course/export_import_course.html#export-a-course')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.export_page,
href=expected_url,
)
def test_tools_export_side_bar_help(self):
"""
Scenario: Help link in side bar is working on the tools course export page
Given that I am on the tools course export page
And I want help about the process
And I click the 'Learn more about exporting a course' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/releasing_course/export_import_course.html#export-a-course')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.export_page,
href=expected_url,
help_text='Learn more about exporting a course',
)
@attr(shard=20)
class StudioWelcomeHelpTest(AcceptanceTest):
"""
Tests help link on the 'Welcome' page (user not logged in)
"""
def setUp(self):
super(StudioWelcomeHelpTest, self).setUp()
self.index_page = IndexPage(self.browser)
self.index_page.visit()
def test_welcome_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Welcome' page (User not logged in).
Given that I am on the 'Welcome' page.
And I want help about edX
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/getting_started/index.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.index_page,
href=expected_url,
signed_in=False
)
| teltek/edx-platform | common/test/acceptance/tests/studio/test_studio_help.py | Python | agpl-3.0 | 41,125 | ["VisIt"] | b3be012a05ff3870892f26a1acda3fe0b2b9cc1def7b0dafea1a69038fb5a2bb |
r'''
This module contains the `RBF` class, which is used to symbolically define and
numerically evaluate a radial basis function. `RBF` instances have been
predefined in this module for some of the commonly used radial basis functions.
The predefined radial basis functions are shown in the table below. For each
expression in the table, :math:`r = ||x - c||_2` and :math:`\epsilon` is a
shape parameter. :math:`x` and :math:`c` are the evaluation points and radial
basis function centers, respectively. The names of the predefined `RBF`
instances are given in the "Abbreviation" column. The "Positive Definite"
column identifies whether the RBFs are always positive definite and, if not,
under what conditions they are positive definite. RBFs identified as being
"Conditional (order i)" are conditionally positive definite with order i as
defined in Section 7.1 of [1]. The Wendland class of RBFs is only positive
definite for the indicated number of spatial dimensions.
================================= ============ ===================== ======================================
Name Abbreviation Positive Definite Expression
================================= ============ ===================== ======================================
Eighth-order polyharmonic spline phs8 Conditional (order 5) :math:`-(\epsilon r)^8\log(\epsilon r)`
Seventh-order polyharmonic spline phs7 Conditional (order 4) :math:`(\epsilon r)^7`
Sixth-order polyharmonic spline phs6 Conditional (order 4) :math:`(\epsilon r)^6\log(\epsilon r)`
Fifth-order polyharmonic spline phs5 Conditional (order 3) :math:`-(\epsilon r)^5`
Fourth-order polyharmonic spline phs4 Conditional (order 3) :math:`-(\epsilon r)^4\log(\epsilon r)`
Third-order polyharmonic spline phs3 Conditional (order 2) :math:`(\epsilon r)^3`
Second-order polyharmonic spline phs2 Conditional (order 2) :math:`(\epsilon r)^2\log(\epsilon r)`
First-order polyharmonic spline phs1 Conditional (order 1) :math:`-\epsilon r`
Multiquadric mq Conditional (order 1) :math:`-(1 + (\epsilon r)^2)^{1/2}`
Inverse multiquadric imq Yes :math:`(1 + (\epsilon r)^2)^{-1/2}`
Inverse quadratic iq Yes :math:`(1 + (\epsilon r)^2)^{-1}`
Gaussian ga Yes :math:`\exp(-(\epsilon r)^2)`
Exponential exp Yes :math:`\exp(-r/\epsilon)`
Squared Exponential se Yes :math:`\exp(-r^2/(2\epsilon^2))`
Matern (v = 3/2) mat32 Yes :math:`(1 + \sqrt{3} r/\epsilon)\exp(-\sqrt{3} r/\epsilon)`
Matern (v = 5/2) mat52 Yes :math:`(1 + \sqrt{5} r/\epsilon + 5r^2/(3\epsilon^2))\exp(-\sqrt{5} r/\epsilon)`
Wendland (d=1, k=0) wen10 Yes (1-D only) :math:`(1 - r/\epsilon)_+`
Wendland (d=1, k=1) wen11 Yes (1-D only) :math:`(1 - r/\epsilon)_+^3(3r/\epsilon + 1)`
Wendland (d=1, k=2) wen12 Yes (1-D only) :math:`(1 - r/\epsilon)_+^5(8r^2/\epsilon^2 + 5r/\epsilon + 1)`
Wendland (d=3, k=0) wen30 Yes (1, 2, and 3-D) :math:`(1 - r/\epsilon)_+^2`
Wendland (d=3, k=1) wen31 Yes (1, 2, and 3-D) :math:`(1 - r/\epsilon)_+^4(4r/\epsilon + 1)`
Wendland (d=3, k=2) wen32 Yes (1, 2, and 3-D) :math:`(1 - r/\epsilon)_+^6(35r^2/\epsilon^2 + 18r/\epsilon + 3)/3`
================================= ============ ===================== ======================================
References
----------
[1] Fasshauer, G., Meshfree Approximation Methods with Matlab. World Scientific
Publishing Co, 2007.
'''
from __future__ import division
import logging
import weakref
import sympy
import numpy as np
from scipy.sparse import csc_matrix
from scipy.spatial import cKDTree
from sympy.utilities.autowrap import ufuncify
from sympy import lambdify
from rbf.poly import monomial_powers
from rbf.utils import assert_shape
logger = logging.getLogger(__name__)
# the method used to convert sympy expressions to numeric functions
_SYMBOLIC_TO_NUMERIC_METHOD = 'ufuncify'
def get_r():
'''
returns the symbolic variable for :math:`r` which is used to instantiate an
`RBF`
'''
return sympy.symbols('r')
def get_eps():
r'''
returns the symbolic variable for :math:`\epsilon` which is used to
instantiate an `RBF`
'''
return sympy.symbols('eps')
_EPS = get_eps()
_R = get_r()
class RBF(object):
'''
Stores a symbolic expression of a Radial Basis Function (RBF) and evaluates
the expression numerically when called.
Parameters
----------
expr : sympy expression
Sympy expression for the RBF. This must be a function of the symbolic
variable `r`, which can be obtained by calling `get_r()` or
`sympy.symbols('r')`. `r` is the radial distance to the RBF center.
The expression may optionally be a function of `eps`, which is a shape
parameter obtained by calling `get_eps()` or `sympy.symbols('eps')`.
If `eps` is not provided then `r` is substituted with `r*eps`.
tol : float or sympy expression, optional
This is for when an RBF or its derivatives contain a removable
singularity at the center. If `tol` is specified, then the limiting
value of the RBF at its center will be evaluated symbolically, and that
limit will be returned for all evaluation points, `x`, that are within
`tol` of the RBF center, `c`. If the limit of the RBF at `x = c` is
known, then it can be manually specified with the `limits` argument.
`tol` can be a float or a sympy expression containing `eps`.
limits : dict, optional
Contains the values of the RBF or its derivatives at the center. For
example, `{(0,1):2*eps}` indicates that the derivative with respect to
the second spatial dimension is `2*eps` at `x = c`. If this dictionary
is provided and `tol` is not `None`, then it will be searched before
estimating the limit with the method described above.
Examples
--------
Instantiate an inverse quadratic RBF
>>> from rbf.basis import *
>>> r = get_r()
>>> eps = get_eps()
>>> iq_expr = 1/(1 + (eps*r)**2)
>>> iq = RBF(iq_expr)
Evaluate an inverse quadratic at 10 points ranging from -5 to 5. Note that
the evaluation points and centers are two-dimensional arrays
>>> x = np.linspace(-5.0, 5.0, 10)[:, None]
>>> center = np.array([[0.0]])
>>> values = iq(x, center)
Instantiate a sinc RBF. This has a removable singularity at the RBF center,
which must be handled by specifying a number for `tol`.
>>> import sympy
>>> sinc_expr = sympy.sin(r)/r
>>> sinc = RBF(sinc_expr) # instantiate WITHOUT specifying `tol`
>>> x = np.array([[-1.0], [0.0], [1.0]])
>>> c = np.array([[0.0]])
>>> sinc(x, c) # this incorrectly evaluates to nan at the center
array([[ 0.84147098],
[ nan],
[ 0.84147098]])
>>> sinc = RBF(sinc_expr, tol=1e-10) # instantiate specifying `tol`
>>> sinc(x, c) # this now correctly evaluates to 1.0 at the center
array([[ 0.84147098],
[ 1. ],
[ 0.84147098]])
'''
_INSTANCES = []
@property
def expr(self):
# `expr` is read-only.
return self._expr
@property
def tol(self):
# `tol` is read-only
return self._tol
@property
def limits(self):
# `limits` is read-only
return self._limits
def __new__(cls, *args, **kwargs):
# this keeps track of RBF and RBF subclass instances
instance = object.__new__(cls)
cls._INSTANCES += [weakref.ref(instance)]
return instance
def __init__(self, expr, tol=None, limits=None):
## SANITIZE `EXPR`
# make sure `expr` is a sympy expression
if not issubclass(type(expr), sympy.Expr):
raise ValueError('`expr` must be a sympy expression')
# make sure that `expr` does not contain any symbols other than
# `r` and `eps`
other_symbols = expr.free_symbols.difference({_R, _EPS})
if len(other_symbols) != 0:
raise ValueError(
'`expr` cannot contain any symbols other than `r` and `eps`')
# make sure that `expr` at least has `r`
if not expr.has(_R):
raise ValueError('`expr` must contain the symbol `r`')
if not expr.has(_EPS):
# if `eps` is not in the expression then substitute `eps*r` for `r`
expr = expr.subs(_R, _EPS*_R)
self._expr = expr
## SANITIZE `TOL`
if tol is not None:
# make sure `tol` is a scalar or a sympy expression of `eps`
tol = sympy.sympify(tol)
other_symbols = tol.free_symbols.difference({_EPS})
if len(other_symbols) != 0:
raise ValueError(
'`tol` cannot contain any symbols other than `eps`')
self._tol = tol
## SANITIZE `LIMITS`
if limits is None:
limits = {}
self._limits = limits
## create the cache for numerical functions
self._cache = {}
def __call__(self, x, c, eps=1.0, diff=None):
'''
Numerically evaluates the RBF or its derivatives.
Parameters
----------
x : (..., N, D) float array
Evaluation points
c : (..., M, D) float array
RBF centers
eps : float or float array, optional
Shape parameter for each RBF
diff : (D,) int array, optional
Specifies the derivative order for each spatial dimension. For
example, if there are three spatial dimensions then providing
(2, 0, 1) would cause this function to return the RBF after
differentiating it twice along the first dimension and once along
the third dimension.
Returns
-------
(..., N, M) float array
The RBFs with centers `c` evaluated at `x`
Notes
-----
The default method for converting the symbolic RBF to a numeric
function limits the number of spatial dimensions `D` to 15. There is no
such limitation when the conversion method is set to "lambdify". Set
the conversion method using the function
`set_symbolic_to_numeric_method`.
The derivative order can be arbitrarily high, but some RBFs, such as
Wendland and Matern, become numerically unstable when the derivative
order exceeds 2.
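Examples
--------
A minimal sketch of the broadcasting behavior, assuming the predefined
Gaussian `ga` from this module and numpy imported as `np`:

>>> x = np.random.random((5, 2))  # 5 evaluation points in 2-D
>>> c = np.random.random((3, 2))  # 3 centers in 2-D
>>> ga(x, c).shape
(5, 3)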
'''
x = np.asarray(x, dtype=float)
assert_shape(x, (..., None, None), 'x')
ndim = x.shape[-1]
c = np.asarray(c, dtype=float)
assert_shape(c, (..., None, ndim), 'c')
eps = np.asarray(eps, dtype=float)
eps = np.broadcast_to(eps, c.shape[:-1])
# if `diff` is not given then take no derivatives
if diff is None:
diff = (0,)*ndim
else:
# make sure diff is immutable
diff = tuple(diff)
assert_shape(diff, (ndim,), 'diff')
# add numerical function to cache if not already
if diff not in self._cache:
self._add_diff_to_cache(diff)
# reshape x from (..., n, d) to (d, ..., n, 1)
x = np.einsum('...ij->j...i', x)[..., None]
# reshape c from (..., m, d) to (d, ..., 1, m)
c = np.einsum('...ij->j...i', c)[..., None, :]
# reshape eps from (..., m) to (..., 1, m)
eps = eps[..., None, :]
args = (tuple(x) + tuple(c) + (eps,))
# evaluate the cached function for the given `x`, `c`, and `eps`
out = self._cache[diff](*args)
return out
def center_value(self, eps=1.0, diff=(0,)):
'''
Returns the value at the center of the RBF for the given `eps` and
`diff`. This is a faster alternative to determining the center value
with `__call__`.
Parameters
----------
eps : float, optional
Shape parameter
diff : tuple, optional
Derivative order for each spatial dimension
Returns
-------
float
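Examples
--------
A minimal sketch, assuming the predefined Gaussian `ga` from this module:

>>> float(ga.center_value(eps=2.0))  # a Gaussian is 1 at its center
1.0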
'''
diff = tuple(diff)
if diff not in self._cache:
self._add_diff_to_cache(diff)
args = (0.0,)*(2*len(diff)) + (eps,)
return self._cache[diff](*args)
def __repr__(self):
out = '<RBF : %s>' % str(self.expr)
return out
def _add_diff_to_cache(self, diff):
'''
Symbolically differentiates the RBF and then converts the expression to
a function which can be evaluated numerically.
'''
logger.debug(
'Creating a numerical function for the RBF %s with the derivative '
'%s ...' % (self, str(diff)))
dim = len(diff)
c_sym = sympy.symbols('c:%s' % dim)
x_sym = sympy.symbols('x:%s' % dim)
r_sym = sympy.sqrt(sum((xi-ci)**2 for xi, ci in zip(x_sym, c_sym)))
# substitute 'r' in the RBF expression with the cartesian spatial
# variables and differentiate the RBF with respect to them
expr = self.expr.subs(_R, r_sym)
for xi, order in zip(x_sym, diff):
if order == 0:
continue
expr = expr.diff(*(xi,)*order)
# if `tol` is given, form a separate expression for the RBF near its
# center
if self.tol is not None:
if diff in self.limits:
# use a user-specified limit if available
lim = self.limits[diff]
else:
logger.debug(
'Symbolically evaluating the RBF at its center ...')
# evaluate the limit of the RBF at (x0=tol+c0, x1=c1, x2=c2,
# ...) as tol goes to zero.
lim = expr.subs(zip(x_sym[1:], c_sym[1:]))
lim = lim.simplify()
lim = lim.limit(x_sym[0], c_sym[0])
logger.debug('Value of the RBF at its center: %s' % lim)
# create a piecewise symbolic function which is `lim` when
# `r_sym < tol` and `expr` otherwise
expr = sympy.Piecewise((lim, r_sym < self.tol), (expr, True))
if _SYMBOLIC_TO_NUMERIC_METHOD == 'ufuncify':
func = ufuncify(x_sym + c_sym + (_EPS,), expr, backend='numpy')
elif _SYMBOLIC_TO_NUMERIC_METHOD == 'lambdify':
func = lambdify(x_sym + c_sym + (_EPS,), expr, modules=['numpy'])
else:
raise ValueError('unrecognized value for _SYMBOLIC_TO_NUMERIC_METHOD: %s' % _SYMBOLIC_TO_NUMERIC_METHOD)
self._cache[diff] = func
logger.debug('The numeric function has been created and cached')
def clear_cache(self):
'''
Clears the cache of numeric functions. Makes a cache dictionary if it
does not already exist
'''
self._cache = {}
def __getstate__(self):
# This method is needed for RBF instances to be picklable. The cached
# numerical functions are not picklable and so we need to remove them
# from the state dictionary.
# make a shallow copy of the instance's __dict__ so that we do not mess
# with it
state = dict(self.__dict__)
state['_cache'] = {}
return state
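# A consequence of `__getstate__` (a sketch, assuming the predefined `ga`):
# unpickled instances start with an empty cache and rebuild their numeric
# functions lazily on the next call.
#   >>> import pickle
#   >>> pickle.loads(pickle.dumps(ga))._cache
#   {}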
class SparseRBF(RBF):
'''
Stores a symbolic expression of a compact Radial Basis Function (RBF) and
evaluates the expression numerically when called. Calling a `SparseRBF`
instance will return a csc sparse matrix.
Parameters
----------
expr : sympy expression
Sympy expression for the RBF. This must be a function of the symbolic
variable `r`, which can be obtained by calling `get_r()` or
`sympy.symbols('r')`. `r` is the radial distance to the RBF center.
The expression may optionally be a function of `eps`, which is a shape
parameter obtained by calling `get_eps()` or `sympy.symbols('eps')`.
If `eps` is not provided then `r` is substituted with `r*eps`.
support : float or sympy expression
Indicates the support of the RBF. The RBF is set to zero for radial
distances greater than `support`, regardless of what `expr` evaluates
to. This can be a float or a sympy expression containing `eps`.
tol : float or sympy expression, optional
This is for when an RBF or its derivatives contain a removable
singularity at the center. If `tol` is specified, then the limiting
value of the RBF at its center will be evaluated symbolically, and that
limit will be returned for all evaluation points, `x`, that are within
`tol` of the RBF center, `c`. If the limit of the RBF at `x = c` is
known, then it can be manually specified with the `limits` argument.
`tol` can be a float or a sympy expression containing `eps`.
limits : dict, optional
Contains the values of the RBF or its derivatives at the center. For
example, `{(0, 1):2*eps}` indicates that the derivative with respect to
the second spatial dimension is `2*eps` at `x = c`. If this dictionary
is provided and `tol` is not `None`, then it will be searched before
estimating the limit with the method described above.
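Examples
--------
A minimal sketch, assuming the predefined `spwen11` instance from this
module and numpy imported as `np`:

>>> x = np.linspace(-2.0, 2.0, 9)[:, None]
>>> c = np.array([[0.0]])
>>> out = spwen11(x, c, eps=1.0)  # entries beyond the support radius are not stored
>>> out.shape
(9, 1)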
'''
@property
def supp(self):
return self._supp
def __init__(self, expr, supp, **kwargs):
RBF.__init__(self, expr, **kwargs)
## SANITIZE `SUPP`
# make sure `supp` is a scalar or a sympy expression of `eps`
supp = sympy.sympify(supp)
other_symbols = supp.free_symbols.difference({_EPS})
if len(other_symbols) != 0:
raise ValueError(
'`supp` cannot contain any symbols other than `eps`')
self._supp = supp
def __call__(self, x, c, eps=1.0, diff=None):
'''
Numerically evaluates the RBF or its derivatives.
Parameters
----------
x : (N, D) float array
Evaluation points
c : (M, D) float array
RBF centers
eps : float, optional
Shape parameter
diff : (D,) int array, optional
Specifies the derivative order for each Cartesian direction. For
example, if there are three spatial dimensions then providing
(2, 0, 1) would cause this function to return the RBF after
differentiating it twice along the first axis and once along the
third axis.
Returns
-------
out : (N, M) csc sparse matrix
The RBFs with centers `c` evaluated at `x`
'''
x = np.asarray(x, dtype=float)
assert_shape(x, (None, None), 'x')
ndim = x.shape[1]
c = np.asarray(c, dtype=float)
assert_shape(c, (None, ndim), 'c')
if not np.isscalar(eps):
raise NotImplementedError('`eps` must be a scalar')
if diff is None:
diff = (0,)*ndim
else:
# make sure diff is immutable
diff = tuple(diff)
assert_shape(diff, (ndim,), 'diff')
# add numerical function to cache if not already
if diff not in self._cache:
self._add_diff_to_cache(diff)
# convert self.supp from a sympy expression to a float
supp = float(self.supp.subs(_EPS, eps))
# find the nonzero entries based on distances between `x` and `c`
xtree = cKDTree(x)
ctree = cKDTree(c)
# `idx` contains the indices of `x` that are within `supp` of each
# point in `c`
idx = ctree.query_ball_tree(xtree, supp)
# total nonzero entries in the output array
nnz = sum(len(i) for i in idx)
# allocate sparse matrix data
data = np.zeros(nnz, dtype=float)
rows = np.zeros(nnz, dtype=int)
cols = np.zeros(nnz, dtype=int)
# `n` is the total number of data entries thus far
n = 0
for i, idxi in enumerate(idx):
# `m` is the number of nodes in `x` close to `c[i]`
m = len(idxi)
args = tuple(x[idxi].T) + tuple(c[i]) + (eps,)
data[n:n + m] = self._cache[diff](*args)
rows[n:n + m] = idxi
cols[n:n + m] = i
n += m
# convert to a csc_matrix
out = csc_matrix((data, (rows, cols)), (len(x), len(c)))
return out
def __repr__(self):
out = (
'<SparseRBF : %s (support = %s)>' %
(str(self.expr), str(self.supp)))
return out
def clear_rbf_caches():
'''
Clear the caches of numerical functions for all the RBF instances
'''
for inst in RBF._INSTANCES:
if inst() is not None:
inst().clear_cache()
def get_rbf(val):
'''
Returns the `RBF` corresponding to `val`. If `val` is a string, then this
returns the correspondingly named predefined `RBF`. If `val` is an RBF
instance then this returns `val`.
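Examples
--------
>>> rbf = get_rbf('phs3')  # look up a predefined RBF by its abbreviation
>>> get_rbf(rbf) is rbf    # an RBF instance passes through unchanged
True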
'''
if issubclass(type(val), RBF):
return val
elif val in _PREDEFINED:
return _PREDEFINED[val]
else:
raise ValueError(
"Cannot interpret '%s' as an RBF. Use one of %s"
% (val, set(_PREDEFINED.keys())))
def set_symbolic_to_numeric_method(method):
'''
Sets the method that all RBF instances will use for converting sympy
expressions to numeric functions. This can be either "ufuncify" or
"lambdify". "ufuncify" will write and compile C code for a numpy universal
function, and "lambdify" will evaluate the sympy expression using
python-level numpy functions. Calling this function will cause all caches
of numeric functions to be cleared.
'''
global _SYMBOLIC_TO_NUMERIC_METHOD
if method not in {'lambdify', 'ufuncify'}:
raise ValueError('`method` must be either "lambdify" or "ufuncify"')
_SYMBOLIC_TO_NUMERIC_METHOD = method
clear_rbf_caches()
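# For example, `set_symbolic_to_numeric_method('lambdify')` avoids the C
# compilation step that 'ufuncify' requires, at the cost of slower numeric
# evaluation.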
## Instantiate some common RBFs
#####################################################################
_phs8_limits = {}
_phs8_limits.update((tuple(i), 0.0) for i in monomial_powers(7, 1))
_phs8_limits.update((tuple(i), 0.0) for i in monomial_powers(7, 2))
_phs8_limits.update((tuple(i), 0.0) for i in monomial_powers(7, 3))
phs8 = RBF(-(_EPS*_R)**8*sympy.log(_EPS*_R), tol=1e-10, limits=_phs8_limits)
_phs7_limits = {}
_phs7_limits.update((tuple(i), 0.0) for i in monomial_powers(6, 1))
_phs7_limits.update((tuple(i), 0.0) for i in monomial_powers(6, 2))
_phs7_limits.update((tuple(i), 0.0) for i in monomial_powers(6, 3))
phs7 = RBF((_EPS*_R)**7, tol=1e-10, limits=_phs7_limits)
_phs6_limits = {}
_phs6_limits.update((tuple(i), 0.0) for i in monomial_powers(5, 1))
_phs6_limits.update((tuple(i), 0.0) for i in monomial_powers(5, 2))
_phs6_limits.update((tuple(i), 0.0) for i in monomial_powers(5, 3))
phs6 = RBF((_EPS*_R)**6*sympy.log(_EPS*_R), tol=1e-10, limits=_phs6_limits)
_phs5_limits = {}
_phs5_limits.update((tuple(i), 0.0) for i in monomial_powers(4, 1))
_phs5_limits.update((tuple(i), 0.0) for i in monomial_powers(4, 2))
_phs5_limits.update((tuple(i), 0.0) for i in monomial_powers(4, 3))
phs5 = RBF(-(_EPS*_R)**5, tol=1e-10, limits=_phs5_limits)
_phs4_limits = {}
_phs4_limits.update((tuple(i), 0.0) for i in monomial_powers(3, 1))
_phs4_limits.update((tuple(i), 0.0) for i in monomial_powers(3, 2))
_phs4_limits.update((tuple(i), 0.0) for i in monomial_powers(3, 3))
phs4 = RBF(-(_EPS*_R)**4*sympy.log(_EPS*_R), tol=1e-10, limits=_phs4_limits)
_phs3_limits = {}
_phs3_limits.update((tuple(i), 0.0) for i in monomial_powers(2, 1))
_phs3_limits.update((tuple(i), 0.0) for i in monomial_powers(2, 2))
_phs3_limits.update((tuple(i), 0.0) for i in monomial_powers(2, 3))
phs3 = RBF((_EPS*_R)**3, tol=1e-10, limits=_phs3_limits)
_phs2_limits = {}
_phs2_limits.update((tuple(i), 0.0) for i in monomial_powers(1, 1))
_phs2_limits.update((tuple(i), 0.0) for i in monomial_powers(1, 2))
_phs2_limits.update((tuple(i), 0.0) for i in monomial_powers(1, 3))
phs2 = RBF((_EPS*_R)**2*sympy.log(_EPS*_R), tol=1e-10, limits=_phs2_limits)
_phs1_limits = {}
_phs1_limits.update((tuple(i), 0.0) for i in monomial_powers(0, 1))
_phs1_limits.update((tuple(i), 0.0) for i in monomial_powers(0, 2))
_phs1_limits.update((tuple(i), 0.0) for i in monomial_powers(0, 3))
phs1 = RBF(-_EPS*_R, tol=1e-10, limits=_phs1_limits)
# inverse multiquadric
imq = RBF(1/sympy.sqrt(1 + (_EPS*_R)**2))
# inverse quadratic
iq = RBF(1/(1 + (_EPS*_R)**2))
# Gaussian
ga = RBF(sympy.exp(-(_EPS*_R)**2))
# multiquadric
mq = RBF(-sympy.sqrt(1 + (_EPS*_R)**2))
# exponential
exp = RBF(sympy.exp(-_R/_EPS))
# squared exponential
se = RBF(sympy.exp(-_R**2/(2*_EPS**2)))
# Matern
_mat32_limits = {
(0,): 1.0,
(0, 0): 1.0,
(0, 0, 0): 1.0,
(1,): 0.0,
(1, 0): 0.0,
(0, 1): 0.0,
(1, 0, 0): 0.0,
(0, 1, 0): 0.0,
(0, 0, 1): 0.0,
(2,): -3.0/_EPS**2,
(2, 0): -3.0/_EPS**2,
(0, 2): -3.0/_EPS**2,
(2, 0, 0): -3.0/_EPS**2,
(0, 2, 0): -3.0/_EPS**2,
(0, 0, 2): -3.0/_EPS**2,
(1, 1): 0.0,
(1, 1, 0): 0.0,
(1, 0, 1): 0.0,
(0, 1, 1): 0.0}
_mat52_limits = {
(0,): 1.0,
(0, 0): 1.0,
(0, 0, 0): 1.0,
(1,): 0.0,
(1, 0): 0.0,
(0, 1): 0.0,
(1, 0, 0): 0.0,
(0, 1, 0): 0.0,
(0, 0, 1): 0.0,
(2,): -5.0/(3.0*_EPS**2),
(2, 0): -5.0/(3.0*_EPS**2),
(0, 2): -5.0/(3.0*_EPS**2),
(2, 0, 0): -5.0/(3.0*_EPS**2),
(0, 2, 0): -5.0/(3.0*_EPS**2),
(0, 0, 2): -5.0/(3.0*_EPS**2),
(1, 1): 0.0,
(1, 1, 0): 0.0,
(1, 0, 1): 0.0,
(0, 1, 1): 0.0}
mat32 = RBF(
(1 + sympy.sqrt(3)*_R/_EPS) * sympy.exp(-sympy.sqrt(3)*_R/_EPS),
tol=1e-8*_EPS,
limits=_mat32_limits)
mat52 = RBF(
(1 + sympy.sqrt(5)*_R/_EPS + 5*_R**2/(3*_EPS**2)) * sympy.exp(-sympy.sqrt(5)*_R/_EPS),
tol=1e-4*_EPS,
limits=_mat52_limits)
# Wendland
_wen10_limits = {(0,): 1.0}
_wen11_limits = {(0,): 1.0, (1,): 0.0, (2,): -12.0/_EPS**2}
_wen12_limits = {(0,): 1.0, (1,): 0.0, (2,): -14.0/_EPS**2}
_wen30_limits = {(0,): 1.0, (0, 0): 1.0, (0, 0, 0): 1.0}
_wen31_limits = {
(0,): 1.0,
(0, 0): 1.0,
(0, 0, 0): 1.0,
(1,): 0.0,
(1, 0): 0.0,
(0, 1): 0.0,
(1, 0, 0): 0.0,
(0, 1, 0): 0.0,
(0, 0, 1): 0.0,
(2,): -20.0/_EPS**2,
(2, 0): -20.0/_EPS**2,
(0, 2): -20.0/_EPS**2,
(2, 0, 0): -20.0/_EPS**2,
(0, 2, 0): -20.0/_EPS**2,
(0, 0, 2): -20.0/_EPS**2,
(1, 1): 0.0,
(1, 1, 0): 0.0,
(1, 0, 1): 0.0,
(0, 1, 1): 0.0}
_wen32_limits = {
(0,): 1.0,
(0, 0): 1.0,
(0, 0, 0): 1.0,
(1,): 0.0,
(1, 0): 0.0,
(0, 1): 0.0,
(1, 0, 0): 0.0,
(0, 1, 0): 0.0,
(0, 0, 1): 0.0,
(2,): -56.0/(3.0*_EPS**2),
(2, 0): -56.0/(3.0*_EPS**2),
(0, 2): -56.0/(3.0*_EPS**2),
(2, 0, 0): -56.0/(3.0*_EPS**2),
(0, 2, 0): -56.0/(3.0*_EPS**2),
(0, 0, 2): -56.0/(3.0*_EPS**2),
(1, 1): 0.0,
(1, 1, 0): 0.0,
(1, 0, 1): 0.0,
(0, 1, 1): 0.0}
wen10 = RBF(
sympy.Piecewise(((1 - _R/_EPS), _R < _EPS), (0.0, True)),
tol=1e-8*_EPS,
limits=_wen10_limits)
wen11 = RBF(
sympy.Piecewise(((1 - _R/_EPS)**3*(3*_R/_EPS + 1), _R < _EPS), (0.0, True)),
tol=1e-8*_EPS,
limits=_wen11_limits)
wen12 = RBF(
sympy.Piecewise(((1 - _R/_EPS)**5*(8*_R**2/_EPS**2 + 5*_R/_EPS + 1), _R < _EPS), (0.0, True)),
tol=1e-8*_EPS,
limits=_wen12_limits)
wen30 = RBF(
sympy.Piecewise(((1 - _R/_EPS)**2, _R < _EPS), (0.0, True)),
tol=1e-8*_EPS,
limits=_wen30_limits)
wen31 = RBF(
sympy.Piecewise(((1 - _R/_EPS)**4*(4*_R/_EPS + 1), _R < _EPS), (0.0, True)),
tol=1e-8*_EPS,
limits=_wen31_limits)
wen32 = RBF(
sympy.Piecewise(((1 - _R/_EPS)**6*(35*_R**2/_EPS**2 + 18*_R/_EPS + 3)/3, _R < _EPS), (0.0, True)),
tol=1e-8*_EPS,
limits=_wen32_limits)
# sparse Wendland
spwen10 = SparseRBF(
(1 - _R/_EPS), _EPS,
tol=1e-8*_EPS,
limits=_wen10_limits)
spwen11 = SparseRBF(
(1 - _R/_EPS)**3*(3*_R/_EPS + 1), _EPS,
tol=1e-8*_EPS,
limits=_wen11_limits)
spwen12 = SparseRBF(
(1 - _R/_EPS)**5*(8*_R**2/_EPS**2 + 5*_R/_EPS + 1), _EPS,
tol=1e-8*_EPS,
limits=_wen12_limits)
spwen30 = SparseRBF(
(1 - _R/_EPS)**2, _EPS,
tol=1e-8*_EPS,
limits=_wen30_limits)
spwen31 = SparseRBF(
(1 - _R/_EPS)**4*(4*_R/_EPS + 1), _EPS,
tol=1e-8*_EPS,
limits=_wen31_limits)
spwen32 = SparseRBF(
(1 - _R/_EPS)**6*(35*_R**2/_EPS**2 + 18*_R/_EPS + 3)/3, _EPS,
tol=1e-8*_EPS,
limits=_wen32_limits)
_PREDEFINED = {
'phs8':phs8, 'phs7':phs7, 'phs6':phs6, 'phs5':phs5, 'phs4':phs4,
'phs3':phs3, 'phs2':phs2, 'phs1':phs1, 'mq':mq, 'imq':imq, 'iq':iq,
'ga':ga, 'exp':exp, 'se':se, 'mat32':mat32, 'mat52':mat52, 'wen10':wen10,
'wen11':wen11, 'wen12':wen12, 'wen30':wen30, 'wen31':wen31, 'wen32':wen32,
'spwen10':spwen10, 'spwen11':spwen11, 'spwen12':spwen12, 'spwen30':spwen30,
'spwen31':spwen31, 'spwen32':spwen32}
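# --- Illustrative sketch (not part of the library) ---------------------------
# The `limits` dictionaries above supply derivative values at r = 0, where the
# symbolic expressions are indeterminate (0/0). As a sanity check, the lines
# below (using local symbols x and eps) recover the (2,) entry of
# _mat32_limits by taking the limit explicitly:
#
#     x, eps = sympy.symbols('x eps', positive=True)
#     mat32_expr = (1 + sympy.sqrt(3)*x/eps)*sympy.exp(-sympy.sqrt(3)*x/eps)
#     d2 = mat32_expr.diff(x, 2)       # has no defined value at x = 0 ...
#     sympy.limit(d2, x, 0)            # ... but the limit is -3/eps**2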
|
treverhines/RBF
|
rbf/basis.py
|
Python
|
mit
| 29,352
|
[
"Gaussian"
] |
f0c92c47170c62334c3cf6fa7217bbc2a018b05d87792a7c35775b976f02702f
|
########################################################################
# File : CPUNormalization.py
# Author : Ricardo Graciani
########################################################################
""" DIRAC Workload Management System Client module that encapsulates all the
methods necessary to handle CPU normalization
"""
import os
from urllib.request import urlopen
from db12 import single_dirac_benchmark
import DIRAC
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getCESiteMapping
from DIRAC.Resources.Computing.BatchSystems.TimeLeft.TimeLeft import TimeLeft
# TODO: This should come from some place in the configuration
NORMALIZATIONCONSTANT = 60.0 / 250.0 # from minutes to seconds and from SI00 to HS06 (ie min * SI00 -> sec * HS06 )
UNITS = {"HS06": 1.0, "SI00": 1.0 / 250.0}
# TODO: This is still fetching directly from MJF rather than going through
# the MJF module and the values it saves in the local DIRAC configuration
def __getFeatures(envVariable, items):
"""Extract features"""
features = {}
featuresDir = os.environ.get(envVariable)
if featuresDir is None:
return features
for item in items:
fname = os.path.join(featuresDir, item)
try:
# Only keep features that do exist
features[item] = urlopen(fname).read()
except IOError:
pass
return features
def getMachineFeatures():
"""This uses the _old_ MJF information"""
return __getFeatures("MACHINEFEATURES", ("hs06", "jobslots", "log_cores", "phys_cores"))
# TODO: log_cores and phys_cores are deprecated, come from the old MJF specification, and are not collected
# by the MJF module!
def getJobFeatures():
"""This uses the _new_ MJF information"""
return __getFeatures("JOBFEATURES", ("hs06_job", "allocated_cpu"))
def getPowerFromMJF():
"""Extracts the machine power from either JOBFEATURES or MACHINEFEATURES"""
try:
features = getJobFeatures()
hs06Job = features.get("hs06_job")
        # If the information is there and non-zero, return it; otherwise fall back to machine features
if hs06Job:
return round(float(hs06Job), 2)
features = getMachineFeatures()
totalPower = float(features.get("hs06", 0))
logCores = float(features.get("log_cores", 0))
physCores = float(features.get("phys_cores", 0))
jobSlots = float(features.get("jobslots", 0))
denom = min(max(logCores, physCores), jobSlots) if (logCores or physCores) and jobSlots else None
if totalPower and denom:
return round(totalPower / denom, 2)
return None
except ValueError as e:
gLogger.exception("Exception getting MJF information", lException=e)
return None
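# Worked example (hypothetical MJF values, for illustration only): with
# hs06 = 160.0, log_cores = 16, phys_cores = 8 and jobslots = 8,
# getPowerFromMJF() falls back to the machine features and returns
#     round(160.0 / min(max(16, 8), 8), 2) = round(160.0 / 8, 2) = 20.0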
def queueNormalizedCPU(ceUniqueID):
"""Report Normalized CPU length of queue"""
result = getQueueInfo(ceUniqueID)
if not result["OK"]:
return result
ceInfoDict = result["Value"]
    siteCSSection = ceInfoDict["SiteCSSection"]
queueCSSection = ceInfoDict["QueueCSSection"]
    benchmarkSI00 = __getQueueNormalization(queueCSSection, siteCSSection)
maxCPUTime = __getMaxCPUTime(queueCSSection)
if maxCPUTime and benchmarkSI00:
normCPUTime = NORMALIZATIONCONSTANT * maxCPUTime * benchmarkSI00
else:
if not benchmarkSI00:
subClusterUniqueID = ceInfoDict["SubClusterUniqueID"]
return S_ERROR("benchmarkSI00 info not available for %s" % subClusterUniqueID)
if not maxCPUTime:
return S_ERROR("maxCPUTime info not available")
return S_OK(normCPUTime)
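# Worked example (hypothetical CS values): for maxCPUTime = 2880 (minutes) and
# benchmarkSI00 = 2500, queueNormalizedCPU() computes
#     normCPUTime = NORMALIZATIONCONSTANT * 2880 * 2500
#                 = (60.0 / 250.0) * 2880 * 2500 = 1728000.0
# i.e. the queue length expressed in HS06 seconds.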
def getQueueNormalization(ceUniqueID):
"""Report Normalization Factor applied by Site to the given Queue"""
result = getQueueInfo(ceUniqueID)
if not result["OK"]:
return result
ceInfoDict = result["Value"]
    siteCSSection = ceInfoDict["SiteCSSection"]
queueCSSection = ceInfoDict["QueueCSSection"]
subClusterUniqueID = ceInfoDict["SubClusterUniqueID"]
    benchmarkSI00 = __getQueueNormalization(queueCSSection, siteCSSection)
if benchmarkSI00:
return S_OK(benchmarkSI00)
return S_ERROR("benchmarkSI00 info not available for %s" % subClusterUniqueID)
# errorList.append( ( subClusterUniqueID , 'benchmarkSI00 info not available' ) )
# exitCode = 3
def __getQueueNormalization(queueCSSection, siteCSSection):
"""Query the CS and return the Normalization"""
benchmarkSI00Option = "%s/%s" % (queueCSSection, "SI00")
benchmarkSI00 = gConfig.getValue(benchmarkSI00Option, 0.0)
if not benchmarkSI00:
benchmarkSI00Option = "%s/%s" % (siteCSSEction, "SI00")
benchmarkSI00 = gConfig.getValue(benchmarkSI00Option, 0.0)
return benchmarkSI00
def __getMaxCPUTime(queueCSSection):
"""Query the CS and return the maxCPUTime"""
maxCPUTimeOption = "%s/%s" % (queueCSSection, "maxCPUTime")
maxCPUTime = gConfig.getValue(maxCPUTimeOption, 0.0)
# For some sites there are crazy values in the CS
maxCPUTime = max(maxCPUTime, 0)
maxCPUTime = min(maxCPUTime, 86400 * 12.5)
return maxCPUTime
def getCPUNormalization(reference="HS06", iterations=1):
"""Get Normalized Power of the current CPU in [reference] units"""
if reference not in UNITS:
return S_ERROR("Unknown Normalization unit %s" % str(reference))
    try:
        # Clamp iterations to the supported range [1, 10]
        iterations = max(min(int(iterations), 10), 1)
    except (TypeError, ValueError) as x:
        return S_ERROR(x)
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
corr = Operations().getValue("JobScheduling/CPUNormalizationCorrection", 1.0)
result = single_dirac_benchmark(iterations)
if result is None:
return S_ERROR("Cannot get benchmark measurements")
return S_OK({"CPU": result["CPU"], "WALL": result["WALL"], "NORM": result["NORM"] / corr, "UNIT": reference})
def getCPUTime(cpuNormalizationFactor):
"""Trying to get CPUTime left for execution (in seconds).
It will first look to get the work left looking for batch system information useing the TimeLeft utility.
If it succeeds, it will convert it in real second, and return it.
If it fails, it tries to get it from the static info found in CS.
If it fails, it returns the default, which is a large 9999999, that we may consider as "Infinite".
This is a generic method, independent from the middleware of the resource if TimeLeft doesn't return a value
args:
cpuNormalizationFactor (float): the CPU power of the current Worker Node.
If not passed in, it's get from the local configuration
returns:
cpuTimeLeft (int): the CPU time left, in seconds
"""
cpuTimeLeft = 0.0
cpuWorkLeft = gConfig.getValue("/LocalSite/CPUTimeLeft", 0)
if not cpuWorkLeft:
# Try and get the information from the CPU left utility
result = TimeLeft().getTimeLeft()
if result["OK"]:
cpuWorkLeft = result["Value"]
if cpuWorkLeft > 0:
        # This is in HS06 seconds
        # We need to convert it to real seconds
if not cpuNormalizationFactor: # if cpuNormalizationFactor passed in is 0, try get it from the local cfg
cpuNormalizationFactor = gConfig.getValue("/LocalSite/CPUNormalizationFactor", 0.0)
if cpuNormalizationFactor:
cpuTimeLeft = cpuWorkLeft / cpuNormalizationFactor
if not cpuTimeLeft:
# now we know that we have to find the CPUTimeLeft by looking in the CS
        # this is not guaranteed to be correct as the CS units may not be real seconds
gridCE = gConfig.getValue("/LocalSite/GridCE")
ceQueue = gConfig.getValue("/LocalSite/CEQueue")
if not ceQueue:
# we have to look for a ceQueue in the CS
            # A bit hacky; something more generic would be better
gLogger.warn("No CEQueue in local configuration, looking to find one in CS")
siteName = DIRAC.siteName()
queueSection = "/Resources/Sites/%s/%s/CEs/%s/Queues" % (siteName.split(".")[0], siteName, gridCE)
res = gConfig.getSections(queueSection)
if not res["OK"]:
raise RuntimeError(res["Message"])
queues = res["Value"]
cpuTimes = [gConfig.getValue(queueSection + "/" + queue + "/maxCPUTime", 9999999.0) for queue in queues]
# These are (real, wall clock) minutes - damn BDII!
cpuTimeLeft = min(cpuTimes) * 60
else:
queueInfo = getQueueInfo("%s/%s" % (gridCE, ceQueue))
cpuTimeLeft = 9999999.0
if not queueInfo["OK"] or not queueInfo["Value"]:
gLogger.warn("Can't find a CE/queue, defaulting CPUTime to %d" % cpuTimeLeft)
else:
queueCSSection = queueInfo["Value"]["QueueCSSection"]
# These are (real, wall clock) minutes - damn BDII!
cpuTimeInMinutes = gConfig.getValue("%s/maxCPUTime" % queueCSSection, 0.0)
if cpuTimeInMinutes:
cpuTimeLeft = cpuTimeInMinutes * 60.0
gLogger.info("CPUTime for %s: %f" % (queueCSSection, cpuTimeLeft))
else:
gLogger.warn(
"Can't find maxCPUTime for %s, defaulting CPUTime to %f" % (queueCSSection, cpuTimeLeft)
)
return int(cpuTimeLeft)
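# Worked example (hypothetical numbers): if the batch system reports
# cpuWorkLeft = 36000 HS06.seconds and cpuNormalizationFactor = 10.0,
# getCPUTime() returns int(36000 / 10.0) = 3600 real seconds.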
def getQueueInfo(ceUniqueID, diracSiteName=""):
"""
Extract information from full CE Name including associate DIRAC Site
"""
try:
subClusterUniqueID = ceUniqueID.split("/")[0].split(":")[0]
queueID = ceUniqueID.split("/")[1]
except IndexError:
return S_ERROR("Wrong full queue Name")
if not diracSiteName:
gLogger.debug("SiteName not given, looking in /LocaSite/Site")
diracSiteName = gConfig.getValue("/LocalSite/Site", "")
if not diracSiteName:
gLogger.debug("Can't find LocalSite name, looking in CS")
result = getCESiteMapping(subClusterUniqueID)
if not result["OK"]:
return result
diracSiteName = result["Value"][subClusterUniqueID]
if not diracSiteName:
gLogger.error("Can not find corresponding Site in CS")
return S_ERROR("Can not find corresponding Site in CS")
gridType = diracSiteName.split(".")[0]
siteCSSEction = "/Resources/Sites/%s/%s/CEs/%s" % (gridType, diracSiteName, subClusterUniqueID)
queueCSSection = "%s/Queues/%s" % (siteCSSEction, queueID)
resultDict = {
"SubClusterUniqueID": subClusterUniqueID,
"QueueID": queueID,
"SiteName": diracSiteName,
"Grid": gridType,
"SiteCSSEction": siteCSSEction,
"QueueCSSection": queueCSSection,
}
return S_OK(resultDict)
|
DIRACGrid/DIRAC
|
src/DIRAC/WorkloadManagementSystem/Client/CPUNormalization.py
|
Python
|
gpl-3.0
| 10,878
|
[
"DIRAC"
] |
b3e42d1251d9027c71b4c80da7682709af65bfb165f5aaa48d8193c4e388986d
|
"""
Copyright (C) <2010> Autin L.
This file ePMV_git/pmv_dev/APBSCommands_2x.py is part of ePMV.
ePMV is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ePMV is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ePMV. If not, see <http://www.gnu.org/licenses/gpl-3.0.html>.
"""
## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
# An Adaptive Poisson-Boltzmann Solver Graphic User Interface for the Python
# Molecule Viewer: (APBS GUI for PMV)
# Authors: Hovig Bayandorian, Jessica Swanson, Sophie Coon, Michel Sanner,
# Sargis Dallakyan (sargis@scripps.edu)
#$Header: /opt/cvs/python/packages/share1.5/Pmv/APBSCommands.py,v 1.176 2010/09/03 17:17:03 sargis Exp $
#
#$Id: APBSCommands.py,v 1.176 2010/09/03 17:17:03 sargis Exp $
""" GUI for Adaptive Poisson-Boltzmann Solver
Minimal documentation follows.
Consult APBS or PMV documentation for more detail.
More documentation is planned for later releases.
How to set up an APBS run:
1. (Calculation tab) There are three types of calculations available:
electrostatic potential, binding energy, and solvation energy.
Select whichever is of interest.
2. (Calculation tab) Under Molecules, select PQR files corresponding to
the molecules of interest. Note that binding energy requires three PQRs:
one for the complex, and one for each compound.
3. (Grid tab) Autocenter and Autosize generate grid parameters based on
the current selection in PMV. You may also manually set these parameters.
It is wise to check that your machine has the system resources required to
perform the run.
4. (Physics tab) Enter the ions of interest and change the listed parameters
as desired.
5. (Calculation tab) All files will be stored in the specified project folder.
Unique project folder names are automatically generated.
6. (Calculation tab) If you wish to modify the run you created in the GUI
later, save the profile.
7. (Calculation tab) To run APBS separately from the GUI, use the write APBS
parameter file button, which writes to the project folder. Then call apbs
(in say, a shell) on that file.
8. (Calculation tab) Run APBS!
"""
import string, os, pickle, sys, threading, select, time, shutil, types
import numpy.oldnumeric as Numeric
#import Tkinter, Pm
Tkinter = None
Pmw = None
InputFormDescr, InputForm = None, None
CallBackFunction = None
ThumbWheel = None
Button = None
SaveButton = None
SliderWidget = None
ExtendedSliderWidget = None
ProgressBar = None
ensureFontCase = None
#CallBackFunction
#from mglutil.gui.InputForm.Tk.gui import InputFormDescr, InputForm, \
#CallBackFunction
#from mglutil.gui.BasicWidgets.Tk.thumbwheel import ThumbWheel
#from mglutil.gui.BasicWidgets.Tk.customizedWidgets import LoadButton, \
#SaveButton, SliderWidget, ExtendedSliderWidget
#from mglutil.gui.BasicWidgets.Tk.progressBar import ProgressBar
#from mglutil.util.callback import CallBackFunction
#from mglutil.util.misc import ensureFontCase
from Pmv.mvCommand import MVCommand
from ViewerFramework.VFCommand import CommandGUI
from MolKit.pdbParser import PQRParser
from MolKit.molecule import Atom, MoleculeSet
import MolKit
import tkMessageBox
global APBS_ssl
APBS_ssl = False # Flags whether to run Secured APBS Web Services
from mglutil.util.packageFilePath import getResourceFolderWithVersion
ResourceFolder = getResourceFolderWithVersion()
#Proxy retrieved from the GAMA service
if ResourceFolder is not None:
APBS_proxy = ResourceFolder + os.sep + 'ws' + os.sep + 'proxy_gama'
else:
APBS_proxy = None
def closestMatch(value, _set):
    """Returns an element of the set that is closest to the supplied value"""
    element = _set[0]
    for candidate in _set:
        if (candidate - value)*(candidate - value) < (element - value)*(element - value):
            element = candidate
    return element
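# Example (illustrative): closestMatch(70, (33, 65, 97, 129)) returns 65,
# since (65 - 70)**2 = 25 is the smallest squared distance in the tuple.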
PROFILES = ('Default',)
from MolKit.APBSParameters import *
try:
import sys, webbrowser, urllib, httplib
APBSservicesFound = True
from mglutil.web.services.AppService_client import AppServiceLocator, launchJobRequest, \
getOutputsRequest, queryStatusRequest
from mglutil.web.services.AppService_types import ns0
class APBSCmdToWebService:
"""
        This object takes an APBSParams instance from Pmv.APBSCommands.py
"""
def __init__(self, params, mol_1, mol_2 = None, _complex = None):
"""
Constructor for class APBSCmdToWebService
            params is an APBSParams instance
            mol_1, mol_2 and _complex are molecule instances
            Parallel_flag is used to indicate parallel mode
npx npy npz are number of processors in x-, y- and z-directions
ofrac is the amount of overlap between processor meshes
"""
# set the parameters for the request
self.req = launchJobRequest()
inputFiles = []
input_molecule1 = ns0.InputFileType_Def('inputFile')
input_molecule1._name = os.path.split(params.molecule1Path)[-1]
input_molecule1._contents = open(params.molecule1Path).read()
inputFiles.append(input_molecule1)
if mol_2:
input_molecule2 = ns0.InputFileType_Def()
input_molecule2._name = os.path.split(params.molecule2Path)[-1]
input_molecule2._contents = open(params.molecule2Path).read()
inputFiles.append(input_molecule2)
if not _complex:
import warnings
warnings.warn("Complex is missing!")
return
input_complex = ns0.InputFileType_Def()
input_complex._name = os.path.split(params.complexPath)[-1]
input_complex._contents = open(params.complexPath).read()
inputFiles.append(input_complex)
apbs_input = "apbs-input-file.apbs"
apbs_input_path = params.projectFolder + os.path.sep + apbs_input
params.molecule1Path = os.path.basename(params.molecule1Path)
if mol_2:
params.molecule2Path = os.path.basename(params.molecule2Path)
params.complexPath = os.path.basename(params.complexPath)
params.SaveAPBSInput(apbs_input_path)
input_apbs = ns0.InputFileType_Def('inputFile')
input_apbs._name = apbs_input
input_apbs._contents = open(apbs_input_path).read()
inputFiles.append(input_apbs)
self.req._argList = apbs_input
self.req._inputFile = inputFiles
def run(self, portAddress):
"""Runs APBS through Web Services"""
# retrieve a reference to the remote port
import httplib
self.appLocator = AppServiceLocator()
global APBS_ssl
if APBS_ssl:
self.appServicePort = self.appLocator.getAppServicePortType(
portAddress,
ssl = 1, cert_file = APBS_proxy,
key_file = APBS_proxy, transport=httplib.HTTPSConnection)
else:
self.appServicePort = self.appLocator.getAppServicePort(
portAddress)
# make remote invocation
resp = self.appServicePort.launchJob(self.req)
self.JobID = resp._jobID
return resp
except ImportError:
APBSservicesFound = False
state_GUI = 'disabled'
blinker = 0
import upy
uiadaptor = upy.getUIClass()
##try :
## uiadaptor = upy.getUIClass()
## #from upy import uiadaptor
## #if uiadaptor is None :
## # uiadaptor = upy.getUIClass()
##except :
## uiadaptor = None
class APBSgui(uiadaptor):
def setup(self,sub=True,epmv=None,id=2000):
self.subdialog = sub
self.title = "APBS"
self.epmv = epmv
self.mv = epmv.mv
self.w = 350
self.h = 300
if self.subdialog:
self.block = True
        width = 350
        if id is None:
            id = self.bid
#define the button here
self.LABELS={}
self.BTN={}
self.COMBO={}
self.CHKBOX={}
self.INSTR={}
self.INFLOAT={}
self.SEP={}
self.COL={}
if self.epmv is not None : #work on the current_mol
self.LABELS["header"] = self._addElemt(label="APBS plugin for ePMV",width=120)
self.LABELS["current"] = self._addElemt(label="applied on the current epmv mol",width=120)
self.LABELS["geom"] = self._addElemt(label="Geometry to Color",width=100)
self.LABELS["offset"] = self._addElemt(label="normal offset",width=100)
self.LABELS["std"] = self._addElemt(label="std scale",width=100)
self.LABELS["color"] = self._addElemt(label="Change default ramp color",width=120)
self.BTN["run"] = self._addElemt(name="run",width=50,height=10,
                            label = 'run APBS computation using current parameters',
action=self.runAPBS,type="button")
self.BTN["color"] = self._addElemt(name="color",width=150,height=10,
label = 'color',
action=self.colorbyAPBS,type="button")
self.BTN["ok"] = self._addElemt(name="Close",width=50,height=10,
action=self.cancel,type="button")
self.CHKBOX["readG"] = self._addElemt(name="readgrid",width=80,height=10,
action=None,type="checkbox",icon=None,
variable=self.addVariable("int",0))
self.CHKBOX["readPqr"] = self._addElemt(name="readpqr",width=80,height=10,
action=None,type="checkbox",icon=None,
variable=self.addVariable("int",0))
self.INSTR["geom"] = self._addElemt(name="geom Name",width=100,height=10,
action=None,type="inputStr",icon=None,
value="",
variable=self.addVariable("str",""))
self.INFLOAT["offset"] = self._addElemt(name="offset",width=20,height=10,
action=None,type="inputFloat",icon=None,
value=1.0,mini=0.,maxi=10.,step=0.1,
variable=self.addVariable("float",1.0))
self.INFLOAT["std"] = self._addElemt(name="stddevM",width=20,height=10,
action=None,type="inputFloat",icon=None,
value=1.0,mini=0.,maxi=10.,step=0.1,
variable=self.addVariable("float",1.0))
self.SEP["run"]=self._addElemt(name="run",type="line",value="H")
self.SEP["color"]=self._addElemt(name="color",type="line",value="H")
self.SEP["close"]=self._addElemt(name="close",type="line",value="H")
self.COL["neg"]=self._addElemt(name="neg",action=None,
value=(0.,0.,1.),
variable = self.addVariable("col",(0.,0.,1.)),
type="color",width=30,height=15)
self.COL["mid"]=self._addElemt(name="mid",action=None,
value=(1.,1.,1.),
variable = self.addVariable("col",(1.,1.,1.)),
type="color",width=30,height=15)
self.COL["pos"]=self._addElemt(name="pos",action=None,
value=(1.,0.,0.),
variable = self.addVariable("col",(1.,0.,0.)),
type="color",width=30,height=15)
else : #need a load molecule button
pass
#then define the layout
self.setupLayout()
# self.restorePreferences()
return True
def setupLayout(self):
self._layout = []
self._layout.append([self.LABELS["header"],])
self._layout.append([self.LABELS["current"],])
self._layout.append([self.SEP["run"],])
self._layout.append([self.CHKBOX["readG"],self.CHKBOX["readPqr"]])
self._layout.append([self.BTN["run"],])
        # separators
self._layout.append([self.SEP["color"],])
self._layout.append([self.LABELS["offset"],self.INFLOAT["offset"]])
self._layout.append([self.LABELS["std"],self.INFLOAT["std"]])
self._layout.append([self.LABELS["geom"],self.INSTR["geom"]])
self._layout.append([self.LABELS["color"],])
self._layout.append([self.COL["neg"],self.COL["mid"],self.COL["pos"]])
self._layout.append([self.BTN["color"],])
self._layout.append([self.SEP["close"],])
self._layout.append([self.BTN["ok"],])
def CreateLayout(self):
self._createLayout()
return True
def cancel(self,*args):
self.close()
def runAPBS(self,*args):
if self.epmv is not None :
mname,mol,sel,selection = self.epmv.gui.getDsInfo()
dicD= self.mv.molDispl[mname]
self.mv.APBSRun(molecule1=mname)
params = self.epmv.mv.APBSSetup.params
#if checbox load grid -> load the grid
# print "parmfile" params.projectFolder+os.sep+mol.name+".potential.dx"
if self.getBool(self.CHKBOX["readG"]):
self.epmv.gui.gridData(file=params.projectFolder+os.sep+mol.name+".potential.dx")
if self.getBool(self.CHKBOX["readPqr"]):
self.epmv.gui.loadPDB(params.projectFolder+os.sep+mol.name+".pqr")
def colorbyAPBS(self,*args):
offset = self.getReal(self.INFLOAT["offset"])
std = self.getReal(self.INFLOAT["std"])
grid = self.epmv.gui.current_traj[0]
col1=self.getColor(self.COL["neg"])
col2=self.getColor(self.COL["mid"])
col3=self.getColor(self.COL["pos"])
        if grid is None:
            # 'mol' was not defined in this method; fetch the current molecule
            # the same way runAPBS does before building the grid file path
            mname, mol, sel, selection = self.epmv.gui.getDsInfo()
            params = self.epmv.mv.APBSSetup.params
            self.epmv.gui.gridData(file=params.projectFolder+os.sep+mol.name+".potential.dx")
            grid = self.epmv.gui.current_traj[0]
geomName = self.getString(self.INSTR["geom"])
print "color "+geomName+" using grid ",grid,offset,std
geom = self.epmv.helper.getObject(geomName)
self.epmv.APBS2MSMS(grid,surf=geom,offset=offset,stddevM=std,rampcol=[col1,col2,col3])
def Command(self,*args):
# print args
self._command(args)
return True
class APBSSetup(MVCommand):
"""APBSSetup setups all necessary parameters for Adaptive Poisson-Boltzmann
Solver (APBS)\n
\nPackage : Pmv
\nModule : APBSCommands
\nClass : APBSSetup
\nCommand name : APBSSetup
\nSynopsis:\n
None <--- APBSSetup(**kw)\n
\nDescription:\n
Pmv-->APBS-->Setup creates Pmw.NoteBook with three tabbed pages:\n
Calculation, Grid and Physics.
Calculation page contains the following groups:
    Mathematics - is used to set up the Calculation type (kw['calculationType']),
    Poisson-Boltzmann equation type (kw['pbeType']), Boundary conditions
    (kw['boundaryConditions']), Charge discretization (kw['chargeDiscretization']),
    Surface-based coefficients (kw['surfaceCalculation']), and Spline window in
    Angstroms (kw['splineWindow'], present only when surfaceCalculation is set to
    'Spline-based')
    Molecules - allows selecting molecule(s)
    Output - sets output file formats
    Profiles - is used to add, remove, load, save and run different profiles
    APBS Web Services - is present only when the APBSService_services module is
    installed. It allows APBS to be run remotely
    Grid page contains the following groups:
    General - lets you select the number of grid points along the X, Y and Z directions
    Coarse Grid - allows changing the length and the center of the coarse grid.
    It also allows autocentering, autosizing and visualizing the coarse grid.
    Fine Grid - does the same for the fine grid
    System Resources - shows the total grid points and memory to be allocated for APBS
    Physics page contains the following groups:
    Parameters - allows changing the protein and solvent dielectric constants,
    solvent radius and system temperature
    Ions - allows adding and/or removing different ions
"""
def __init__(self, func=None):
"""Constructor for class APBSSetup"""
MVCommand.__init__(self)
self.params = APBSParams()
self.cmp_APBSParams = APBSParams()
self.flag_grid_changed = False
self.RememberLogin_var = False
self.salt_var = True
try:
self.RememberLogin_var = Tkinter.BooleanVar()
self.salt_var =Tkinter.BooleanVar()
self.salt_var.set(1)
except:
self.RememberLogin_var = False
self.salt_var = True
def doit(self,*args, **kw):
"""doit function"""
self.cmp_APBSParams.Set(**kw)
def __call__(self, **kw):
"""Call method"""
self.params.Set(**kw)
self.refreshAll()
def onAddObjectToViewer(self, object):
"""Called when object is added to viewer"""
if self.cmdForms.has_key('default'):
try:
ebn = self.cmdForms['moleculeSelect'].descr.entryByName
w = ebn['moleculeListSelect']['widget']
molNames = self.vf.Mols.name
w.setlist(molNames)
descr = self.cmdForms['default'].descr
descr.entryByName['APBSservicesLabel1']['widget'].\
configure(text = "")
descr.entryByName['APBSservicesLabel2']['widget'].\
configure(text = "")
descr.entryByName['APBSservicesLabel3']['widget'].\
configure(text = "")
descr.entryByName['APBSservicesLabel4']['widget'].\
configure(text = "")
except KeyError:
pass
if hasattr(object,'chains'):
object.APBSParams = {}
# object.APBSParams['Default'] = APBSParams()
def onRemoveObjectFromViewer(self, object):
"""Called when object is removed from viewer"""
if self.cmdForms.has_key('default'):
try:
ebn = self.cmdForms['moleculeSelect'].descr.entryByName
w = ebn['moleculeListSelect']['widget']
molNames = self.vf.Mols.name
w.setlist(molNames)
descr = self.cmdForms['default'].descr
if hasattr(object,'chains'):
molName = object.name
if molName in descr.entryByName['molecule1']['widget'].get():
descr.entryByName['molecule1']['widget'].setentry('')
if molName in descr.entryByName['molecule2']['widget'].get():
descr.entryByName['molecule2']['widget'].setentry('')
if molName in descr.entryByName['complex']['widget'].get():
descr.entryByName['complex']['widget'].setentry('')
except KeyError:
pass
def onAddCmdToViewer(self):
"""Called when APBSSetup are added to viewer"""
from DejaVu.bitPatterns import patternList
from opengltk.OpenGL import GL
from DejaVu import viewerConst
from DejaVu.Box import Box
face=((0,3,2,1),(3,7,6,2),(7,4,5,6),(0,1,5,4),(1,2,6,5),(0,4,7,3))
coords=((1,1,-1),(-1,1,-1),(-1,-1,-1),(1,-1,-1),(1,1,1),(-1,1,1),
(-1,-1,1),(1,-1,1))
materials=((0,0,1),(0,1,0),(0,0,1),(0,1,0),(1,0,0),(1,0,0))
box=Box('CoarseAPBSbox', materials=materials, vertices=coords,
faces=face, listed=0, inheritMaterial=0)
box.Set(frontPolyMode=GL.GL_FILL, tagModified=False)
box.polygonstipple.Set(pattern=patternList[0])
box.Set(matBind=viewerConst.PER_PART, visible=0,
inheritStipplePolygons=0, shading=GL.GL_FLAT, inheritShading=0,
stipplePolygons=1, frontPolyMode=GL.GL_FILL,
tagModified=False)
box.oldFPM = None
self.coarseBox = box
if self.vf.hasGui:
self.vf.GUI.VIEWER.AddObject(box, redo=0)
box=Box('FineAPBSbox', materials=materials, vertices=coords,
faces=face, listed=0, inheritMaterial=0)
box.polygonstipple.Set(pattern=patternList[3])
box.Set(matBind=viewerConst.PER_PART, visible=0,
inheritStipplePolygons=0, shading=GL.GL_FLAT,
inheritShading=0, stipplePolygons=1,
frontPolyMode=GL.GL_FILL, tagModified=False)
box.oldFPM = None
self.fineBox = box
if self.vf.hasGui:
self.vf.GUI.VIEWER.AddObject(box, redo=0)
def guiCallback(self):
"""GUI callback for APBSSetup"""
self.refreshAll()
mainform = self.showForm('default', modal=0, blocking=1,
initFunc=self.refreshAll)
if mainform:
# self.paramUpdateAll()
tmp_dict = {}
for key, value in self.params.__dict__.items():
if self.params.__dict__[key] != \
self.cmp_APBSParams.__dict__[key]:
if type(value) is types.TupleType:
value = value[0]
if key == 'ions':
for ion in value:
self.vf.message("self.APBSSetup.params.ions.\
append(Pmv.APBSCommands.Ion("+ion.toString()+"))")
self.vf.log("self.APBSSetup.params.ions.\
append(Pmv.APBSCommands.Ion("+ion.toString()+"))")
self.cmp_APBSParams.ions = self.params.ions
continue
tmp_dict[key] = value
if len(tmp_dict) != 0:
self.doitWrapper(**tmp_dict)
def dismiss(self, event = None):
"""Withdraws 'default' GUI"""
self.cmdForms['default'].withdraw()
def coarseResolutionX(self):
"""Returns coarse grid resolution in X direction"""
return self.params.coarseLengthX/float(self.params.gridPointsX-1)
def coarseResolutionY(self):
"""Returns coarse grid resolution in Y direction"""
return self.params.coarseLengthY/float(self.params.gridPointsY-1)
def coarseResolutionZ(self):
"""Returns coarse grid resolution in Z direction"""
return self.params.coarseLengthZ/float(self.params.gridPointsZ-1)
def fineResolutionX(self):
"""Returns fine grid resolution in X direction"""
return self.params.fineLengthX/float(self.params.gridPointsX-1)
def fineResolutionY(self):
"""Returns fine grid resolution in Y direction"""
return self.params.fineLengthY/float(self.params.gridPointsY-1)
def fineResolutionZ(self):
"""Returns fine grid resolution in Z direction"""
return self.params.fineLengthZ/float(self.params.gridPointsZ-1)
def memoryToBeAllocated(self):
"""Returns memory to be allocated for APBS run"""
return self.params.MEGABYTES_PER_GRID_POINT*self.totalGridPoints()
def totalGridPoints(self):
"""Returns total number of grid points"""
return self.params.gridPointsX*self.params.gridPointsY*\
self.params.gridPointsZ
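    # Worked example (hypothetical grid settings): with gridPointsX = 65 and
    # coarseLengthX = 60.0 Angstroms, coarseResolutionX() returns
    # 60.0 / (65 - 1) = 0.9375 Angstroms per grid step; with 65 points along
    # each axis, totalGridPoints() returns 65**3 = 274625, which
    # memoryToBeAllocated() scales by MEGABYTES_PER_GRID_POINT.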
def autocenterCoarseGrid(self):
"""Autocenters coarse grid"""
coords = self.getCoords()
center=(Numeric.maximum.reduce(coords)+Numeric.minimum.reduce(coords))*0.5
center = center.tolist()
self.params.coarseCenterX = round(center[0],4)
self.params.coarseCenterY = round(center[1],4)
self.params.coarseCenterZ = round(center[2],4)
self.refreshGridPage()
def autosizeCoarseGrid(self):
"""Autosizes coarse grid"""
coords = self.getCoords()
length = Numeric.maximum.reduce(coords) - Numeric.minimum.reduce(coords)
self.params.coarseLengthX = self.params.CFAC*(length.tolist())[0] + 10.
self.params.coarseLengthY = self.params.CFAC*(length.tolist())[1] + 10.
self.params.coarseLengthZ = self.params.CFAC*(length.tolist())[2] + 10.
self.refreshGridPage()
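    # Worked example (hypothetical, assuming CFAC = 1.7, a common APBS
    # coarse-grid factor): for a molecular extent of 40.0 Angstroms along X,
    # autosizeCoarseGrid() would set coarseLengthX = 1.7 * 40.0 + 10. = 78.0.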
def autocenterFineGrid(self):
"""Autocenters fine grid"""
coords = self.getCoords()
center=(Numeric.maximum.reduce(coords)+Numeric.minimum.reduce(coords))*0.5
center = center.tolist()
self.params.fineCenterX = round(center[0],4)
self.params.fineCenterY = round(center[1],4)
self.params.fineCenterZ = round(center[2],4)
self.refreshGridPage()
def autosizeFineGrid(self):
"""Autosizes fine grid"""
coords = self.getCoords()
length=Numeric.maximum.reduce(coords)-Numeric.minimum.reduce(coords)
self.params.fineLengthX = (length.tolist())[0] + 10.0
self.params.fineLengthY = (length.tolist())[1] + 10.0
self.params.fineLengthZ = (length.tolist())[2] + 10.0
self.refreshGridPage()
def getCoords(self):
"""Returns coordinates of atoms included in calculation"""
if not hasattr(self, 'mol1Name'): return [[0,0,0]]
mol = self.vf.getMolFromName(self.mol1Name)
coords = mol.findType(Atom).coords
if self.params.calculationType == 'Binding energy':
if hasattr(self, 'mol2Name'):
mol = self.vf.getMolFromName(self.mol2Name)
if mol:
coords += mol.findType(Atom).coords
if hasattr(self, 'complexName'):
mol = self.vf.getMolFromName(self.complexName)
if mol:
coords += mol.findType(Atom).coords
return coords
# Callbacks
def refreshCalculationPage(self):
"""Refreshes calculation page"""
if self.cmdForms.has_key('default'):
descr = self.cmdForms['default'].descr
if(self.params.calculationType=='Binding energy'):
apply(descr.entryByName['molecule2Select']['widget'].grid,
(), descr.entryByName['molecule2Select']['gridcfg'])
apply(descr.entryByName['molecule2']['widget'].grid, (),
descr.entryByName['molecule2']['gridcfg'])
apply(descr.entryByName['complexSelect']['widget'].grid, (),
descr.entryByName['complexSelect']['gridcfg'])
apply(descr.entryByName['complex']['widget'].grid, (),
descr.entryByName['complex']['gridcfg'])
#self.params.energyOutput = 'Total'
elif(self.params.calculationType=='Solvation energy'):
descr.entryByName['molecule2Select']['widget'].grid_forget()
descr.entryByName['molecule2']['widget'].grid_forget()
descr.entryByName['complexSelect']['widget'].grid_forget()
descr.entryByName['complex']['widget'].grid_forget()
#self.params.energyOutput = 'Total'
elif(self.params.calculationType=='Electrostatic potential'):
descr.entryByName['molecule2Select']['widget'].grid_forget()
descr.entryByName['molecule2']['widget'].grid_forget()
descr.entryByName['complexSelect']['widget'].grid_forget()
descr.entryByName['complex']['widget'].grid_forget()
descr.entryByName['calculationType']['widget'].\
selectitem(self.params.calculationType)
descr.entryByName['pbeType']['widget'].\
selectitem(self.params.pbeType)
descr.entryByName['boundaryConditions']['widget'].\
selectitem(self.params.boundaryConditions)
descr.entryByName['chargeDiscretization']['widget'].\
selectitem(self.params.chargeDiscretization)
descr.entryByName['surfaceCalculation']['widget'].\
selectitem(self.params.surfaceCalculation)
descr.entryByName['sdens']['widget'].setentry(self.params.sdens)
descr.entryByName['splineWindow']['widget'].\
setentry(self.params.splineWindow)
if self.params.surfaceCalculation == 'Cubic B-spline' or \
self.params.surfaceCalculation == '7th Order Polynomial':
apply(descr.entryByName['splineWindowLabel']['widget'].grid,
(), descr.entryByName['splineWindowLabel']['gridcfg'])
apply(descr.entryByName['splineWindow']['widget'].grid, (),
descr.entryByName['splineWindow']['gridcfg'])
descr.entryByName['sdensLabel']['widget'].grid_forget()
descr.entryByName['sdens']['widget'].grid_forget()
else:
apply(descr.entryByName['sdensLabel']['widget'].grid,
(), descr.entryByName['sdensLabel']['gridcfg'])
apply(descr.entryByName['sdens']['widget'].grid, (),
descr.entryByName['sdens']['gridcfg'])
descr.entryByName['splineWindowLabel']['widget'].grid_forget()
descr.entryByName['splineWindow']['widget'].grid_forget()
descr.entryByName['molecule1']['widget'].\
setentry(self.params.molecule1Path)
descr.entryByName['molecule2']['widget'].\
setentry(self.params.molecule2Path)
descr.entryByName['complex']['widget'].\
setentry(self.params.complexPath)
descr.entryByName['energyOutput']['widget'].\
selectitem(self.params.energyOutput)
descr.entryByName['forceOutput']['widget'].\
selectitem(self.params.forceOutput)
descr.entryByName['forceOutput']['widget'].\
selectitem(self.params.forceOutput)
descr.entryByName['Profiles']['widget'].\
selectitem(self.params.name)
def testCalculationWidgets(self):
"""Tests calculation widgets"""
if self.cmdForms.has_key('default'):
descr = self.cmdForms['default'].descr
if(descr.entryByName['splineWindow']['widget'].get() == ''):
self.errorMsg = 'You must enter a spline window value.'
errorform = self.showForm('error',modal=1,blocking=1,force = 1)
return 1
return 0
def calculationParamUpdate(self, selectItem=0):
"""Updates calculation parameters"""
if self.cmdForms.has_key('default'):
if selectItem == 'Binding energy':
self.params.calculationType = 'Binding energy'
self.refreshCalculationPage()
return
descr = self.cmdForms['default'].descr
# Prevent forcing a particular calculation type on the user
self.params.calculationType = descr.entryByName\
['calculationType']['widget'].get()
if self.testCalculationWidgets()==0:
self.params.calculationType = descr.entryByName\
['calculationType']['widget'].get()
self.params.pbeType = descr.entryByName['pbeType']['widget'].\
get()
self.params.boundaryConditions = descr.entryByName\
['boundaryConditions']['widget'].get()
self.params.chargeDiscretization = descr.entryByName\
['chargeDiscretization']['widget'].get()
self.params.surfaceCalculation = descr.entryByName\
['surfaceCalculation']['widget'].get()
self.params.sdens = float(descr.entryByName['sdens']['widget'].\
get())
self.params.splineWindow = float(descr.entryByName\
['splineWindow']['widget'].get())
self.params.molecule1Path = descr.entryByName['molecule1']\
['widget'].get()
self.params.molecule2Path = descr.entryByName['molecule2']\
['widget'].get()
self.params.complexPath = descr.entryByName['complex']\
['widget'].get()
self.params.energyOutput = descr.entryByName['energyOutput']\
['widget'].get()
self.params.forceOutput = descr.entryByName['forceOutput']\
['widget'].get()
self.params.name = descr.entryByName['Profiles']['widget'].\
get()
else:
return "ERROR"
self.refreshCalculationPage()
def refreshGridPage(self):
"""Refreshes grid page"""
if self.cmdForms.has_key('default'):
descr = self.cmdForms['default'].descr
descr.entryByName['gridPointsX']['widget'].set(closestMatch(self.
params.gridPointsX, self.params.GRID_VALUES), update = 0)
descr.entryByName['gridPointsY']['widget'].set(closestMatch(self.
params.gridPointsY, self.params.GRID_VALUES), update = 0)
descr.entryByName['gridPointsZ']['widget'].set(closestMatch(self.
params.gridPointsZ, self.params.GRID_VALUES), update = 0)
descr.entryByName['coarseLengthX']['widget'].set(self.params.
coarseLengthX, update = 0)
descr.entryByName['coarseLengthY']['widget'].set(self.params.
coarseLengthY, update = 0)
descr.entryByName['coarseLengthZ']['widget'].set(self.params.
coarseLengthZ, update = 0)
descr.entryByName['coarseCenterX']['widget'].set(self.params.
coarseCenterX, update = 0)
descr.entryByName['coarseCenterY']['widget'].set(self.params.
coarseCenterY, update = 0)
descr.entryByName['coarseCenterZ']['widget'].set(self.params.
coarseCenterZ, update = 0)
descr.entryByName['coarseResolutionX']['widget'].configure(text =
"%5.3f"%self.coarseResolutionX())
descr.entryByName['coarseResolutionY']['widget'].configure(text =
"%5.3f"%self.coarseResolutionY())
descr.entryByName['coarseResolutionZ']['widget'].configure(text =
"%5.3f"%self.coarseResolutionZ())
descr.entryByName['fineLengthX']['widget'].set(self.params.
fineLengthX, update = 0)
descr.entryByName['fineLengthY']['widget'].set(self.params.
fineLengthY, update = 0)
descr.entryByName['fineLengthZ']['widget'].set(self.params.
fineLengthZ, update = 0)
descr.entryByName['fineCenterX']['widget'].set(self.params.
fineCenterX, update = 0)
descr.entryByName['fineCenterY']['widget'].set(self.
params.fineCenterY, update = 0)
descr.entryByName['fineCenterZ']['widget'].set(self.params.
fineCenterZ, update = 0)
descr.entryByName['fineResolutionX']['widget'].configure(text =
"%5.3f"%self.fineResolutionX())
descr.entryByName['fineResolutionY']['widget'].configure(text =
"%5.3f"%self.fineResolutionY())
descr.entryByName['fineResolutionZ']['widget'].configure(text =
"%5.3f"%self.fineResolutionZ())
descr.entryByName['gridPointsNumberLabel']['widget'].\
configure(text = "%d"%self.totalGridPoints())
descr.entryByName['mallocSizeLabel']['widget'].configure(text =
"%5.3f"%self.memoryToBeAllocated())
self.coarseBox.Set(visible = descr.\
entryByName['showCoarseGrid']['wcfg']['variable'].get(),
xside = self.params.coarseLengthX,
yside = self.params.coarseLengthY,
zside = self.params.coarseLengthZ,
center = [self.params.coarseCenterX, self.params.coarseCenterY,
self.params.coarseCenterZ], tagModified=False)
self.fineBox.Set(visible = descr.\
entryByName['showFineGrid']['wcfg']['variable'].get(),
xside = self.params.fineLengthX,yside = self.params.fineLengthY,
zside = self.params.fineLengthZ,
center = [self.params.fineCenterX, self.params.fineCenterY,
self.params.fineCenterZ], tagModified=False)
self.vf.GUI.VIEWER.Redraw()
def testGridWidgets(self):
"""Tests grid widget"""
if self.cmdForms.has_key('default'):
descr = self.cmdForms['default'].descr
#Boundary check: make sure coarse grid encloses fine grid
ccx = descr.entryByName['coarseCenterX']['widget'].value
ccy = descr.entryByName['coarseCenterY']['widget'].value
ccz = descr.entryByName['coarseCenterZ']['widget'].value
clx = descr.entryByName['coarseLengthX']['widget'].value/2
cly = descr.entryByName['coarseLengthY']['widget'].value/2
clz = descr.entryByName['coarseLengthZ']['widget'].value/2
fcx = descr.entryByName['fineCenterX']['widget'].value
fcy = descr.entryByName['fineCenterY']['widget'].value
fcz = descr.entryByName['fineCenterZ']['widget'].value
flx = descr.entryByName['fineLengthX']['widget'].value/2
fly = descr.entryByName['fineLengthY']['widget'].value/2
flz = descr.entryByName['fineLengthZ']['widget'].value/2
if (fcx+flx>ccx+clx) or (fcx-flx<ccx-clx) or (fcy+fly>ccy+cly) or \
(fcy-fly<ccy-cly) or (fcz+flz>ccz+clz) or (fcz-flz<ccz-clz):
self.errorMsg = 'The coarse grid must enclose the fine grid.'
errorform = self.showForm('error',modal=1,blocking=1,force=1)
return 1
return 0
else :
#Boundary check: make sure coarse grid encloses fine grid
ccx = self.params.coarseCenterX
ccy = self.params.coarseCenterY
ccz = self.params.coarseCenterZ
            # Use half-lengths, matching the GUI branch above, so the test
            # compares grid extents measured from their centers
            clx = self.params.coarseLengthX/2
            cly = self.params.coarseLengthY/2
            clz = self.params.coarseLengthZ/2
            fcx = self.params.fineCenterX
            fcy = self.params.fineCenterY
            fcz = self.params.fineCenterZ
            flx = self.params.fineLengthX/2
            fly = self.params.fineLengthY/2
            flz = self.params.fineLengthZ/2
if (fcx+flx>ccx+clx) or (fcx-flx<ccx-clx) or (fcy+fly>ccy+cly) or \
(fcy-fly<ccy-cly) or (fcz+flz>ccz+clz) or (fcz-flz<ccz-clz):
self.errorMsg = 'The coarse grid must enclose the fine grid.'
errorform = self.showForm('error',modal=1,blocking=1,force=1)
return 1
return 0
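    # Worked example (hypothetical): a coarse grid centered at 0.0 with length
    # 80.0 (half-length 40.0) encloses a fine grid centered at 5.0 with length
    # 60.0 (half-length 30.0), since 5.0 + 30.0 = 35.0 <= 40.0 holds on every
    # axis; testGridWidgets() then returns 0 (no error).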
def gridParamUpdate(self, selectItem=0):
"""Updates grid parameters. Returns "ERROR" is failed"""
if self.testGridWidgets() == 0:
if self.cmdForms.has_key('default'):
descr = self.cmdForms['default'].descr
self.params.gridPointsX = closestMatch(descr.entryByName
['gridPointsX']['widget'].get(), self.params.GRID_VALUES)
self.params.gridPointsY = closestMatch(descr.entryByName
['gridPointsY']['widget'].get(), self.params.GRID_VALUES)
self.params.gridPointsZ = closestMatch(descr.entryByName
['gridPointsZ']['widget'].get(), self.params.GRID_VALUES)
self.params.coarseLengthX = descr.entryByName['coarseLengthX']\
['widget'].value
self.params.coarseLengthY = descr.entryByName['coarseLengthY']\
['widget'].value
self.params.coarseLengthZ = descr.entryByName['coarseLengthZ']\
['widget'].value
self.params.coarseCenterX = descr.entryByName['coarseCenterX']\
['widget'].value
self.params.coarseCenterY = descr.entryByName['coarseCenterY']\
['widget'].value
self.params.coarseCenterZ = descr.entryByName['coarseCenterZ']\
['widget'].value
self.params.fineLengthX = descr.entryByName['fineLengthX']\
['widget'].value
self.params.fineLengthY = descr.entryByName['fineLengthY']\
['widget'].value
self.params.fineLengthZ = descr.entryByName['fineLengthZ']\
['widget'].value
self.params.fineCenterX = descr.entryByName['fineCenterX']\
['widget'].value
self.params.fineCenterY = descr.entryByName['fineCenterY']\
['widget'].value
self.params.fineCenterZ = descr.entryByName['fineCenterZ']\
['widget'].value
self.flag_grid_changed = True
else:
return "ERROR"
self.refreshGridPage()
def refreshPhysicsPage(self):
"""Refreshes physics page"""
if self.cmdForms.has_key('default'):
descr = self.cmdForms['default'].descr
descr.entryByName['proteinDielectric']['widget'].\
setentry(self.params.proteinDielectric)
descr.entryByName['solventDielectric']['widget'].\
setentry(self.params.solventDielectric)
descr.entryByName['solventRadius']['widget'].\
setentry(self.params.solventRadius)
descr.entryByName['systemTemperature']['widget'].\
setentry(self.params.systemTemperature)
descr.entryByName['ionsList']['widget'].clear()
for i in range(len(self.params.ions)):
descr.entryByName['ionsList']['widget'].\
insert('end', self.params.ions[i].toString())
if self.params.saltConcentration:
self.salt_var.set(1)
else:
self.salt_var.set(0)
def testPhysicsWidgets(self):
"""Tests physics widget"""
if self.cmdForms.has_key('default'):
descr = self.cmdForms['default'].descr
if(descr.entryByName['proteinDielectric']['widget'].get() == ''):
                self.errorMsg = 'You must enter a protein dielectric value.'
errorform = self.showForm('error', modal=1, blocking=1,
force = 1)
return 1
if(descr.entryByName['solventDielectric']['widget'].get() == ''):
                self.errorMsg = 'You must enter a solvent dielectric value.'
errorform = self.showForm('error', modal=1, blocking=1,
force = 1)
return 1
if(descr.entryByName['solventRadius']['widget'].get() == ''):
self.errorMsg = 'You must enter a solvent radius value.'
errorform = self.showForm('error', modal=1, blocking=1,
force = 1)
return 1
if(descr.entryByName['systemTemperature']['widget'].get() == ''):
                self.errorMsg = 'You must enter a system temperature value.'
errorform = self.showForm('error', modal=1, blocking=1,
force = 1)
return 1
return 0
def physicsParamUpdate(self):
"""Updates physics parameter. Returns "ERROR" is failed"""
if self.testPhysicsWidgets() != 1:
if self.cmdForms.has_key('default'):
descr = self.cmdForms['default'].descr
self.params.proteinDielectric = float(descr.entryByName\
['proteinDielectric']['widget'].get())
self.params.solventDielectric = float(descr.entryByName\
['solventDielectric']['widget'].get())
self.params.solventRadius = float(descr.entryByName\
['solventRadius']['widget'].get())
self.params.systemTemperature = float(descr.entryByName\
['systemTemperature']['widget'].get())
salt = self.salt_var.get()
if salt:
self.params.saltConcentration = float(descr.entryByName\
['saltConcentration']['widget'].get())
else:
self.params.saltConcentration = 0
else:
return "ERROR"
self.refreshPhysicsPage()
def refreshAll(self,cmdForm = None):
"""Refreshes calculation, grid and physics pages"""
if cmdForm:
self.cmdForms['default'] = cmdForm
descr = cmdForm.descr
if APBSservicesFound:
ResourceFolder = getResourceFolderWithVersion()
                if not os.path.isdir(ResourceFolder):
                    os.mkdir(ResourceFolder)
self.rc_apbs = ResourceFolder + os.sep + "ws"
                if not os.path.isdir(self.rc_apbs):
                    os.mkdir(self.rc_apbs)
self.rc_apbs += os.sep + "rc_apbs"
if not os.path.exists(self.rc_apbs):
                    # create an empty credentials file and close the handle
                    open(self.rc_apbs, 'w').close()
else:
file = open(self.rc_apbs)
text = file.read()
text = text.split()
for line in text:
tmp_line = line.split('User:')
if len(tmp_line) > 1:
descr.entryByName['UserName_Entry']['wcfg']\
['textvariable'].set(tmp_line[1])
tmp_line = line.split('Password:')
if len(tmp_line) > 1:
descr.entryByName['Password_Entry']['wcfg']\
['textvariable'].set(tmp_line[1])
file.close()
# descr.entryByName['ParallelGroup']['widget'].toggle()
if not descr.entryByName['web service address']['widget'].get():
descr.entryByName['web service address']['widget']\
.selectitem(0)
url = descr.entryByName['web service address']['widget'].get()
url = url.strip()
if url.find('https://') != 0:
descr.entryByName['UserName_Label']['widget'].grid_forget()
descr.entryByName['UserName_Entry']['widget'].grid_forget()
descr.entryByName['Password_Label']['widget'].grid_forget()
descr.entryByName['Password_Entry']['widget'].grid_forget()
descr.entryByName['Remember_Label']['widget'].grid_forget()
descr.entryByName['Remember_Checkbutton']['widget']\
.grid_forget()
self.progressBar = ProgressBar(
descr.entryByName['WS_ProgressBar']['widget']
, labelside=None,
width=200, height=20, mode='percent')
self.progressBar.setLabelText('Progress...')
self.progressBar.set(0)
descr.entryByName['WS_ProgressBar']['widget'].grid_forget()
else:
descr.entryByName['WS_http']['widget'].bind(
sequence = "<Button-1>", func = self.WS_http)
descr.entryByName['calculationType']['widget']._entryWidget.\
config(state = 'readonly')
descr.entryByName['pbeType']['widget']._entryWidget.\
config(state = 'readonly')
descr.entryByName['boundaryConditions']['widget']._entryWidget.\
config(state = 'readonly')
descr.entryByName['chargeDiscretization']['widget'].\
_entryWidget.config(state = 'readonly')
descr.entryByName['surfaceCalculation']['widget']._entryWidget.\
config(state = 'readonly')
descr.entryByName['energyOutput']['widget']._entryWidget.\
config(state = 'readonly')
descr.entryByName['forceOutput']['widget']._entryWidget.\
config(state = 'readonly')
if self.vf.hasGui:
self.refreshCalculationPage()
self.refreshGridPage()
self.refreshPhysicsPage()
def paramUpdateAll(self):
"""Updates all parameters. Returns "ERROR" if failed """
if self.calculationParamUpdate() == "ERROR":
return "ERROR"
if self.gridParamUpdate() == "ERROR":
return "ERROR"
if self.physicsParamUpdate() == "ERROR":
return "ERROR"
def setOutputFiles(self):
"""Sets output files using outputFilesForm GUI"""
outputFilesForm = self.showForm('outputFilesForm', \
modal = 1, blocking = 1,force=1,master=self.cmdForms['default'].f)
descr = self.cmdForms['outputFilesForm'].descr
self.params.chargeDistributionFile = descr.entryByName\
['chargeDistributionFile']['widget'].get()
self.params.potentialFile = descr.entryByName['potentialFile']\
['widget'].get()
self.params.solventAccessibilityFile = descr.entryByName\
['solventAccessibilityFile']['widget'].get()
self.params.splineBasedAccessibilityFile = descr.entryByName\
['splineBasedAccessibilityFile']['widget'].get()
self.params.VDWAccessibilityFile = descr.entryByName\
['VDWAccessibilityFile']['widget'].get()
self.params.ionAccessibilityFile = descr.entryByName\
['ionAccessibilityFile']['widget'].get()
self.params.laplacianOfPotentialFile = descr.entryByName\
['laplacianOfPotentialFile']['widget'].get()
self.params.energyDensityFile = descr.entryByName\
['energyDensityFile']['widget'].get()
self.params.ionNumberFile = descr.entryByName\
['ionNumberFile']['widget'].get()
self.params.ionChargeDensityFile = descr.entryByName\
['ionChargeDensityFile']['widget'].get()
self.params.xShiftedDielectricFile = descr.entryByName\
['xShiftedDielectricFile']['widget'].get()
self.params.yShiftedDielectricFile = descr.entryByName\
['yShiftedDielectricFile']['widget'].get()
self.params.zShiftedDielectricFile = descr.entryByName\
['zShiftedDielectricFile']['widget'].get()
self.params.kappaFunctionFile = descr.entryByName\
['kappaFunctionFile']['widget'].get()
def addIon(self):
"""Adds an Ion"""
ionForm = self.showForm('ionForm', modal = 0, blocking = 1,
master=self.cmdForms['default'].f)
descr = self.cmdForms['ionForm'].descr
ion = Ion()
ion.charge = float(descr.entryByName['ionCharge']['widget'].get())
ion.concentration = float(descr.entryByName['ionConcentration']
['widget'].get())
ion.radius = float(descr.entryByName['ionRadius']['widget'].get())
self.params.ions.append(ion)
self.vf.message("self.APBSSetup.params.ions.append(Pmv.APBSCommands.Ion\
("+ion.toString()+"))")
self.vf.log("self.APBSSetup.params.ions.append(Pmv.APBSCommands.Ion(" \
+ion.toString()+"))")
f = self.cmdForms['default']
f.descr.entryByName['ionsList']['widget'].insert('end', ion.toString())
def removeIon(self):
"""Removes an Ion"""
descr = self.cmdForms['default'].descr
s = repr(descr.entryByName['ionsList']['widget'].getcurselection())
for i in range(descr.entryByName['ionsList']['widget'].size()):
if(string.find(s,descr.entryByName['ionsList']['widget']
.get(i))>-1):
break
descr.entryByName['ionsList']['widget'].delete(i)
self.params.ions.pop(i)
self.vf.message("self.APBSSetup.params.ions.pop("+`i`+")")
self.vf.log("self.APBSSetup.params.ions.pop("+`i`+")")
def moleculeListSelect(self, molName):
"""None <--- moleculeListSelect(molName)\n
Selects molecule with molName.\n
        If the molecule was not read from a pqr file, it is first converted with pdb2pqr.\n
"""
if self.vf.hasGui:
if self.cmdForms.has_key('default'):
self.cmdForms['default'].root.config(cursor='watch')
self.vf.GUI.ROOT.config(cursor='watch')
self.vf.GUI.VIEWER.master.config(cursor='watch')
#self.vf.GUI.MESSAGE_BOX.tx.component('text').config(cursor='watch')
molName = molName.replace('-','_')
mol = self.vf.getMolFromName(molName)
assert mol, "Error: molecule is not loaded " + molName
file, ext = os.path.splitext(mol.parser.filename)
if ext:
ext = ext.lower()
if ext == '.pqr':
filename = mol.parser.filename
mol.flag_copy_pqr = True
else: #create pqr file using pdb2pqr.py
filename = mol.name+".pqr"
#full_filename = os.path.join(self.params.projectFolder,filename)
#filename = full_filename
flag_overwrite = True
if not os.path.exists(filename) or \
self.vf.APBSPreferences.overwrite_pqr:
if not self.vf.commands.has_key('writePDB'):
self.vf.browseCommands("fileCommands",
commands=['writePDB',])
from user import home
tmp_pdb = home + os.path.sep + 'tmp.pdb'
filename = home + os.path.sep + filename
self.vf.writePDB(mol,tmp_pdb, pdbRec=('ATOM','HETATM'), log=0)
# Exe_String = sys.executable + \
# " -Wignore::DeprecationWarning " + self.params.pdb2pqr_Path\
# + " --ff="+self.params.pdb2pqr_ForceField + " tmp.pdb " + \
# "\""+full_filename+"\""
sys.argv = [sys.executable , self.params.pdb2pqr_Path]
if self.vf.embeded :
python = "python"
if sys.platform == "win32":
python="C:\\Python26\\python.exe"
sys.argv = [python, self.params.pdb2pqr_Path]
if self.vf.hasGui:
if self.vf.APBSPreferences.nodebump.get():
sys.argv.append('--nodebump')
if self.vf.APBSPreferences.nohopt.get():
sys.argv.append('--noopt')
else :
if self.vf.APBSPreferences.nodebump:
sys.argv.append('--nodebump')
if self.vf.APBSPreferences.nohopt:
sys.argv.append('--noopt')
sys.argv.append('--ff='+self.params.pdb2pqr_ForceField)
sys.argv.append(tmp_pdb)
sys.argv.append(filename)
os.path.split(self.params.pdb2pqr_Path)[0]
print "exec",sys.argv
import subprocess
returncode = subprocess.call(sys.argv, shell=self.vf.hasGui)
if returncode:
if not hasattr(self.vf,"spin"):
#spin is not available during unit testing
return ''
msg = "Could not convert " + mol.name +""" to pqr! Please try the latest pdb2pqr from: http://pdb2pqr.sourceforge.net."""
if self.vf.hasGui:
if self.cmdForms.has_key('default') and \
self.cmdForms['default'].f.winfo_toplevel().wm_state()==\
'normal':
tkMessageBox.showerror("ERROR: ", msg,
parent = self.cmdForms['default'].root)
else:
tkMessageBox.showerror("ERROR: ", msg)
self.vf.GUI.ROOT.config(cursor='')
self.vf.GUI.VIEWER.master.config(cursor='')
if self.cmdForms.has_key('default'):
self.cmdForms['default'].root.config(cursor='')
return ''
try:
os.remove(mol.name + '-typemap.html')
except:
pass
if self.vf.hasGui:
if self.cmdForms.has_key('default'):
self.cmdForms['default'].root.config(cursor='')
self.vf.GUI.ROOT.config(cursor='')
self.vf.GUI.VIEWER.master.config(cursor='')
#self.vf.GUI.MESSAGE_BOX.tx.component('text').config(cursor='xterm')
os.remove(tmp_pdb)
if self.vf.hasGui:
n = mol.name
self.vf.deleteMol(mol,topCommand=0)
if self.vf.hasGui:
mol_tmp = self.vf.readPQR(filename, topCommand=0)
else :
mol_tmp = self.vf.readMolecule(filename, topCommand=0)
mol_tmp.name = str(n)
mol = mol_tmp
mol.flag_copy_pqr = False
self.vf.assignAtomsRadii(mol, overwrite=True,log=False)
if self.vf.hasGui:
change_Menu_state(self.vf.APBSSaveProfile, 'normal')
if self.cmdForms.has_key('default'):
self.cmdForms['default'].root.config(cursor='')
self.vf.GUI.ROOT.config(cursor='')
self.vf.GUI.VIEWER.master.config(cursor='')
# self.vf.GUI.MESSAGE_BOX.tx.component('text').config(cursor='xterm')
if self.cmdForms.has_key('default'):
form_descr = self.cmdForms['default'].descr
#form_descr.entryByName['Profiles']['widget'].setentry('Default')
form_descr.entryByName['Profiles_Add']['widget'].config(state =
"normal")
form_descr.entryByName['Profiles_Remove']['widget'].config(state =
"normal")
form_descr.entryByName['Profiles_Run']['widget'].config(state =
"normal")
form_descr.entryByName['Profiles_Save']['widget'].config(state =
"normal")
form_descr.entryByName['Profiles_Load']['widget'].config(state =
"normal")
if APBSservicesFound:
form_descr.entryByName['WS_Run']['widget'].config(state =
"normal")
else:
global state_GUI
state_GUI = 'normal'
mol = self.vf.getMolFromName(molName.replace('-','_'))
if self.vf.hasGui:
if self.cmdForms.has_key('default'):
APBSParamName = self.cmdForms['default'].descr.\
entryByName['Profiles']['widget'].get()
mol.APBSParams[APBSParamName] = self.params
else:
mol.APBSParams['Default'] = self.params
self.flag_grid_changed = False #to call autosize Grid when running APBS
return filename
def molecule1Select(self):
"""Seclects molecule1 and setups molecule1Path"""
val = self.showForm('moleculeSelect', modal = 0, \
blocking = 1,master=self.cmdForms['default'].f)
if val:
if len(val['moleculeListSelect'])==0: return
molName = val['moleculeListSelect'][0]
self.params.molecule1Path = self.moleculeListSelect(molName)
self.mol1Name = molName
if not self.params.molecule1Path:
return
self.refreshCalculationPage()
if not self.vf.APBSSetup.flag_grid_changed:
self.autocenterCoarseGrid()
self.autosizeCoarseGrid()
self.autocenterFineGrid()
self.autosizeFineGrid()
self.refreshGridPage()
def molecule2Select(self):
"""Seclects molecule2 and setups molecule2Path"""
val = self.showForm('moleculeSelect', modal = 0, \
blocking = 1,master=self.cmdForms['default'].f)
if val:
if len(val['moleculeListSelect'])==0: return
molName = val['moleculeListSelect'][0]
self.params.molecule2Path = self.moleculeListSelect(molName)
self.mol2Name = molName
self.refreshCalculationPage()
def complexSelect(self):
"""Seclects complex and setups complexPath"""
val = self.showForm('moleculeSelect', modal=0, blocking=1,\
master=self.cmdForms['default'].f)
if val:
if len(val['moleculeListSelect'])==0: return
molName = val['moleculeListSelect'][0]
self.params.complexPath = self.moleculeListSelect(molName)
self.complexName = molName
self.refreshCalculationPage()
if not self.params.complexPath:
return
if not self.vf.APBSSetup.flag_grid_changed:
self.autocenterCoarseGrid()
self.autosizeCoarseGrid()
self.autocenterFineGrid()
self.autosizeFineGrid()
self.refreshGridPage()
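    # Note: a 'Binding energy' run needs all three selections above
    # (molecule 1, molecule 2 and the complex). molecule1Select and
    # complexSelect also recenter and resize both grids automatically,
    # unless the user has already edited the grid page
    # (flag_grid_changed is set).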
def apbsOutput(self, molecule1=None, molecule2=None, _complex=None, blocking=False ):
"""Runs APBS using mglutil.popen2Threads.SysCmdInThread"""
self.add_profile()
if self.paramUpdateAll() == "ERROR":
return
if molecule1:
self.params.SaveAPBSInput(self.params.projectFolder+os.path.sep\
+"apbs-input-file.apbs")
self.changeMenuState('disabled')
cmdstring = "\""+self.params.APBS_Path+"\" "+'apbs-input-file.apbs'
self.cwd = os.getcwd()
            if not blocking and self.vf.hasGui:
from mglutil.popen2Threads import SysCmdInThread
os.chdir(self.params.projectFolder)
self.cmd = SysCmdInThread(cmdstring, shell=True)
self.cmd.start()
time.sleep(1)
else:
                # blocking run (used on Windows and when there is no GUI)
                from subprocess import Popen, PIPE
                os.chdir(self.params.projectFolder)
                cmdstring = [self.params.APBS_Path, "apbs-input-file.apbs"]
                print(cmdstring)
                exec_cmd = Popen(cmdstring, stdout=PIPE, stderr=PIPE)
                stdout_data, stderr_data = exec_cmd.communicate()
                print(stdout_data)
                print(stderr_data)
                self.SaveResults(self.params.name)
os.chdir(self.cwd)
else:
file_name, ext = os.path.splitext(self.params.molecule1Path)
molecule1 = os.path.split(file_name)[-1]
if self.cmdForms.has_key('default'):
APBSParamName = self.cmdForms['default'].descr.\
entryByName['Profiles']['widget'].get()
else:
APBSParamName = 'Default'
if self.params.calculationType == 'Binding energy':
file_name, ext = os.path.splitext(self.params.molecule2Path)
molecule2 = os.path.split(file_name)[-1]
file_name, ext = os.path.splitext(self.params.complexPath)
_complex = os.path.split(file_name)[-1]
try:
self.doitWrapper(self.params.__dict__)
self.vf.APBSRun(molecule1, molecule2, _complex, APBSParamName=APBSParamName)
except Exception, inst:
print inst
tkMessageBox.showerror("Error Running APBS", "Please make sure that Molecule(s) have corrent path.\n\n"+
"Use Select botton to ensure your molecules(s) exists.",
parent=self.vf.APBSSetup.cmdForms['default'].root)
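    # Usage sketch for apbsOutput (hypothetical calls, assuming a molecule
    # and profile are already configured; not taken verbatim from the
    # codebase):
    #
    #   setup.apbsOutput(molecule1='mol1')                # GUI: APBS runs in
    #                                                     # a SysCmdInThread
    #   setup.apbsOutput(molecule1='mol1', blocking=True) # waits for APBS
    #
    # With no molecule1 argument the molecule names are derived from
    # params.molecule1Path/molecule2Path/complexPath and the run is
    # dispatched through self.vf.APBSRun instead.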
def SaveResults(self,params_name):
"""Checks the queue for results until we get one"""
        if not hasattr(self, 'cmd') \
           or self.cmd.ok.configure()['state'][-1] == 'normal':
self.saveProfile(Profilename=params_name, fileFlag=True)
self.changeMenuState('normal')
            if hasattr(self, 'cmd'):
self.cmd.com.wait()
potential_dx = self.params.projectFolder
file_name, ext = os.path.splitext(self.params.molecule1Path)
mol_name = os.path.split(file_name)[-1]
potential = os.path.join(potential_dx, mol_name+'.potential.dx')
if not os.path.exists(potential):
return
self.vf.Grid3DReadAny(potential, show=False, normalize=False)
self.vf.grids3D[mol_name+'.potential.dx'].geomContainer['Box'].Set(visible=0)
self.potential = mol_name+'.potential.dx'
if self.vf.hasGui:
change_Menu_state(self.vf.APBSDisplayIsocontours, 'normal')
change_Menu_state(self.vf.APBSDisplayOrthoSlice, 'normal')
if hasattr(self.vf,'APBSVolumeRender'):
change_Menu_state(self.vf.APBSVolumeRender, 'normal')
return
else:
self.vf.GUI.ROOT.after(10, self.SaveResults, params_name)
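    # SaveResults polls instead of blocking: while the worker thread is
    # still running it reschedules itself on Tk's event loop. A minimal
    # sketch of the same pattern, with hypothetical names:
    #
    #   def poll(root, job, on_done):
    #       if job.finished():
    #           on_done(job.result())
    #       else:
    #           root.after(10, poll, root, job, on_done)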
def select_profile(self,profile_name):
"""Selects profile"""
if self.paramUpdateAll() == "ERROR":
self.remove_profile()
return
file_name, ext = os.path.splitext(self.params.molecule1Path)
tmp_mol_name = os.path.split(file_name)[-1]
molecule1 = self.vf.getMolFromName(tmp_mol_name.replace('-','_'))
if molecule1.APBSParams.has_key(profile_name):
self.params = molecule1.APBSParams[profile_name]
self.refreshAll()
else:
self.params.name = profile_name
molecule1.APBSParams[profile_name] = self.params
def add_profile(self):
"""Adds profile"""
if self.cmdForms.has_key('default'):
ComboBox = self.cmdForms['default'].descr.entryByName['Profiles']\
['widget']
profile_name = ComboBox._entryfield.get()
list_items = ComboBox._list.get()
if not profile_name in list_items:
list_items += (profile_name,)
file_name, ext = os.path.splitext(self.params.molecule1Path)
tmp_mol_name = os.path.split(file_name)[-1]
molecule1 = self.vf.getMolFromName(tmp_mol_name.replace('-','_'))
self.params.name = profile_name
molecule1.APBSParams[profile_name] = self.params
ComboBox.setlist(list_items)
ComboBox.setentry(profile_name)
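    # Profiles are stored per molecule: each molecule carries an APBSParams
    # dict mapping a profile name to a parameter object, e.g. (hypothetical
    # access):
    #
    #   mol = self.vf.getMolFromName('mymol')
    #   params = mol.APBSParams['Default']
    #
    # select_profile, add_profile and remove_profile all read and write
    # that dict.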
def remove_profile(self):
"""Removes current profile"""
ComboBox = self.cmdForms['default'].descr.entryByName['Profiles']\
['widget']
profile_name = ComboBox._entryfield.get()
list_items = ComboBox._list.get()
if profile_name in list_items:
list_items = list(list_items)
list_items.remove(profile_name)
list_items = tuple(list_items)
ComboBox.clear()
ComboBox.setlist(list_items)
try:
ComboBox.setentry(list_items[0])
except IndexError:
pass
file_name, ext = os.path.splitext(self.params.molecule1Path)
tmp_mol_name = os.path.split(file_name)[-1]
molecule1 = self.vf.getMolFromName(tmp_mol_name.replace('-','_'))
if molecule1 and molecule1.APBSParams.has_key(profile_name):
del molecule1.APBSParams[profile_name]
def saveProfile(self, Profilename="Default", fileFlag=False, flagCommand=False):
"""Saves current profile
fileFlag is used to decide if Profilename is a file: False means that we need to aks for a file first
flagCommand is isued to check if this functionm is called from APBSSave_Profile Command """
if fileFlag:
if not flagCommand and self.cmdForms.has_key('default'):
Profilename = self.cmdForms['default'].descr.\
entryByName['Profiles']['widget'].get()
file_name,ext = os.path.splitext(self.params.molecule1Path)
mol_name = os.path.split(file_name)[-1]
potential_dx = os.path.join(self.params.projectFolder, mol_name+
'.potential.dx')
if os.path.exists(potential_dx):
tmp_string = os.path.basename(Profilename).\
replace(".apbs.pf","")
dest_path = os.path.join(self.params.projectFolder, tmp_string +
'_' + mol_name + '_potential.dx')
shutil.copyfile(potential_dx,dest_path)
if self.params.calculationType == 'Solvation energy':
potential_dx = os.path.join(self.params.projectFolder, mol_name+
'_Vacuum.potential.dx')
if os.path.exists(potential_dx):
tmp_string = os.path.basename(Profilename).\
replace(".apbs.pf","")
dest_path = os.path.join(self.params.projectFolder,
tmp_string + '_' + mol_name + '_Vacuum_potential.dx')
shutil.copyfile(potential_dx,dest_path)
if self.params.calculationType == 'Binding energy':
file_name,ext = os.path.splitext(self.params.molecule2Path)
mol_name = os.path.split(file_name)[-1]
potential_dx = os.path.join(self.params.projectFolder, mol_name+
'.potential.dx')
if os.path.exists(potential_dx):
tmp_string = os.path.basename(Profilename).\
replace(".apbs.pf","")
dest_path = os.path.join(self.params.projectFolder,
tmp_string + '_' + mol_name + '_potential.dx')
shutil.copyfile(potential_dx,dest_path)
file_name,ext = os.path.splitext(self.params.complexPath)
mol_name = os.path.split(file_name)[-1]
potential_dx = os.path.join(self.params.projectFolder, mol_name+
'.potential.dx')
if os.path.exists(potential_dx):
tmp_string = os.path.basename(Profilename).\
replace(".apbs.pf","")
dest_path = os.path.join(self.params.projectFolder,
tmp_string + '_' + mol_name + '_potential.dx')
shutil.copyfile(potential_dx,dest_path)
if os.path.isdir(self.params.projectFolder):
Profilename = os.path.join(self.params.projectFolder, Profilename)
            if Profilename.find('.apbs.pf') < 0:
Profilename = Profilename + '.apbs.pf'
fp = open(Profilename, 'w')
pickle.dump(self.params, fp)
pickle.dump(self.params.ions, fp)
fp.close()
else:
self.vf.APBSSaveProfile()
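    # A saved '<name>.apbs.pf' profile is simply two pickles appended to a
    # single file: the params object followed by its ions list. A minimal
    # standalone reader, mirroring the dump order used above (sketch):
    #
    #   import pickle
    #   fp = open('Default.apbs.pf', 'r')
    #   params = pickle.load(fp)
    #   ions = pickle.load(fp)
    #   fp.close()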
def loadProfile(self, filename = None):
"""Loads profile"""
if filename:
fp = open(filename, 'r')
self.params = pickle.load(fp)
self.doit(**self.params.__dict__)
fp.close()
profile_name = os.path.basename(filename).replace(".apbs.pf","")
            if self.params.calculationType == 'Solvation energy' or \
               self.params.calculationType == 'Electrostatic potential':
if not self.vf.getMolFromName(os.path.basename(os.path.\
splitext(self.params.molecule1Path)[0])):
molecule1Path = os.path.join(self.params.projectFolder,
self.params.molecule1Path)
self.vf.readPQR(molecule1Path, topCommand=0)
            if self.params.calculationType == 'Binding energy':
if not self.vf.getMolFromName(os.path.basename(os.path.\
splitext(self.params.molecule1Path)[0])):
molecule1Path = os.path.join(self.params.projectFolder,
self.params.molecule1Path)
self.vf.readPQR(molecule1Path, topCommand=0)
if not self.vf.getMolFromName(os.path.basename(os.path.\
splitext(self.params.molecule2Path)[0])):
molecule2Path = os.path.join(self.params.projectFolder,
self.params.molecule2Path)
self.vf.readPQR(molecule2Path, topCommand=0)
if not self.vf.getMolFromName(os.path.basename(os.path.\
splitext(self.params.complexPath)[0])):
complexPath = os.path.join(self.params.projectFolder,
self.params.complexPath)
self.vf.readPQR(complexPath, topCommand=0)
# the following part updates Profiles ComboBox
if self.cmdForms.has_key('default'):
ComboBox = self.cmdForms['default'].descr.entryByName\
['Profiles']['widget']
list_items = ComboBox._list.get()
if not profile_name in list_items:
list_items += (profile_name,)
ComboBox.setlist(list_items)
ComboBox.setentry(profile_name)
else:
global PROFILES
PROFILES += (profile_name,)
if self.cmdForms.has_key('default'):
form_descr = self.cmdForms['default'].descr
form_descr.entryByName['Profiles_Add']['widget'].config(state =
"normal")
form_descr.entryByName['Profiles_Remove']['widget'].config(state
= "normal")
form_descr.entryByName['Profiles_Run']['widget'].config(state =
"normal")
form_descr.entryByName['Profiles_Save']['widget'].config(state =
"normal")
form_descr.entryByName['Profiles_Load']['widget'].config(state =
"normal")
if APBSservicesFound:
form_descr.entryByName['WS_Run']['widget'].config(state =
"normal")
else:
global state_GUI
state_GUI = 'normal'
if self.vf.hasGui:
change_Menu_state(self.vf.APBSSaveProfile, 'normal')
self.refreshAll()
file_name,ext = os.path.splitext(self.params.molecule1Path)
mol_name = os.path.split(file_name)[-1]
file_potential = os.path.join(self.params.projectFolder,profile_name
+ '_' + mol_name + '_potential.dx')
if os.path.exists(file_potential):
shutil.copyfile(file_potential,os.path.join(self.params.\
projectFolder,mol_name + '.potential.dx'))
self.changeMenuState('normal')
self.potential = mol_name+'.potential.dx'
if self.vf.hasGui:
change_Menu_state(self.vf.APBSDisplayIsocontours, 'normal')
change_Menu_state(self.vf.APBSDisplayOrthoSlice, 'normal')
if hasattr(self.vf,'APBSVolumeRender'):
change_Menu_state(self.vf.APBSVolumeRender, 'normal')
self.vf.Grid3DReadAny(os.path.join(self.params.projectFolder,mol_name + '.potential.dx'),
show=False, normalize=False)
self.vf.grids3D[mol_name+'.potential.dx'].geomContainer['Box'].Set(visible=0)
else:
self.vf.APBSLoadProfile()
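    # Loading a profile has side effects beyond restoring parameters: it
    # re-reads any missing PQR files referenced by the profile, repopulates
    # the Profiles combo box, and, when a matching
    # '<profile>_<mol>_potential.dx' exists in the project folder, copies it
    # back to '<mol>.potential.dx' and re-registers it with the 3D grid
    # viewer.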
def apbsRunRemote(self):
"""Runs APBS Web Services in a thread and checks for the results"""
if self.paramUpdateAll() == "ERROR":
return
file_name, ext = os.path.splitext(self.params.molecule1Path)
tmp_mol_name = os.path.split(file_name)[-1]
mol = self.vf.getMolFromName(tmp_mol_name.replace('-','_'))
f = self.cmdForms['default']
address = f.descr.entryByName['web service address']['widget'].get()
address = address.strip()
global APBS_ssl
if address.find('https://') != 0:
#first check to see if APBS Web Services is up and running
import urllib
opener = urllib.FancyURLopener({})
try:
servlet = opener.open(address)
except IOError:
                self.errorMsg = address + " could not be found"
                self.errorMsg += "\nPlease make sure that the server is up and running"
                self.errorMsg += "\nFor more info on APBS Web Services visit http://www.nbcr.net/services"
self.showForm('error')
return
APBS_ssl = False
else:
from mgltools.web.services.SecuritymyproxyloginImplService_services import \
loginUserMyProxyRequestWrapper, \
SecuritymyproxyloginImplServiceLocator
gamaLoginLocator = SecuritymyproxyloginImplServiceLocator()
gamaLoginService = gamaLoginLocator.getSecuritymyproxyloginImpl(
ssl=1,transport=httplib.HTTPSConnection)
req = loginUserMyProxyRequestWrapper()
username = self.cmdForms['default'].descr.\
entryByName['UserName_Entry']['widget'].get()
passwd = self.cmdForms['default'].descr.\
entryByName['Password_Entry']['widget'].get()
req._username = username
req._passwd = passwd
resp = gamaLoginService.loginUserMyProxy(req)
f = open(APBS_proxy, "w")
f.write(resp._loginUserMyProxyReturn)
f.close()
APBS_ssl = True
if self.RememberLogin_var.get():
file = open(self.rc_apbs,'w')
user = self.cmdForms['default'].descr.entryByName\
['UserName_Entry']['widget'].get()
passwd = self.cmdForms['default'].descr.entryByName\
['Password_Entry']['widget'].get()
file.write("User:%s\nPassword:%s\n"%(user,passwd))
self.params.projectFolder=os.path.join(os.getcwd(),"apbs-"+mol.name)
if self.params.calculationType == 'Binding energy':
file_name, ext = os.path.splitext(self.params.molecule2Path)
tmp_mol_name = os.path.split(file_name)[-1]
mol2 = self.vf.getMolFromName(tmp_mol_name.replace('-','_'))
file_name, ext = os.path.splitext(self.params.complexPath)
tmp_mol_name = os.path.split(file_name)[-1]
_complex = self.vf.getMolFromName(tmp_mol_name.replace('-','_'))
self.params.projectFolder += "_" + mol2.name + "_"+ _complex.name
if not os.path.exists(self.params.projectFolder):
os.mkdir(self.params.projectFolder)
self.runWS(address, self.params, mol, mol2, _complex)
else:
if not os.path.exists(self.params.projectFolder):
os.mkdir(self.params.projectFolder)
self.runWS(address, self.params, mol)
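    # Remote runs support two transports: plain HTTP, where the service URL
    # is probed with urllib first, and HTTPS, where a GAMA/MyProxy login is
    # performed and the returned proxy credential is written to APBS_proxy.
    # That same proxy file is later passed as both cert_file and key_file
    # when the potential maps are downloaded.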
def runWS(self, address, params, mol1, mol2 = None, _complex = None):
"""Runs APBS Web Services"""
        self.apbsWS = APBSCmdToWebService(params, mol1, mol2, _complex)
        self.Parallel_flag = False
try:
f = self.cmdForms['default']
f.descr.entryByName['APBSservicesLabel1']['widget'].\
configure(text = 'Connecting to '+ address)
f.descr.entryByName['APBSservicesLabel2']['widget'].\
configure(text = "")
f.descr.entryByName['APBSservicesLabel4']['widget'].\
configure(text = "")
f.descr.entryByName['APBSservicesLabel3']['widget'].\
configure(text = "Please wait ...")
f.descr.entryByName['APBSservicesLabel4']['widget'].\
configure(text = "")
self.vf.GUI.ROOT.update()
resp = self.apbsWS.run(address)
f.descr.entryByName['APBSservicesLabel1']['widget'].\
configure(text = "Received Job ID: " + resp._jobID)
self.vf.GUI.ROOT.after(5, self.checkForRemoteResults)
f.descr.entryByName['WS_Run']['widget'].configure(state = 'disabled')
self.rml = mol1.name
except Exception, inst:
f.descr.entryByName['APBSservicesLabel3']['widget'].\
configure(text = "")
from ZSI import FaultException
if isinstance(inst, FaultException):
tmp_str = inst.fault.AsSOAP()
tmp_str = tmp_str.split('<message>')
tmp_str = tmp_str[1].split('</message>')
if self.cmdForms.has_key('default') and \
self.cmdForms['default'].f.winfo_toplevel().wm_state() == \
'normal':
tkMessageBox.showerror("ERROR: ",tmp_str[0],parent =
self.cmdForms['default'].root)
else:
tkMessageBox.showerror("ERROR: ",tmp_str[0])
else:
import traceback
traceback.print_stack()
traceback.print_exc()
f.descr.entryByName['APBSservicesLabel1']['widget'].\
configure(text = "")
f.descr.entryByName['APBSservicesLabel2']['widget'].\
configure(text = "ERROR!!! Unable to complete the Run")
f.descr.entryByName['APBSservicesLabel3']['widget'].\
configure(text = "Please open Python Shell for Traceback")
def checkForRemoteResults(self):
"""Checks the queue for remote results until we get one"""
resp = self.apbsWS.appServicePort.queryStatus(queryStatusRequest(self.apbsWS.JobID))
if resp._code == 8: # 8 = GramJob.STATUS_DONE
f = self.cmdForms['default']
f.descr.entryByName['APBSservicesLabel2']['widget'].\
configure(text = resp._message)
webbrowser.open(resp._baseURL)
f.descr.entryByName['APBSservicesLabel3']['widget'].\
configure(text = resp._baseURL,fg='Blue',cursor='hand1')
def openurl(event):
webbrowser.open(resp._baseURL)
f.descr.entryByName['APBSservicesLabel3']['widget'].\
bind(sequence="<Button-1>",func = openurl)
# read the potential back
opener = urllib.FancyURLopener(cert_file = APBS_proxy, key_file = APBS_proxy)
if self.Parallel_flag:
if self.npx*self.npy*self.npz == 1:
f.descr.entryByName['APBSservicesLabel4']['widget'].\
configure(text = "Downloading %s.potential-PE0.dx"%self.rml)
f.descr.entryByName['WS_ProgressBar']['widget'].\
grid(sticky='ew', row = 9, column = 0, columnspan = 2)
f.descr.entryByName['APBS_WS_DX_Label']['widget'].\
configure(text = "URI: "+resp._baseURL+"/%s.potential-PE0.dx"%self.rml)
                    self.progressBar.configure(progressformat='percent',
                                       labeltext='Progress ... ', max=100)
self.progressBar.set(0)
self._dx = opener.open(resp._baseURL+"/%s.potential-PE0.dx"%self.rml)
self._dx_out = open(os.path.join(self.params.projectFolder,
"%s.potential.dx"%self.rml),"w")
bytes = int(self._dx.headers.dict['content-length'])
self._progress_counter = 0
self._download_bytes = bytes/100
if self._download_bytes == 0: self._download_bytes = 1
self.Download()
else:
f.descr.entryByName['APBSservicesLabel4']['widget'].\
configure(text = "Downloading %s.potential.dx. Please wait ..."%self.rml)
f.descr.entryByName['WS_ProgressBar']['widget'].\
grid(sticky='ew', row = 9, column = 0, columnspan = 2)
f.descr.entryByName['APBS_WS_DX_Label']['widget'].\
configure(text = "URI: "+resp._baseURL+"/%s.potential-PE*.dx"%self.rml)
self.progressBar.configure(progressformat='ratio',
labeltext='Progress ... ', max =self.npx*self.npy*self.npz)
self._progress_counter = 0
self.progressBar.set(0)
self._dx_files = []
for i in range(self.npx*self.npy*self.npz):
self._dx_files.append(opener.open(resp._baseURL+
"/%s.potential-PE%d.dx"%(self.rml,i)))
self._dx_out = open(os.path.join(self.params.projectFolder,
"%s.potential.dx"%self.rml),"w")
self._dx_out.write("# Data from %s\n"%resp._baseURL)
self._dx_out.write("#\n# POTENTIAL (kT/e)\n#\n")
self.Download_and_Merge()
else:
f.descr.entryByName['APBSservicesLabel4']['widget'].\
configure(text = "Downloading %s.potential.dx"%self.rml)
f.descr.entryByName['WS_ProgressBar']['widget'].\
grid(sticky='ew', row = 9, column = 0, columnspan = 2)
f.descr.entryByName['APBS_WS_DX_Label']['widget'].\
configure(text = "URI: "+resp._baseURL + "/%s.potential.dx"%self.rml)
self.progressBar.configure(progressformat='percent',
labeltext='Progress ... ', max =100)
self.progressBar.set(0)
self._dx = opener.open(resp._baseURL + "/%s.potential.dx"%self.rml)
filePath = os.path.join(self.params.projectFolder,"%s.potential.dx"%self.rml)
try:
self._dx_out = open(filePath,"w")
except IOError:
showerror("Download Failed!",
"Permission denied: " +filePath)
bytes = int(self._dx.headers.dict['content-length'])
self._progress_counter = 0
self._download_bytes = bytes/100
if self._download_bytes == 0: self._download_bytes = 1
self.Download()
return
else:
f = self.cmdForms['default']
f.descr.entryByName['APBSservicesLabel2']['widget'].\
configure(text = "Status: " + resp._message)
self.vf.GUI.ROOT.after(500, self.checkForRemoteResults)
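    # checkForRemoteResults mirrors the local polling pattern: while the job
    # status code is anything other than 8 (GramJob.STATUS_DONE) it
    # reschedules itself every 500 ms; once done it opens the job's base URL
    # in a browser and dispatches to Download (single .dx file, including
    # the one-processor parallel case) or Download_and_Merge (one
    # .potential-PE<n>.dx file per processor).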
def Download(self):
self._progress_counter += 1
if self._progress_counter > 100:
self._progress_counter = 100
self.progressBar.set(self._progress_counter)
tmp = self._dx.read(self._download_bytes)
if tmp:
self._dx_out.write(tmp)
else:
self._dx.close()
self._dx_out.close()
f = self.cmdForms['default']
f.descr.entryByName['WS_ProgressBar']['widget'].grid_forget()
f.descr.entryByName['APBS_WS_DX_Label']['widget'].\
configure(text = '')
f.descr.entryByName['APBSservicesLabel4']['widget'].\
configure(text="%s.potential.dx has been saved"%self.rml)
self.saveProfile(self.params.name, fileFlag=True)
self.changeMenuState('normal')
f.descr.entryByName['WS_Run']['widget'].configure(state = 'normal')
return
self.vf.GUI.ROOT.after(10, self.Download)
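    # Download streams the .dx file in roughly 1% chunks (content-length/100
    # bytes per read), advancing the progress bar once per chunk and
    # re-arming itself with root.after(10, ...) so the GUI stays responsive;
    # an empty read() marks the end of the transfer, after which the profile
    # is saved and the menus are re-enabled.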
def Download_and_Merge(self):
self._dx_files[0].readline()
self._dx_files[0].readline()
self._dx_files[0].readline()
self._dx_files[0].readline()
tmp_str = self._dx_files[0].readline()
from string import split
w = split(tmp_str)
nx, ny, nz = int(w[5]), int(w[6]), int(w[7])
self._dx_out.write("object 1 class gridpositions counts %d %d %d\n"
%(nx*self.npx,ny*self.npy,nz*self.npz))
self._dx_out.write(self._dx_files[0].readline())
self._dx_out.write(self._dx_files[0].readline())
self._dx_out.write(self._dx_files[0].readline())
self._dx_out.write(self._dx_files[0].readline())
self._dx_out.write("object 2 class gridconnections counts %d %d %d\n"
%(nx*self.npx,ny*self.npy,nz*self.npz))
self._dx_out.write("object 3 class array type double rank 0 items %d"
%(nx*self.npx*ny*self.npy*nz*self.npz)+" data follows\n")
        for fp in self._dx_files[1:]:
            for i in range(11):
                fp.readline()
self._dx_files[0].readline()
self._dx_files[0].readline()
arrays = []
        for fp in self._dx_files:
            self._progress_counter += 1
            self.progressBar.set(self._progress_counter)
            data = fp.readlines()
            fp.close()
array = Numeric.zeros( (nx,ny,nz), Numeric.Float32)
values = map(split, data[0:-5])
ind=0
size = nx*ny*nz
for line in values:
if ind>=size:
break
l = len(line)
array.flat[ind:ind+l] = map(float, line)
ind = ind + l
arrays.append(array)
self.progressBar.configure(labeltext='Merging ... ')
for k in range(self.npz):
for j in range(self.npy):
for i in range(self.npx):
if i == 0:
array_x = arrays[self.npx*j+
self.npx*self.npy*k]
else:
array_x = Numeric.concatenate(
(array_x,arrays[i+self.npx*j+
self.npx*self.npy*k]),axis=0)
if j == 0:
array_y = array_x
else:
array_y = Numeric.concatenate(
(array_y,array_x),axis=1)
if k == 0:
array_out = array_y
else:
array_out = Numeric.concatenate(
(array_out,array_y),axis=2)
for z in array_out:
for y in z:
for x in y:
self._dx_out.write(str(x)+" ")
self._dx_out.write('\n')
self._dx_out.write("attribute \"dep\" string \"positions\"\n")
self._dx_out.write("object \"regular positions regular connections\" class field\n")
self._dx_out.write("component \"positions\" value 1\n")
self._dx_out.write("component \"connections\" value 2\n")
self._dx_out.write("component \"data\" value 3\n")
self._dx_out.close()
f = self.cmdForms['default']
f.descr.entryByName['WS_ProgressBar']['widget'].grid_forget()
f.descr.entryByName['APBS_WS_DX_Label']['widget'].\
configure(text = '')
f.descr.entryByName['APBSservicesLabel4']['widget'].\
configure(text="%s.potential.dx has been saved"%self.rml)
self.saveProfile(self.params.name, fileFlag=True)
self.changeMenuState('normal')
f.descr.entryByName['WS_Run']['widget'].configure(state = 'normal')
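    # The merge assumes each of the npx*npy*npz processor files holds an
    # (nx, ny, nz) block and that the blocks tile the full grid, so the
    # merged header advertises nx*npx x ny*npy x nz*npz points. Worked
    # example with illustrative numbers: npx=npy=npz=2 and nx=ny=nz=65 give
    # eight PE files concatenated along x, then y, then z into a single
    # 130x130x130 potential map.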
# Forms defined here
def buildFormDescr(self, formName):
"""Builds 'error','ionForm','outputFilesForm','moleculeSelect' and
'default' forms'"""
if formName == 'error':
if self.cmdForms.has_key('default') and \
self.cmdForms['default'].f.winfo_toplevel().wm_state() == \
'normal':
tkMessageBox.showerror("ERROR: ", self.errorMsg,parent =
self.cmdForms['default'].root)
else:
tkMessageBox.showerror("ERROR: ", self.errorMsg)
return
if formName == 'ionForm':
ifd = InputFormDescr(title = "Add Ion")
ifd.append({'name':'ionChargeLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Charge (e):'},
'gridcfg':{'row':0, 'column':0, 'sticky':'wens'}
})
ifd.append({'widgetType':Pmw.EntryField,
'name':'ionCharge',
'wcfg':{'validate':{'validator':'real'}, 'value':1},
'gridcfg':{'row':0, 'column':1, 'sticky':'wens'}
})
ifd.append({'name':'ionConcentrationLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Concentration (M):'},
'gridcfg':{'row':1, 'column':0, 'sticky':'wens'}
})
ifd.append({'widgetType':ThumbWheel,
'name':'ionConcentration',
'wcfg':{'text':None, 'showLabel':1,
'min':0,
'value':0.01, 'oneTurn':0.1,
'type':'float',
'increment':0.01,
'wheelLabcfg1':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'grey'},
'wheelLabcfg2':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'black'},
'continuous':1,
'wheelPad':1, 'width':150,'height':14},
'gridcfg':{'row':1, 'column':1, 'sticky':'wens'}
})
ifd.append({'name':'ionRadiusLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Radius (Angstroms):'},
'gridcfg':{'row':2, 'column':0, 'sticky':'wens'}
})
ifd.append({'widgetType':Pmw.EntryField,
'name':'ionRadius',
'wcfg':{'validate':{'validator':'real','min':0}, 'value':1},
'gridcfg':{'row':2, 'column':1, 'sticky':'wens'}
})
return ifd
elif formName =='outputFilesForm':
ifd = InputFormDescr(title = "Select output files")
ifd.append({'name':'fileTypeLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'File Type'},
'gridcfg':{'sticky':'e', 'row':1, 'column':0}
})
ifd.append({'name':'fileFormatLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'File format'},
'gridcfg':{'sticky':'e', 'row':1, 'column':1}
})
ifd.append({'name':'chargeDistributionFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Charge distribution file: '},
'gridcfg':{'sticky':'e', 'row':2, 'column':0}
})
ifd.append({'name':'chargeDistributionFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'dropdown':1, 'history':0,},
'defaultValue':self.params.chargeDistributionFile,
'gridcfg':{'sticky':'wens', 'row':2, 'column':1}
})
ifd.append({'name':'potentialFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Potential file: '},
'gridcfg':{'sticky':'e', 'row':3, 'column':0}
})
ifd.append({'name':'potentialFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.potentialFile,
'gridcfg':{'sticky':'wens', 'row':3, 'column':1}
})
ifd.append({'name':'solventAccessibilityFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Solvent accessibility file: '},
'gridcfg':{'sticky':'e', 'row':4, 'column':0}
})
ifd.append({'name':'solventAccessibilityFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.solventAccessibilityFile,
'gridcfg':{'sticky':'wens', 'row':4, 'column':1}
})
ifd.append({'name':'splineBasedAccessibilityFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Spline-based accessibility file: '},
'gridcfg':{'sticky':'e', 'row':5, 'column':0}
})
ifd.append({'name':'splineBasedAccessibilityFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.splineBasedAccessibilityFile,
'gridcfg':{'sticky':'wens', 'row':5, 'column':1}
})
ifd.append({'name':'VDWAccessibilityFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'VDW accessibility file: '},
'gridcfg':{'sticky':'e', 'row':6, 'column':0}
})
ifd.append({'name':'VDWAccessibilityFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.VDWAccessibilityFile,
'gridcfg':{'sticky':'wens', 'row':6, 'column':1}
})
ifd.append({'name':'ionAccessibilityFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Ion accessibility file: '},
'gridcfg':{'sticky':'e', 'row':7, 'column':0}
})
ifd.append({'name':'ionAccessibilityFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.ionAccessibilityFile,
'gridcfg':{'sticky':'wens', 'row':7, 'column':1}
})
ifd.append({'name':'laplacianOfPotentialFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Laplacian of potential file: '},
'gridcfg':{'sticky':'e', 'row':8, 'column':0}
})
ifd.append({'name':'laplacianOfPotentialFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.laplacianOfPotentialFile,
'gridcfg':{'sticky':'wens', 'row':8, 'column':1}
})
ifd.append({'name':'energyDensityFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Energy density file: '},
'gridcfg':{'sticky':'e', 'row':9, 'column':0}
})
ifd.append({'name':'energyDensityFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.energyDensityFile,
'gridcfg':{'sticky':'wens', 'row':9, 'column':1}
})
ifd.append({'name':'ionNumberFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Ion number file: '},
'gridcfg':{'sticky':'e', 'row':10, 'column':0}
})
ifd.append({'name':'ionNumberFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.ionNumberFile,
'gridcfg':{'sticky':'wens', 'row':10, 'column':1}
})
ifd.append({'name':'ionChargeDensityFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Ion charge density file: '},
'gridcfg':{'sticky':'e', 'row':11, 'column':0}
})
ifd.append({'name':'ionChargeDensityFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.ionChargeDensityFile,
'gridcfg':{'sticky':'wens', 'row':11, 'column':1}
})
ifd.append({'name':'xShiftedDielectricFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'X-shifted dielectric file: '},
'gridcfg':{'sticky':'e', 'row':12, 'column':0}
})
ifd.append({'name':'xShiftedDielectricFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.xShiftedDielectricFile,
'gridcfg':{'sticky':'wens', 'row':12, 'column':1}
})
ifd.append({'name':'yShiftedDielectricFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Y-shifted dielectric file: '},
'gridcfg':{'sticky':'e', 'row':13, 'column':0}
})
ifd.append({'name':'yShiftedDielectricFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.yShiftedDielectricFile,
'gridcfg':{'sticky':'wens', 'row':13, 'column':1}
})
ifd.append({'name':'zShiftedDielectricFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Z-shifted dielectric file: '},
'gridcfg':{'sticky':'e', 'row':14, 'column':0}
})
ifd.append({'name':'zShiftedDielectricFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.zShiftedDielectricFile,
'gridcfg':{'sticky':'wens', 'row':14, 'column':1}
})
ifd.append({'name':'kappaFunctionFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Kappa function file: '},
'gridcfg':{'sticky':'e', 'row':15, 'column':0}
})
ifd.append({'name':'kappaFunctionFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.kappaFunctionFile,
'gridcfg':{'sticky':'wens', 'row':15, 'column':1}
})
return ifd
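            # Every entry in this form follows the same pattern: a
            # Tkinter.Label naming the map plus a Pmw.ComboBox over
            # params.FILETYPES whose default mirrors the matching attribute
            # on self.params. Supporting a new APBS output map only requires
            # one more label/combo pair here and a field on the params
            # object.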
elif formName == 'moleculeSelect':
ifd=InputFormDescr(title="Select a molecule")
self.selectedFilename = ''
molNames = self.vf.Mols.name
if molNames is None:
molNames = []
ifd.append({'name':'moleculeListSelect',
'widgetType':Pmw.ScrolledListBox,
'tooltip':'Select a molecule loaded in PMV to run APBS ',
'wcfg':{'label_text':'Select Molecule: ',
'labelpos':'nw',
'items':molNames,
'listbox_selectmode':'single',
'listbox_exportselection':0,
'usehullsize': 1,
'hull_width':100,'hull_height':150,
'listbox_height':5},
'gridcfg':{'sticky':'nsew', 'row':1, 'column':0}})
elif formName == 'default':
ifd = InputFormDescr(title="APBS Profile Setup and Execution")
## NOTEBOOK WIDGET
ifd.append({'widgetType':Pmw.NoteBook,
'name':'hovigNotebook',
'container':{'Calculation':
"w.page('Calculation')",
'Physics':"w.page('Physics')",
'Web Service':"w.page('Web Service')",
'Grid':"w.page('Grid')"},
'wcfg':{'borderwidth':3},
'componentcfg':[{'name':'Calculation',
'cfg':{}},
{'name':'Grid', 'cfg':{}},
{'name':'Physics','cfg':{}},
{'name':'Web Service','cfg':{}} ],
'gridcfg':{'sticky':'we'},
})
## CALCULATION PAGE
## MATH GROUP
ifd.append({'name':"mathGroup",
'widgetType':Pmw.Group,
'parent':'Calculation',
'container':{'mathGroup':'w.interior()'},
'wcfg':{'tag_text':"Mathematics"},
'gridcfg':{'sticky':'wne'}
})
ifd.append({'name':'calculationTypeLabel',
'widgetType':Tkinter.Label,
'parent':'mathGroup',
'wcfg':{'text':'Calculation type:'},
'gridcfg':{'row':0, 'column':0, 'sticky':'e'}
})
ifd.append({'name':'calculationType',
'widgetType':Pmw.ComboBox,
'parent':'mathGroup',
'wcfg':{'scrolledlist_items':self.params.CALCULATIONTYPES,
'history':0, 'dropdown':1,
'selectioncommand':self.calculationParamUpdate,
'listheight':80
},
'gridcfg':{'sticky':'we', 'row':0, 'column':1}
})
ifd.append({'name':'pbeTypeLabel',
'widgetType':Tkinter.Label,
'parent':'mathGroup',
'wcfg':{'text':'Poisson-Boltzmann equation type:'},
'gridcfg':{'row':1, 'column':0, 'sticky':'e'}
})
ifd.append({'name':'pbeType',
'widgetType':Pmw.ComboBox,
'parent':'mathGroup',
'wcfg':{'scrolledlist_items':self.params.PBETYPES,
'history':0,'dropdown':1, 'listheight':80,
'selectioncommand':self.calculationParamUpdate},
'gridcfg':{'sticky':'we', 'row':1, 'column':1}
})
ifd.append({'name':'boundaryConditionsLabel',
'widgetType':Tkinter.Label,
'parent':'mathGroup',
'wcfg':{'text':'Boundary conditions:'},
'gridcfg':{'row':2, 'column':0, 'sticky':'e'}
})
ifd.append({'name':'boundaryConditions',
'widgetType':Pmw.ComboBox,
'parent':'mathGroup',
'wcfg':{'scrolledlist_items':self.params.BOUNDARYTYPES,
'history':0, 'dropdown':1, 'listheight':80,
'selectioncommand':self.calculationParamUpdate},
'gridcfg':{'sticky':'we', 'row':2, 'column':1}
})
ifd.append({'name':'chargeDiscretizationLabel',
'widgetType':Tkinter.Label,
'parent':'mathGroup',
'wcfg':{'text':'Charge discretization:'},
'gridcfg':{'sticky':'e', 'row':3, 'column':0}
})
ifd.append({'name':'chargeDiscretization',
'widgetType':Pmw.ComboBox,
'parent':'mathGroup',
'wcfg':{'scrolledlist_items':
self.params.CHARGEDISCRETIZATIONTYPES,'history':0,
'dropdown':1, 'listheight':80,
'selectioncommand':self.calculationParamUpdate},
'gridcfg':{'sticky':'we', 'row':3, 'column':1}
})
ifd.append({'name':'surfaceCalculationLabel',
'widgetType':Tkinter.Label,
'parent':'mathGroup',
'wcfg':{'text':'Surface smoothing method:'},
'gridcfg':{'sticky':'e', 'row':4, 'column':0}
})
ifd.append({'name':'surfaceCalculation',
'widgetType':Pmw.ComboBox,
'parent':'mathGroup',
'wcfg':{'scrolledlist_items':
self.params.SURFACECALCULATIONTYPES,'history':0,
'dropdown':1, 'listheight':80,
'selectioncommand':self.calculationParamUpdate},
'gridcfg':{'sticky':'we', 'row':4, 'column':1}
})
ifd.append({'name':'sdensLabel',
'widgetType':Tkinter.Label,
'parent':'mathGroup',
'wcfg':{'text':'Sphere density:'},
'gridcfg':{'row':5, 'column':0, 'sticky':'e'}
})
ifd.append({'widgetType':Pmw.EntryField,
'name':'sdens',
'parent':'mathGroup',
'wcfg':{'command':self.calculationParamUpdate,
'value':self.params.sdens,
'validate':{'validator':'real', 'min':0.01}},
'gridcfg':{'sticky':'nsew', 'row':5, 'column':1}
})
ifd.append({'name':'splineWindowLabel',
'widgetType':Tkinter.Label,
'parent':'mathGroup',
'wcfg':{'text':'Spline window (Angstroms):'},
'gridcfg':{'row':6, 'column':0, 'sticky':'e'}
})
ifd.append({'widgetType':Pmw.EntryField,
'name':'splineWindow',
'parent':'mathGroup',
'wcfg':{'command':self.calculationParamUpdate,
'value':self.params.splineWindow,
'validate':{'validator':'real', 'min':0.01}},
'gridcfg':{'sticky':'nsew', 'row':6, 'column':1}
})
# MOLECULES GROUP
ifd.append({'name':"moleculesGroup",
'widgetType':Pmw.Group,
'parent':'Calculation',
'container':{'moleculesGroup':'w.interior()'},
'wcfg':{'tag_text':'Molecules'},
'gridcfg':{'sticky':'nswe'}
})
ifd.append({'widgetType':Tkinter.Button,
'name':'molecule1Select',
'parent':'moleculesGroup',
'wcfg':{'text':'Select Molecule 1 ...',
'command':self.molecule1Select},
'gridcfg':{'sticky':'ew', 'row':0, 'column':0}
})
ifd.append({'widgetType':Pmw.EntryField,
'name':'molecule1',
'parent':'moleculesGroup',
'tooltip':"Click on Select Molecule 1 button to set this.",
'wcfg':{'command':self.calculationParamUpdate,
'entry_state':'disabled',
'value':self.params.molecule1Path},
'gridcfg':{'sticky':'ew', 'row':0, 'column':1}
})
ifd.append({'widgetType':Tkinter.Button,
'name':'molecule2Select',
'parent':'moleculesGroup',
'wcfg':{'text':'Select Molecule 2 ...',
'command':self.molecule2Select},
'gridcfg':{'sticky':'ew', 'row':1, 'column':0}
})
ifd.append({'widgetType':Pmw.EntryField,
'name':'molecule2',
'parent':'moleculesGroup',
'tooltip':"Click on Select Molecule 2 button to set this.",
'wcfg':{'command':self.calculationParamUpdate,
'entry_state':'disabled',
'value':self.params.molecule2Path},
'gridcfg':{'sticky':'ew', 'row':1, 'column':1}
})
ifd.append({'widgetType':Tkinter.Button,
'name':'complexSelect',
'parent':'moleculesGroup',
'wcfg':{'text':'Select Complex ...',
'command':self.complexSelect},
'gridcfg':{'sticky':'ew', 'row':2, 'column':0}
})
ifd.append({'widgetType':Pmw.EntryField,
'name':'complex',
'parent':'moleculesGroup',
'tooltip':"Click on Select Complex button to set this.",
'wcfg':{'command':self.calculationParamUpdate,
'entry_state':'disabled',
'value':self.params.complexPath},
'gridcfg':{'sticky':'ew', 'row':2, 'column':1}
})
## FILE GROUP
ifd.append({'name':"fileGroup",
'widgetType':Pmw.Group,
'parent':'Calculation',
'container':{'fileGroup':'w.interior()'},
'wcfg':{'tag_text':'Output'},
'gridcfg':{'sticky':'nwe'}
})
ifd.append({'name':'energyTypesLabel',
'widgetType':Tkinter.Label,
'parent':'fileGroup',
'wcfg':{'text':'Energy: '},
'gridcfg':{'sticky':'e','row':0, 'column':0}
})
ifd.append({'name':'energyOutput',
'widgetType':Pmw.ComboBox,
'parent':'fileGroup',
'wcfg':{'scrolledlist_items':
self.params.ENERGYOUTPUTTYPES,
'dropdown':1, 'history':0,'listheight':80,
'selectioncommand':self.calculationParamUpdate},
'gridcfg':{'sticky':'we', 'row':0, 'column':1}
})
ifd.append({'name':'forceTypesLabel',
'widgetType':Tkinter.Label,
'parent':'fileGroup',
'wcfg':{'text':'Force: '},
'gridcfg':{'sticky':'e', 'row':1,'column':0}
})
ifd.append({'name':'forceOutput',
'widgetType':Pmw.ComboBox,
'parent':'fileGroup',
'wcfg':{'scrolledlist_items':
self.params.FORCEOUTPUTTYPES,
'dropdown':1, 'history':0, 'listheight':80,
'selectioncommand':self.calculationParamUpdate},
'gridcfg':{'sticky':'we','row':1,'column':1}
})
ifd.append({'widgetType':Tkinter.Button,
'name':'outputFilesSelect',
'parent':'fileGroup',
'wcfg':{'text':'More output options ...',
'command':self.setOutputFiles},
'gridcfg':{'sticky':'ew','row':2,'column':1}
})
## PROFILES GROUP
ifd.append({'name':"ProfilesGroup",
'widgetType':Pmw.Group,
'parent':'Calculation',
'container':{'ProfilesGroup':'w.interior()'},
'wcfg':{'tag_text':'Profiles'},
'gridcfg':{'sticky':'we'}
})
ifd.append({'name':'Profiles',
'widgetType':Pmw.ComboBox,
'parent':'ProfilesGroup',
'wcfg':{'scrolledlist_items':PROFILES, 'listheight':80,
'dropdown':1,'history':1,'autoclear':1,
'selectioncommand':self.select_profile
},
'gridcfg':{'sticky':'we', 'row':0, 'column':0}
})
ifd.append({'widgetType':Tkinter.Button,
'name':'Profiles_Add',
'parent':'ProfilesGroup',
'wcfg':{'text':'Add',
'command':self.add_profile,
'state':state_GUI},
'gridcfg':{'sticky':'ew', 'row':0, 'column':1}
})
ifd.append({'widgetType':Tkinter.Button,
'name':'Profiles_Remove',
'parent':'ProfilesGroup',
'wcfg':{'text':'Remove',
'state':state_GUI,
'command':self.remove_profile},
'gridcfg':{'sticky':'ew', 'row':0, 'column':2}
})
ifd.append({'widgetType':Tkinter.Button,
'name':'Profiles_Run',
'parent':'ProfilesGroup',
'wcfg':{'text':'Run',
'state':state_GUI,
'command':self.apbsOutput},
'gridcfg':{'sticky':'we', 'row':1, 'column':0}
})
ifd.append({'widgetType':Tkinter.Button,
'name':'Profiles_Load',
'parent':'ProfilesGroup',
'wcfg':{'text':'Load',
'command':self.loadProfile},
'gridcfg':{'sticky':'we', 'row':1, 'column':1}
})
ifd.append({'widgetType':Tkinter.Button,
'name':'Profiles_Save',
'parent':'ProfilesGroup',
'wcfg':{'text':'Save',
'state':state_GUI,
'command':self.saveProfile},
'gridcfg':{'sticky':'we', 'row':1, 'column':2}
})
## GRID PAGE
ifd.append({'name':"generalGridGroup",
'widgetType':Pmw.Group,
'parent':'Grid',
'container':{'generalGridGroup':'w.interior()'},
'wcfg':{'tag_text':'General'},
'gridcfg':{'sticky':'wnse'}
})
ifd.append({'name':'generalXLabel',
'widgetType':Tkinter.Label,
'parent':'generalGridGroup',
'wcfg':{'text':'X','fg':'red','font':(ensureFontCase('times'), 15, 'bold')},
'gridcfg':{'row':0, 'column':1}
})
ifd.append({'name':'generalYLabel',
'widgetType':Tkinter.Label,
'parent':'generalGridGroup',
'wcfg':{'text':'Y','fg':'green','font':(ensureFontCase('times'),15,'bold')},
'gridcfg':{'row':0, 'column':2}
})
ifd.append({'name':'generalZLabel',
'widgetType':Tkinter.Label,
'parent':'generalGridGroup',
                        'wcfg':{'text':'Z','fg':'blue','font':(ensureFontCase('times'),15,'bold')},
'gridcfg':{'row':0, 'column':3}
})
ifd.append({'name':'gridPointsLabel',
'widgetType':Tkinter.Label,
'parent':'generalGridGroup',
'wcfg':{'text':'Grid Points:'},
'gridcfg':{'row':1, 'column':0}
})
ifd.append({'widgetType':SliderWidget,
'name':'gridPointsX',
'parent':'generalGridGroup',
'wcfg':{'label':' ',
'minval':9,'maxval':689,
'left':15,
'command':self.gridParamUpdate,
'init':65,'immediate':1,
'sliderType':'int',
'lookup': self.params.GRID_VALUES},
'gridcfg':{'sticky':'wens', 'row':1, 'column':1}
})
ifd.append({'widgetType':SliderWidget,
'name':'gridPointsY',
'parent':'generalGridGroup',
'wcfg':{'label':' ',
'minval':9,'maxval':689,
'left':15,
'command':self.gridParamUpdate,
'init':65,'immediate':1,
'sliderType':'int',
'lookup': self.params.GRID_VALUES},
'gridcfg':{'sticky':'wens', 'row':1, 'column':2}
})
ifd.append({'widgetType':SliderWidget,
'name':'gridPointsZ',
'parent':'generalGridGroup',
'wcfg':{'label':' ',
'minval':9,'maxval':689,
'left':15,
'command':self.gridParamUpdate,
'init':65,'immediate':1,
'sliderType':'int',
'lookup': self.params.GRID_VALUES},
'gridcfg':{'sticky':'wens', 'row':1, 'column':3}
})
ifd.append({'name':"coarseGridGroup",
'widgetType':Pmw.Group,
'parent':'Grid',
'container':{'coarseGridGroup':'w.interior()'},
'wcfg':{'tag_text':'Coarse Grid'},
'gridcfg':{'sticky':'wnse'}
})
ifd.append({'name':'coarseXLabel',
'widgetType':Tkinter.Label,
'parent':'coarseGridGroup',
'wcfg':{'text':'X','fg':'red','font':(ensureFontCase('times'), 15, 'bold')},
'gridcfg':{'row':1, 'column':1}
})
ifd.append({'name':'coarseYLabel',
'widgetType':Tkinter.Label,
'parent':'coarseGridGroup',
'wcfg':{'text':'Y','fg':'green','font':(ensureFontCase('times'),15,'bold')},
'gridcfg':{'row':1, 'column':2}
})
ifd.append({'name':'coarseZLabel',
'widgetType':Tkinter.Label,
'parent':'coarseGridGroup',
'wcfg':{'text':'Z','fg':'blue','font':(ensureFontCase('times'),15, 'bold')},
'gridcfg':{'row':1, 'column':3}
})
ifd.append({'widgetType':Tkinter.Checkbutton,
'name':'showCoarseGrid',
'parent':'coarseGridGroup',
'defaultValue':0,
'wcfg':{'text':'Show Coarse Grid',
'command':self.gridParamUpdate,
'variable':Tkinter.BooleanVar()},
'gridcfg':{'sticky':'w','row':5, 'column':0}
})
ifd.append({'name':'coarseLengthLabel',
'widgetType':Tkinter.Label,
'parent':'coarseGridGroup',
'wcfg':{'text':'Length:'},
'gridcfg':{'row':2, 'column':0}
})
ifd.append({'name':'coarseLengthX',
'widgetType':ThumbWheel,
'tooltip':
"""Right click on the widget to type a value manually""",
'parent':'coarseGridGroup',
'gridcfg':{'row':2, 'column':1, 'sticky':'wnse'},
'wcfg':{'text':None, 'showLabel':1,
'min':2,
'lockBMin':1, 'lockBMax':1,
'lockBIncrement':1,
'value':self.params.coarseLengthX, 'oneTurn':1000,
'type':'float',
'increment':1,
'canvascfg':{'bg':'red'},
'wheelLabcfg1':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'grey'},
'wheelLabcfg2':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'black'},
'callback':self.gridParamUpdate,
'continuous':1,
'wheelPad':1, 'width':100,'height':15}
})
ifd.append({'name':'coarseLengthY',
'widgetType':ThumbWheel,
'tooltip':
"""Right click on the widget to type a value manually""",
'parent':'coarseGridGroup',
'gridcfg':{'row':2, 'column':2, 'sticky':'wnse'},
'wcfg':{ 'showLabel':1,
'min':2,
'lockBMin':1, 'lockBMax':1,
'lockBIncrement':1,
'value':self.params.coarseLengthY, 'oneTurn':1000,
'type':'float',
'increment':1,
'canvascfg':{'bg':'green'},
'wheelLabcfg1':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'grey'},
'wheelLabcfg2':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'black'},
'callback':self.gridParamUpdate,
'continuous':1,
'wheelPad':1, 'width':100,'height':15}
})
ifd.append({'name':'coarseLengthZ',
'widgetType':ThumbWheel,
'tooltip':
"""Right click on the widget to type a value manually""",
'parent':'coarseGridGroup',
'gridcfg':{'row':2, 'column':3, 'sticky':'wnse'},
'wcfg':{'showLabel':1,
'min':2,
'lockBMin':1, 'lockBMax':1,
'lockBIncrement':1,
'value':self.params.coarseLengthZ, 'oneTurn':1000,
'type':'float',
'increment':1,
'canvascfg':{'bg':'blue'},
'wheelLabcfg1':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'grey'},
'wheelLabcfg2':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'black'},
'callback':self.gridParamUpdate,
'continuous':1,
'wheelPad':1, 'width':100,'height':15}
})
ifd.append({'name':'coarseCenterLabel',
'widgetType':Tkinter.Label,
'parent':'coarseGridGroup',
'wcfg':{'text':'Center:'},
'gridcfg':{'row':3, 'column':0}
})
ifd.append({'name':'coarseCenterX',
'widgetType':ThumbWheel,
'tooltip':
"""Right click on the widget to type a value manually""",
'parent':'coarseGridGroup',
'gridcfg':{'row':3, 'column':1, 'sticky':'wnse'},
'wcfg':{ 'showLabel':1,
'min':None,
'lockBMin':1, 'lockBMax':1,
'lockBIncrement':1,
'value':self.params.coarseCenterX, 'oneTurn':1000,
'type':'float',
'increment':1,
'canvascfg':{'bg':'red'},
'wheelLabcfg1':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'grey'},
'wheelLabcfg2':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'black'},
'callback':self.gridParamUpdate,
'continuous':1,
'wheelPad':1, 'width':100,'height':15}
})
ifd.append({'name':'coarseCenterY',
'widgetType':ThumbWheel,
'tooltip':
"""Right click on the widget to type a value manually""",
'parent':'coarseGridGroup',
'gridcfg':{'row':3, 'column':2, 'sticky':'wnse'},
'wcfg':{'showLabel':1,
'min':None,
'lockBMin':1, 'lockBMax':1,
'lockBIncrement':1,
'value':self.params.coarseCenterY, 'oneTurn':1000,
'type':'float',
'increment':1,
'canvascfg':{'bg':'green'},
'wheelLabcfg1':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'grey'},
'wheelLabcfg2':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'black'},
'callback':self.gridParamUpdate,
'continuous':1,
'wheelPad':1, 'width':100,'height':15}
})
ifd.append({'name':'coarseCenterZ',
'widgetType':ThumbWheel,
'tooltip':
"""Right click on the widget to type a value manually""",
'parent':'coarseGridGroup',
'gridcfg':{'row':3, 'column':3, 'sticky':'wnse'},
'wcfg':{'text':None, 'showLabel':1,
'min':None,
'lockBMin':1, 'lockBMax':1,
'lockBIncrement':1,
'value':self.params.coarseCenterZ, 'oneTurn':1000,
'type':'float',
'increment':1,
'canvascfg':{'bg':'blue'},
'wheelLabcfg1':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'grey'},
'wheelLabcfg2':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'black'},
'callback':self.gridParamUpdate,
'continuous':1,
'wheelPad':1, 'width':100,'height':15}
})
ifd.append({'name':'coarseResolutionLabel',
'widgetType':Tkinter.Label,
'parent':'coarseGridGroup',
'wcfg':{'text':'Resolution:'},
'gridcfg':{'row':4, 'column':0}
})
ifd.append({'name':'coarseResolutionX',
'widgetType':Tkinter.Label,
'parent':'coarseGridGroup',
'wcfg':{'text':"%5.3f"%self.coarseResolutionX()},
'gridcfg':{'row':4, 'column':1}
})
ifd.append({'name':'coarseResolutionY',
'widgetType':Tkinter.Label,
'parent':'coarseGridGroup',
'wcfg':{'text':"%5.3f"%self.coarseResolutionY()},
'gridcfg':{'row':4, 'column':2}
})
ifd.append({'name':'coarseResolutionZ',
'widgetType':Tkinter.Label,
'parent':'coarseGridGroup',
'wcfg':{'text':"%5.3f"%self.coarseResolutionZ()},
'gridcfg':{'row':4, 'column':3}
})
ifd.append({'widgetType':Tkinter.Button,
'name':'autocenterCoarseGrid',
'parent':'coarseGridGroup',
'wcfg':{'text':'Autocenter',
'command':self.autocenterCoarseGrid},
'gridcfg':{'sticky':'ew', 'row':5, 'column':1}
})
ifd.append({'widgetType':Tkinter.Button,
'name':'autosizeCoarseGrid',
'parent':'coarseGridGroup',
'wcfg':{'text':'Autosize',
'command':self.autosizeCoarseGrid},
'gridcfg':{'sticky':'ew', 'row':5, 'column':2}
})
ifd.append({'name':"fineGridGroup",
'widgetType':Pmw.Group,
'parent':'Grid',
'container':{'fineGridGroup':'w.interior()'},
'wcfg':{'tag_text':'Fine Grid'},
'gridcfg':{'sticky':'wnse'}
})
ifd.append({'name':'fineXLabel',
'widgetType':Tkinter.Label,
'parent':'fineGridGroup',
'wcfg':{'text':'X','fg':'red','font':(ensureFontCase('times'), 15, 'bold')},
'gridcfg':{'row':1, 'column':1}
})
ifd.append({'name':'fineYLabel',
'widgetType':Tkinter.Label,
'parent':'fineGridGroup',
'wcfg':{'text':'Y','fg':'green','font':(ensureFontCase('times'),15,'bold')},
'gridcfg':{'row':1, 'column':2}
})
ifd.append({'name':'fineZLabel',
'widgetType':Tkinter.Label,
'parent':'fineGridGroup',
'wcfg':{'text':'Z','fg':'blue','font':(ensureFontCase('times'),15, 'bold')},
'gridcfg':{'row':1, 'column':3}
})
ifd.append({'widgetType':Tkinter.Checkbutton,
'name':'showFineGrid',
'parent':'fineGridGroup',
'defaultValue':0,
'wcfg':{'text':'Show Fine Grid',
'command':self.gridParamUpdate,
'variable':Tkinter.BooleanVar()},
'gridcfg':{'sticky':'w','row':5, 'column':0}
})
ifd.append({'name':'fineLengthLabel',
'widgetType':Tkinter.Label,
'parent':'fineGridGroup',
'wcfg':{'text':'Length:'},
'gridcfg':{'row':2, 'column':0}
})
ifd.append({'name':'fineLengthX',
'widgetType':ThumbWheel,
'tooltip':
"""Right click on the widget to type a value manually""",
'parent':'fineGridGroup',
'gridcfg':{'row':2, 'column':1, 'sticky':'wnse'},
'wcfg':{'showLabel':1,
'min':2,
'lockBMin':1, 'lockBMax':1,
'lockBIncrement':1,
'value':self.params.fineLengthX, 'oneTurn':1000,
'type':'float',
'increment':.25,
'canvascfg':{'bg':'red'},
'wheelLabcfg1':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'grey'},
'wheelLabcfg2':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'black'},
'callback':self.gridParamUpdate,
'continuous':1,
'wheelPad':1, 'width':100,'height':15}
})
ifd.append({'name':'fineLengthY',
'widgetType':ThumbWheel,
'tooltip':
"""Right click on the widget to type a value manually""",
'parent':'fineGridGroup',
'gridcfg':{'row':2, 'column':2, 'sticky':'wnse'},
'wcfg':{'showLabel':1,
'min':2,
'lockBMin':1, 'lockBMax':1,
'lockBIncrement':1,
'value':self.params.fineLengthY, 'oneTurn':1000,
'type':'float',
'increment':.25,
'canvascfg':{'bg':'green'},
'wheelLabcfg1':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'grey'},
'wheelLabcfg2':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'black'},
'callback':self.gridParamUpdate,
'continuous':1,
'wheelPad':1, 'width':100,'height':15}
})
ifd.append({'name':'fineLengthZ',
'widgetType':ThumbWheel,
'tooltip':
"""Right click on the widget to type a value manually""",
'parent':'fineGridGroup',
'gridcfg':{'row':2, 'column':3, 'sticky':'wnse'},
'wcfg':{'showLabel':1,
'min':2,
'lockBMin':1, 'lockBMax':1,
'lockBIncrement':1,
'value':self.params.fineLengthZ, 'oneTurn':1000,
'type':'float',
'increment':.25,
'canvascfg':{'bg':'blue'},
'wheelLabcfg1':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'grey'},
'wheelLabcfg2':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'black'},
'callback':self.gridParamUpdate,
'continuous':1,
'wheelPad':1, 'width':100,'height':15}
})
ifd.append({'name':'fineCenterLabel',
'widgetType':Tkinter.Label,
'parent':'fineGridGroup',
'wcfg':{'text':'Center:'},
'gridcfg':{'row':3, 'column':0}
})
ifd.append({'name':'fineCenterX',
'widgetType':ThumbWheel,
'tooltip':
"""Right click on the widget to type a value manually""",
'parent':'fineGridGroup',
'gridcfg':{'row':3, 'column':1, 'sticky':'wnse'},
'wcfg':{'showLabel':1,
'min':None,
'lockBMin':1, 'lockBMax':1,
'lockBIncrement':1,
'value':self.params.fineCenterX, 'oneTurn':1000,
'type':'float',
'increment':.25,
'canvascfg':{'bg':'red'},
'wheelLabcfg1':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'grey'},
'wheelLabcfg2':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'black'},
'callback':self.gridParamUpdate,
'continuous':1,
'wheelPad':1, 'width':100,'height':15}
})
ifd.append({'name':'fineCenterY',
'widgetType':ThumbWheel,
'tooltip':
"""Right click on the widget to type a value manually""",
'parent':'fineGridGroup',
'gridcfg':{'row':3, 'column':2, 'sticky':'wnse'},
'wcfg':{'showLabel':1,
'min':None,
'lockBMin':1, 'lockBMax':1,
'lockBIncrement':1,
'value':self.params.fineCenterY, 'oneTurn':1000,
'type':'float',
'increment':.25,
'canvascfg':{'bg':'green'},
'wheelLabcfg1':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'grey'},
'wheelLabcfg2':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'black'},
'callback':self.gridParamUpdate,
'continuous':1,
'wheelPad':1, 'width':100,'height':15}
})
ifd.append({'name':'fineCenterZ',
'widgetType':ThumbWheel,
'tooltip':
"""Right click on the widget to type a value manually""",
'parent':'fineGridGroup',
'gridcfg':{'row':3, 'column':3, 'sticky':'wnse'},
'wcfg':{'showLabel':1,
'min':None,
'lockBMin':1, 'lockBMax':1,
'lockBIncrement':1,
'value':self.params.fineCenterZ, 'oneTurn':1000,
'type':'float',
'increment':.25,
'canvascfg':{'bg':'blue'},
'wheelLabcfg1':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'grey'},
'wheelLabcfg2':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'black'},
'callback':self.gridParamUpdate,
'continuous':1,
'wheelPad':1, 'width':100,'height':15}
})
ifd.append({'name':'fineResolutionLabel',
'widgetType':Tkinter.Label,
'parent':'fineGridGroup',
'wcfg':{'text':'Resolution:'},
'gridcfg':{'row':4, 'column':0}
})
ifd.append({'name':'fineResolutionX',
'widgetType':Tkinter.Label,
'parent':'fineGridGroup',
'wcfg':{'text':"%5.3f"%self.fineResolutionX()},
'gridcfg':{'row':4, 'column':1}
})
ifd.append({'name':'fineResolutionY',
'widgetType':Tkinter.Label,
'parent':'fineGridGroup',
'wcfg':{'text':"%5.3f"%self.fineResolutionY()},
'gridcfg':{'row':4, 'column':2}
})
ifd.append({'name':'fineResolutionZ',
'widgetType':Tkinter.Label,
'parent':'fineGridGroup',
'wcfg':{'text':"%5.3f"%self.fineResolutionZ()},
'gridcfg':{'row':4, 'column':3}
})
ifd.append({'widgetType':Tkinter.Button,
'name':'autocenterFineGrid',
'parent':'fineGridGroup',
'wcfg':{'text':'Autocenter',
'command':self.autocenterFineGrid},
'gridcfg':{'sticky':'ew', 'row':5, 'column':1}
})
ifd.append({'widgetType':Tkinter.Button,
'name':'autosizeFineGrid',
'parent':'fineGridGroup',
'wcfg':{'text':'Autosize',
'command':self.autosizeFineGrid},
'gridcfg':{'sticky':'ew', 'row':5, 'column':2}
})
ifd.append({'name':"systemResourcesGroup",
'widgetType':Pmw.Group,
'parent':'Grid',
'container':{'systemResourcesGroup':'w.interior()'},
'wcfg':{'tag_text':'System Resources'},
'gridcfg':{'sticky':'wnse'}
})
ifd.append({'name':'gridPointsLabel',
'widgetType':Tkinter.Label,
'parent':'systemResourcesGroup',
'wcfg':{'text':'Total grid points: '},
'gridcfg':{'row':0, 'column':0, 'sticky':'e'}
})
ifd.append({'name':'gridPointsNumberLabel',
'widgetType':Tkinter.Label,
'parent':'systemResourcesGroup',
'wcfg':{'text':"%d"%self.totalGridPoints()},
'gridcfg':{'row':0, 'column':1, 'sticky':'w'}
})
ifd.append({'name':'mallocLabel',
'widgetType':Tkinter.Label,
'parent':'systemResourcesGroup',
'wcfg':{'text':'Memory to be allocated (MB): '},
'gridcfg':{'row':1, 'column':0, 'sticky':'e'}
})
ifd.append({'name':'mallocSizeLabel',
'widgetType':Tkinter.Label,
'parent':'systemResourcesGroup',
'wcfg':{'text':"%5.3f"%self.memoryToBeAllocated()},
'gridcfg':{'row':1, 'column':1, 'sticky':'w'}
})
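## Note added for clarity (not part of the original Pmv sources): each
## ifd.append() call above registers a widget description consumed by
## mglutil's InputForm machinery -- 'widgetType' names the class to
## instantiate, 'wcfg' is passed to its constructor, 'gridcfg' to
## Tkinter's grid(), and 'parent'/'container' nest widgets inside
## Pmw.Group panes. A minimal sketch of the same pattern:
##
##   ifd.append({'name':'exampleLabel',           # lookup key in entryByName
##               'widgetType':Tkinter.Label,      # class to instantiate
##               'parent':'fineGridGroup',        # enclosing container
##               'wcfg':{'text':'Example:'},      # constructor kwargs
##               'gridcfg':{'row':6, 'column':0}  # grid() kwargs
##               })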
## PHYSICS PAGE
ifd.append({'name':'parametersGroup',
'widgetType':Pmw.Group,
'parent':'Physics',
'container':{'parametersGroup':'w.interior()'},
'wcfg':{'tag_text':"Parameters"},
'gridcfg':{'sticky':'snwe'}
})
ifd.append({'name':'proteinDielectricLabel',
'widgetType':Tkinter.Label,
'parent':'parametersGroup',
'wcfg':{'text':'Protein dielectric:'},
'gridcfg':{'row':0, 'column':0, 'sticky':'e'}
})
ifd.append({'widgetType':Pmw.EntryField,
'name':'proteinDielectric',
'parent':'parametersGroup',
'wcfg':{'command':self.physicsParamUpdate,
'value':self.params.proteinDielectric,
'validate':{'validator':'real', 'min':0}},
'gridcfg':{'sticky':'ew', 'row':0, 'column':1}
})
ifd.append({'name':'solventDielectricLabel',
'widgetType':Tkinter.Label,
'parent':'parametersGroup',
'wcfg':{'text':'Solvent dielectric:'},
'gridcfg':{'row':1, 'column':0, 'sticky':'e'}
})
ifd.append({'widgetType':Pmw.EntryField,
'name':'solventDielectric',
'parent':'parametersGroup',
'wcfg':{'command':self.physicsParamUpdate,
'value':self.params.solventDielectric,
'validate':{'validator':'real', 'min':0}},
'gridcfg':{'sticky':'nsew', 'row':1, 'column':1}
})
ifd.append({'name':'solventRadiusLabel',
'widgetType':Tkinter.Label,
'parent':'parametersGroup',
'wcfg':{'text':'Solvent radius (Angstroms):'},
'gridcfg':{'row':2, 'column':0, 'sticky':'e'}
})
ifd.append({'widgetType':Pmw.EntryField,
'name':'solventRadius',
'parent':'parametersGroup',
'wcfg':{'command':self.physicsParamUpdate,
'value':self.params.solventRadius,
'validate':{'validator':'real', 'min':0}},
'gridcfg':{'sticky':'nsew', 'row':2, 'column':1}
})
ifd.append({'name':'systemTemperatureLabel',
'widgetType':Tkinter.Label,
'parent':'parametersGroup',
'wcfg':{'text':'System temperature (Kelvin):'},
'gridcfg':{'row':3, 'column':0, 'sticky':'e'}
})
ifd.append({'widgetType':Pmw.EntryField,
'name':'systemTemperature',
'parent':'parametersGroup',
'wcfg':{'command':self.physicsParamUpdate,
'value':self.params.systemTemperature,
'validate':{'validator':'real', 'min':0}},
'gridcfg':{'sticky':'nsew', 'row':3, 'column':1}
})
ifd.append({'name':'ionsGroup',
'widgetType':Pmw.Group,
'parent':'Physics',
'container':{'ionsGroup':'w.interior()'},
'wcfg':{'tag_text':"Ions"},
'gridcfg':{'sticky':'wnse'}
})
ifd.append({'widgetType':Pmw.Group,
'name':'SaltGroup',
'container':{'SaltGroup':'w.interior()'},
'parent':'ionsGroup',
'wcfg':{
'tag_pyclass':Tkinter.Checkbutton,
'tag_text':'Salt',
'tag_command':self.SaltUpdate,
'tag_variable': self.salt_var,
},
})
# ifd.append({'name':'ionConcentrationLabel',
# 'widgetType':Tkinter.Label,
# 'wcfg':{'text':'Salt contains ions with radius 2 (Angstrom), and charges +1(e) and -1(e)'},
# 'parent':'SaltGroup',
# 'gridcfg':{'row':0, 'column':0,'columnspan':2, 'sticky':'we'}
# })
ifd.append({'name':'ionConcentrationLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Concentration (M):'},
'parent':'SaltGroup',
'gridcfg':{'row':1, 'column':0, 'sticky':'e'}
})
ifd.append({'widgetType':ThumbWheel,
'name':'saltConcentration',
'parent':'SaltGroup',
'wcfg':{'text':None, 'showLabel':1,
'min':0,
'value':0.01, 'oneTurn':0.1,
'type':'float',
'increment':0.01,
'wheelLabcfg1':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'grey'},
'wheelLabcfg2':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'black'},
'continuous':1,
'wheelPad':1, 'width':150,'height':14},
'gridcfg':{'row':1, 'column':1, 'sticky':'w'}
})
ifd.append({'name':'ionsButtons',
'widgetType':Pmw.ButtonBox,
'parent':'ionsGroup',
'wcfg':{},
'componentcfg':[{'name':'Add More...',
'cfg':{'command':self.addIon}},
{'name':'Remove', 'cfg':{'command':self.removeIon}}]
})
ifd.append({'name':'ionsListLabel',
'widgetType':Tkinter.Label,
'parent':'ionsGroup',
'wcfg':{'text':'Charge, Concentration, Radius'}
})
ifd.append({'widgetType':Pmw.ScrolledListBox,
'name':'ionsList',
'parent':'ionsGroup',
'wcfg':{}
})
## WEB SERVICES PAGE
if APBSservicesFound:
ifd.append({'name':"APBSservicesGroup",
'widgetType':Pmw.Group,
'parent':'Web Service',
'container':{'APBSservicesGroup':'w.interior()'},
'wcfg':{'tag_text':'APBS Web Services'},
'gridcfg':{'sticky':'wen'}
})
ifd.append({'widgetType':Tkinter.Button,
'name':'WS_Run',
'parent':'APBSservicesGroup',
'wcfg':{'text':'Run APBS Remote',
'state':state_GUI,
'command':self.apbsRunRemote},
'gridcfg':{'sticky':'ew', 'row':0, 'column':0}
})
ifd.append({'widgetType':Pmw.ComboBox,
'name':'web service address',
'parent':'APBSservicesGroup',
'wcfg':{'scrolledlist_items':
('http://ws.nbcr.net/opal2/services/ApbsOpalService',),
'selectioncommand':self.toggle_usrpass,
'listheight':100,
'dropdown':1, 'history':1, 'autoclear':1},
'gridcfg':{'sticky':'ew', 'row':0, 'column':1}
})
ifd.append({'widgetType':Tkinter.Label,
'name':'UserName_Label',
'parent':'APBSservicesGroup',
'wcfg':{'text':'User Name'},
'gridcfg':{'sticky':'e', 'row':1, 'column':0}
})
ifd.append({'widgetType':Tkinter.Entry,
'name':'UserName_Entry',
'parent':'APBSservicesGroup',
'wcfg':{},
'gridcfg':{'sticky':'ew', 'row':1, 'column':1}
})
ifd.append({'widgetType':Tkinter.Label,
'name':'Password_Label',
'parent':'APBSservicesGroup',
'wcfg':{'text':'Password'},
'gridcfg':{'sticky':'e', 'row':2, 'column':0}
})
ifd.append({'widgetType':Tkinter.Entry,
'name':'Password_Entry',
'parent':'APBSservicesGroup',
'wcfg':{'show':'*'},
'gridcfg':{'sticky':'ew', 'row':2, 'column':1}
})
ifd.append({'widgetType':Tkinter.Label,
'name':'Remember_Label',
'parent':'APBSservicesGroup',
'wcfg':{'text':'Remember User Name and Password'},
'gridcfg':{'sticky':'e', 'row':3, 'column':0}
})
ifd.append({'widgetType':Tkinter.Checkbutton,
'name':'Remember_Checkbutton',
'parent':'APBSservicesGroup',
'variable':self.RememberLogin_var,
'gridcfg':{'sticky':'w', 'row':3, 'column':1}
})
# self.Parallel_var =Tkinter.BooleanVar()
# self.Parallel_var.set(0)
# ifd.append({'widgetType':Pmw.Group,
# 'name':'ParallelGroup',
# 'container':{'ParallelGroup':'w.interior()'},
# 'parent':'APBSservicesGroup',
# 'wcfg':{
# 'tag_pyclass':Tkinter.Checkbutton,
# 'tag_text':'Parallel',
# 'tag_command':self.ParallelParamUpdate,
# 'tag_variable': self.Parallel_var,
# },
# 'gridcfg':{'sticky':'new','row':4, 'column':0,'columnspan':2
# , 'pady':'10' }
# })
# ifd.append({'widgetType':Pmw.EntryField,
# 'name':'npx',
# 'parent':'ParallelGroup',
# 'state':'disabled',
# 'wcfg':{
# 'validate':{'validator':'integer', 'min':1},
# 'label_text':'The number of processors in the X direction (npx):',
# 'labelpos':'w',
# 'value':2,},
# 'gridcfg':{ 'row':0, 'column':0}
# })
# ifd.append({'widgetType':Pmw.EntryField,
# 'name':'npy',
# 'parent':'ParallelGroup',
# 'wcfg':{
# 'validate':{'validator':'integer', 'min':1},
# 'label_text':'The number of processors in the Y direction (npy):',
# 'labelpos':'w',
# 'value':1,},
# 'gridcfg':{'row':1, 'column':0}
# })
# ifd.append({'widgetType':Pmw.EntryField,
# 'name':'npz',
# 'parent':'ParallelGroup',
#
# 'wcfg':{
# 'validate':{'validator':'integer', 'min':1},
# 'label_text':'The number of processors in the Z direction (npz):',
# 'labelpos':'w',
# 'value':1,
# },
# 'gridcfg':{'row':2, 'column':0}
# })
# ifd.append({'widgetType':Pmw.EntryField,
# 'name':'ofrac',
# 'parent':'ParallelGroup',
# 'wcfg':{
# 'validate':{'validator':'real', 'min':0,'max':1},
# 'label_text':'Overlap factor (ofrac); a value between 0 and 1 :',
# 'labelpos':'w',
# 'value':0.1,
# },
# 'gridcfg':{'row':3, 'column':0}
# })
ifd.append({'name':'APBSservicesLabel1',
'widgetType':Tkinter.Label,
'parent':'APBSservicesGroup',
'wcfg':{'text':''},
'gridcfg':{'sticky':'ensw', 'row':5,'column':0,
'columnspan':2}
})
ifd.append({'name':'APBSservicesLabel2',
'widgetType':Tkinter.Label,
'parent':'APBSservicesGroup',
'wcfg':{'text':''},
'gridcfg':{'sticky':'ensw', 'row':6,'column':0,
'columnspan':2}
})
ifd.append({'name':'APBSservicesLabel3',
'widgetType':Tkinter.Label,
'parent':'APBSservicesGroup',
'wcfg':{'text':''},
'gridcfg':{'sticky':'ensw', 'row':7,'column':0,
'columnspan':2}
})
ifd.append({'name':'APBSservicesLabel4',
'widgetType':Tkinter.Label,
'parent':'APBSservicesGroup',
'wcfg':{'text':''},
'gridcfg':{'sticky':'ensw', 'row':8,'column':0,
'columnspan':2}
})
ifd.append({'name':'WS_ProgressBar',
'widgetType':Tkinter.Frame,
'parent':'APBSservicesGroup',
'wcfg':{'height':30},
'gridcfg':{'sticky':'ew', 'row':9,'column':0,
'columnspan':2}
})
ifd.append({'name':'APBS_WS_DX_Label',
'widgetType':Tkinter.Label,
'parent':'APBSservicesGroup',
'wcfg':{'text':''},
'gridcfg':{'sticky':'ensw', 'row':10,'column':0,
'columnspan':2}
})
else:
ifd.append({'name':'WS_Not_Found',
'parent':'Web Service',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Error importing APBS Web Services.',
'bg':'Red'},
})
ifd.append({'name':'WS_install',
'parent':'Web Service',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Please make sure that ZSI and PyXML packages are properly installed.'},
})
ifd.append({'name':'WS_http',
'parent':'Web Service',
'widgetType':Tkinter.Label,
'wcfg':{'text':'http://nbcr.sdsc.edu/services/apbs/apbs-py.html',
'fg':'Blue','cursor':'hand1'},
})
self.ifd = ifd
return ifd
def SaltUpdate(self):
"Toggles ParallelGroup widget"
self.cmdForms['default'].descr.entryByName['SaltGroup']['widget'].\
toggle()
def changeMenuState(self, state):
"Updates the state of DisplayIsocontours, MapPotential2MSMS and SisplayOrthoSlice manues."
#change_Menu_state(self.vf.APBSDisplayIsocontours, state)
if self.vf.hasGui:
if hasattr(self.vf, 'APBSMapPotential2MSMS'):
change_Menu_state(self.vf.APBSMapPotential2MSMS, state)
#change_Menu_state(self.vf.APBSDisplayOrthoSlice, state)
def ParallelParamUpdate(self):
"Toggles ParallelGroup widget"
self.cmdForms['default'].descr.entryByName['ParallelGroup']['widget'].\
toggle()
def WS_http(self, event):
"Opens webbrowser at http://nbcr.sdsc.edu/services/apbs/apbs-py.html"
import webbrowser
webbrowser.open('http://nbcr.sdsc.edu/services/apbs/apbs-py.html')
def toggle_usrpass(self, event):
"Toggles User Name and Parssword entry and label"
descr = self.cmdForms['default'].descr
address = descr.entryByName['web service address']['widget'].get()
address = address.strip()
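# Comment added for clarity: credential widgets are only shown for
# secured endpoints; any address not starting with 'https://' hides them.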
if address.find('https://') != 0:
descr.entryByName['UserName_Label']['widget'].grid_forget()
descr.entryByName['UserName_Entry']['widget'].grid_forget()
descr.entryByName['Password_Label']['widget'].grid_forget()
descr.entryByName['Password_Entry']['widget'].grid_forget()
descr.entryByName['Remember_Label']['widget'].grid_forget()
descr.entryByName['Remember_Checkbutton']['widget'].grid_forget()
else:
apply(descr.entryByName['UserName_Label']['widget'].grid, () ,
descr.entryByName['UserName_Label']['gridcfg'])
apply(descr.entryByName['UserName_Entry']['widget'].grid, () ,
descr.entryByName['UserName_Entry']['gridcfg'])
apply(descr.entryByName['Password_Label']['widget'].grid, () ,
descr.entryByName['Password_Label']['gridcfg'])
apply(descr.entryByName['Password_Entry']['widget'].grid, () ,
descr.entryByName['Password_Entry']['gridcfg'])
apply(descr.entryByName['Remember_Label']['widget'].grid, () ,
descr.entryByName['Remember_Label']['gridcfg'])
apply(descr.entryByName['Remember_Checkbutton']['widget'].grid, () ,
descr.entryByName['Remember_Checkbutton']['gridcfg'])
cascadeName = "Electrostatics"
APBSSetupGUI = CommandGUI()
APBSSetupGUI.addMenuCommand('menuRoot','Compute','Setup',
cascadeName=cascadeName, separatorAbove=1)
class APBSRun(MVCommand):
"""APBSRun runs Adaptive Poisson-Boltzmann Solver (APBS)\n
\nPackage : Pmv
\nModule : APBSCommands
\nClass : APBSRun
\nCommand name : APBSRun
\nSynopsis:\n
None <--- APBSRun(molecule1, molecule2=None, _complex=None, APBSParamName = "Default", **kw)
\nOptional Arguments:\n
molecule1 - name of the first molecule
molecule2 - name of the second molecule
_complex - name of the complex
APBSParamName - Name of the key in mol.APBSParams dictionary
"""
def onAddCmdToViewer(self):
"""Called when APBSRun is loaded"""
if self.vf.hasGui:
change_Menu_state(self, 'disabled')
def onAddObjectToViewer(self, object):
"""Called when object is added to viewer"""
if self.vf.hasGui:
change_Menu_state(self, 'normal')
def onRemoveObjectFromViewer(self, object):
"""Called when object is removed from viewer"""
if self.vf.hasGui:
if len(self.vf.Mols) == 0:
change_Menu_state(self, 'disabled')
def guiCallback(self):
"""GUI callback"""
if self.vf.APBSSetup.params.projectFolder == 'apbs-project':
self.doit()
else:
self.doit(self.vf.APBSSetup.params)
def __call__(self, molecule1=None, molecule2=None, _complex=None,
APBSParamName='Default', blocking=False, **kw):
"""None <--- APBSRun(nodes, **kw)\n
\nOptional Arguments :\n
molecule1 - molecule1 as MolKit object or a string
molecule2 - name of the molecule2
complex - name of the complex
APBSParamName = Name of the key in mol.APBSParams dictionary
"""
if not molecule1 and len(self.vf.Mols) == 1:
molecule1 = self.vf.Mols[0].name
if not molecule1:
#tkMessageBox.showinfo("No Molecule Selected", "Please use Compute -> Electrostatics -> Setup and select a molecule.")
return 'ERROR'
mol1 = self.vf.expandNodes(molecule1)
assert isinstance(mol1, MoleculeSet)
assert len(mol1) == 1
if not mol1: return 'ERROR'
molecule1 = mol1[0].name
if molecule2:
mol2 = self.vf.expandNodes(molecule2)
assert isinstance(mol2, MoleculeSet)
assert len(mol2) == 1
if not mol2: return 'ERROR'
molecule2 = mol2[0].name
params = self.vf.APBSSetup.params
if molecule1:
params.projectFolder=os.path.join(os.getcwd(),
"apbs-"+molecule1)
params.molecule1Path = \
self.vf.APBSSetup.moleculeListSelect(molecule1)
self.vf.APBSSetup.mol1Name = molecule1
if not params.molecule1Path:
return
if molecule2:
if not _complex:
import warnings
warnings.warn("Complex is missing!")
return
params.projectFolder += "_"+molecule2+"_"+_complex
params.molecule2Path = \
self.vf.APBSSetup.moleculeListSelect(molecule2)
self.vf.APBSSetup.mol2Name = molecule2
params.complexPath = \
self.vf.APBSSetup.moleculeListSelect(_complex)
self.vf.APBSSetup.complexName = _complex
if not os.path.exists(params.projectFolder):
try:
os.mkdir(params.projectFolder)
except:
from user import home
tmp = os.path.split(params.projectFolder)
params.projectFolder = home + os.sep + tmp[-1]
if not os.path.exists(params.projectFolder):
os.mkdir(params.projectFolder)
try:
open(params.projectFolder+os.sep+'io.mc','w')
except:
from user import home
tmp = os.path.split(params.projectFolder)
params.projectFolder = home + os.sep + tmp[-1]
if not os.path.exists(params.projectFolder):
os.mkdir(params.projectFolder)
if molecule2:
abs_path = os.path.join(params.projectFolder,
molecule2+".pqr")
if not os.path.exists(abs_path) or \
self.vf.APBSPreferences.overwrite_pqr:
mol = self.vf.getMolFromName(molecule2)
self.vf.APBSSetup.mol2Name = molecule2
if hasattr(mol,'flag_copy_pqr') and mol.flag_copy_pqr:
self.copyFix(params.molecule2Path, abs_path)
else:
shutil.move(params.molecule2Path, abs_path)
mol.parser.filename = abs_path
params.molecule2Path = molecule2+".pqr"
params.molecule2Path = \
os.path.split(params.molecule2Path)[-1]
abs_path = os.path.join(params.projectFolder, _complex +".pqr")
mol = self.vf.getMolFromName(_complex)
self.vf.APBSSetup.complexName = _complex
if not os.path.exists(abs_path) or \
self.vf.APBSPreferences.overwrite_pqr:
if hasattr(mol,'flag_copy_pqr') and mol.flag_copy_pqr:
self.copyFix(params.complexPath, abs_path)
else:
shutil.move(params.complexPath, abs_path)
mol.parser.filename = abs_path
params.complexPath = _complex +".pqr"
params.complexPath = os.path.split(params.complexPath)[-1]
abs_path = os.path.join(params.projectFolder, molecule1+".pqr")
if not os.path.exists(abs_path) or \
self.vf.APBSPreferences.overwrite_pqr:
mol = self.vf.getMolFromName(molecule1.replace('-','_'))
self.vf.APBSSetup.mol1Name = mol.name
if hasattr(mol,'flag_copy_pqr') and mol.flag_copy_pqr:
self.copyFix(params.molecule1Path,abs_path)
else:
shutil.move(params.molecule1Path,abs_path)
mol.parser.filename = abs_path
self.vf.APBSPreferences.overwrite_pqr = False
params.molecule1Path = molecule1+".pqr"
params.molecule1Path = os.path.split(params.molecule1Path)[-1]
if self.vf.APBSSetup.cmdForms.has_key('default'):
self.vf.APBSSetup.cmdForms['default'].descr.entryByName\
['molecule1']['widget'].\
setentry(params.molecule1Path)
if APBSParamName != 'Default':
mol = self.vf.getMolFromName(molecule1.replace('-','_'))
self.vf.APBSSetup.mol1Name = mol.name
params = mol.APBSParams[APBSParamName]
dest_path = os.path.join(params.projectFolder,
APBSParamName+'_potential.dx')
pickle_name = os.path.join(params.projectFolder,
APBSParamName+".apbs.pf")
if os.path.exists(pickle_name):
fp = open(pickle_name, 'r')
tmp_params = pickle.load(fp)
fp.close()
flag_run = True # flags whether APBS has already been run
                # with the same parameters
for key in tmp_params.__dict__:
if key != 'ions':
if tmp_params.__dict__[key] != \
params.__dict__[key]:
flag_run = False
break
else:
if len(tmp_params.ions) != \
len(params.ions):
flag_run = False
break
for i in range(len(tmp_params.ions)):
if tmp_params.ions[i].charge != \
params.ions[i].charge:
flag_run = False
break
if tmp_params.ions[i].concentration != \
params.ions[i].concentration:
flag_run = False
break
if tmp_params.ions[i].radius != \
params.ions[i].radius:
flag_run = False
break
if flag_run == True:
answer = False
if self.vf.APBSSetup.cmdForms.has_key('default') and \
self.vf.APBSSetup.cmdForms['default'].f.winfo_toplevel().\
wm_state() == 'normal':
answer = tkMessageBox.askyesno("WARNING",\
"APBS with the same parameters has been already run."+
"\n\nWould you like to continue?",
parent=self.vf.APBSSetup.cmdForms['default'].root)
else:
answer = tkMessageBox.askyesno("WARNING",\
"APBS with the same parameters has been already run."+
"\n\nWould you like to continue?")
if answer != True:
#self.vf.APBSSetup.loadProfile(pickle_name)
return
self.vf.APBSSetup.refreshCalculationPage()
if not self.vf.APBSSetup.flag_grid_changed:
self.vf.APBSSetup.autocenterCoarseGrid()
self.vf.APBSSetup.autosizeCoarseGrid()
self.vf.APBSSetup.autocenterFineGrid()
self.vf.APBSSetup.autosizeFineGrid()
self.vf.APBSSetup.refreshGridPage()
if self.doitWrapper(molecule1, molecule2, _complex,
APBSParamName=APBSParamName,
blocking=blocking) == 'error':
self.vf.APBSSetup.showForm('default', \
modal=0, blocking=1,initFunc=self.vf.APBSSetup.refreshAll)
return
def copyFix(self, fileSource, fileDest):
"""Copies a file from fileSource to fileDest and fixes end-of-lines"""
newlines = []
for line in open(fileSource, 'rb').readlines():
if line[-2:] == '\r\n':
line = line[:-2] + '\n'
newlines.append(line)
open(fileDest, 'w').writelines(newlines)
def doit(self, molecule1=None, molecule2=None, _complex=None,
APBSParamName = 'Default', blocking=False):
"""doit function"""
return self.vf.APBSSetup.apbsOutput(molecule1, molecule2, _complex,
blocking=blocking)
APBSRun_GUI = CommandGUI()
APBSRun_GUI.addMenuCommand('menuRoot', 'Compute',
'Compute Potential Using APBS', cascadeName=cascadeName)
class APBSMap_Potential_to_MSMS(MVCommand):
"""APBSMapPotential2MSMS maps APBS Potential into MSMS Surface\n
\nPackage : Pmv
\nModule : APBSCommands
\nClass : APBSMap_Potential_to_MSMS
\nCommand name : APBSMapPotential2MSMS
\nSynopsis:\n
None <--- APBSMapPotential2MSMS(mol = mol, potential = potential)
\nRequired Arguments:\n
mol = name of the molecule\n
potential = string representing where potential.dx is located\n"""
def onAddCmdToViewer(self):
"""Called when added to viewer"""
if self.vf.hasGui:
change_Menu_state(self, 'disabled')
self.custom_quality = 5.0
def onAddObjectToViewer(self, object):
"""Called when object is added to viewer"""
if self.vf.hasGui:
change_Menu_state(self, 'normal')
def onRemoveObjectFromViewer(self, object):
"""Called when object is removed from viewer"""
if self.vf.hasGui:
if len(self.vf.Mols) == 0:
change_Menu_state(self, 'disabled')
cmap = self.vf.GUI.VIEWER.FindObjectByName('root|cmap')
if cmap:
cmap.Set(visible=False)
self.vf.GUI.VIEWER.Redraw()
def doit(self, mol = None, potential = None):
"""doit function for APBSMap_Potential_to_MSMS"""
self.vf.GUI.ROOT.config(cursor='watch')
self.vf.GUI.VIEWER.master.config(cursor='watch')
self.vf.GUI.MESSAGE_BOX.tx.component('text').config(cursor='watch')
from MolKit.molecule import Molecule
if isinstance(mol, Molecule):
mol = self.vf.getMolFromName(mol.name.replace('-','_'))
elif type(mol) == str:
mol = self.vf.getMolFromName(mol.replace('-','_'))
else:
import warnings
warnings.warn("APBSMap_Potential_to_MSMS doit(): mol should either be a molecule object or molecule name.")
return
self.vf.assignAtomsRadii(mol, overwrite=True,log=False)
mol.allAtoms._radii = {}
for atom in mol.allAtoms:
if hasattr(atom,'pqrRadius'):
atom._radii['pqrRadius'] = atom.pqrRadius
if hasattr(atom,'vdwRadius'):
atom._radii['vdwRadius'] = atom.vdwRadius
if hasattr(atom,'covalentRadius'):
atom._radii['covalentRadius'] = atom.covalentRadius
if self.quality == 'low':
self.vf.computeMSMS(mol, density = 1.0, log = False)
elif self.quality == 'medium':
self.vf.computeMSMS(mol, density = 3.0, log = False)
elif self.quality == 'high':
self.vf.computeMSMS(mol, density = 6.0, log = False)
else:
self.vf.computeMSMS(mol, density = self.custom_quality, log = False)
if not self.vf.commands.has_key('vision'):
self.vf.browseCommands('visionCommands',log=False)
g = mol.geomContainer.geoms['MSMS-MOL']
g.Set(inheritSharpColorBoundaries = False, sharpColorBoundaries =False)
self.vf.GUI.ROOT.config(cursor='watch')
self.vf.GUI.VIEWER.master.config(cursor='watch')
self.vf.GUI.MESSAGE_BOX.tx.component('text').config(cursor='watch')
if self.vf.vision.ed is None:
self.vf.vision(log=False)
self.vf.vision(log=False)
self.APBS_MSMS_Net = self.vf.vision.ed.getNetworkByName("APBSPot2MSMS")
if not self.APBS_MSMS_Net:
from mglutil.util.packageFilePath import findFilePath
Network_Path = findFilePath("VisionInterface/APBSPot2MSMS_net.py",
'Pmv')
self.vf.vision.ed.loadNetwork(Network_Path, takefocus=False)
self.APBS_MSMS_Net = self.vf.vision.ed.getNetworkByName\
("APBSPot2MSMS")[0]
else:
self.APBS_MSMS_Net = self.APBS_MSMS_Net[0]
mol_node = self.APBS_MSMS_Net.getNodeByName('Choose Molecule')[0]
mol_node.run(force=1)
mol_node.inputPorts[1].widget.set(mol.name)
self.vf.GUI.ROOT.config(cursor='watch')
self.vf.GUI.VIEWER.master.config(cursor='watch')
self.vf.GUI.MESSAGE_BOX.tx.component('text').config(cursor='watch')
file_DX = self.APBS_MSMS_Net.getNodeByName('Pmv Grids')[0]
potentialName = os.path.basename(potential)
if not self.vf.grids3D.has_key(potentialName):
self.vf.Grid3DReadAny(potential, show=False, normalize=False)
self.vf.grids3D[potentialName].geomContainer['Box'].Set(visible=0)
file_DX.inputPorts[0].widget.set(potentialName)
file_DX.run(force=1)
macro = self.APBS_MSMS_Net.getNodeByName('Map Pot On Geom')[0]
offset = macro.macroNetwork.getNodeByName('Offset')[0]
check = offset.getInputPortByName('dial')
check.widget.set(self.distance)
button = macro.macroNetwork.getNodeByName('Checkbutton')[0]
check = button.getInputPortByName('button')
check.widget.set(0)
colormap = macro.macroNetwork.getNodeByName('Color Map')[0]
colormap.run(force=1) # this forces Color node to run
self.APBS_MSMS_Net.run()
colormap.outputPortByName['legend'].data.Set(unitsString='kT/e')
def guiCallback(self):
"""GUI callback for APBSMap_Potential_to_MSMS"""
if not self.vf.commands.has_key('computeMSMS'):
self.vf.browseCommands("msmsCommands",log=False)
file_name,ext = os.path.splitext(self.vf.APBSSetup.params.molecule1Path)
mol_name = os.path.split(file_name)[-1]
mol_list = ()
for name in self.vf.Mols.name:
mol_list += (name,)
ifd = InputFormDescr(title = 'Map Potential to Surface Parameters')
if mol_name in mol_list:
default_mol = mol_name
else:
default_mol = mol_list[0]
from MolKit.molecule import Molecule
if len(self.vf.selection):
selection = self.vf.selection[0]
if isinstance(selection, Molecule):
default_mol = selection.name
self.default_mol = default_mol
ifd.append({'name':'mol_list',
'widgetType':Pmw.ComboBox,
'tooltip':
"""Click on the fliparrow to view
the list of available molecules""" ,
'defaultValue':default_mol,
'wcfg':{'labelpos':'new','label_text':'Please select molecule',
'scrolledlist_items':mol_list, 'history':0,'entry_width':15,
'fliparrow':1, 'dropdown':1, 'listheight':80},
'gridcfg':{'sticky':'we', 'row':1, 'column':0}
})
ifd.append({'name':'quality',
'widgetType':Pmw.RadioSelect,
'tooltip':
""" low, medium and high correspond to molecular
surface density of 1, 3, and 6 points respectively""" ,
'listtext':['low', 'medium', 'high', 'custom'],
'defaultValue':'medium',
'wcfg':{'orient':'vertical','labelpos':'w',
'label_text':'Surface \nquality ',
'command':self.select_custom,
'hull_relief':'ridge', 'hull_borderwidth':2,
'padx':0,
'buttontype':'radiobutton'},
'gridcfg':{'sticky': 'ewns', 'row':2, 'column':0, }})
ifd.append({'name':'distance',
'widgetType':ThumbWheel,
'tooltip':
"""offset along the surface normal at which the potential will be looked up""",
'gridcfg':{'sticky':'we'},
'wcfg':{'value':1.0,'oneTurn':10,
'type':'float',
'increment':0.1,
'precision':1,
'continuous':False,
'wheelPad':3,'width':140,'height':20,
'labCfg':{'text':'Distance from surface',
'side':'top'},
'gridcfg':{'sticky': 'we', 'row':3, 'column':0, }
}
})
val = self.vf.getUserInput(ifd,initFunc = self.initFunc)
if not val: return
self.quality = val['quality']
self.distance = val['distance']
molecule_selected = val['mol_list'][0]
if molecule_selected == mol_name:
potential_dx = os.path.join(self.vf.APBSSetup.params.projectFolder,
mol_name + '.potential.dx')
self.doitWrapper(mol = mol_name, potential = potential_dx)
else:
potential_dx = os.path.join(os.getcwd(), "apbs-"+ molecule_selected)
potential_dx = os.path.join(potential_dx, molecule_selected + \
'.potential.dx')
if not os.path.exists(potential_dx):
self.vf.APBSRun(molecule1 = molecule_selected)
while self.vf.APBSSetup.cmd.ok.configure()['state'][-1] != \
'normal':
self.vf.GUI.ROOT.update()
self.doitWrapper(mol=molecule_selected, potential=potential_dx)
self.vf.APBSSetup.potential = os.path.basename(potential_dx)
if self.vf.hasGui:
change_Menu_state(self.vf.APBSDisplayIsocontours, 'normal')
change_Menu_state(self.vf.APBSDisplayOrthoSlice, 'normal')
if hasattr(self.vf,'APBSVolumeRender'):
change_Menu_state(self.vf.APBSVolumeRender, 'normal')
def __call__(self, mol=None, potential=None, quality='medium', **kw):
"""Maps potential.dx into MSMS using\n
VisionInterface/APBSPot2MSMS_net.py\n
Required Arguments:\n
mol = name of the molecule\n
potential = location of the potential.dx file\n"""
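# Hedged example (added; the molecule name and path are illustrative):
#
#   self.APBSMapPotential2MSMS('1crn', quality='high',
#       potential='apbs-1crn/1crn.potential.dx')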
molNode = self.vf.expandNodes(mol)
assert isinstance(molNode, MoleculeSet)
assert len(molNode) == 1
if not molNode: return 'ERROR'
mol = molNode[0].name
kw['mol'] = mol
if potential is None:
potential_dx = os.path.join(os.getcwd(), "apbs-"+ mol)
potential_dx = os.path.join(potential_dx, mol + '.potential.dx')
kw['potential'] = potential_dx
kw['quality'] = quality
else:
kw['potential'] = potential
if kw.has_key('mol') and kw.has_key('potential'):
if kw.has_key('quality'):
self.quality = kw['quality']
else:
self.quality = 'medium' #default
if kw.has_key('distance'):
self.distance = kw['distance']
else:
self.distance = 1.0 #default
self.doitWrapper(mol=kw['mol'],potential=kw['potential'])
else:
print >>sys.stderr, "mol and/or potential is missing"
return
def select_custom(self, evt):
if evt == 'custom':
ifd = InputFormDescr(title='Select Surface Density')
ifd.append({'name':'density',
'widgetType':ThumbWheel,
'tooltip':
"""Right click on the widget to type a value manually""",
'gridcfg':{'sticky':'we'},
'wcfg':{'value':self.custom_quality,'oneTurn':2,
'type':'float',
'increment':0.1,
'precision':1,
'continuous':False,
'wheelPad':2,'width':145,'height':18,
'labCfg':{'text':'Density '},
}
})
val = self.vf.getUserInput(ifd,)
if val:
self.custom_quality = val['density']
def initFunc(self, ifd):
"""This function initializes GUI for APBSMap_Potential_to_MSMS"""
ifd.descr.entryByName['mol_list']['widget']._entryWidget.\
config(state='readonly')
def setupUndoBefore(self, mol = None, potential = None ):
# Undoing this display command depends on which displayMSMS command
# ran previously; the undo restores whichever faces were displayed before
undoCmd = """self.displayMSMS(self.getMolFromName('%s'), surfName=['MSMS-MOL'], negate=1, redraw =1, topCommand=0)
cmap = self.GUI.VIEWER.FindObjectByName('root|cmap')
if cmap:
cmap.Set(visible=False)"""%(mol)
self.vf.undo.addEntry((undoCmd), ('Map Potential to Molecular Surface'))
APBSMap_Potential_to_MSMS_GUI = CommandGUI()
APBSMap_Potential_to_MSMS_GUI.addMenuCommand('menuRoot','Compute',
'Map Potential to Surface', cascadeName=cascadeName)
class APBSDisplay_Isocontours(MVCommand):
"""APBSDisplayIsocontours displays APBS Potential Isocontours\n
\nPackage : Pmv
\nModule : APBSCommands
\nClass : APBSDisplay_Isocontours
\nCommand name : APBSDisplayIsocontours
\nSynopsis:\n
None <--- APBSDisplayIsocontours(potential = potential)
\nRequired Arguments:\n
potential = string representing where potential.dx is located\n"""
def onAddCmdToViewer(self):
"""Called when added to viewer"""
if self.vf.hasGui:
change_Menu_state(self, 'disabled')
# def onAddObjectToViewer(self, object):
# """Called when object is added to viewer"""
# change_Menu_state(self, 'normal')
def onRemoveObjectFromViewer(self, object):
"""Called when object is removed from viewer"""
if self.vf.hasGui:
if len(self.vf.Mols) == 0:
change_Menu_state(self, 'disabled')
def dismiss(self, event = None):
"""Withdraws GUI form"""
self.cancel = True
self.ifd.entryByName['-visible']['wcfg']['variable'].set(False)
self.ifd.entryByName['+visible']['wcfg']['variable'].set(False)
self.Left_Visible()
self.Right_Visible()
self.form.withdraw()
def doit(self, potential = None):
"""doit function"""
self.vf.GUI.ROOT.config(cursor='watch')
self.vf.GUI.VIEWER.master.config(cursor='watch')
self.vf.GUI.MESSAGE_BOX.tx.component('text').config(cursor='watch')
if not self.vf.commands.has_key('vision'):
self.vf.browseCommands('visionCommands',log=False)
if self.vf.vision.ed is None:
self.vf.vision(log=False)
self.vf.vision(log=False)
self.APBS_Iso_Net = self.vf.vision.ed.getNetworkByName("APBSIsoContour")
if not self.APBS_Iso_Net:
from mglutil.util.packageFilePath import findFilePath
Network_Path = findFilePath("VisionInterface/APBSIsoContour_net.py",
'Pmv')
self.vf.vision.ed.loadNetwork(Network_Path, takefocus=False)
self.APBS_Iso_Net = self.vf.vision.ed.\
getNetworkByName("APBSIsoContour")[0]
else:
self.APBS_Iso_Net = self.APBS_Iso_Net[0]
file_DX = self.APBS_Iso_Net.getNodeByName('Pmv Grids')[0]
potentialName = os.path.basename(potential)
if not self.vf.grids3D.has_key(potentialName):
grid = self.vf.Grid3DReadAny(potential, show=False, normalize=False)
if grid:
self.vf.grids3D[potentialName].geomContainer['Box'].Set(visible=0)
else:
return
file_DX.inputPorts[0].widget.set(potentialName)
self.APBS_Iso_Net.run()
def guiCallback(self):
"""GUI callback"""
file_name,ext = os.path.splitext(self.vf.APBSSetup.params.molecule1Path)
mol_name = os.path.split(file_name)[-1]
self.mol_list = ()
for name in self.vf.Mols.name:
potential_dx = os.path.join(os.getcwd(), "apbs-" + name)
potential_dx = os.path.join(potential_dx, name + '.potential.dx')
if os.path.exists(potential_dx):
self.mol_list += (name,)
potential_dx = os.path.join(self.vf.APBSSetup.params.projectFolder,
mol_name + '.potential.dx')
if os.path.exists(potential_dx):
self.mol_list += (mol_name,)
if len(self.mol_list) == 0:
self.vf.warningMsg("Please run APBS to generate potential.dx", "ERROR potential.dx is missing")
return
if mol_name in self.mol_list:
default_mol = mol_name
else:
default_mol = self.mol_list[0]
from MolKit.molecule import Molecule
if len(self.vf.selection):
selection = self.vf.selection[0]
if isinstance(selection, Molecule):
default_mol = selection.name
self.combo_default = default_mol
potential_dx = os.path.join(os.getcwd(), "apbs-" + default_mol)
potential_dx = os.path.join(potential_dx, default_mol + '.potential.dx')
self.doitWrapper(potential = potential_dx)
self.Isocontour_L = self.APBS_Iso_Net.getNodeByName('Left_Isocontour')[0]
self.Isocontour_R = self.APBS_Iso_Net.getNodeByName('Right_Isocontour')[0]
self.cancel = False
if not hasattr(self, 'ifd'):
self.buildForm()
else:
self.form.deiconify()
self.ifd.entryByName['mol_list']['widget'].setlist(self.mol_list)
self.ifd.entryByName['+Slider']['widget'].canvas.config(bg="Blue")
self.ifd.entryByName['-Slider']['widget'].canvas.config(bg="Red")
self.ifd.entryByName['-visible']['wcfg']['variable'].set(True)
self.ifd.entryByName['+visible']['wcfg']['variable'].set(True)
self.ifd.entryByName['mol_list']['widget'].setentry(self.mol_list[0])
self.ifd.entryByName['mol_list']['widget']._entryWidget.\
config(state='readonly')
self.APBS_Iso_Net.run()
self.Left_Visible()
self.Right_Visible()
self.vf.GUI.ROOT.config(cursor='')
self.vf.GUI.VIEWER.master.config(cursor='')
self.vf.GUI.MESSAGE_BOX.tx.component('text').config(cursor='xterm')
def run(self):
"""Animates isocontours"""
inv_d = 1./(self.maxi - self.mini)
data = Numeric.arange(inv_d,inv_d*500,inv_d*15).tolist()
data += Numeric.arange(inv_d*500,inv_d*5000,inv_d*150).tolist()
for values in data:
if self.cancel:
return
self.ifd.entryByName['+Slider']['widget'].set(values)
#self.Isocontour_L.getInputPortByName('isovalue').widget.set(values)
self.ifd.entryByName['-Slider']['widget'].set(-values)
#self.Isocontour_R.getInputPortByName('isovalue').widget.set(-values)
self.vf.GUI.VIEWER.update()
def __call__(self, **kw):
"""Displays APBS Potential Isocontours using\n
VisionInterface/APBSIsoContour_net.py\n
Required Arguments:\n
potential = location of the potential.dx file\n"""
if kw.has_key('potential'):
self.doitWrapper(potential = kw['potential'])
else:
print >>sys.stderr, "potential is missing"
return
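# Hedged example (added; the path is illustrative):
#
#   self.APBSDisplayIsocontours(potential='apbs-1crn/1crn.potential.dx')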
def buildForm(self):
"""Builds 'default' GUI form'"""
VolumeStats = self.APBS_Iso_Net.getNodeByName('VolumeStats')[0]
self.maxi = VolumeStats.getOutputPortByName('maxi').data
self.mini = VolumeStats.getOutputPortByName('mini').data
self.Update(1);self.Update(-1)
self.ifd = ifd = InputFormDescr(title="Isocontours Control Panel")
ifd.append({'name':'mol_list',
'widgetType':Pmw.ComboBox,
'tooltip':
"""Click on the fliparrow to view
the list of available molecules""" ,
'defaultValue': self.combo_default,
'wcfg':{'labelpos':'e','label_text':'Select molecule',
'scrolledlist_items':self.mol_list, 'history':0,
'selectioncommand':self.Combo_Selection,
'entry_width':5,
'fliparrow':1, 'dropdown':1, 'listheight':80},
'gridcfg':{'sticky':'we', 'row':0, 'column':0,'columnspan':2}
})
ifd.append({'name':'+Slider',
'widgetType':ThumbWheel,
'tooltip':
"""Right click on the widget to type the isovalue manually""",
'wcfg':{'value':1.0,'oneTurn':10,
'type':'float',
'increment':0.1,
'min':0,
'precision':2,
'wheelPad':2,'width':120,'height':19,
'callback':self.Update,
}
})
ifd.append({'name':'-Slider',
'widgetType':ThumbWheel,
'tooltip':
"""Right click on the widget to type the isovalue manually""",
'wcfg':{'value':-1.0,'oneTurn':10,
'type':'float',
'increment':0.1,
'precision':2,
'max':-0.000000001,
'wheelPad':2,'width':120,'height':19,
'callback':self.Update,
},
})
ifd.append({'widgetType':Tkinter.Checkbutton,
'tooltip':"""(De)select this checkbutton to
(un)display blue isocontour""",
'name':'+visible',
'defaultValue':1,
'wcfg':{'text':'Blue isocontour',
'command':self.Left_Visible,
'bg':'Blue','fg':'White',
'variable':Tkinter.BooleanVar()},
'gridcfg':{'sticky':'e','row':1, 'column':1}
})
ifd.append({'widgetType':Tkinter.Checkbutton,
'tooltip':"""(De)select this checkbutton to
(un)display red isocontour""",
'name':'-visible',
'defaultValue':1,
'wcfg':{'text':'Red isocontour',
'command':self.Right_Visible,
'bg':'Red','fg':'White',
'variable':Tkinter.BooleanVar()},
'gridcfg':{'sticky':'e','row':2, 'column':1}
})
ifd.append({'name':'dismiss',
'widgetType':Tkinter.Button,
'wcfg':{'text':'Cancel',
'command':self.dismiss},
'gridcfg':{'sticky':'wens','row':3, 'column':0}
})
ifd.append({'name':'run',
'widgetType':Tkinter.Button,
'wcfg':{'text':'Animate',
'command':self.run},
'gridcfg':{'sticky':'wens','row':3, 'column':1}
})
self.form = self.vf.getUserInput(self.ifd, modal=0, blocking=0)
return ifd
def Combo_Selection(self, mol_name):
"""
This command is triggered as selectioncommand for ComboBox mol_list
"""
potential_dx = os.path.join(os.getcwd(), "apbs-" + mol_name)
potential_dx = os.path.join(potential_dx, mol_name + '.potential.dx')
self.doitWrapper(potential = potential_dx)
def Left_Visible(self):
"""Sets "+polygons" and "left_label" objects visible state"""
left_object = self.vf.GUI.VIEWER.GUI.objectByName('+polygons')
left_label = self.vf.GUI.VIEWER.GUI.objectByName('LeftLabel')
visible = self.ifd.entryByName['+visible']['wcfg']['variable'].get()
left_object.Set(visible = visible)
left_label.Set(visible = visible)
self.vf.GUI.VIEWER.Redraw()
def Right_Visible(self):
"""Sets "-polygons" and "right_label" objects visible states"""
right_object = self.vf.GUI.VIEWER.GUI.objectByName('-polygons')
right_label = self.vf.GUI.VIEWER.GUI.objectByName('RightLabel')
visible = self.ifd.entryByName['-visible']['wcfg']['variable'].get()
right_object.Set(visible = visible)
right_label.Set(visible = visible)
self.vf.GUI.VIEWER.Redraw()
def Update(self,val):
"""Updates Isocontour_L or Isocontour_R"""
if val > 0:
self.Isocontour_R.getInputPortByName('isovalue').widget.set(val)
else:
self.Isocontour_L.getInputPortByName('isovalue').widget.set(val)
APBSDisplay_Isocontours_GUI = CommandGUI()
APBSDisplay_Isocontours_GUI.addMenuCommand('menuRoot', 'Compute', \
'Isocontour Potential', cascadeName=cascadeName)
from DejaVu.colorTool import RedWhiteBlueARamp
class APBSDisplayOrthoSlice(MVCommand):
"""APBSDisplayOrthoslice displays APBS Potential Orthoslice\n
\nPackage : Pmv
\nModule : APBSCommands
\nClass : APBSDisplayOrthoslice
\nCommand name : APBSDisplayOrthoslice
\nSynopsis:\n
None <--- APBSDisplayOrthoslice()
"""
def onAddCmdToViewer(self):
"""Called when added to viewer"""
if self.vf.hasGui:
change_Menu_state(self, 'disabled')
# def onAddObjectToViewer(self, object):
# """Called when object is added to viewer"""
# change_Menu_state(self, 'normal')
def onRemoveObjectFromViewer(self, object):
"""Called when object is removed from viewer"""
if self.vf.hasGui:
if len(self.vf.Mols) == 0:
change_Menu_state(self, 'disabled')
potential = object.name +'.potential.dx'
try:
self.vf.Grid3DCommands.select(potential)
self.vf.Grid3DAddRemove.remove()
except:
pass #can't remove from 3D Grid Rendering widget
def doit(self):
"""doit function"""
self.vf.Grid3DCommands.show()
self.vf.Grid3DCommands.select(self.vf.APBSSetup.potential)
self.vf.Grid3DCommands.Checkbuttons['OrthoSlice'].invoke()
grid = self.vf.grids3D[self.vf.APBSSetup.potential]
self.vf.Grid3DOrthoSlice.select()
self.vf.Grid3DOrthoSlice.X_vis.set(True)
self.vf.Grid3DOrthoSlice.Y_vis.set(True)
self.vf.Grid3DOrthoSlice.Z_vis.set(True)
self.vf.Grid3DOrthoSlice.createX()
self.vf.Grid3DOrthoSlice.createY()
self.vf.Grid3DOrthoSlice.createZ()
self.vf.Grid3DOrthoSlice.ifd.entryByName['X_Slice']['widget'].set(grid.dimensions[0]/2)
self.vf.Grid3DOrthoSlice.ifd.entryByName['Y_Slice']['widget'].set(grid.dimensions[1]/2)
self.vf.Grid3DOrthoSlice.ifd.entryByName['Z_Slice']['widget'].set(grid.dimensions[2]/2)
mini = - grid.std/10.
maxi = grid.std/10.
grid.geomContainer['OrthoSlice']['X'].colormap.configure(ramp=RedWhiteBlueARamp(), mini=mini, maxi=maxi)
grid.geomContainer['OrthoSlice']['Y'].colormap.configure(ramp=RedWhiteBlueARamp(), mini=mini, maxi=maxi)
grid.geomContainer['OrthoSlice']['Z'].colormap.configure(ramp=RedWhiteBlueARamp(), mini=mini, maxi=maxi)
def guiCallback(self):
"""GUI callback"""
self.doitWrapper()
def __call__(self, **kw):
"""Displays APBS Potential Isocontours using\n
VisionInterface/APBSIsoContour_net.py\n
Required Arguments:\n
potential = location of the potential.dx file\n"""
self.doitWrapper()
APBSDisplayOrthoSlice_GUI = CommandGUI()
APBSDisplayOrthoSlice_GUI.addMenuCommand('menuRoot', 'Compute', \
'Display OrthoSlice', cascadeName=cascadeName)
class APBSVolumeRender(MVCommand):
"""APBSVolumeRender \n
\nPackage : Pmv
\nModule : APBSCommands
\nClass : APBSVolumeRender
\nCommand name : APBSVolumeRender
\nSynopsis:\n
None <--- APBSVolumeRender()
"""
# def checkDependencies(self, vf):
# if not vf.hasGui:
# return 'ERROR'
# from Volume.Renderers.UTVolumeLibrary import UTVolumeLibrary
# test = UTVolumeLibrary.VolumeRenderer()
# flagVolume = test.initRenderer()
# if not flagVolume:
# return 'ERROR'
def onAddCmdToViewer(self):
"""Called when added to viewer"""
if self.vf.hasGui:
change_Menu_state(self, 'disabled')
# def onAddObjectToViewer(self, object):
# """Called when object is added to viewer"""
# change_Menu_state(self, 'normal')
def onRemoveObjectFromViewer(self, object):
"""Called when object is removed from viewer"""
if self.vf.hasGui:
if len(self.vf.Mols) == 0:
change_Menu_state(self, 'disabled')
def doit(self):
"""doit function"""
grid = self.vf.grids3D[self.vf.APBSSetup.potential]
mini = - grid.std/10.
maxi = grid.std/10.
tmpMax = grid.maxi
tmpMin = grid.mini
grid.mini = mini
grid.maxi = maxi
self.vf.Grid3DCommands.show()
self.vf.Grid3DCommands.select(self.vf.APBSSetup.potential)
self.vf.Grid3DCommands.Checkbuttons['VolRen'].invoke()
self.vf.Grid3DVolRen.select()
widget = self.vf.Grid3DVolRen.ifd.entryByName['VolRen']['widget']
widget.colorGUI()
ramp = RedWhiteBlueARamp()
ramp[:,3] = Numeric.arange(0,0.25,1./(4*256.),'f')
grid = self.vf.grids3D[self.vf.APBSSetup.potential]
widget.ColorMapGUI.configure(ramp=ramp, mini=mini, maxi=maxi)
widget.ColorMapGUI.apply_cb()
grid.mini = tmpMin
grid.maxi = tmpMax
def guiCallback(self):
"""GUI callback"""
self.doitWrapper()
def __call__(self, **kw):
"""Displays APBS Potential Isocontours using\n
VisionInterface/APBSIsoContour_net.py\n
Required Arguments:\n
potential = location of the potential.dx file\n"""
self.doitWrapper()
APBSVolumeRender_GUI = CommandGUI()
APBSVolumeRender_GUI.addMenuCommand('menuRoot', 'Compute', \
'Volume Renderer', cascadeName=cascadeName)
from tkFileDialog import *
class APBSLoad_Profile(MVCommand):
"""APBSLoadProfile loads APBS parameters\n
\nPackage : Pmv
\nModule : APBSCommands
\nClass : APBSLoad_Profile
\nCommand name : APBSLoadProfile
\nSynopsis:\n
None <--- APBSLoadProfile(filename = None)
\nOptional Arguments:\n
filename = name of the file containing APBS parameters\n
"""
def doit(self, filename = None):
"""doit function"""
self.vf.APBSSetup.loadProfile(filename=filename)
def guiCallback(self):
"""GUI callback"""
filename=askopenfilename(filetypes=[('APBS Profile','*.apbs.pf')],\
title="Load APBS Profile")
if filename:
self.doitWrapper(filename=filename)
def __call__(self, **kw):
"""None <--- APBSSave_Profile()\n
Calls APBSSetup.loadProfile\n"""
if kw.has_key('filename'):
self.doitWrapper(filename=kw['filename'])
else:
if self.vf.APBSSetup.cmdForms.has_key('default') and \
self.vf.APBSSetup.cmdForms['default'].f.winfo_toplevel().\
wm_state() == 'normal':
filename=askopenfilename(filetypes=\
[('APBS Profile','*.apbs.pf')],
title="Load APBS Profile",
parent=self.vf.APBSSetup.cmdForms['default'].root)
else:
filename = askopenfilename(filetypes =
[('APBS Profile','*.apbs.pf')], title = "Load APBS Profile")
if filename:
self.doitWrapper(filename=filename)
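# Hedged example (added; the file name is illustrative):
#
#   self.APBSLoadProfile(filename='myrun.apbs.pf')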
APBSLoad_Profile_GUI = CommandGUI()
APBSLoad_Profile_GUI.addMenuCommand('menuRoot', 'Compute', 'Load Profile',
cascadeName=cascadeName, separatorAbove=1)
class APBSSave_Profile(MVCommand):
"""APBSSaveProfile saves APBS parameters\n
\nPackage : Pmv
\nModule : APBSCommands
\nClass : APBSSave_Profile
\nCommand name : APBSSaveProfile
\nSynopsis:\n
None <--- APBSSaveProfile(filename = None)
\nOptional Arguments:\n
filename = name of the file where APBS parameters are to be saved\n
"""
def onAddCmdToViewer(self):
"""Called when added to viewer"""
if self.vf.hasGui:
change_Menu_state(self, 'disabled')
def onRemoveObjectFromViewer(self, object):
"""Called when object is removed from viewer"""
if self.vf.hasGui:
if len(self.vf.Mols) == 0:
change_Menu_state(self, 'disabled')
def doit(self, Profilename=None):
"""doit function"""
self.vf.APBSSetup.saveProfile(Profilename=Profilename, fileFlag=True, flagCommand=True)
def guiCallback(self):
"""GUI callback"""
filename=asksaveasfilename(filetypes=[('APBS Profile','*.apbs.pf')],
title="Save APBS Profile As")
if filename:
self.doitWrapper(Profilename=filename)
def __call__(self, **kw):
"""None <--- APBSSave_Profile(filename = None)\n
Calls APBSSetup.saveProfile\n"""
if kw.has_key('Profilename'):
self.doitWrapper(Profilename=kw['Profilename'])
else:
if self.vf.APBSSetup.cmdForms.has_key('default') and \
self.vf.APBSSetup.cmdForms['default'].f.winfo_toplevel().\
wm_state() == 'normal':
filename = asksaveasfilename(filetypes=[('APBS Profile',
'*.apbs.pf')],title="Save APBS Profile As",
parent = self.vf.APBSSetup.cmdForms['default'].root)
else:
filename = asksaveasfilename(filetypes =
[('APBS Profile','*.apbs.pf')],title = "Save APBS Profile As")
if filename:
self.doitWrapper(Profilename=filename)
APBSSave_Profile_GUI = CommandGUI()
APBSSave_Profile_GUI.addMenuCommand('menuRoot', 'Compute', 'Save Profile',
cascadeName=cascadeName)
class APBSWrite_APBS_Parameter_File(MVCommand):
"""APBSOutputWrite writes APBS input file\n
\nPackage : Pmv
\nModule : APBSCommands
\nClass : APBSWrite_APBS_Parameter_File
\nCommand name : APBSOutputWrite
\nSynopsis:\n
None <--- APBSOutputWrite(filename)
\nRequired Arguments:\n
filename = name of the apbs input file \n
"""
def doit(self, filename = None):
"""doit function for APBSWrite_APBS_Parameter_File"""
if filename:
self.vf.APBSSetup.params.SaveAPBSInput(filename)
def guiCallback(self, **kw):
"""
GUI Callback for APBSWrite_APBS_Parameter_File
Asks for the file name to save current parameters
"""
filename=asksaveasfilename(filetypes=[('APBS Parameter File','*.apbs')],
title="Save APBS Parameters As")
apply(self.doitWrapper, (filename,), kw)
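# Hedged example (added; the file name is illustrative):
#
#   self.APBSOutputWrite('mymol.apbs')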
APBSWrite_Parameter_File_GUI = CommandGUI()
APBSWrite_Parameter_File_GUI.addMenuCommand('menuRoot', 'Compute', \
'Write APBS Parameter File', cascadeName=cascadeName)
class APBSPreferences(MVCommand):
"""APBSPreferences allows to change APBS Preferences\n
\nPackage : Pmv
\nModule : APBSCommands
\nClass : APBSPreferences
\nCommand name : APBSPreferences
\nSynopsis:\n
None <--- APBSPreferences(APBS_Path = None, pdb2pqr_Path = None, ff = None,
nodebump = False, nohopt = False)
\nOptional Arguments:\n
APBS_Path -- path to apbs executable
pdb2pqr_Path -- path to pdb2pqr.py script
ff -- Force Field for pdb2pqr ('amber', 'charmm' or 'parse')
nodebump : Do not perform the debumping operation
nohopt : Do not perform hydrogen optimization
"""
def doit(self, APBS_Path = None, pdb2pqr_Path = None, ff = None,
nodebump = False, nohopt = False):
"""
doit function for APBSPreferences class
\nOptional Arguments:\n
APBS_Path -- path to apbs executable
pdb2pqr_Path -- path to pdb2pqr.py script
ff -- Force Field for pdb2pqr ('amber', 'charmm' or 'parse')
nodebump : Do not perform the debumping operation
nohopt : Do not perform hydrogen optimization
"""
self.overwrite_pqr = False
if APBS_Path:
self.vf.APBSSetup.params.APBS_Path = APBS_Path
if pdb2pqr_Path:
self.vf.APBSSetup.params.pdb2pqr_Path = pdb2pqr_Path
if ff:
self.vf.APBSSetup.params.pdb2pqr_ForceField = ff
if self.vf.hasGui:
if nodebump != self.nodebump_past:
self.nodebump_past = nodebump
self.nodebump.set(nodebump)
self.overwrite_pqr = True
if nohopt != self.nohopt_past:
self.nohopt_past = nohopt
self.nohopt.set(nohopt)
self.overwrite_pqr = True
else :
if nodebump != self.nodebump_past:
self.nodebump_past = nodebump
self.nodebump = nodebump
self.overwrite_pqr = True
if nohopt != self.nohopt_past:
self.nohopt_past = nohopt
self.nohopt = nohopt
self.overwrite_pqr = True
def __init__(self):
MVCommand.__init__(self)
try:
self.nodebump = Tkinter.BooleanVar()
self.nodebump.set(False)
self.nohopt = Tkinter.BooleanVar()
self.nohopt.set(False)
except:
self.nodebump = False
self.nohopt = False
self.nodebump_past = False
self.nohopt_past = False
self.overwrite_pqr = False
def guiCallback(self):
"""GUI Callback for APBSPreferences"""
self.APBS_Path = self.vf.APBSSetup.params.APBS_Path
self.pdb2pqr_Path = self.vf.APBSSetup.params.pdb2pqr_Path
self.ff_arg = Tkinter.StringVar()
self.ff_arg.set(self.vf.APBSSetup.params.pdb2pqr_ForceField)
self.ifd = ifd = InputFormDescr(title="APBS Preferences")
## APBS PATH GROUP
ifd.append({'name':"APBS_Path",
'widgetType':Pmw.Group,
'container':{'APBS_Path':'w.interior()'},
'wcfg':{'tag_text':"Path to APBS executable"},
'gridcfg':{'sticky':'nswe','columnspan':5}
})
ifd.append({'widgetType':Tkinter.Button,
'name':'APBS_Browse',
'parent':'APBS_Path',
'wcfg':{'text':'Browse ...',
'command':self.set_APBS_Path},
'gridcfg':{'sticky':'we', 'row':1, 'column':0}
})
ifd.append({'widgetType':Pmw.EntryField,
'name':'APBS_Location',
'parent':'APBS_Path',
'wcfg':{'value':self.APBS_Path},
'gridcfg':{'sticky':'ew', 'row':1, 'column':1}
})
ifd.append({'name':'APBSLabel',
'widgetType':Tkinter.Label,
'parent':'APBS_Path',
'wcfg':{'text':"""\nAdaptive Poisson-Boltzmann Solver \
(APBS) should be installed\n before it can be run locally. \n Source code \
and/or binaries for APBS can be downloaded from
http://agave.wustl.edu/apbs/download \n
Details on how to run APBS using Pmv can be found at
http://mccammon.ucsd.edu/pmv_apbs\n"""},
'gridcfg':{'columnspan':2, 'sticky':'ew', 'row':2, 'column':0} })
##pdb2pqr.py PATH GROUP
ifd.append({'name':"pqd2pqr_Path",
'widgetType':Pmw.Group,
'container':{'pdb2pqr_Path':'w.interior()'},
'wcfg':{'tag_text':"Path to pdb2pqr.py"},
'gridcfg':{'sticky':'nswe','columnspan':5}
})
ifd.append({'widgetType':Tkinter.Button,
'name':'pdb2pqr_Browse',
'parent':'pdb2pqr_Path',
'wcfg':{'text':'Browse ...',
'command':self.set_pdb2pqr_Path},
'gridcfg':{'sticky':'we', 'row':1, 'column':0}
})
ifd.append({'widgetType':Pmw.EntryField,
'name':'pdb2pqr_Location',
'parent':'pdb2pqr_Path',
'wcfg':{'value':self.pdb2pqr_Path},
'gridcfg':{'sticky':'ew', 'row':1, 'column':1}
})
ifd.append({'name':'pdb2pqrLabel',
'widgetType':Tkinter.Label,
'parent':'pdb2pqr_Path',
'wcfg':{'text':"""\npdb2pqr.py is needed to create PQR \
files used by APBS. \n One can also use PDB2PQR Server to convert PDB files \
into PQR\n http://agave.wustl.edu/pdb2pqr, \
and read that PQR file instead\n"""},
'gridcfg':{'columnspan':2, 'sticky':'ew', 'row':2, 'column':0}
})
##pdb2pqr.py FORCE FIELD GROUP
ifd.append({'name':"pdb2pqr_ForceField",
'widgetType':Pmw.Group,
'container':{'pdb2pqr_ForceField':'w.interior()'},
'wcfg':{'tag_text':"pdb2pqr ForceField"},
'gridcfg':{'sticky':'nswe','columnspan':5}
})
ifd.append({'name':"Radiobutton_AMBER",
'widgetType':Tkinter.Radiobutton,
'parent':'pdb2pqr_ForceField',
'wcfg':{'value':'amber',
'variable':self.ff_arg},
'gridcfg':{'sticky':'w','row':0, 'column':0}
})
ifd.append({'name':'Label_AMBER',
'widgetType':Tkinter.Label,
'parent':'pdb2pqr_ForceField',
'wcfg':{'text':'AMBER '},
'gridcfg':{'sticky':'w', 'row':0, 'column':1} })
ifd.append({'name':"Radiobutton_CHARMM",
'widgetType':Tkinter.Radiobutton,
'parent':'pdb2pqr_ForceField',
'wcfg':{'value':'charmm',
'variable':self.ff_arg},
'gridcfg':{'sticky':'w','row':0, 'column':2}
})
ifd.append({'name':'Label_CHARMM',
'widgetType':Tkinter.Label,
'parent':'pdb2pqr_ForceField',
'wcfg':{'text':'CHARMM '},
'gridcfg':{'sticky':'w', 'row':0, 'column':3} })
ifd.append({'name':"Radiobutton_PARSE",
'widgetType':Tkinter.Radiobutton,
'parent':'pdb2pqr_ForceField',
'wcfg':{'value':'parse',
'variable':self.ff_arg},
'gridcfg':{'sticky':'w','row':0, 'column':4}
})
ifd.append({'name':'Label_PARSE',
'widgetType':Tkinter.Label,
'parent':'pdb2pqr_ForceField',
'wcfg':{'text':'PARSE '},
'gridcfg':{'sticky':'w', 'row':0, 'column':5} })
##pdb2pqr.py OPTIONS GROUP
ifd.append({'name':"pdb2pqr_Options",
'widgetType':Pmw.Group,
'container':{'pdb2pqr_Options':'w.interior()'},
'wcfg':{'tag_text':"pdb2pqr Options"},
'gridcfg':{'sticky':'nswe','columnspan':5}
})
ifd.append({'name':"pdb2pqr_debump_Checkbutton",
'widgetType':Tkinter.Checkbutton,
'parent':'pdb2pqr_Options',
'wcfg':{
'variable':self.nodebump},
'gridcfg':{'sticky':'w','row':0, 'column':0}
})
ifd.append({'name':'pdb2pqr_debump_Label',
'widgetType':Tkinter.Label,
'parent':'pdb2pqr_Options',
'wcfg':{'text':'Do not perform the debumping operation'},
'gridcfg':{'sticky':'w', 'row':0, 'column':1} })
ifd.append({'name':"pdb2pqr_nohopt_Checkbutton",
'widgetType':Tkinter.Checkbutton,
'parent':'pdb2pqr_Options',
'wcfg':{
'variable':self.nohopt},
'gridcfg':{'sticky':'w','row':1, 'column':0}
})
ifd.append({'name':'pdb2pqr_nohopt_Label',
'widgetType':Tkinter.Label,
'parent':'pdb2pqr_Options',
'wcfg':{'text':'Do not perform hydrogen optimization'},
'gridcfg':{'sticky':'w', 'row':1, 'column':1} })
val = self.vf.getUserInput(ifd)
if val:
self.doitWrapper(val['APBS_Location'],
val['pdb2pqr_Location'],self.ff_arg.get(),\
nodebump = self.nodebump.get(), nohopt = self.nohopt.get())
def set_APBS_Path(self):
"""Sets APBS Path"""
filename=askopenfilename(filetypes=[('APBS Executable','apbs*')],\
title="Please select APBS Executable",parent=self.ifd[3]['widget'])
# FIXME: Maybe there is a better way to get the parent
if filename:
self.APBS_Path = filename
self.ifd.entryByName['APBS_Location']['widget'].setentry(filename)
def set_pdb2pqr_Path(self):
"""Sets pdb2pqr Path"""
filename=askopenfilename(filetypes=[('Python script','pdb2pqr.py')],
title="Please select pdb2pqr.py",parent=self.ifd[3]['widget'])
if filename:
self.pdb2pqr_Path = filename
self.ifd.entryByName['pdb2pqr_Location']['widget'].\
setentry(filename)
APBSPreferences_GUI = CommandGUI()
APBSPreferences_GUI.addMenuCommand('menuRoot', 'Compute', 'Preferences',
cascadeName=cascadeName )
commandList = [{'name':'APBSRun','cmd':APBSRun(),'gui':APBSRun_GUI}]
flagMSMS = False
try:
import mslib
flagMSMS = True
except:
pass
if flagMSMS:
commandList.append({'name':'APBSMapPotential2MSMS', 'cmd':
APBSMap_Potential_to_MSMS(),'gui':APBSMap_Potential_to_MSMS_GUI},
)
commandList.extend([
{'name':'APBSDisplayIsocontours', 'cmd':
APBSDisplay_Isocontours(),'gui':APBSDisplay_Isocontours_GUI},
{'name':'APBSDisplayOrthoSlice', 'cmd':
APBSDisplayOrthoSlice(),'gui':APBSDisplayOrthoSlice_GUI},
{'name':'APBSVolumeRender', 'cmd':
APBSVolumeRender(),'gui':APBSVolumeRender_GUI}
])
## flagVolume = False
## try:
## from Volume.Renderers.UTVolumeLibrary import UTVolumeLibrary
## test = UTVolumeLibrary.VolumeRenderer()
## flagVolume = test.initRenderer()
## except:
## pass
## if flagVolume:
## commandList.append({'name':'APBSVolumeRender', 'cmd':
## APBSVolumeRender(),'gui':APBSVolumeRender_GUI})
commandList.extend([
{'name':'APBSLoadProfile','cmd':APBSLoad_Profile(),'gui':
APBSLoad_Profile_GUI},
{'name':'APBSSaveProfile','cmd':APBSSave_Profile(),'gui':
APBSSave_Profile_GUI},
{'name':'APBSOutputWrite','cmd':APBSWrite_APBS_Parameter_File(),
'gui':APBSWrite_Parameter_File_GUI},
{'name':'APBSSetup','cmd':APBSSetup(),'gui':APBSSetupGUI},
{'name':'APBSPreferences','cmd':APBSPreferences(),'gui':
APBSPreferences_GUI}])
def initModule(viewer):
for _dict in commandList:
viewer.addCommand(_dict['cmd'],_dict['name'],_dict['gui'])
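# Note added for clarity: Pmv calls initModule(viewer) when this command
# module is loaded, e.g. via self.browseCommands('APBSCommands', log=False).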
def change_Menu_state(self, state):
index = self.GUI.menuButton.menu.children[cascadeName].\
index(self.GUI.menu[4]['label'])
self.GUI.menuButton.menu.children[cascadeName].entryconfig(index, \
state = state)
|
corredD/ePMV
|
pmv_dev/APBSCommands_2x.py
|
Python
|
gpl-3.0
| 238,741
|
[
"Amber",
"CHARMM",
"VisIt"
] |
362cee4e6f40c240feb2cd39435ec61da159edff0d3bf60ae69af30aaff1c732
|
'''
Created on 18 May 2018
@author: Yonatan-Carlos Carranza-Alarcon
An example of how to use the Imprecise Gaussian Discriminant.
'''
import classifip
# We start by creating an instance of the base classifier we want to use
print("Example of Imprecise Linear Discriminant Analyse for Classification - Data set IRIS \n")
model = classifip.models.qda.LinearDiscriminant()
data = classifip.dataset.uci_data_set.export_data_set('iris.data')
# Learning
X = data.iloc[:, :-1].values
y = data.iloc[:, -1].tolist()
model.learn(X=X, y=y, ell=5)
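# Note added for clarity: 'ell' controls the imprecision level of the model;
# larger values generally yield wider probability intervals (treat this
# reading as an assumption -- see the classifip documentation).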
# Evaluation: we can choose the method used to minimize the convex problem (quadratic)
test, _ = model.evaluate(query=X[2], method="quadratic")
# The output is a list of probability intervals, we can print each instance :
print("\nPrediction using interval dominance criterion with 0/1 costs + quadratic method\n")
print(test)
# Evaluation: we can choose the method used to minimize the convex problem (nonlinear)
test, _ = model.evaluate(query=X[2], method="nonlinear")
# The output is a list of probability intervals, we can print each instance :
print("\nPrediction using interval dominance criterion with 0/1 costs + nonlinear method\n")
print(test)
|
sdestercke/classifip
|
examples/classification/qdatest.py
|
Python
|
gpl-2.0
| 1,176
|
[
"Gaussian"
] |
ea3d9cf4031b0966ce66de338fd7c9f85afcc512a5a8cfe9b50797be8adea80e
|
#! /usr/bin/env python
# linearizedGP -- Implementation of extended and unscented Gaussian processes.
# Copyright (C) 2014 National ICT Australia (NICTA)
#
# This file is part of linearizedGP.
#
# linearizedGP is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# linearizedGP is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with linearizedGP. If not, see <http://www.gnu.org/licenses/>.
""" Generate random datasets for testing the nonlinear GPs (used in the NIPS
2014 submission).
Author: Daniel Steinberg (daniel.steinberg@nicta.com.au)
modified by Simon O'Callaghan
Institute: NICTA
Date: 4 Sep 2014
"""
import os
from linearizedGP import gputils, kernels
import numpy as np
import scipy.io as sio
# Some parameters for the dataset
npoints = 1000 # Testing and training points
noise = 0.2
folds = 5
plot = False
kfunc = kernels.kern_m52
# kfunc = kernels.kern_se
k_sigma = 0.8
k_length = 0.6
savedir = 'data'
# Nonlinear functions
savenameList = []
fctnList = []
dfctnList = []
savenameList = savenameList + ["signdata.mat"]
fctnList = fctnList + ["2 * np.sign(f) + f**3"]
dfctnList = dfctnList + [""]
savenameList = savenameList + ["tanhdata.mat"]
fctnList = fctnList + ["np.tanh(2*f)"]
dfctnList = dfctnList + ["2 - 2 * np.tanh(2*f)**2"]
savenameList = savenameList + ["sindata.mat"]
fctnList = fctnList + ["np.sin(f)"]
dfctnList = dfctnList + ["np.cos(f)"]
savenameList = savenameList + ["lineardata.mat"]
fctnList = fctnList + ["f"]
dfctnList = dfctnList + ["np.ones(f.shape)"]
savenameList = savenameList + ['poly3data.mat']
fctnList = fctnList + ["f**3 + f**2 + f"]
dfctnList = dfctnList + ["3*f**2 + 2*f + 1"]
savenameList = savenameList + ["expdata.mat"]
fctnList = fctnList + ["np.exp(f)"]
dfctnList = dfctnList + ["np.exp(f)"]
for fwdmdlInd in range(len(savenameList)):
nlfunc = lambda f: eval(fctnList[fwdmdlInd])
dnlfunc = lambda f: eval(dfctnList[fwdmdlInd])
# Construct the dataset
x = np.linspace(-2 * np.pi, 2 * np.pi, npoints)
fseed = np.random.randn(npoints)
U, S, V = np.linalg.svd(kfunc(x[np.newaxis, :], x[np.newaxis, :], k_sigma,
k_length))
L = U.dot(np.diag(np.sqrt(S))).dot(V)
f = fseed.dot(L)
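    # L = U.sqrt(S).V is a matrix square root of the (symmetric PSD) kernel
    # matrix K, so L.T.dot(L) == K up to numerical error and f = fseed.dot(L)
    # is a draw from the GP prior N(0, K).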
g = nlfunc(f)
y = g + np.random.randn(npoints) * noise
# Make the dictionary to save into a mat structure
datadic = {
'noise': noise,
'func': fctnList[fwdmdlInd],
'dfunc': dfctnList[fwdmdlInd],
'x': x,
'f': f,
'g': g,
'y': y,
'train': [],
'test': []
}
# Save the data to disk
if not os.path.exists(savedir):
os.mkdir(savedir)
for k, (sind, rind) in enumerate(gputils.k_fold_CV_ind(npoints, k=folds)):
datadic['train'].append(rind)
datadic['test'].append(sind)
datadic['train'] = np.array(datadic['train'])
datadic['test'] = np.array(datadic['test'])
sio.savemat(os.path.join(savedir, savenameList[fwdmdlInd]), datadic)
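# A minimal sketch of reading one of the generated files back (assuming the
# field names saved in `datadic` above; loadmat wraps arrays, so shapes may
# need a ravel):
#
#     data = sio.loadmat(os.path.join(savedir, 'sindata.mat'))
#     tr_ind = data['train'][0]                      # first CV fold
#     x_tr, y_tr = data['x'].ravel()[tr_ind], data['y'].ravel()[tr_ind]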
|
NICTA/linearizedGP
|
experiments/datagen.py
|
Python
|
gpl-3.0
| 3,553
|
[
"Gaussian"
] |
366d9fcede29ecaec5cc74e20864819f45b83d1ce2aa388745129f489d24499f
|
"""
KeepNote
Notebook indexing
"""
#
# KeepNote
# Copyright (c) 2008-2009 Matt Rasmussen
# Author: Matt Rasmussen <rasmus@mit.edu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
# python imports
import os
import sys
import traceback
from thread import get_ident
import sqlite3 as sqlite
sqlite.enable_shared_cache(True)
#sqlite.threadsafety = 0
# keepnote imports
import keepnote
# index filename
INDEX_FILE = u"index.sqlite"
INDEX_VERSION = 1
def get_index_file(notebook):
"""Get the index filename for a notebook"""
index_dir = notebook.pref.get("index_dir", default=u"")
if not index_dir or not os.path.exists(index_dir):
index_dir = notebook.get_pref_dir()
return os.path.join(index_dir, INDEX_FILE)
def preorder(node):
"""Iterate through nodes in pre-order traversal"""
queue = [node]
while len(queue) > 0:
node = queue.pop()
yield node
for child in node.iter_temp_children():
queue.append(child)
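# Note: queue.pop() removes from the tail of the list, so this walk is
# depth-first; children are visited in reverse of the order in which
# iter_temp_children yields them.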
class NoteBookIndexDummy (object):
"""Index for a NoteBook"""
def __init__(self, notebook):
pass
def open(self):
"""Open connection to index"""
pass
def get_con(self):
"""Get connection for thread"""
pass
def close(self):
"""Close connection to index"""
pass
def init_index(self):
"""Initialize the tables in the index if they do not exist"""
pass
def add_node(self, node):
"""Add a node to the index"""
pass
def remove_node(self, node):
"""Remove node from index"""
pass
def get_node_path(self, nodeid):
"""Get node path for a nodeid"""
return None
def search_titles(self, query, cols=[]):
"""Return nodeids of nodes with matching titles"""
return []
def save(self):
"""Save index"""
pass
class AttrIndex (object):
"""Indexing information for an attribute"""
def __init__(self, name, type, multivalue=False, index_value=False):
self._name = name
self._type = type
self._table_name = "Attr_" + name
self._index_name = "IdxAttr_" + name + "_nodeid"
self._multivalue = multivalue
self._index_value = index_value
self._index_value_name = "IdxAttr_" + name + "_value"
def get_name(self):
return self._name
def get_table_name(self):
return self._table_name
def get_is_multivalue(self):
return self._multivalue
def init(self, cur):
"""Initialize attribute index for database"""
cur.execute(u"""CREATE TABLE IF NOT EXISTS %s
(nodeid TEXT,
value %s);
""" % (self._table_name, self._type))
cur.execute(u"""CREATE INDEX IF NOT EXISTS %s
ON %s (nodeid);""" % (self._index_name,
self._table_name))
if self._index_value:
cur.execute(u"""CREATE INDEX IF NOT EXISTS %s
ON %s (value);""" % (self._index_value_name,
self._table_name))
def add_node(self, cur, node):
"""Add a node's information to the index"""
nodeid = node.get_attr("nodeid")
value = node.get_attr(self._name)
self.set(cur, nodeid, value)
def remove_node(self, cur, node):
"""Remove node from index"""
cur.execute(u"DELETE FROM %s WHERE nodeid=?" % self._table_name,
(node.get_attr("nodeid"),))
def get(self, cur, nodeid):
"""Get information for a node from the index"""
cur.execute(u"""SELECT value FROM %s WHERE nodeid = ?""" %
self._table_name, (nodeid,))
values = [row[0] for row in cur.fetchall()]
# return value
if self._multivalue:
return values
else:
if len(values) == 0:
return None
else:
return values[0]
def set(self, cur, nodeid, value):
"""Set the information for a node in the index"""
rows = list(cur.execute((u"SELECT 1 "
u"FROM %s "
u"WHERE nodeid = ?") % self._table_name,
(nodeid,)))
if rows:
row = rows[0]
if row[0] != value:
# record update
ret = cur.execute((u"UPDATE %s SET "
u"value=? "
u"WHERE nodeid = ?") %
self._table_name,
(value, nodeid))
else:
# insert new row
cur.execute(u"""INSERT INTO %s VALUES
(?, ?)""" % self._table_name,
(nodeid, value))
class NoteBookIndex (object):
"""Index for a NoteBook"""
def __init__(self, notebook):
self._notebook = notebook
self._uniroot = notebook.get_universal_root_id()
self._attrs = {}
self._need_index = False
self._corrupt = False
self.con = None
self.cur = None
self.open()
self.add_node(notebook)
def open(self):
"""
Open connection to index
"""
try:
index_file = get_index_file(self._notebook)
self._corrupt = False
self.con = sqlite.connect(index_file, isolation_level="DEFERRED",
check_same_thread=False)
self.cur = self.con.cursor()
self.con.execute(u"PRAGMA read_uncommitted = true;")
self.init_index()
except sqlite.DatabaseError, e:
self._on_corrupt(e, sys.exc_info()[2])
def close(self):
"""Close connection to index"""
if self.con is not None:
try:
self.con.commit()
self.con.close()
            except Exception:
                # close should always happen without propagating errors
                pass
self.con = None
self.cur = None
def is_corrupt(self):
"""Return True if database appear corrupt"""
return self._corrupt
def _get_version(self):
"""Get version from database"""
self.con.execute(u"""CREATE TABLE IF NOT EXISTS Version
(version INTEGER, update_date DATE);""")
version = self.con.execute(u"SELECT MAX(version) FROM Version").fetchone()
if version is not None:
version = version[0]
return version
def _set_version(self, version=INDEX_VERSION):
"""Set the version of the database"""
self.con.execute(u"INSERT INTO Version VALUES (?, datetime('now'));",
(version,))
def init_index(self):
"""Initialize the tables in the index if they do not exist"""
self._need_index = False
con = self.con
try:
# check database version
version = self._get_version()
if version is None or version != INDEX_VERSION:
# version does not match, drop all tables
self._drop_tables()
# update version
self._set_version()
self._need_index = True
# init NodeGraph table
con.execute(u"""CREATE TABLE IF NOT EXISTS NodeGraph
(nodeid TEXT,
parentid TEXT,
basename TEXT,
symlink BOOLEAN);
""")
con.execute(u"""CREATE INDEX IF NOT EXISTS IdxNodeGraphNodeid
ON NodeGraph (nodeid);""")
con.execute(u"""CREATE INDEX IF NOT EXISTS IdxNodeGraphParentid
ON NodeGraph (parentid);""")
# TODO: make an Attr table
        # this will let me query whether an attribute is currently being
        # indexed and in what table it is stored.
#con.execute(u"""CREATE TABLE IF NOT EXISTS AttrDefs
# (attr TEXT,
# type );
# """)
# initialize attribute tables
for attr in self._attrs.itervalues():
attr.init(self.cur)
con.commit()
except sqlite.DatabaseError, e:
self._on_corrupt(e, sys.exc_info()[2])
def add_attr(self, attr):
self._attrs[attr.get_name()] = attr
if self.cur:
attr.init(self.cur)
return attr
def _drop_tables(self):
"""clear index"""
self.con.execute(u"DROP TABLE IF EXISTS NodeGraph")
self.con.execute(u"DROP INDEX IF EXISTS IdxNodeGraphNodeid")
self.con.execute(u"DROP INDEX IF EXISTS IdxNodeGraphParentid")
def index_needed(self):
"""Returns True if indexing is needed"""
return self._need_index
def clear(self):
"""Erases database file and reinitializes"""
self.close()
index_file = get_index_file(self._notebook)
if os.path.exists(index_file):
os.remove(index_file)
self.open()
def index_all(self, root=None):
"""
Reindex all nodes under root
This function returns an iterator which must be iterated to completion.
"""
if root is None:
root = self._notebook
visit = set()
queue = []
# record nodes that change while indexing
def changed_callback(nodes, recurse):
for node in nodes:
if node not in visit:
queue.append(node)
self._notebook.node_changed.add(changed_callback)
# perform indexing
for node in preorder(root):
self.add_node(node)
visit.add(node)
yield node
# walk through nodes missed in original pass
while len(queue) > 0:
node = queue.pop()
if node not in visit:
                for node2 in preorder(node):
                    self.add_node(node2)
                    visit.add(node2)
                    yield node2
# remove callback for notebook changes
self._notebook.node_changed.remove(changed_callback)
# record index complete
self._need_index = False
def add_node(self, node):
"""Add a node to the index"""
# DEBUG
#return
if self.con is None:
return
con, cur = self.con, self.cur
try:
# TODO: remove single parent assumption
# get info
nodeid = node.get_attr("nodeid")
parent = node.get_parent()
if parent:
parentid = parent.get_attr("nodeid")
basename = node.get_basename()
else:
parentid = self._uniroot
basename = u""
symlink = False
title = node.get_title()
#------------------
# NodeGraph
rows = list(cur.execute(u"SELECT parentid, basename "
u"FROM NodeGraph "
u"WHERE nodeid = ?", (nodeid,)))
if rows:
row = rows[0]
if row[0] != parentid or row[1] != basename:
# record update
ret = cur.execute(u"UPDATE NodeGraph SET "
u"parentid=?, "
u"basename=?, "
u"symlink=? "
u"WHERE nodeid = ?",
(parentid, basename, symlink, nodeid))
else:
# insert new row
cur.execute(u"""
INSERT INTO NodeGraph VALUES
(?, ?, ?, ?)""",
(nodeid,
parentid,
basename,
symlink,
))
# update attrs
for attr in self._attrs.itervalues():
attr.add_node(cur, node)
except sqlite.DatabaseError, e:
self._on_corrupt(e, sys.exc_info()[2])
def remove_node(self, node):
"""Remove node from index"""
if self.con is None:
return
con, cur = self.con, self.cur
try:
# get info
nodeid = node.get_attr("nodeid")
# delete node
cur.execute(
u"DELETE FROM NodeGraph WHERE nodeid=?", (nodeid,))
#con.commit()
# update attrs
for attr in self._attrs.itervalues():
attr.remove_node(cur, node)
except sqlite.DatabaseError, e:
self._on_corrupt(e, sys.exc_info()[2])
def get_node_path(self, nodeid, visit=None):
"""Get node path for a nodeid"""
# TODO: handle multiple parents
con, cur = self.con, self.cur
if visit is None:
visit = set()
visit.add(nodeid)
try:
def walk(nodeid):
cur.execute(u"""SELECT nodeid, parentid, basename
FROM NodeGraph
WHERE nodeid=?""", (nodeid,))
row = cur.fetchone()
if row:
nodeid, parentid, basename = row
if parentid in visit:
return None
if parentid != self._uniroot:
path = self.get_node_path(parentid, visit)
if path is not None:
path.append(basename)
return path
else:
return None
else:
return [basename]
return walk(nodeid)
except sqlite.DatabaseError, e:
self._on_corrupt(e, sys.exc_info()[2])
def search_titles(self, query):
"""Return nodeids of nodes with matching titles"""
if "title" not in self._attrs:
return
# order titles by exact matches and then alphabetically
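        # (the SQL expression `value != ?` evaluates to 0 for an exact match
        # and 1 otherwise, so exact matches sort to the front)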
self.cur.execute(
u"""SELECT nodeid, value FROM %s WHERE value LIKE ?
ORDER BY value != ?, value """ %
self._attrs["title"].get_table_name(),
(u"%" + query + u"%", query))
return list(self.cur.fetchall())
def get_attr(self, nodeid, key):
attr = self._attrs.get(key, None)
if attr:
return attr.get(self.cur, nodeid)
else:
return []
def save(self):
"""Save index"""
if self.con is not None:
self.con.commit()
def _on_corrupt(self, error, tracebk=None):
self._corrupt = True
# display error
keepnote.log_error(error, tracebk)
# TODO: reload database?
|
vatslav/perfectnote
|
keepnote/notebook/index.py
|
Python
|
gpl-2.0
| 15,994
|
[
"VisIt"
] |
304c9f20c9123c8540e7e4477d343d10b23b742e488c399dee32560c9bd7c966
|
#
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import matplotlib
matplotlib.use('WXAgg')
import espressomd
espressomd.assert_features(["LENNARD_JONES"])
from espressomd import thermostat
from espressomd import visualization
import numpy as np
from matplotlib import pyplot
from threading import Thread
from traits.api import HasTraits, Button, Any, Range, List, Enum, Float
from traitsui.api import View, Group, Item, CheckListEditor, RangeEditor, EnumEditor
import sys
import time
use_opengl = "opengl" in sys.argv
use_mayavi = "mayavi" in sys.argv
if not use_opengl and not use_mayavi:
use_mayavi = True
assert use_opengl != use_mayavi
if use_mayavi:
from espressomd.visualization_mayavi import mlab
if use_opengl:
from pyface.api import GUI
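# Fall back through the available Python MIDI bindings: the standalone
# midi module first, then pygame's, then portmidi's.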
try:
    import midi
except ImportError:
    try:
        from pygame import midi
    except ImportError:
        from portmidi import midi
midi.init()
# If the log flag is set, the MIDI controller changes the pressure logarithmically
pressure_log_flag = True
mayavi_autozoom = False # autozoom is buggy... works only for rotation
old_pressure = -1
# NPT variables
#############################################################
NPTGamma0 = 1.0
#NPTInitPistonMass = 1e-06
#NPTMinPistonMass = 1e-06
NPTMinPistonMass = 1e-04
NPTMaxPistonMass = 1.0
NPTInitPistonMass = NPTMinPistonMass
# System parameters
#############################################################
# 300 Particles
box_l = 7.5395
density = 0.7
#global_boxlen = box_l
#mainthread_boxlen = box_l
# Interaction parameters (repulsive Lennard Jones)
#############################################################
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 2.5 * lj_sig
lj_cap = 20
# Integration parameters
#############################################################
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
system.set_random_state_PRNG()
#system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
system.time_step = 0.01
system.cell_system.skin = 0.4
system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)
system.cell_system.set_n_square(use_verlet_lists=False)
# do the warmup until the particles have at least the distance min_dist
min_dist = 0.9
# integration
int_steps = 1
int_n_times = 5000000
#############################################################
# Setup System #
#############################################################
# Interaction setup
#############################################################
system.box_l = [box_l, box_l, box_l]
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig,
cutoff=lj_cut, shift="auto")
system.force_cap = lj_cap
# Particle setup
#############################################################
volume = box_l * box_l * box_l
n_part = int(volume * density)
for i in range(n_part):
system.part.add(id=i, pos=np.random.random(3) * system.box_l)
system.analysis.dist_to(0)
act_min_dist = system.analysis.min_dist()
system.cell_system.max_num_cells = 2744
if use_mayavi:
vis = visualization.mayaviLive(system)
elif use_opengl:
vis = visualization.openGLLive(system)
mayavi_rotation_angle = 45.
mayavi_rotation_angle_step = 5.
mayavi_zoom = 36.
mayavi_zoom_old = mayavi_zoom
mayavi_zoom_step = 3.
plot_max_data_len = 20
#############################################################
# GUI Controls #
#############################################################
inputs, outputs = [], []
for i in range(midi.get_count()):
interf, name, input, output, opened = midi.get_device_info(i)
if input:
inputs.append((i, interf + " " + name))
if output:
outputs.append((i, interf + " " + name))
class Controls(HasTraits):
if len(inputs) == 1:
default_input = inputs
for i in inputs:
if not "Through Port" in i[1]:
default_input = i
break
default_input = default_input if inputs else None
default_output = -1
through_port_output = None
for i in outputs:
if not "Through Port" in i[1]:
default_output = i
break
else:
through_port_output = i
default_output = default_output if len(
outputs) > 1 else through_port_output
if default_input is None or default_output is None:
print('Cannot connect to any MIDI device')
input_device = List(value=default_input,
editor=CheckListEditor(values=inputs))
output_device = List(value=default_output,
editor=CheckListEditor(values=outputs))
max_temp = 2.
min_temp = 0.5
max_press = 10.
min_press = 5e-4
max_vol = 100000.
min_vol = 50.
max_n = 1000
min_n = 50
temperature = Range(min_temp, max_temp, 1., )
volume = Float(box_l**3.)
pressure = Float(1.)
number_of_particles = Range(min_n, max_n, n_part, )
ensemble = Enum('NVT', 'NPT')
midi_input = None
midi_output = None
MIDI_BASE = 224
MIDI_NUM_TEMPERATURE = MIDI_BASE + 0
MIDI_NUM_VOLUME = MIDI_BASE + 1
MIDI_NUM_PRESSURE = MIDI_BASE + 2
MIDI_NUM_NUMBEROFPARTICLES = MIDI_BASE + 3
MIDI_ROTATE = 0
MIDI_ZOOM = 144
_ui = Any
view = View(
Group(
Item('temperature', editor=RangeEditor(
low_name='min_temp', high_name='max_temp')),
Item('volume', editor=RangeEditor(
low_name='min_vol', high_name='max_vol')),
Item('pressure', editor=RangeEditor(
low_name='min_press', high_name='max_press')),
Item('number_of_particles', editor=RangeEditor(
low_name='min_n', high_name='max_n', is_float=False)),
Item('ensemble', style='custom'),
show_labels=True,
label='Parameters'
),
Group(
Item('input_device'),
Item('output_device'),
show_labels=True,
label='MIDI devices'
),
buttons=[],
title='Control',
height=0.2,
width=0.3
)
def __init__(self, **traits):
super(Controls, self).__init__(**traits)
self._ui = self.edit_traits()
self.push_current_values()
def push_current_values(self):
"""send the current values to the MIDI controller"""
self._temperature_fired()
self._volume_fired()
self._pressure_fired()
self._number_of_particles_fired()
self._ensemble_fired()
def _input_device_fired(self):
if self.midi_input is not None:
self.midi_input.close()
if self.input_device:
self.midi_input = midi.Input(self.input_device[0])
def _output_device_fired(self):
if self.midi_output is not None:
self.midi_output.close()
self.midi_output = midi.Output(self.output_device[0])
self.push_current_values()
def _temperature_fired(self):
status = self.MIDI_NUM_TEMPERATURE
data1 = int((self.temperature - self.min_temp) /
(self.max_temp - self.min_temp) * 127)
data2 = data1
if self.midi_output is not None:
self.midi_output.write_short(status, data1, data2)
def _volume_fired(self):
status = self.MIDI_NUM_VOLUME
data1 = limit_range(int((system.box_l[0]**3. - self.min_vol) /
(self.max_vol - self.min_vol) * 127), minval=0, maxval=127)
data2 = data1
if self.midi_output is not None:
self.midi_output.write_short(status, data1, data2)
def _pressure_fired(self):
status = self.MIDI_NUM_PRESSURE
if pressure_log_flag:
data1 = limit_range(int(127 * (np.log(self.pressure) - np.log(self.min_press)) / (
np.log(self.max_press) - np.log(self.min_press))), minval=0, maxval=127)
else:
data1 = limit_range(int((self.pressure - self.min_press) /
(self.max_press - self.min_press) * 127), minval=0, maxval=127)
data2 = data1
if self.midi_output is not None:
self.midi_output.write_short(status, data1, data2)
def _number_of_particles_fired(self):
status = self.MIDI_NUM_NUMBEROFPARTICLES
data1 = int(self.number_of_particles / self.max_n * 127)
data2 = data1
if self.midi_output is not None:
self.midi_output.write_short(status, data1, data2)
def _ensemble_fired(self):
if self.midi_output is not None:
self.midi_output.write_short(144, 0, 127) # T
self.midi_output.write_short(
144, 1, 127 * (self.ensemble != 'NPT')) # V
self.midi_output.write_short(
144, 2, 127 * (self.ensemble == 'NPT')) # P
self.midi_output.write_short(144, 3, 127) # N
#############################################################
# Integration #
#############################################################
# get initial observables
pressure = system.analysis.pressure()
temperature = 0.0
# TODO: this is some terrible polynomial fit, replace it with a better expression
# equation of state
pyplot.subplot(131)
pyplot.semilogy()
pyplot.title("Phase diagram")
pyplot.xlabel("Temperature")
pyplot.ylabel("Pressure")
pyplot.xlim(0.5, 2.0)
pyplot.ylim(5e-5, 2e1)
xx = np.linspace(0.5, 0.7, 200)
pyplot.plot(xx, -6.726 * xx**4 + 16.92 * xx**3 -
15.85 * xx**2 + 6.563 * xx - 1.015, 'k-')
xx = np.linspace(0.7, 1.3, 600)
pyplot.plot(xx, -0.5002 * xx**4 + 2.233 * xx**3 -
3.207 * xx**2 + 1.917 * xx - 0.4151, 'k-')
xx = np.linspace(0.6, 2.2, 1500)
pyplot.plot(xx, 16.72 * xx**4 - 88.28 * xx**3 +
168 * xx**2 - 122.4 * xx + 29.79, 'k-')
cursor = pyplot.scatter(temperature, pressure['total'], 200, 'g')
#cursor2 = pyplot.scatter(-1, -1, 200, 'r')
pyplot.text(0.6, 10, 'solid')
pyplot.text(1, 1, 'liquid')
pyplot.text(1, 10**-3, 'gas')
pyplot.subplot(132)
pyplot.title("Temperature")
plot1, = pyplot.plot([0], [temperature])
pyplot.xlabel("Time")
pyplot.ylabel("Temperature")
pyplot.subplot(133)
pyplot.title("Pressure")
plot2, = pyplot.plot([0], [pressure['total']])
pyplot.xlabel("Time")
pyplot.ylabel("Pressure")
# pyplot.legend()
pyplot.show(block=False)
plt1_x_data = np.zeros(1)
plt1_y_data = np.zeros(1)
plt2_x_data = np.zeros(1)
plt2_y_data = np.zeros(1)
def limit_range(val, minval=0., maxval=1.):
if val > maxval:
ret_val = maxval
elif val < minval:
ret_val = minval
else:
ret_val = val
if isinstance(val, int):
return int(ret_val)
elif isinstance(val, float):
return float(ret_val)
else:
return ret_val
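# e.g. limit_range(200, 0, 127) -> 127; the numeric type of `val`
# (int or float) is preserved in the clamped result.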
def pressure_from_midi_val(midi_val, pmin, pmax, log_flag=pressure_log_flag):
if log_flag:
return pmin * (float(pmax) / pmin)**(float(midi_val) / 127)
else:
return (midi_val * (pmax - pmin) / 127 + pmin)
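# With the defaults above (min_press=5e-4, max_press=10) the logarithmic
# mapping interpolates geometrically: midi_val=0 -> 5e-4, 127 -> 10, and
# the midpoint 64 -> 5e-4 * (10 / 5e-4)**(64 / 127) ~= 0.074.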
def main_loop():
global energies, plt1_x_data, plt1_y_data, plt2_x_data, plt2_y_data, old_pressure
system.integrator.run(steps=int_steps)
vis.update()
# increase LJ cap during warmup
if system.force_cap > 0:
if system.analysis.min_dist() < min_dist:
system.force_cap = system.force_cap + 0.1
else:
system.force_cap = 0
print("Switching off force capping")
# make sure the parameters are valid
# not sure if this is necessary after using limit_range
if controls.volume == 0:
controls.volume = controls.min_vol
if controls.number_of_particles == 0:
controls.number_of_particles = 1
if controls.pressure == 0:
controls.pressure = controls.min_press
pressure = system.analysis.pressure()
# update the parameters set in the GUI
if system.thermostat.get_state()[0]['kT'] != controls.temperature:
system.thermostat.set_langevin(kT=controls.temperature, gamma=1.0)
print("temperature changed")
system.force_cap = lj_cap
if controls.ensemble == 'NPT':
# reset Vkappa when target pressure has changed
if old_pressure != controls.pressure:
system.analysis.v_kappa('reset')
print("pressure changed")
old_pressure = controls.pressure
system.force_cap = lj_cap
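        # Heuristic piston mass: adapt it to the measured volume
        # compressibility Vk1 (falling back to the initial mass while Vk1 is
        # not yet positive) and clamp it to the NPTMin/MaxPistonMass bounds.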
newVkappa = system.analysis.v_kappa('read')['Vk1']
newVkappa = newVkappa if newVkappa > 0. else 4.0 / \
(NPTGamma0 * NPTGamma0 * NPTInitPistonMass)
pistonMass = limit_range(4.0 / (NPTGamma0 * NPTGamma0 * newVkappa),
NPTMinPistonMass, NPTMaxPistonMass)
system.integrator.set_isotropic_npt(
controls.pressure, pistonMass, cubic_box=True)
controls.volume = system.box_l[0]**3.
else:
system.integrator.set_nvt()
controls.pressure = pressure['total']
new_box = np.ones(3) * controls.volume**(1. / 3.)
if np.any(np.array(system.box_l) != new_box):
for i in range(len(system.part)):
system.part[i].pos = system.part[i].pos * \
new_box / system.box_l[0]
print("volume changed")
system.force_cap = lj_cap
system.box_l = new_box
new_part = controls.number_of_particles
if new_part > len(system.part):
for i in range(len(system.part), new_part):
system.part.add(id=i, pos=np.random.random(3) * system.box_l)
print("particles added")
system.force_cap = lj_cap
elif new_part < len(system.part):
for i in range(new_part, len(system.part)):
system.part[i].remove()
print("particles removed")
plt1_x_data = plot1.get_xdata()
plt1_y_data = plot1.get_ydata()
plt2_x_data = plot2.get_xdata()
plt2_y_data = plot2.get_ydata()
plt1_x_data = np.append(
plt1_x_data[-plot_max_data_len + 1:], system.time)
plt1_y_data = np.append(plt1_y_data[-plot_max_data_len + 1:],
2. / (3. * len(system.part))
* system.analysis.energy()["kinetic"])
plt2_x_data = np.append(
plt2_x_data[-plot_max_data_len + 1:], system.time)
plt2_y_data = np.append(
plt2_y_data[-plot_max_data_len + 1:], pressure['total'])
def main_thread():
for i in range(int_n_times):
main_loop()
def midi_thread():
global mayavi_rotation_angle, mayavi_zoom
while True:
try:
if controls.midi_input is not None and controls.midi_input.poll():
events = controls.midi_input.read(1000)
for event in events:
status, data1, data2, data3 = event[0]
if status == controls.MIDI_NUM_TEMPERATURE:
temperature = data2 * \
(controls.max_temp - controls.min_temp) / \
127 + controls.min_temp
controls.temperature = limit_range(
temperature, controls.min_temp, controls.max_temp)
elif status == controls.MIDI_NUM_VOLUME:
volume = data2 * \
(controls.max_vol - controls.min_vol) / \
127 + controls.min_vol
controls.volume = limit_range(
volume, controls.min_vol, controls.max_vol)
controls.ensemble = 'NVT'
elif status == controls.MIDI_NUM_PRESSURE:
pressure = pressure_from_midi_val(
data2, controls.min_press, controls.max_press)
controls.pressure = limit_range(
pressure, controls.min_press, controls.max_press)
controls.ensemble = 'NPT'
elif status == controls.MIDI_NUM_NUMBEROFPARTICLES:
npart = int(data2 * controls.max_n / 127)
controls.number_of_particles = limit_range(
npart, controls.min_n, controls.max_n)
elif status == controls.MIDI_ROTATE:
if data2 < 65:
# rotate clockwise
mayavi_rotation_angle += mayavi_rotation_angle_step * \
data2
elif data2 >= 65:
# rotate counterclockwise
mayavi_rotation_angle -= mayavi_rotation_angle_step * \
(data2 - 64)
elif status == controls.MIDI_ZOOM:
if data1 == 99 and data2 == 127:
# zoom in
mayavi_zoom -= mayavi_zoom_step
elif data1 == 98 and data2 == 127:
# zoom out
mayavi_zoom += mayavi_zoom_step
# else:
# print("Unknown Status {0} with data1={1} and
# data2={2}".format(status, data1, data2))
except Exception as e:
print(e)
time.sleep(0.01)
last_plotted = 0
def rotate_scene():
global mayavi_rotation_angle
if use_mayavi and mayavi_rotation_angle:
# mlab.yaw(mayavi_rotation_angle)
if mayavi_autozoom:
mlab.view(azimuth=mayavi_rotation_angle, distance='auto')
else:
current_view_vals = mlab.view()
mlab.view(azimuth=mayavi_rotation_angle,
elevation=current_view_vals[1],
distance=current_view_vals[2],
focalpoint=current_view_vals[3])
mayavi_rotation_angle %= 360.
def zoom_scene():
global mayavi_zoom, mayavi_zoom_old
if use_mayavi:
mlab.view(distance=mayavi_zoom)
elif use_opengl:
if mayavi_zoom_old < mayavi_zoom:
vis.camera.move_backward()
mayavi_zoom_old = mayavi_zoom
elif mayavi_zoom_old > mayavi_zoom:
vis.camera.move_forward()
mayavi_zoom_old = mayavi_zoom
def update_plot():
global last_plotted
# rotate_scene()
zoom_scene()
data_len = np.array([len(plt1_x_data), len(plt1_y_data),
len(plt2_x_data), len(plt2_y_data)]).min()
plot1.set_xdata(plt1_x_data[:data_len])
plot1.set_ydata(plt1_y_data[:data_len])
plot2.set_xdata(plt2_x_data[:data_len])
plot2.set_ydata(plt2_y_data[:data_len])
cursor.set_offsets([plt1_y_data[data_len - 1], plt2_y_data[data_len - 1]])
# cursor2.set_offsets([controls.temperature, controls.pressure])
current_time = plot1.get_xdata()[-1]
if last_plotted == current_time:
return
last_plotted = current_time
plot1.axes.set_xlim(plot1.get_xdata()[0], plot1.get_xdata()[-1])
plot1.axes.set_ylim(0.8 * plot1.get_ydata().min(),
1.2 * plot1.get_ydata().max())
plot2.axes.set_xlim(plot2.get_xdata()[0], plot2.get_xdata()[-1])
plot2.axes.set_ylim(0.8 * plot2.get_ydata().min(),
1.2 * plot2.get_ydata().max())
pyplot.draw()
t = Thread(target=main_thread)
t.daemon = True
vis.register_callback(update_plot, interval=1000)
controls = Controls()
t.start()
if controls.midi_input is not None:
t2 = Thread(target=midi_thread)
t2.daemon = True
t2.start()
if use_opengl:
gui = GUI()
vis.register_callback(gui.process_events, interval=1000)
vis.start()
|
mkuron/espresso
|
samples/lj-demo.py
|
Python
|
gpl-3.0
| 20,362
|
[
"ESPResSo",
"Mayavi"
] |
ba0e3e7c086c7bc1aca97e00e98bbb8a1720d56c56cf3ca4c55a51607d23ee62
|
# -*- coding: utf-8 -*-
"""
Unit tests for instructor.api methods.
"""
import datetime
import ddt
import random
import pytz
import io
import json
import requests
import shutil
import tempfile
from urllib import quote
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.http import HttpRequest, HttpResponse
from django.test import RequestFactory, TestCase
from django.test.utils import override_settings
from django.utils.timezone import utc
from django.utils.translation import ugettext as _
from mock import Mock, patch
from nose.tools import raises
from nose.plugins.attrib import attr
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from course_modes.models import CourseMode
from courseware.models import StudentModule
from courseware.tests.factories import StaffFactory, InstructorFactory, BetaTesterFactory, UserProfileFactory
from courseware.tests.helpers import LoginEnrollmentTestCase
from django_comment_common.models import FORUM_ROLE_COMMUNITY_TA
from django_comment_common.utils import seed_permissions_roles
from microsite_configuration import microsite
from shoppingcart.models import (
RegistrationCodeRedemption, Order, CouponRedemption,
PaidCourseRegistration, Coupon, Invoice, CourseRegistrationCode, CourseRegistrationCodeInvoiceItem,
InvoiceTransaction)
from shoppingcart.pdf import PDFInvoice
from student.models import (
CourseEnrollment, CourseEnrollmentAllowed, NonExistentCourseError,
ManualEnrollmentAudit, UNENROLLED_TO_ENROLLED, ENROLLED_TO_UNENROLLED,
ALLOWEDTOENROLL_TO_UNENROLLED, ENROLLED_TO_ENROLLED, UNENROLLED_TO_ALLOWEDTOENROLL,
UNENROLLED_TO_UNENROLLED, ALLOWEDTOENROLL_TO_ENROLLED
)
from student.tests.factories import UserFactory, CourseModeFactory, AdminFactory
from student.roles import CourseBetaTesterRole, CourseSalesAdminRole, CourseFinanceAdminRole, CourseInstructorRole
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.fields import Date
from courseware.models import StudentFieldOverride
import instructor_task.api
import instructor.views.api
from instructor.views.api import require_finance_admin
from instructor.tests.utils import FakeContentTask, FakeEmail, FakeEmailInfo
from instructor.views.api import _split_input_list, common_exceptions_400, generate_unique_password
from instructor_task.api_helper import AlreadyRunningError
from openedx.core.djangoapps.course_groups.cohorts import set_course_cohort_settings
from .test_tools import msk_from_problem_urlname
DATE_FIELD = Date()
EXPECTED_CSV_HEADER = (
'"code","redeem_code_url","course_id","company_name","created_by","redeemed_by","invoice_id","purchaser",'
'"customer_reference_number","internal_reference"'
)
EXPECTED_COUPON_CSV_HEADER = '"Coupon Code","Course Id","% Discount","Description","Expiration Date",' \
'"Is Active","Code Redeemed Count","Total Discounted Seats","Total Discounted Amount"'
# ddt data for test cases involving reports
REPORTS_DATA = (
{
'report_type': 'grade',
'instructor_api_endpoint': 'calculate_grades_csv',
'task_api_endpoint': 'instructor_task.api.submit_calculate_grades_csv',
'extra_instructor_api_kwargs': {}
},
{
'report_type': 'enrolled learner profile',
'instructor_api_endpoint': 'get_students_features',
'task_api_endpoint': 'instructor_task.api.submit_calculate_students_features_csv',
'extra_instructor_api_kwargs': {'csv': '/csv'}
},
{
'report_type': 'detailed enrollment',
'instructor_api_endpoint': 'get_enrollment_report',
'task_api_endpoint': 'instructor_task.api.submit_detailed_enrollment_features_csv',
'extra_instructor_api_kwargs': {}
},
{
'report_type': 'enrollment',
'instructor_api_endpoint': 'get_students_who_may_enroll',
'task_api_endpoint': 'instructor_task.api.submit_calculate_may_enroll_csv',
'extra_instructor_api_kwargs': {},
},
{
'report_type': 'proctored exam results',
'instructor_api_endpoint': 'get_proctored_exam_results',
'task_api_endpoint': 'instructor_task.api.submit_proctored_exam_results_report',
'extra_instructor_api_kwargs': {},
}
)
# ddt data for test cases involving executive summary report
EXECUTIVE_SUMMARY_DATA = (
{
'report_type': 'executive summary',
'instructor_api_endpoint': 'get_exec_summary_report',
'task_api_endpoint': 'instructor_task.api.submit_executive_summary_report',
'extra_instructor_api_kwargs': {}
},
)
@common_exceptions_400
def view_success(request): # pylint: disable=unused-argument
"A dummy view for testing that returns a simple HTTP response"
return HttpResponse('success')
@common_exceptions_400
def view_user_doesnotexist(request): # pylint: disable=unused-argument
"A dummy view that raises a User.DoesNotExist exception"
raise User.DoesNotExist()
@common_exceptions_400
def view_alreadyrunningerror(request): # pylint: disable=unused-argument
"A dummy view that raises an AlreadyRunningError exception"
raise AlreadyRunningError()
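# A minimal sketch of the behaviour these dummy views exercise (an assumed
# shape, not the actual instructor.views.api implementation):
#
#     def common_exceptions_400(func):
#         def wrapped(request, *args, **kwargs):
#             use_json = request.is_ajax()
#             try:
#                 return func(request, *args, **kwargs)
#             except User.DoesNotExist:
#                 message = _("User does not exist.")
#             except AlreadyRunningError:
#                 message = _("Task is already running.")
#             if use_json:
#                 return JsonResponse({"error": message}, status=400)
#             return HttpResponseBadRequest(message)
#         return wrapped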
@attr('shard_1')
class TestCommonExceptions400(TestCase):
"""
Testing the common_exceptions_400 decorator.
"""
def setUp(self):
super(TestCommonExceptions400, self).setUp()
self.request = Mock(spec=HttpRequest)
self.request.META = {}
def test_happy_path(self):
resp = view_success(self.request)
self.assertEqual(resp.status_code, 200)
def test_user_doesnotexist(self):
self.request.is_ajax.return_value = False
resp = view_user_doesnotexist(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
self.assertIn("User does not exist", resp.content)
def test_user_doesnotexist_ajax(self):
self.request.is_ajax.return_value = True
resp = view_user_doesnotexist(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
result = json.loads(resp.content)
self.assertIn("User does not exist", result["error"])
def test_alreadyrunningerror(self):
self.request.is_ajax.return_value = False
resp = view_alreadyrunningerror(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
self.assertIn("Task is already running", resp.content)
def test_alreadyrunningerror_ajax(self):
self.request.is_ajax.return_value = True
resp = view_alreadyrunningerror(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
result = json.loads(resp.content)
self.assertIn("Task is already running", result["error"])
@attr('shard_1')
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message'))
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestInstructorAPIDenyLevels(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Ensure that users cannot access endpoints they shouldn't be able to.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIDenyLevels, cls).setUpClass()
cls.course = CourseFactory.create()
cls.problem_location = msk_from_problem_urlname(
cls.course.id,
'robot-some-problem-urlname'
)
cls.problem_urlname = cls.problem_location.to_deprecated_string()
def setUp(self):
super(TestInstructorAPIDenyLevels, self).setUp()
self.user = UserFactory.create()
CourseEnrollment.enroll(self.user, self.course.id)
_module = StudentModule.objects.create(
student=self.user,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
# Endpoints that only Staff or Instructors can access
self.staff_level_endpoints = [
('students_update_enrollment',
{'identifiers': 'foo@example.org', 'action': 'enroll'}),
('get_grading_config', {}),
('get_students_features', {}),
('get_student_progress_url', {'unique_student_identifier': self.user.username}),
('reset_student_attempts',
{'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
('update_forum_role_membership',
{'unique_student_identifier': self.user.email, 'rolename': 'Moderator', 'action': 'allow'}),
('list_forum_members', {'rolename': FORUM_ROLE_COMMUNITY_TA}),
('send_email', {'send_to': 'staff', 'subject': 'test', 'message': 'asdf'}),
('list_instructor_tasks', {}),
('list_background_email_tasks', {}),
('list_report_downloads', {}),
('list_financial_report_downloads', {}),
('calculate_grades_csv', {}),
('get_students_features', {}),
('get_enrollment_report', {}),
('get_students_who_may_enroll', {}),
('get_exec_summary_report', {}),
('get_proctored_exam_results', {}),
]
# Endpoints that only Instructors can access
self.instructor_level_endpoints = [
('bulk_beta_modify_access', {'identifiers': 'foo@example.org', 'action': 'add'}),
('modify_access', {'unique_student_identifier': self.user.email, 'rolename': 'beta', 'action': 'allow'}),
('list_course_role_members', {'rolename': 'beta'}),
('rescore_problem',
{'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
]
def _access_endpoint(self, endpoint, args, status_code, msg):
"""
Asserts that accessing the given `endpoint` gets a response of `status_code`.
endpoint: string, endpoint for instructor dash API
args: dict, kwargs for `reverse` call
status_code: expected HTTP status code response
msg: message to display if assertion fails.
"""
url = reverse(endpoint, kwargs={'course_id': self.course.id.to_deprecated_string()})
if endpoint in ['send_email', 'students_update_enrollment', 'bulk_beta_modify_access']:
response = self.client.post(url, args)
else:
response = self.client.get(url, args)
self.assertEqual(
response.status_code,
status_code,
msg=msg
)
def test_student_level(self):
"""
Ensure that an enrolled student can't access staff or instructor endpoints.
"""
self.client.login(username=self.user.username, password='test')
for endpoint, args in self.staff_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Student should not be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Student should not be allowed to access endpoint " + endpoint
)
def test_staff_level(self):
"""
Ensure that a staff member can't access instructor endpoints.
"""
staff_member = StaffFactory(course_key=self.course.id)
CourseEnrollment.enroll(staff_member, self.course.id)
CourseFinanceAdminRole(self.course.id).add_users(staff_member)
self.client.login(username=staff_member.username, password='test')
# Try to promote to forums admin - not working
# update_forum_role(self.course.id, staff_member, FORUM_ROLE_ADMINISTRATOR, 'allow')
for endpoint, args in self.staff_level_endpoints:
# TODO: make these work
if endpoint in ['update_forum_role_membership', 'list_forum_members']:
continue
self._access_endpoint(
endpoint,
args,
200,
"Staff member should be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Staff member should not be allowed to access endpoint " + endpoint
)
def test_instructor_level(self):
"""
Ensure that an instructor member can access all endpoints.
"""
inst = InstructorFactory(course_key=self.course.id)
CourseEnrollment.enroll(inst, self.course.id)
CourseFinanceAdminRole(self.course.id).add_users(inst)
self.client.login(username=inst.username, password='test')
for endpoint, args in self.staff_level_endpoints:
# TODO: make these work
if endpoint in ['update_forum_role_membership']:
continue
self._access_endpoint(
endpoint,
args,
200,
"Instructor should be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
# TODO: make this work
if endpoint in ['rescore_problem']:
continue
self._access_endpoint(
endpoint,
args,
200,
"Instructor should be allowed to access endpoint " + endpoint
)
@attr('shard_1')
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
class TestInstructorAPIBulkAccountCreationAndEnrollment(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test Bulk account creation and enrollment from csv file
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIBulkAccountCreationAndEnrollment, cls).setUpClass()
cls.course = CourseFactory.create()
cls.url = reverse('register_and_enroll_students', kwargs={'course_id': cls.course.id.to_deprecated_string()})
def setUp(self):
super(TestInstructorAPIBulkAccountCreationAndEnrollment, self).setUp()
self.request = RequestFactory().request()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.not_enrolled_student = UserFactory(
username='NotEnrolledStudent',
email='nonenrolled@test.com',
first_name='NotEnrolled',
last_name='Student'
)
@patch('instructor.views.api.log.info')
def test_account_creation_and_enrollment_with_csv(self, info_log):
"""
Happy path test to create a single new user
"""
csv_content = "test_student@example.com,test_student_1,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
        # Test the log for the email that is sent to the newly created user.
info_log.assert_called_with('email sent to new created user at %s', 'test_student@example.com')
@patch('instructor.views.api.log.info')
def test_account_creation_and_enrollment_with_csv_with_blank_lines(self, info_log):
"""
        Happy path test to create a single new user from a CSV that contains blank lines
"""
csv_content = "\ntest_student@example.com,test_student_1,tester1,USA\n\n"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
        # Test the log for the email that is sent to the newly created user.
info_log.assert_called_with('email sent to new created user at %s', 'test_student@example.com')
@patch('instructor.views.api.log.info')
def test_email_and_username_already_exist(self, info_log):
"""
        If the email address and username already exist
        and the user is enrolled in the course, do nothing (no email gets sent out)
"""
csv_content = "test_student@example.com,test_student_1,tester1,USA\n" \
"test_student@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
        # Test the log message noting that the user already exists.
info_log.assert_called_with(
u"user already exists with username '%s' and email '%s'",
'test_student_1',
'test_student@example.com'
)
def test_file_upload_type_not_csv(self):
"""
Try uploading some non-CSV file and verify that it is rejected
"""
uploaded_file = SimpleUploadedFile("temp.jpg", io.BytesIO(b"some initial binary data: \x00\x01").read())
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['general_errors']), 0)
self.assertEquals(data['general_errors'][0]['response'], 'Make sure that the file you upload is in CSV format with no extraneous characters or rows.')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_bad_file_upload_type(self):
"""
        Try uploading a CSV file whose contents cannot be read and verify that it is rejected
"""
uploaded_file = SimpleUploadedFile("temp.csv", io.BytesIO(b"some initial binary data: \x00\x01").read())
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['general_errors']), 0)
self.assertEquals(data['general_errors'][0]['response'], 'Could not read uploaded file.')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_insufficient_data(self):
"""
Try uploading a CSV file which does not have the exact four columns of data
"""
csv_content = "test_student@example.com,test_student_1\n"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 1)
self.assertEquals(data['general_errors'][0]['response'], 'Data in row #1 must have exactly four columns: email, username, full name, and country')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_invalid_email_in_csv(self):
"""
Test failure case of a poorly formatted email field
"""
csv_content = "test_student.example.com,test_student_1,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
data = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertNotEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
self.assertEquals(data['row_errors'][0]['response'], 'Invalid email {0}.'.format('test_student.example.com'))
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
@patch('instructor.views.api.log.info')
def test_csv_user_exist_and_not_enrolled(self, info_log):
"""
        If the email address and username already exist
        and the user is not enrolled in the course, enroll them and move on to the next row.
"""
csv_content = "nonenrolled@test.com,NotEnrolledStudent,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
info_log.assert_called_with(
u'user %s enrolled in the course %s',
u'NotEnrolledStudent',
self.course.id
)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
        self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
def test_user_with_already_existing_email_in_csv(self):
"""
If the email address already exists, but the username is different,
assume it is the correct user and just register the user in the course.
"""
csv_content = "test_student@example.com,test_student_1,tester1,USA\n" \
"test_student@example.com,test_student_2,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
warning_message = 'An account with email {email} exists but the provided username {username} ' \
'is different. Enrolling anyway with {email}.'.format(email='test_student@example.com', username='test_student_2')
self.assertNotEquals(len(data['warnings']), 0)
self.assertEquals(data['warnings'][0]['response'], warning_message)
user = User.objects.get(email='test_student@example.com')
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
        self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
def test_user_with_already_existing_username_in_csv(self):
"""
If the username already exists (but not the email),
assume it is a different user and fail to create the new account.
"""
csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
"test_student2@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['row_errors']), 0)
self.assertEquals(data['row_errors'][0]['response'], 'Username {user} already exists.'.format(user='test_student_1'))
def test_csv_file_not_attached(self):
"""
Test when the user does not attach a file
"""
csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
"test_student2@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'file_not_found': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['general_errors']), 0)
self.assertEquals(data['general_errors'][0]['response'], 'File is not attached.')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_raising_exception_in_auto_registration_and_enrollment_case(self):
"""
Test that exceptions are handled well
"""
csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
"test_student2@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
with patch('instructor.views.api.create_and_enroll_user') as mock:
mock.side_effect = NonExistentCourseError()
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['row_errors']), 0)
self.assertEquals(data['row_errors'][0]['response'], 'NonExistentCourseError')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_generate_unique_password(self):
"""
generate_unique_password should generate a unique password string that excludes certain characters.
"""
password = generate_unique_password([], 12)
self.assertEquals(len(password), 12)
for letter in password:
self.assertNotIn(letter, 'aAeEiIoOuU1l')
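    # This test, together with test_generate_unique_password_no_reuse below,
    # pins down the helper's contract: fixed length, none of the easily
    # confused characters 'aAeEiIoOuU1l', and no reuse of a password already
    # in the given list.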
def test_users_created_and_enrolled_successfully_if_others_fail(self):
csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
"test_student3@example.com,test_student_1,tester3,CA\n" \
"test_student2@example.com,test_student_2,tester2,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['row_errors']), 0)
self.assertEquals(data['row_errors'][0]['response'], 'Username {user} already exists.'.format(user='test_student_1'))
self.assertTrue(User.objects.filter(username='test_student_1', email='test_student1@example.com').exists())
self.assertTrue(User.objects.filter(username='test_student_2', email='test_student2@example.com').exists())
self.assertFalse(User.objects.filter(email='test_student3@example.com').exists())
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 2)
@patch.object(instructor.views.api, 'generate_random_string',
Mock(side_effect=['first', 'first', 'second']))
def test_generate_unique_password_no_reuse(self):
"""
generate_unique_password should generate a unique password string that hasn't been generated before.
"""
generated_password = ['first']
password = generate_unique_password(generated_password, 12)
self.assertNotEquals(password, 'first')
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': False})
def test_allow_automated_signups_flag_not_set(self):
csv_content = "test_student1@example.com,test_student_1,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEquals(response.status_code, 403)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
@attr('shard_1')
@ddt.ddt
class TestInstructorAPIEnrollment(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test enrollment modification endpoint.
This test does NOT exhaustively test state changes, that is the
job of test_enrollment. This tests the response and action switch.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIEnrollment, cls).setUpClass()
cls.course = CourseFactory.create()
# Email URL values
cls.site_name = microsite.get_value(
'SITE_NAME',
settings.SITE_NAME
)
cls.about_path = '/courses/{}/about'.format(cls.course.id)
cls.course_path = '/courses/{}/'.format(cls.course.id)
def setUp(self):
super(TestInstructorAPIEnrollment, self).setUp()
self.request = RequestFactory().request()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.enrolled_student = UserFactory(username='EnrolledStudent', first_name='Enrolled', last_name='Student')
CourseEnrollment.enroll(
self.enrolled_student,
self.course.id
)
self.notenrolled_student = UserFactory(username='NotEnrolledStudent', first_name='NotEnrolled',
last_name='Student')
# Create invited, but not registered, user
cea = CourseEnrollmentAllowed(email='robot-allowed@robot.org', course_id=self.course.id)
cea.save()
self.allowed_email = 'robot-allowed@robot.org'
self.notregistered_email = 'robot-not-an-email-yet@robot.org'
self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
# Uncomment to enable printing of large diffs
# from failed assertions in the event of a test failure.
# (commented out because of pylint C0103(invalid-name))
# self.maxDiff = None
def test_missing_params(self):
""" Test missing all query parameters. """
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
def test_bad_action(self):
""" Test with an invalid action. """
action = 'robot-not-an-action'
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': action})
self.assertEqual(response.status_code, 400)
def test_invalid_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': 'percivaloctavius@', 'action': 'enroll', 'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": 'percivaloctavius@',
"invalidIdentifier": True,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_invalid_username(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url,
{'identifiers': 'percivaloctavius', 'action': 'enroll', 'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": 'percivaloctavius',
"invalidIdentifier": True,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_enroll_with_username(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'enroll',
'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": self.notenrolled_student.username,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
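# The 'before'/'after' dicts asserted here and in the tests below report
# four booleans per identifier: 'enrollment' (actively enrolled),
# 'auto_enroll', 'user' (an account with that identifier exists), and
# 'allowed' (a CourseEnrollmentAllowed record exists).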
def test_enroll_without_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'enroll',
'email_students': False})
print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now enrolled
user = User.objects.get(email=self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notenrolled_student.email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
@ddt.data('http', 'https')
def test_enroll_with_email(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notenrolled_student.email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now enrolled
user = User.objects.get(email=self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notenrolled_student.email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
u'You have been enrolled in {}'.format(self.course.display_name)
)
self.assertEqual(
mail.outbox[0].body,
"Dear NotEnrolled Student\n\nYou have been enrolled in {} "
"at edx.org by a member of the course staff. "
"The course should now appear on your edx.org dashboard.\n\n"
"To start accessing course materials, please visit "
"{proto}://{site}{course_path}\n\n----\n"
"This email was automatically sent from edx.org to NotEnrolled Student".format(
self.course.display_name,
proto=protocol, site=self.site_name, course_path=self.course_path
)
)
@ddt.data('http', 'https')
def test_enroll_with_email_not_registered(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
u'You have been invited to register for {}'.format(self.course.display_name)
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {} at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit {proto}://{site}/register and fill out the "
"registration form making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"Once you have registered and activated your account, "
"visit {proto}://{site}{about_path} to join the course.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
self.course.display_name, proto=protocol, site=self.site_name, about_path=self.about_path
)
)
@ddt.data('http', 'https')
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_enroll_email_not_registered_mktgsite(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {display_name}"
" at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit {proto}://{site}/register and fill out the registration form "
"making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"You can then enroll in {display_name}.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
display_name=self.course.display_name, proto=protocol, site=self.site_name
)
)
@ddt.data('http', 'https')
def test_enroll_with_email_not_registered_autoenroll(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True,
'auto_enroll': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
u'You have been invited to register for {}'.format(self.course.display_name)
)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {display_name}"
" at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit {proto}://{site}/register and fill out the registration form "
"making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"Once you have registered and activated your account,"
" you will see {display_name} listed on your dashboard.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
proto=protocol, site=self.site_name, display_name=self.course.display_name
)
)
def test_unenroll_without_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll',
'email_students': False})
print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now unenrolled
user = User.objects.get(email=self.enrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.enrolled_student.email,
"before": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, ENROLLED_TO_UNENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_unenroll_with_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll',
'email_students': True})
print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now unenrolled
user = User.objects.get(email=self.enrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.enrolled_student.email,
"before": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, ENROLLED_TO_UNENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been un-enrolled from {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear Enrolled Student\n\nYou have been un-enrolled in {display_name} "
"at edx.org by a member of the course staff. "
"The course will no longer appear on your edx.org dashboard.\n\n"
"Your other courses have not been affected.\n\n----\n"
"This email was automatically sent from edx.org to Enrolled Student".format(
display_name=self.course.display_name,
)
)
def test_unenroll_with_email_allowed_student(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url,
{'identifiers': self.allowed_email, 'action': 'unenroll', 'email_students': True})
print "type(self.allowed_email): {}".format(type(self.allowed_email))
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.allowed_email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": True,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, ALLOWEDTOENROLL_TO_UNENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been un-enrolled from {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear Student,\n\nYou have been un-enrolled from course {display_name} by a member of the course staff. "
"Please disregard the invitation previously sent.\n\n----\n"
"This email was automatically sent from edx.org to robot-allowed@robot.org".format(
display_name=self.course.display_name,
)
)
@ddt.data('http', 'https')
@patch('instructor.enrollment.uses_shib')
def test_enroll_with_email_not_registered_with_shib(self, protocol, mock_uses_shib):
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {display_name} at edx.org by a member of the course staff.\n\n"
"To access the course visit {proto}://{site}{about_path} and register for the course.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
proto=protocol, site=self.site_name, about_path=self.about_path,
display_name=self.course.display_name,
)
)
@patch('instructor.enrollment.uses_shib')
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_enroll_email_not_registered_shib_mktgsite(self, mock_uses_shib):
# Try with marketing site enabled and shib on
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
# Try with marketing site enabled
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
response = self.client.post(url, {'identifiers': self.notregistered_email, 'action': 'enroll',
'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {} at edx.org by a member of the course staff.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
self.course.display_name,
)
)
@ddt.data('http', 'https')
@patch('instructor.enrollment.uses_shib')
def test_enroll_with_email_not_registered_with_shib_autoenroll(self, protocol, mock_uses_shib):
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True,
'auto_enroll': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {display_name}"
" at edx.org by a member of the course staff.\n\n"
"To access the course visit {proto}://{site}{course_path} and login.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
display_name=self.course.display_name,
proto=protocol, site=self.site_name, course_path=self.course_path
)
)
def test_enroll_already_enrolled_student(self):
"""
Ensure that already enrolled "verified" students cannot be downgraded
to "honor"
"""
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
# make this enrollment "verified"
course_enrollment.mode = u'verified'
course_enrollment.save()
self.assertEqual(course_enrollment.mode, u'verified')
# now re-enroll the student through the instructor dash
self._change_student_enrollment(self.enrolled_student, self.course, 'enroll')
# affirm that the student is still in "verified" mode
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, ENROLLED_TO_ENROLLED)
self.assertEqual(course_enrollment.mode, u"verified")
def create_paid_course(self):
"""
Create a course with a paid course mode and return it.
"""
paid_course = CourseFactory.create()
CourseModeFactory.create(course_id=paid_course.id, min_price=50)
CourseInstructorRole(paid_course.id).add_users(self.instructor)
return paid_course
def test_reason_field_should_not_be_empty(self):
"""
Test that the reason field must not be empty when
manually enrolling students in paid courses.
"""
paid_course = self.create_paid_course()
url = reverse('students_update_enrollment', kwargs={'course_id': paid_course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': False,
'auto_enroll': False}
response = self.client.post(url, params)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"error": True
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_unenrolled_allowed_to_enroll_user(self):
"""
Test enrolling a user who starts out unregistered: first marked as allowed to enroll, then enrolled once the account exists.
"""
paid_course = self.create_paid_course()
url = reverse('students_update_enrollment', kwargs={'course_id': paid_course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': False,
'auto_enroll': False, 'reason': 'testing..'}
response = self.client.post(url, params)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
self.assertEqual(response.status_code, 200)
# now register the user
UserFactory(email=self.notregistered_email)
url = reverse('students_update_enrollment', kwargs={'course_id': paid_course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': False,
'auto_enroll': False, 'reason': 'testing'}
response = self.client.post(url, params)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 2)
self.assertEqual(manual_enrollments[1].state_transition, ALLOWEDTOENROLL_TO_ENROLLED)
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notregistered_email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": True,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": True,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_unenrolled_already_not_enrolled_user(self):
"""
Test unenrolling a user who is not enrolled in the course.
"""
paid_course = self.create_paid_course()
course_enrollment = CourseEnrollment.objects.filter(
user__email=self.notregistered_email, course_id=paid_course.id
)
self.assertEqual(course_enrollment.count(), 0)
url = reverse('students_update_enrollment', kwargs={'course_id': paid_course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'unenroll', 'email_students': False,
'auto_enroll': False, 'reason': 'testing'}
response = self.client.post(url, params)
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notregistered_email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_UNENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_unenroll_and_enroll_verified(self):
"""
Test that unenrolling and enrolling a student from a verified track
results in that student being in an honor track
"""
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
# upgrade enrollment
course_enrollment.mode = u'verified'
course_enrollment.save()
self.assertEqual(course_enrollment.mode, u'verified')
self._change_student_enrollment(self.enrolled_student, self.course, 'unenroll')
self._change_student_enrollment(self.enrolled_student, self.course, 'enroll')
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
self.assertEqual(course_enrollment.mode, u'honor')
def _change_student_enrollment(self, user, course, action):
"""
Helper function that posts to 'students_update_enrollment' to change
a student's enrollment
"""
url = reverse(
'students_update_enrollment',
kwargs={'course_id': course.id.to_deprecated_string()},
)
params = {
'identifiers': user.email,
'action': action,
'email_students': True,
'reason': 'change user enrollment'
}
response = self.client.post(url, params)
self.assertEqual(response.status_code, 200)
return response
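# For reference, the ManualEnrollmentAudit state transitions asserted in the
# class above: UNENROLLED_TO_ENROLLED, UNENROLLED_TO_ALLOWEDTOENROLL,
# ALLOWEDTOENROLL_TO_ENROLLED, ALLOWEDTOENROLL_TO_UNENROLLED,
# ENROLLED_TO_ENROLLED, ENROLLED_TO_UNENROLLED and UNENROLLED_TO_UNENROLLED.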
@attr('shard_1')
@ddt.ddt
class TestInstructorAPIBulkBetaEnrollment(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test bulk beta modify access endpoint.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIBulkBetaEnrollment, cls).setUpClass()
cls.course = CourseFactory.create()
# Email URL values
cls.site_name = microsite.get_value(
'SITE_NAME',
settings.SITE_NAME
)
cls.about_path = '/courses/{}/about'.format(cls.course.id)
cls.course_path = '/courses/{}/'.format(cls.course.id)
def setUp(self):
super(TestInstructorAPIBulkBetaEnrollment, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.beta_tester = BetaTesterFactory(course_key=self.course.id)
CourseEnrollment.enroll(
self.beta_tester,
self.course.id
)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
self.notenrolled_student = UserFactory(username='NotEnrolledStudent')
self.notregistered_email = 'robot-not-an-email-yet@robot.org'
self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
self.request = RequestFactory().request()
# Uncomment to enable printing of large diffs
# from failed assertions in the event of a test failure.
# (commented out because of pylint C0103(invalid-name))
# self.maxDiff = None
def test_missing_params(self):
""" Test missing all query parameters. """
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
def test_bad_action(self):
""" Test with an invalid action. """
action = 'robot-not-an-action'
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.beta_tester.email, 'action': action})
self.assertEqual(response.status_code, 400)
def add_notenrolled(self, response, identifier):
"""
Test Helper Method (not a test, called by other tests)
Takes a client response from a call to bulk_beta_modify_access with 'email_students': False,
and the student identifier (email or username) given as 'identifiers' in the request.
Asserts the response returns cleanly, that the student was added as a beta tester, and the
response properly contains their identifier, 'error': False, and 'userDoesNotExist': False.
Additionally asserts no email was sent.
"""
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": identifier,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_add_notenrolled_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False})
self.add_notenrolled(response, self.notenrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_email_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False, 'auto_enroll': True})
self.add_notenrolled(response, self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_username(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False})
self.add_notenrolled(response, self.notenrolled_student.username)
self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_username_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False, 'auto_enroll': True})
self.add_notenrolled(response, self.notenrolled_student.username)
self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
@ddt.data('http', 'https')
def test_add_notenrolled_with_email(self, protocol):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notenrolled_student.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to a beta test for {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
u"Dear {student_name}\n\nYou have been invited to be a beta tester "
"for {display_name} at edx.org by a member of the course staff.\n\n"
"Visit {proto}://{site}{about_path} to join "
"the course and begin the beta test.\n\n----\n"
"This email was automatically sent from edx.org to {student_email}".format(
display_name=self.course.display_name,
student_name=self.notenrolled_student.profile.name,
student_email=self.notenrolled_student.email,
proto=protocol,
site=self.site_name,
about_path=self.about_path
)
)
@ddt.data('http', 'https')
def test_add_notenrolled_with_email_autoenroll(self, protocol):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True,
'auto_enroll': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notenrolled_student.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to a beta test for {display_name}'.format(display_name=self.course.display_name)
)
self.assertEqual(
mail.outbox[0].body,
u"Dear {student_name}\n\nYou have been invited to be a beta tester "
"for {display_name} at edx.org by a member of the course staff.\n\n"
"To start accessing course materials, please visit "
"{proto}://{site}{course_path}\n\n----\n"
"This email was automatically sent from edx.org to {student_email}".format(
display_name=self.course.display_name,
student_name=self.notenrolled_student.profile.name,
student_email=self.notenrolled_student.email,
proto=protocol,
site=self.site_name,
course_path=self.course_path
)
)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_add_notenrolled_email_mktgsite(self):
# Try with marketing site enabled
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
u"Dear {}\n\nYou have been invited to be a beta tester "
"for {} at edx.org by a member of the course staff.\n\n"
"Visit edx.org to enroll in the course and begin the beta test.\n\n----\n"
"This email was automatically sent from edx.org to {}".format(
self.notenrolled_student.profile.name,
self.course.display_name,
self.notenrolled_student.email,
)
)
def test_enroll_with_email_not_registered(self):
# User doesn't exist
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url,
{'identifiers': self.notregistered_email, 'action': 'add', 'email_students': True,
'reason': 'testing'})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notregistered_email,
"error": True,
"userDoesNotExist": True
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_remove_without_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url,
{'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': False,
'reason': 'testing'})
self.assertEqual(response.status_code, 200)
# Works around a caching bug which supposedly can't happen in prod. The instance here is not ==
# the instance fetched from the email above, which had its cache cleared
if hasattr(self.beta_tester, '_roles'):
del self.beta_tester._roles
self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
# test the response data
expected = {
"action": "remove",
"results": [
{
"identifier": self.beta_tester.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_remove_with_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url,
{'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': True,
'reason': 'testing'})
self.assertEqual(response.status_code, 200)
# Works around a caching bug which supposedly can't happen in prod. The instance here is not ==
# the instance fetched from the email above, which had its cache cleared
if hasattr(self.beta_tester, '_roles'):
del self.beta_tester._roles
self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
# test the response data
expected = {
"action": "remove",
"results": [
{
"identifier": self.beta_tester.email,
"error": False,
"userDoesNotExist": False
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
u'You have been removed from a beta test for {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear {full_name}\n\nYou have been removed as a beta tester for "
"{display_name} at edx.org by a member of the course staff. "
"The course will remain on your dashboard, but you will no longer "
"be part of the beta testing group.\n\n"
"Your other courses have not been affected.\n\n----\n"
"This email was automatically sent from edx.org to {email_address}".format(
display_name=self.course.display_name,
full_name=self.beta_tester.profile.name,
email_address=self.beta_tester.email
)
)
@attr('shard_1')
class TestInstructorAPILevelsAccess(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can change permissions
of other users.
This test does NOT test whether the actions had an effect on the
database; that is the job of test_access.
This tests the response and action switch.
Actually, modify_access does not have a very meaningful
response yet, so only the status code is tested.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPILevelsAccess, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestInstructorAPILevelsAccess, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.other_instructor = InstructorFactory(course_key=self.course.id)
self.other_staff = StaffFactory(course_key=self.course.id)
self.other_user = UserFactory()
def test_modify_access_noparams(self):
""" Test missing all query parameters. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_modify_access_bad_action(self):
""" Test with an invalid action parameter. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'staff',
'action': 'robot-not-an-action',
})
self.assertEqual(response.status_code, 400)
def test_modify_access_bad_role(self):
""" Test with an invalid action parameter. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'robot-not-a-roll',
'action': 'revoke',
})
self.assertEqual(response.status_code, 400)
def test_modify_access_allow(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_user.email,
'rolename': 'staff',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_allow_with_uname(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_instructor.username,
'rolename': 'staff',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke_with_username(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.username,
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_with_fake_user(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': 'GandalfTheGrey',
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
expected = {
'unique_student_identifier': 'GandalfTheGrey',
'userDoesNotExist': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_modify_access_with_inactive_user(self):
self.other_user.is_active = False
self.other_user.save() # pylint: disable=no-member
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_user.username,
'rolename': 'beta',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
expected = {
'unique_student_identifier': self.other_user.username,
'inactiveUser': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_modify_access_revoke_not_allowed(self):
""" Test revoking access that a user does not have. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'instructor',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke_self(self):
"""
Test that an instructor cannot remove instructor privileges from themselves.
"""
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'unique_student_identifier': self.instructor.email,
'rolename': 'instructor',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'unique_student_identifier': self.instructor.username,
'rolename': 'instructor',
'action': 'revoke',
'removingSelfAsInstructor': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
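# Note the pattern in the tests above: modify_access reports user-level
# problems ('userDoesNotExist', 'inactiveUser', 'removingSelfAsInstructor')
# inside a 200 JSON payload, reserving 400 responses for malformed requests.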
def test_list_course_role_members_noparams(self):
""" Test missing all query parameters. """
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_list_course_role_members_bad_rolename(self):
""" Test with an invalid rolename parameter. """
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'rolename': 'robot-not-a-rolename',
})
self.assertEqual(response.status_code, 400)
def test_list_course_role_members_staff(self):
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'rolename': 'staff',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'course_id': self.course.id.to_deprecated_string(),
'staff': [
{
'username': self.other_staff.username,
'email': self.other_staff.email,
'first_name': self.other_staff.first_name,
'last_name': self.other_staff.last_name,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_list_course_role_members_beta(self):
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'rolename': 'beta',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'course_id': self.course.id.to_deprecated_string(),
'beta': []
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_update_forum_role_membership(self):
"""
Test update forum role membership with user's email and username.
"""
# Seed forum roles for course.
seed_permissions_roles(self.course.id)
for user in [self.instructor, self.other_user]:
for identifier_attr in [user.email, user.username]:
for rolename in ["Administrator", "Moderator", "Community TA"]:
for action in ["allow", "revoke"]:
self.assert_update_forum_role_membership(user, identifier_attr, rolename, action)
def assert_update_forum_role_membership(self, current_user, identifier, rolename, action):
"""
Test update forum role membership.
Given a unique_student_identifier, rolename and action, update the forum role and assert the result.
"""
url = reverse('update_forum_role_membership', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(
url,
{
'unique_student_identifier': identifier,
'rolename': rolename,
'action': action,
}
)
# Status code should be 200.
self.assertEqual(response.status_code, 200)
user_roles = current_user.roles.filter(course_id=self.course.id).values_list("name", flat=True)
if action == 'allow':
self.assertIn(rolename, user_roles)
elif action == 'revoke':
self.assertNotIn(rolename, user_roles)
@attr('shard_1')
@ddt.ddt
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
class TestInstructorAPILevelsDataDump(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints that show data without side effects.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPILevelsDataDump, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestInstructorAPILevelsDataDump, self).setUp()
self.course_mode = CourseMode(course_id=self.course.id,
mode_slug="honor",
mode_display_name="honor cert",
min_price=40)
self.course_mode.save()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.cart = Order.get_cart_for_user(self.instructor)
self.coupon_code = 'abcde'
self.coupon = Coupon(code=self.coupon_code, description='testing code', course_id=self.course.id,
percentage_discount=10, created_by=self.instructor, is_active=True)
self.coupon.save()
# Create testing invoice 1
self.sale_invoice_1 = Invoice.objects.create(
total_amount=1234.32, company_name='Test1', company_contact_name='TestName', company_contact_email='Test@company.com',
recipient_name='Testw', recipient_email='test1@test.com', customer_reference_number='2Fwe23S',
internal_reference="A", course_id=self.course.id, is_valid=True
)
self.invoice_item = CourseRegistrationCodeInvoiceItem.objects.create(
invoice=self.sale_invoice_1,
qty=1,
unit_price=1234.32,
course_id=self.course.id
)
self.students = [UserFactory() for _ in xrange(6)]
for student in self.students:
CourseEnrollment.enroll(student, self.course.id)
self.students_who_may_enroll = self.students + [UserFactory() for _ in range(5)]
for student in self.students_who_may_enroll:
CourseEnrollmentAllowed.objects.create(
email=student.email, course_id=self.course.id
)
def register_with_redemption_code(self, user, code):
"""
Enroll a user using a registration code.
"""
redeem_url = reverse('register_code_redemption', args=[code])
self.client.login(username=user.username, password='test')
response = self.client.get(redeem_url)
self.assertEquals(response.status_code, 200)
# check button text
self.assertTrue('Activate Course Enrollment' in response.content)
response = self.client.post(redeem_url)
self.assertEquals(response.status_code, 200)
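# Redemption is a two-step flow: the GET renders the confirmation page
# (hence the 'Activate Course Enrollment' button check) and the POST
# performs the actual redemption.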
def test_invalidate_sale_record(self):
"""
Test the sale invalidation and re-validation scenarios.
"""
for i in range(2):
course_registration_code = CourseRegistrationCode(
code='sale_invoice{}'.format(i),
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
data = {'invoice_number': self.sale_invoice_1.id, 'event_type': "invalidate"}
url = reverse('sale_validation', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.assert_request_status_code(200, url, method="POST", data=data)
# Now try to fetch data for a non-existent invoice number
test_data_1 = {'invoice_number': 100, 'event_type': "invalidate"}
self.assert_request_status_code(404, url, method="POST", data=test_data_1)
# Now invalidate the same invoice number and expect a Bad Request
response = self.assert_request_status_code(400, url, method="POST", data=data)
self.assertIn("The sale associated with this invoice has already been invalidated.", response.content)
# now re_validate the invoice number
data['event_type'] = "re_validate"
self.assert_request_status_code(200, url, method="POST", data=data)
# Now re_validate the same active invoice number and expect a Bad Request
response = self.assert_request_status_code(400, url, method="POST", data=data)
self.assertIn("This invoice is already active.", response.content)
test_data_2 = {'invoice_number': self.sale_invoice_1.id}
response = self.assert_request_status_code(400, url, method="POST", data=test_data_2)
self.assertIn("Missing required event_type parameter", response.content)
test_data_3 = {'event_type': "re_validate"}
response = self.assert_request_status_code(400, url, method="POST", data=test_data_3)
self.assertIn("Missing required invoice_number parameter", response.content)
# submitting invalid invoice number
data['invoice_number'] = 'testing'
response = self.assert_request_status_code(400, url, method="POST", data=data)
self.assertIn("invoice_number must be an integer, {value} provided".format(value=data['invoice_number']), response.content)
def test_get_sale_order_records_features_csv(self):
"""
Test that the response from get_sale_order_records is in csv format.
"""
# add the coupon code for the course
coupon = Coupon(
code='test_code', description='test_description', course_id=self.course.id,
percentage_discount='10', created_by=self.instructor, is_active=True
)
coupon.save()
self.cart.order_type = 'business'
self.cart.save()
self.cart.add_billing_details(company_name='Test Company', company_contact_name='Test',
company_contact_email='test@123', recipient_name='R1',
recipient_email='', customer_reference_number='PO#23')
paid_course_reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course.id)
# update the quantity of the cart item paid_course_reg_item
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': paid_course_reg_item.id, 'qty': '4'})
self.assertEqual(resp.status_code, 200)
# apply the coupon code to the item in the cart
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': coupon.code})
self.assertEqual(resp.status_code, 200)
self.cart.purchase()
# get the updated item
item = self.cart.orderitem_set.all().select_subclasses()[0]
# get the redeemed coupon information
coupon_redemption = CouponRedemption.objects.select_related('coupon').filter(order=self.cart)
sale_order_url = reverse('get_sale_order_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(sale_order_url)
self.assertEqual(response['Content-Type'], 'text/csv')
self.assertIn('36', response.content.split('\r\n')[1])
self.assertIn(str(item.unit_cost), response.content.split('\r\n')[1],)
self.assertIn(str(item.list_price), response.content.split('\r\n')[1],)
self.assertIn(item.status, response.content.split('\r\n')[1],)
self.assertIn(coupon_redemption[0].coupon.code, response.content.split('\r\n')[1],)
def test_coupon_redeem_count_in_ecommerce_section(self):
"""
Test that checks the redeem count in the instructor_dashboard coupon section
"""
# add the coupon code for the course
coupon = Coupon(
code='test_code', description='test_description', course_id=self.course.id,
percentage_discount='10', created_by=self.instructor, is_active=True
)
coupon.save()
# Coupon Redeem Count only visible for Financial Admins.
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
PaidCourseRegistration.add_to_order(self.cart, self.course.id)
# apply the coupon code to the item in the cart
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': coupon.code})
self.assertEqual(resp.status_code, 200)
# URL for instructor dashboard
instructor_dashboard = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
# visit the instructor dashboard page and
# check that the coupon redeem count should be 0
resp = self.client.get(instructor_dashboard)
self.assertEqual(resp.status_code, 200)
self.assertIn('Number Redeemed', resp.content)
self.assertIn('<td>0</td>', resp.content)
# now make the payment of your cart items
self.cart.purchase()
# visit the instructor dashboard page and
# check that the coupon redeem count should be 1
resp = self.client.get(instructor_dashboard)
self.assertEqual(resp.status_code, 200)
self.assertIn('Number Redeemed', resp.content)
self.assertIn('<td>1</td>', resp.content)
def test_get_sale_records_features_csv(self):
"""
Test that the response from get_sale_records is in csv format.
"""
for i in range(2):
course_registration_code = CourseRegistrationCode(
code='sale_invoice{}'.format(i),
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
url = reverse(
'get_sale_records',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
response = self.client.get(url + '/csv', {})
self.assertEqual(response['Content-Type'], 'text/csv')
def test_get_sale_records_features_json(self):
"""
Test that the response from get_sale_records is in json format.
"""
for i in range(5):
course_registration_code = CourseRegistrationCode(
code='sale_invoice{}'.format(i),
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
url = reverse('get_sale_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('sale', res_json)
for res in res_json['sale']:
self.validate_sale_records_response(
res,
course_registration_code,
self.sale_invoice_1,
0,
invoice_item=self.invoice_item
)
def test_get_sale_records_features_with_multiple_invoices(self):
"""
Test that the response from get_sale_records is in json format for multiple invoices
"""
for i in range(5):
course_registration_code = CourseRegistrationCode(
code='qwerty{}'.format(i),
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
# Create test invoice 2
sale_invoice_2 = Invoice.objects.create(
total_amount=1234.32, company_name='Test1', company_contact_name='TestName', company_contact_email='Test@company.com',
recipient_name='Testw_2', recipient_email='test2@test.com', customer_reference_number='2Fwe23S',
internal_reference="B", course_id=self.course.id
)
invoice_item_2 = CourseRegistrationCodeInvoiceItem.objects.create(
invoice=sale_invoice_2,
qty=1,
unit_price=1234.32,
course_id=self.course.id
)
for i in range(5):
course_registration_code = CourseRegistrationCode(
code='xyzmn{}'.format(i), course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor, invoice=sale_invoice_2, invoice_item=invoice_item_2, mode_slug='honor'
)
course_registration_code.save()
url = reverse('get_sale_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('sale', res_json)
self.validate_sale_records_response(
res_json['sale'][0],
course_registration_code,
self.sale_invoice_1,
0,
invoice_item=self.invoice_item
)
self.validate_sale_records_response(
res_json['sale'][1],
course_registration_code,
sale_invoice_2,
0,
invoice_item=invoice_item_2
)
def validate_sale_records_response(self, res, course_registration_code, invoice, used_codes, invoice_item):
"""
validate sale records attribute values with the response object
"""
self.assertEqual(res['total_amount'], invoice.total_amount)
self.assertEqual(res['recipient_email'], invoice.recipient_email)
self.assertEqual(res['recipient_name'], invoice.recipient_name)
self.assertEqual(res['company_name'], invoice.company_name)
self.assertEqual(res['company_contact_name'], invoice.company_contact_name)
self.assertEqual(res['company_contact_email'], invoice.company_contact_email)
self.assertEqual(res['internal_reference'], invoice.internal_reference)
self.assertEqual(res['customer_reference_number'], invoice.customer_reference_number)
self.assertEqual(res['invoice_number'], invoice.id)
self.assertEqual(res['created_by'], course_registration_code.created_by.username)
self.assertEqual(res['course_id'], invoice_item.course_id.to_deprecated_string())
self.assertEqual(res['total_used_codes'], used_codes)
self.assertEqual(res['total_codes'], 5)
def test_get_students_features(self):
"""
Test that a minimal set of information is formatted
correctly in the response to get_students_features.
"""
url = reverse('get_students_features', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('students', res_json)
for student in self.students:
student_json = [
x for x in res_json['students']
if x['username'] == student.username
][0]
self.assertEqual(student_json['username'], student.username)
self.assertEqual(student_json['email'], student.email)
@ddt.data(True, False)
def test_get_students_features_cohorted(self, is_cohorted):
"""
Test that get_students_features includes cohort info when the course is
cohorted, and does not when the course is not cohorted.
"""
url = reverse('get_students_features', kwargs={'course_id': unicode(self.course.id)})
set_course_cohort_settings(self.course.id, is_cohorted=is_cohorted)
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertEqual('cohort' in res_json['feature_names'], is_cohorted)
def test_get_students_who_may_enroll(self):
"""
Test whether get_students_who_may_enroll returns an appropriate
status message when users request a CSV file of students who
may enroll in a course.
"""
url = reverse(
'get_students_who_may_enroll',
kwargs={'course_id': unicode(self.course.id)}
)
# Successful case:
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('status', res_json)
self.assertNotIn('currently being created', res_json['status'])
# CSV generation already in progress:
with patch('instructor_task.api.submit_calculate_may_enroll_csv') as submit_task_function:
error = AlreadyRunningError()
submit_task_function.side_effect = error
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('status', res_json)
self.assertIn('currently being created', res_json['status'])
def test_get_student_exam_results(self):
"""
Test whether get_proctored_exam_results returns an appropriate
status message when users request a CSV file.
"""
url = reverse(
'get_proctored_exam_results',
kwargs={'course_id': unicode(self.course.id)}
)
# Successful case:
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('status', res_json)
self.assertNotIn('currently being created', res_json['status'])
# CSV generation already in progress:
with patch('instructor_task.api.submit_proctored_exam_results_report') as submit_task_function:
error = AlreadyRunningError()
submit_task_function.side_effect = error
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('status', res_json)
self.assertIn('currently being created', res_json['status'])
def test_access_course_finance_admin_with_invalid_course_key(self):
"""
        Test that require_finance_admin returns a 404 for an invalid course
        key, before a detailed enrollment report can be generated
"""
func = Mock()
decorated_func = require_finance_admin(func)
request = self.mock_request()
response = decorated_func(request, 'invalid_course_key')
self.assertEqual(response.status_code, 404)
self.assertFalse(func.called)
def mock_request(self):
"""
        Return a mock request with the instructor as the requesting user.
"""
request = Mock()
request.user = self.instructor
return request
def test_access_course_finance_admin_with_valid_course_key(self):
"""
        Test that require_finance_admin returns a 403 for a valid course key
        when the user does not have the finance admin role
"""
func = Mock()
decorated_func = require_finance_admin(func)
request = self.mock_request()
response = decorated_func(request, 'valid/course/key')
self.assertEqual(response.status_code, 403)
self.assertFalse(func.called)
    def test_add_user_to_finance_admin_role_with_valid_course(self):
        """
        Test that the decorated function is called once the user has been
        granted finance admin rights.
"""
func = Mock()
decorated_func = require_finance_admin(func)
request = self.mock_request()
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
decorated_func(request, self.course.id.to_deprecated_string())
self.assertTrue(func.called)
def test_enrollment_report_features_csv(self):
"""
        Test generating the detailed enrollment report after enrolling
        users and admin staff using registration codes.
"""
InvoiceTransaction.objects.create(
invoice=self.sale_invoice_1,
amount=self.sale_invoice_1.total_amount,
status='completed',
created_by=self.instructor,
last_modified_by=self.instructor
)
course_registration_code = CourseRegistrationCode.objects.create(
code='abcde',
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
admin_user = AdminFactory()
admin_cart = Order.get_cart_for_user(admin_user)
PaidCourseRegistration.add_to_order(admin_cart, self.course.id)
admin_cart.purchase()
        # Create a new user, enroll them in the course using a registration
        # code, and then validate the generated detailed enrollment report.
test_user = UserFactory()
self.register_with_redemption_code(test_user, course_registration_code.code)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
UserProfileFactory.create(user=self.students[0], meta='{"company": "asdasda"}')
self.client.login(username=self.instructor.username, password='test')
url = reverse('get_enrollment_report', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertIn('The detailed enrollment report is being created.', response.content)
def test_bulk_purchase_detailed_report(self):
"""
        Test generating the detailed enrollment report:
        1. Purchase registration codes.
        2. Enroll users via registration code.
        3. Validate the generated enrollment report.
"""
paid_course_reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course.id)
# update the quantity of the cart item paid_course_reg_item
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'),
{'ItemId': paid_course_reg_item.id, 'qty': '4'})
self.assertEqual(resp.status_code, 200)
# apply the coupon code to the item in the cart
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
self.cart.purchase()
course_reg_codes = CourseRegistrationCode.objects.filter(order=self.cart)
self.register_with_redemption_code(self.instructor, course_reg_codes[0].code)
test_user = UserFactory()
test_user_cart = Order.get_cart_for_user(test_user)
PaidCourseRegistration.add_to_order(test_user_cart, self.course.id)
test_user_cart.purchase()
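        # Record a refunded invoice transaction so the report also covers refunds.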
InvoiceTransaction.objects.create(
invoice=self.sale_invoice_1,
amount=-self.sale_invoice_1.total_amount,
status='refunded',
created_by=self.instructor,
last_modified_by=self.instructor
)
course_registration_code = CourseRegistrationCode.objects.create(
code='abcde',
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
test_user1 = UserFactory()
self.register_with_redemption_code(test_user1, course_registration_code.code)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
url = reverse('get_enrollment_report', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertIn('The detailed enrollment report is being created.', response.content)
def test_create_registration_code_without_invoice_and_order(self):
"""
        Test generating the detailed enrollment report using a registration
        code that was created without an invoice or bulk-purchase order.
"""
course_registration_code = CourseRegistrationCode.objects.create(
code='abcde',
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
mode_slug='honor'
)
test_user1 = UserFactory()
self.register_with_redemption_code(test_user1, course_registration_code.code)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
url = reverse('get_enrollment_report', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertIn('The detailed enrollment report is being created.', response.content)
def test_invoice_payment_is_still_pending_for_registration_codes(self):
"""
        Test generating the enrollment report after enrolling a user in a
        course with a registration code whose invoice has not been paid yet.
"""
course_registration_code = CourseRegistrationCode.objects.create(
code='abcde',
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
test_user1 = UserFactory()
self.register_with_redemption_code(test_user1, course_registration_code.code)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
url = reverse('get_enrollment_report', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertIn('The detailed enrollment report is being created.', response.content)
@patch.object(instructor.views.api, 'anonymous_id_for_user', Mock(return_value='42'))
@patch.object(instructor.views.api, 'unique_id_for_user', Mock(return_value='41'))
def test_get_anon_ids(self):
"""
Test the CSV output for the anonymized user ids.
"""
url = reverse('get_anon_ids', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(
'"User ID","Anonymized User ID","Course Specific Anonymized User ID"'
'\n"{user_id}","41","42"\n'.format(user_id=self.students[0].id)
))
self.assertTrue(
body.endswith('"{user_id}","41","42"\n'.format(user_id=self.students[-1].id))
)
def test_list_report_downloads(self):
url = reverse('list_report_downloads', kwargs={'course_id': self.course.id.to_deprecated_string()})
with patch('instructor_task.models.LocalFSReportStore.links_for') as mock_links_for:
mock_links_for.return_value = [
('mock_file_name_1', 'https://1.mock.url'),
('mock_file_name_2', 'https://2.mock.url'),
]
response = self.client.get(url, {})
expected_response = {
"downloads": [
{
"url": "https://1.mock.url",
"link": "<a href=\"https://1.mock.url\">mock_file_name_1</a>",
"name": "mock_file_name_1"
},
{
"url": "https://2.mock.url",
"link": "<a href=\"https://2.mock.url\">mock_file_name_2</a>",
"name": "mock_file_name_2"
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected_response)
@ddt.data(*REPORTS_DATA)
@ddt.unpack
    def test_calculate_report_csv_success(
            self,
            report_type,
            instructor_api_endpoint,
            task_api_endpoint,
            extra_instructor_api_kwargs
    ):
kwargs = {'course_id': unicode(self.course.id)}
kwargs.update(extra_instructor_api_kwargs)
url = reverse(instructor_api_endpoint, kwargs=kwargs)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
with patch(task_api_endpoint):
response = self.client.get(url, {})
success_status = "The {report_type} report is being created.".format(report_type=report_type)
self.assertIn(success_status, response.content)
@ddt.data(*EXECUTIVE_SUMMARY_DATA)
@ddt.unpack
def test_executive_summary_report_success(
self,
report_type,
instructor_api_endpoint,
task_api_endpoint,
extra_instructor_api_kwargs
):
kwargs = {'course_id': unicode(self.course.id)}
kwargs.update(extra_instructor_api_kwargs)
url = reverse(instructor_api_endpoint, kwargs=kwargs)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
with patch(task_api_endpoint):
response = self.client.get(url, {})
success_status = "The {report_type} report is being created." \
" To view the status of the report, see Pending" \
" Instructor Tasks" \
" below".format(report_type=report_type)
self.assertIn(success_status, response.content)
@ddt.data(*EXECUTIVE_SUMMARY_DATA)
@ddt.unpack
def test_executive_summary_report_already_running(
self,
report_type,
instructor_api_endpoint,
task_api_endpoint,
extra_instructor_api_kwargs
):
kwargs = {'course_id': unicode(self.course.id)}
kwargs.update(extra_instructor_api_kwargs)
url = reverse(instructor_api_endpoint, kwargs=kwargs)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
with patch(task_api_endpoint) as mock:
mock.side_effect = AlreadyRunningError()
response = self.client.get(url, {})
already_running_status = "The {report_type} report is currently being created." \
" To view the status of the report, see Pending Instructor Tasks below." \
" You will be able to download the report" \
" when it is" \
" complete.".format(report_type=report_type)
self.assertIn(already_running_status, response.content)
def test_get_student_progress_url(self):
""" Test that progress_url is in the successful response. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
url += "?unique_student_identifier={}".format(
quote(self.students[0].email.encode("utf-8"))
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertIn('progress_url', res_json)
def test_get_student_progress_url_from_uname(self):
""" Test that progress_url is in the successful response. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
url += "?unique_student_identifier={}".format(
quote(self.students[0].username.encode("utf-8"))
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertIn('progress_url', res_json)
def test_get_student_progress_url_noparams(self):
""" Test that the endpoint 404's without the required query params. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_get_student_progress_url_nostudent(self):
""" Test that the endpoint 400's when requesting an unknown email. """
        url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
        # Query with an email that matches no user, so the unknown-student
        # path (rather than the missing-params path) is exercised.
        url += "?unique_student_identifier={}".format(quote("not-a-student@example.com"))
        response = self.client.get(url)
self.assertEqual(response.status_code, 400)
@attr('shard_1')
class TestInstructorAPIRegradeTask(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can change student grades.
This includes resetting attempts and starting rescore tasks.
This test does NOT test whether the actions had an effect on the
database, that is the job of task tests and test_enrollment.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIRegradeTask, cls).setUpClass()
cls.course = CourseFactory.create()
cls.problem_location = msk_from_problem_urlname(
cls.course.id,
'robot-some-problem-urlname'
)
cls.problem_urlname = cls.problem_location.to_deprecated_string()
def setUp(self):
super(TestInstructorAPIRegradeTask, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
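        # Seed the student's problem state with 10 attempts so reset tests can
        # verify the count drops back to 0.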
self.module_to_reset = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
    def test_reset_student_attempts_delete_all(self):
        """ Make sure no one can delete all students' state on a problem. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
'delete_module': True,
})
self.assertEqual(response.status_code, 400)
def test_reset_student_attempts_single(self):
""" Test reset single student attempts. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# make sure problem attempts have been reset.
changed_module = StudentModule.objects.get(pk=self.module_to_reset.pk)
self.assertEqual(
json.loads(changed_module.state)['attempts'],
0
)
# mock out the function which should be called to execute the action.
@patch.object(instructor_task.api, 'submit_reset_problem_attempts_for_all_students')
def test_reset_student_attempts_all(self, act):
""" Test reset all student attempts. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
def test_reset_student_attempts_missingmodule(self):
""" Test reset for non-existant problem. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': 'robot-not-a-real-module',
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
def test_reset_student_attempts_delete(self):
""" Test delete single student state. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
'delete_module': True,
})
self.assertEqual(response.status_code, 200)
# make sure the module has been deleted
self.assertEqual(
StudentModule.objects.filter(
student=self.module_to_reset.student,
course_id=self.module_to_reset.course_id,
# module_id=self.module_to_reset.module_id,
).count(),
0
)
def test_reset_student_attempts_nonsense(self):
""" Test failure with both unique_student_identifier and all_students. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
'all_students': True,
})
self.assertEqual(response.status_code, 400)
@patch.object(instructor_task.api, 'submit_rescore_problem_for_student')
def test_rescore_problem_single(self, act):
""" Test rescoring of a single student. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.object(instructor_task.api, 'submit_rescore_problem_for_student')
def test_rescore_problem_single_from_uname(self, act):
""" Test rescoring of a single student. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.username,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.object(instructor_task.api, 'submit_rescore_problem_for_all_students')
def test_rescore_problem_all(self, act):
""" Test rescoring for all students. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
def test_course_has_entrance_exam_in_student_attempts_reset(self):
""" Test course has entrance exam id set while resetting attempts"""
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'all_students': True,
'delete_module': False,
})
self.assertEqual(response.status_code, 400)
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
def test_rescore_entrance_exam_with_invalid_exam(self):
""" Test course has entrance exam id set while re-scoring. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
@attr('shard_1')
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
class TestEntranceExamInstructorAPIRegradeTask(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can rescore student grades,
reset student attempts and delete state for entrance exam.
"""
@classmethod
def setUpClass(cls):
super(TestEntranceExamInstructorAPIRegradeTask, cls).setUpClass()
cls.course = CourseFactory.create(
org='test_org',
course='test_course',
run='test_run',
entrance_exam_id='i4x://{}/{}/chapter/Entrance_exam'.format('test_org', 'test_course')
)
cls.course_with_invalid_ee = CourseFactory.create(entrance_exam_id='invalid_exam')
with cls.store.bulk_operations(cls.course.id, emit_signals=False):
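            # Build an entrance exam chapter with one subsection containing a
            # single vertical that holds two problems.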
cls.entrance_exam = ItemFactory.create(
parent=cls.course,
category='chapter',
display_name='Entrance exam'
)
subsection = ItemFactory.create(
parent=cls.entrance_exam,
category='sequential',
display_name='Subsection 1'
)
vertical = ItemFactory.create(
parent=subsection,
category='vertical',
display_name='Vertical 1'
)
cls.ee_problem_1 = ItemFactory.create(
parent=vertical,
category="problem",
display_name="Exam Problem - Problem 1"
)
cls.ee_problem_2 = ItemFactory.create(
parent=vertical,
category="problem",
display_name="Exam Problem - Problem 2"
)
def setUp(self):
super(TestEntranceExamInstructorAPIRegradeTask, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
# Add instructor to invalid ee course
CourseInstructorRole(self.course_with_invalid_ee.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
ee_module_to_reset1 = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.ee_problem_1.location,
state=json.dumps({'attempts': 10, 'done': True}),
)
ee_module_to_reset2 = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.ee_problem_2.location,
state=json.dumps({'attempts': 10, 'done': True}),
)
self.ee_modules = [ee_module_to_reset1.module_state_key, ee_module_to_reset2.module_state_key]
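        # Track both entrance-exam modules so tests can assert that resets and
        # deletes apply across all of them.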
    def test_reset_entrance_exam_student_attempts_delete_all(self):
        """ Make sure no one can delete all students' state on the entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'all_students': True,
'delete_module': True,
})
self.assertEqual(response.status_code, 400)
def test_reset_entrance_exam_student_attempts_single(self):
""" Test reset single student attempts for entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# make sure problem attempts have been reset.
changed_modules = StudentModule.objects.filter(module_state_key__in=self.ee_modules)
for changed_module in changed_modules:
self.assertEqual(
json.loads(changed_module.state)['attempts'],
0
)
# mock out the function which should be called to execute the action.
@patch.object(instructor_task.api, 'submit_reset_problem_attempts_in_entrance_exam')
def test_reset_entrance_exam_all_student_attempts(self, act):
""" Test reset all student attempts for entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'all_students': True,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
def test_reset_student_attempts_invalid_entrance_exam(self):
""" Test reset for invalid entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course_with_invalid_ee.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
    def test_entrance_exam_student_delete_state(self):
""" Test delete single student entrance exam state. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
'delete_module': True,
})
self.assertEqual(response.status_code, 200)
# make sure the module has been deleted
changed_modules = StudentModule.objects.filter(module_state_key__in=self.ee_modules)
self.assertEqual(changed_modules.count(), 0)
def test_entrance_exam_delete_state_with_staff(self):
""" Test entrance exam delete state failure with staff access. """
self.client.logout()
staff_user = StaffFactory(course_key=self.course.id)
self.client.login(username=staff_user.username, password='test')
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
'delete_module': True,
})
self.assertEqual(response.status_code, 403)
def test_entrance_exam_reset_student_attempts_nonsense(self):
""" Test failure with both unique_student_identifier and all_students. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
'all_students': True,
})
self.assertEqual(response.status_code, 400)
@patch.object(instructor_task.api, 'submit_rescore_entrance_exam_for_student')
def test_rescore_entrance_exam_single_student(self, act):
""" Test re-scoring of entrance exam for single student. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
def test_rescore_entrance_exam_all_student(self):
""" Test rescoring for all students. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'all_students': True,
})
self.assertEqual(response.status_code, 200)
def test_rescore_entrance_exam_all_student_and_single(self):
""" Test re-scoring with both all students and single student parameters. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
'all_students': True,
})
self.assertEqual(response.status_code, 400)
def test_rescore_entrance_exam_with_invalid_exam(self):
""" Test re-scoring of entrance exam with invalid exam. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course_with_invalid_ee.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
def test_list_entrance_exam_instructor_tasks_student(self):
""" Test list task history for entrance exam AND student. """
# create a re-score entrance exam task
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
url = reverse('list_entrance_exam_instructor_tasks', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# check response
tasks = json.loads(response.content)['tasks']
self.assertEqual(len(tasks), 1)
self.assertEqual(tasks[0]['status'], _('Complete'))
def test_list_entrance_exam_instructor_tasks_all_student(self):
""" Test list task history for entrance exam AND all student. """
url = reverse('list_entrance_exam_instructor_tasks', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
# check response
tasks = json.loads(response.content)['tasks']
self.assertEqual(len(tasks), 0)
def test_list_entrance_exam_instructor_with_invalid_exam_key(self):
""" Test list task history for entrance exam failure if course has invalid exam. """
url = reverse('list_entrance_exam_instructor_tasks',
kwargs={'course_id': unicode(self.course_with_invalid_ee.id)})
response = self.client.get(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
def test_skip_entrance_exam_student(self):
""" Test skip entrance exam api for student. """
        # mark the student as allowed to skip the entrance exam
url = reverse('mark_student_can_skip_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# check response
message = _('This student (%s) will skip the entrance exam.') % self.student.email
self.assertContains(response, message)
# post again with same student
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
# This time response message should be different
message = _('This student (%s) is already allowed to skip the entrance exam.') % self.student.email
self.assertContains(response, message)
@attr('shard_1')
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message'))
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestInstructorSendEmail(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
    Checks that only instructors have access to email endpoints, that these
    endpoints are only accessible for courses that actually exist, and that
    they only accept valid email messages.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorSendEmail, cls).setUpClass()
cls.course = CourseFactory.create()
test_subject = u'\u1234 test subject'
test_message = u'\u6824 test message'
cls.full_test_message = {
'send_to': 'staff',
'subject': test_subject,
'message': test_message,
}
def setUp(self):
super(TestInstructorSendEmail, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
def test_send_email_as_logged_in_instructor(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 200)
def test_send_email_but_not_logged_in(self):
self.client.logout()
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 403)
def test_send_email_but_not_staff(self):
self.client.logout()
student = UserFactory()
self.client.login(username=student.username, password='test')
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 403)
def test_send_email_but_course_not_exist(self):
url = reverse('send_email', kwargs={'course_id': 'GarbageCourse/DNE/NoTerm'})
response = self.client.post(url, self.full_test_message)
self.assertNotEqual(response.status_code, 200)
def test_send_email_no_sendto(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'subject': 'test subject',
'message': 'test message',
})
self.assertEqual(response.status_code, 400)
def test_send_email_no_subject(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'send_to': 'staff',
'message': 'test message',
})
self.assertEqual(response.status_code, 400)
def test_send_email_no_message(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'send_to': 'staff',
'subject': 'test subject',
})
self.assertEqual(response.status_code, 400)
class MockCompletionInfo(object):
"""Mock for get_task_completion_info"""
times_called = 0
def mock_get_task_completion_info(self, *args): # pylint: disable=unused-argument
"""Mock for get_task_completion_info"""
self.times_called += 1
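        # Alternate outcomes: even-numbered calls succeed, odd-numbered calls error.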
if self.times_called % 2 == 0:
return True, 'Task Completed'
return False, 'Task Errored In Some Way'
@attr('shard_1')
class TestInstructorAPITaskLists(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test instructor task list endpoint.
"""
class FakeTask(object):
""" Fake task object """
FEATURES = [
'task_type',
'task_input',
'task_id',
'requester',
'task_state',
'created',
'status',
'task_message',
'duration_sec'
]
def __init__(self, completion):
for feature in self.FEATURES:
setattr(self, feature, 'expected')
# created needs to be a datetime
self.created = datetime.datetime(2013, 10, 25, 11, 42, 35)
# set 'status' and 'task_message' attrs
success, task_message = completion()
if success:
self.status = "Complete"
else:
self.status = "Incomplete"
self.task_message = task_message
# Set 'task_output' attr, which will be parsed to the 'duration_sec' attr.
self.task_output = '{"duration_ms": 1035000}'
self.duration_sec = 1035000 / 1000.0
def make_invalid_output(self):
"""Munge task_output to be invalid json"""
self.task_output = 'HI MY NAME IS INVALID JSON'
# This should be given the value of 'unknown' if the task output
# can't be properly parsed
self.duration_sec = 'unknown'
def to_dict(self):
""" Convert fake task to dictionary representation. """
attr_dict = {key: getattr(self, key) for key in self.FEATURES}
attr_dict['created'] = attr_dict['created'].isoformat()
return attr_dict
@classmethod
def setUpClass(cls):
super(TestInstructorAPITaskLists, cls).setUpClass()
cls.course = CourseFactory.create(
entrance_exam_id='i4x://{}/{}/chapter/Entrance_exam'.format('test_org', 'test_course')
)
cls.problem_location = msk_from_problem_urlname(
cls.course.id,
'robot-some-problem-urlname'
)
cls.problem_urlname = cls.problem_location.to_deprecated_string()
def setUp(self):
super(TestInstructorAPITaskLists, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
self.module = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
mock_factory = MockCompletionInfo()
self.tasks = [self.FakeTask(mock_factory.mock_get_task_completion_info) for _ in xrange(7)]
self.tasks[-1].make_invalid_output()
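        # The last task has unparseable output to exercise the 'unknown' duration path.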
@patch.object(instructor_task.api, 'get_running_instructor_tasks')
def test_list_instructor_tasks_running(self, act):
""" Test list of all running tasks. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(instructor_task.api, 'get_instructor_task_history')
def test_list_background_email_tasks(self, act):
"""Test list of background email tasks."""
act.return_value = self.tasks
url = reverse('list_background_email_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(instructor_task.api, 'get_instructor_task_history')
def test_list_instructor_tasks_problem(self, act):
""" Test list task history for problem. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {
'problem_location_str': self.problem_urlname,
})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(instructor_task.api, 'get_instructor_task_history')
def test_list_instructor_tasks_problem_student(self, act):
""" Test list task history for problem AND student. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch('instructor.views.instructor_task_helpers.get_task_completion_info') as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.get(url, {
'problem_location_str': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@attr('shard_1')
@patch.object(instructor_task.api, 'get_instructor_task_history')
class TestInstructorEmailContentList(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test the instructor email content history endpoint.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorEmailContentList, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestInstructorEmailContentList, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.tasks = {}
self.emails = {}
self.emails_info = {}
def setup_fake_email_info(self, num_emails, with_failures=False):
""" Initialize the specified number of fake emails """
for email_id in range(num_emails):
num_sent = random.randint(1, 15401)
if with_failures:
failed = random.randint(1, 15401)
else:
failed = 0
self.tasks[email_id] = FakeContentTask(email_id, num_sent, failed, 'expected')
self.emails[email_id] = FakeEmail(email_id)
self.emails_info[email_id] = FakeEmailInfo(self.emails[email_id], num_sent, failed)
def get_matching_mock_email(self, **kwargs):
""" Returns the matching mock emails for the given id """
email_id = kwargs.get('id', 0)
return self.emails[email_id]
def get_email_content_response(self, num_emails, task_history_request, with_failures=False):
""" Calls the list_email_content endpoint and returns the repsonse """
self.setup_fake_email_info(num_emails, with_failures)
task_history_request.return_value = self.tasks.values()
url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
with patch('instructor.views.api.CourseEmail.objects.get') as mock_email_info:
mock_email_info.side_effect = self.get_matching_mock_email
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
return response
def check_emails_sent(self, num_emails, task_history_request, with_failures=False):
""" Tests sending emails with or without failures """
response = self.get_email_content_response(num_emails, task_history_request, with_failures)
self.assertTrue(task_history_request.called)
expected_email_info = [email_info.to_dict() for email_info in self.emails_info.values()]
actual_email_info = json.loads(response.content)['emails']
self.assertEqual(len(actual_email_info), num_emails)
for exp_email, act_email in zip(expected_email_info, actual_email_info):
self.assertDictEqual(exp_email, act_email)
self.assertEqual(expected_email_info, actual_email_info)
def test_content_list_one_email(self, task_history_request):
""" Test listing of bulk emails when email list has one email """
response = self.get_email_content_response(1, task_history_request)
self.assertTrue(task_history_request.called)
email_info = json.loads(response.content)['emails']
# Emails list should have one email
self.assertEqual(len(email_info), 1)
# Email content should be what's expected
expected_message = self.emails[0].html_message
returned_email_info = email_info[0]
received_message = returned_email_info[u'email'][u'html_message']
self.assertEqual(expected_message, received_message)
def test_content_list_no_emails(self, task_history_request):
""" Test listing of bulk emails when email list empty """
response = self.get_email_content_response(0, task_history_request)
self.assertTrue(task_history_request.called)
email_info = json.loads(response.content)['emails']
# Emails list should be empty
self.assertEqual(len(email_info), 0)
def test_content_list_email_content_many(self, task_history_request):
""" Test listing of bulk emails sent large amount of emails """
self.check_emails_sent(50, task_history_request)
def test_list_email_content_error(self, task_history_request):
""" Test handling of error retrieving email """
invalid_task = FakeContentTask(0, 0, 0, 'test')
invalid_task.make_invalid_input()
task_history_request.return_value = [invalid_task]
url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
self.assertTrue(task_history_request.called)
returned_email_info = json.loads(response.content)['emails']
self.assertEqual(len(returned_email_info), 1)
returned_info = returned_email_info[0]
for info in ['created', 'sent_to', 'email', 'number_sent', 'requester']:
self.assertEqual(returned_info[info], None)
def test_list_email_with_failure(self, task_history_request):
""" Test the handling of email task that had failures """
self.check_emails_sent(1, task_history_request, True)
def test_list_many_emails_with_failures(self, task_history_request):
""" Test the handling of many emails with failures """
self.check_emails_sent(50, task_history_request, True)
def test_list_email_with_no_successes(self, task_history_request):
task_info = FakeContentTask(0, 0, 10, 'expected')
email = FakeEmail(0)
email_info = FakeEmailInfo(email, 0, 10)
task_history_request.return_value = [task_info]
url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
with patch('instructor.views.api.CourseEmail.objects.get') as mock_email_info:
mock_email_info.return_value = email
response = self.client.get(url, {})
self.assertEqual(response.status_code, 200)
self.assertTrue(task_history_request.called)
returned_info_list = json.loads(response.content)['emails']
self.assertEqual(len(returned_info_list), 1)
returned_info = returned_info_list[0]
expected_info = email_info.to_dict()
self.assertDictEqual(expected_info, returned_info)
@attr('shard_1')
class TestInstructorAPIHelpers(TestCase):
""" Test helpers for instructor.api """
def test_split_input_list(self):
strings = []
lists = []
strings.append(
"Lorem@ipsum.dolor, sit@amet.consectetur\nadipiscing@elit.Aenean\r convallis@at.lacus\r, ut@lacinia.Sed")
lists.append(['Lorem@ipsum.dolor', 'sit@amet.consectetur', 'adipiscing@elit.Aenean', 'convallis@at.lacus',
'ut@lacinia.Sed'])
for (stng, lst) in zip(strings, lists):
self.assertEqual(_split_input_list(stng), lst)
def test_split_input_list_unicode(self):
self.assertEqual(_split_input_list('robot@robot.edu, robot2@robot.edu'),
['robot@robot.edu', 'robot2@robot.edu'])
self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'),
['robot@robot.edu', 'robot2@robot.edu'])
self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'),
[u'robot@robot.edu', 'robot2@robot.edu'])
scary_unistuff = unichr(40960) + u'abcd' + unichr(1972)
self.assertEqual(_split_input_list(scary_unistuff), [scary_unistuff])
def test_msk_from_problem_urlname(self):
course_id = SlashSeparatedCourseKey('MITx', '6.002x', '2013_Spring')
name = 'L2Node1'
output = 'i4x://MITx/6.002x/problem/L2Node1'
self.assertEqual(msk_from_problem_urlname(course_id, name).to_deprecated_string(), output)
@raises(ValueError)
def test_msk_from_problem_urlname_error(self):
args = ('notagoodcourse', 'L2Node1')
msk_from_problem_urlname(*args)
def get_extended_due(course, unit, user):
"""
Gets the overridden due date for the given user on the given unit. Returns
`None` if there is no override set.
"""
try:
override = StudentFieldOverride.objects.get(
course_id=course.id,
student=user,
location=unit.location,
field='due'
)
return DATE_FIELD.from_json(json.loads(override.value))
except StudentFieldOverride.DoesNotExist:
return None
@attr('shard_1')
class TestDueDateExtensions(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
    Test the instructor endpoints for setting, resetting, and listing due date extensions.
"""
@classmethod
def setUpClass(cls):
super(TestDueDateExtensions, cls).setUpClass()
cls.course = CourseFactory.create()
cls.due = datetime.datetime(2010, 5, 12, 2, 42, tzinfo=utc)
with cls.store.bulk_operations(cls.course.id, emit_signals=False):
cls.week1 = ItemFactory.create(due=cls.due)
cls.week2 = ItemFactory.create(due=cls.due)
cls.week3 = ItemFactory.create() # No due date
cls.course.children = [
cls.week1.location.to_deprecated_string(),
cls.week2.location.to_deprecated_string(),
cls.week3.location.to_deprecated_string()
]
cls.homework = ItemFactory.create(
parent_location=cls.week1.location,
due=cls.due
)
cls.week1.children = [cls.homework.location.to_deprecated_string()]
def setUp(self):
"""
Fixtures.
"""
super(TestDueDateExtensions, self).setUp()
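        # Fixture layout: user1 has state in all three weeks plus the homework;
        # user2 and user3 only have state in week1 and the homework.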
user1 = UserFactory.create()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week2.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week3.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
user2 = UserFactory.create()
StudentModule(
state='{}',
student_id=user2.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user2.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
user3 = UserFactory.create()
StudentModule(
state='{}',
student_id=user3.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user3.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
self.user1 = user1
self.user2 = user2
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
def test_change_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
'due_datetime': '12/30/2013 00:00'
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(datetime.datetime(2013, 12, 30, 0, 0, tzinfo=utc),
get_extended_due(self.course, self.week1, self.user1))
def test_change_to_invalid_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
'due_datetime': '01/01/2009 00:00'
})
self.assertEqual(response.status_code, 400, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week1, self.user1)
)
def test_change_nonexistent_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week3.location.to_deprecated_string(),
'due_datetime': '12/30/2013 00:00'
})
self.assertEqual(response.status_code, 400, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week3, self.user1)
)
def test_reset_date(self):
self.test_change_due_date()
url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week1, self.user1)
)
def test_reset_nonexistent_extension(self):
url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
})
self.assertEqual(response.status_code, 400, response.content)
@SharedModuleStoreTestCase.modifies_courseware
def test_reset_extension_to_deleted_date(self):
"""
Test that we can delete a due date extension after deleting the normal
due date, without causing an error.
"""
self.test_change_due_date()
self.week1.due = None
self.week1 = self.store.update_item(self.week1, self.user1.id)
# Now, week1's normal due date is deleted but the extension still exists.
url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week1, self.user1)
)
def test_show_unit_extensions(self):
self.test_change_due_date()
url = reverse('show_unit_extensions',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'url': self.week1.location.to_deprecated_string()})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(json.loads(response.content), {
u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
u'Full Name': self.user1.profile.name,
u'Username': self.user1.username}],
u'header': [u'Username', u'Full Name', u'Extended Due Date'],
u'title': u'Users with due date extensions for %s' %
self.week1.display_name})
def test_show_student_extensions(self):
self.test_change_due_date()
url = reverse('show_student_extensions',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url, {'student': self.user1.username})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(json.loads(response.content), {
u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
u'Unit': self.week1.display_name}],
u'header': [u'Unit', u'Extended Due Date'],
u'title': u'Due date extensions for %s (%s)' % (
self.user1.profile.name, self.user1.username)})
@attr('shard_1')
@override_settings(REGISTRATION_CODE_LENGTH=8)
class TestCourseRegistrationCodes(SharedModuleStoreTestCase):
"""
    Test generation of E-commerce Course Registration Codes.
"""
@classmethod
def setUpClass(cls):
super(TestCourseRegistrationCodes, cls).setUpClass()
cls.course = CourseFactory.create()
cls.url = reverse(
'generate_registration_codes',
kwargs={'course_id': cls.course.id.to_deprecated_string()}
)
def setUp(self):
"""
Fixtures.
"""
super(TestCourseRegistrationCodes, self).setUp()
CourseModeFactory.create(course_id=self.course.id, min_price=50)
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
CourseSalesAdminRole(self.course.id).add_users(self.instructor)
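        # Generate 12 registration codes up front; the first five are marked as
        # redeemed below so tests cover both used and unused codes.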
data = {
'total_registration_codes': 12, 'company_name': 'Test Group', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street',
'address_line_2': '', 'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(self.url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
for i in range(5):
order = Order(user=self.instructor, status='purchased')
order.save()
# Spent(used) Registration Codes
        for i in range(1, 6):
registration_code_redemption = RegistrationCodeRedemption(
registration_code_id=i,
redeemed_by=self.instructor
)
registration_code_redemption.save()
@override_settings(FINANCE_EMAIL='finance@example.com')
def test_finance_email_in_recipient_list_when_generating_registration_codes(self):
"""
Test to verify that the invoice will also be sent to the FINANCE_EMAIL when
generating registration codes
"""
url_reg_code = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 5, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 121.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': 'True'
}
response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
        # Check the last entry in mail.outbox; the FINANCE_EMAIL is appended
        # to the recipient list last when generating registration codes.
self.assertEqual(mail.outbox[-1].to[0], 'finance@example.com')
def test_user_invoice_copy_preference(self):
"""
Test to remember user invoice copy preference
"""
url_reg_code = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 5, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 121.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': 'True'
}
        # The user invoice copy preference is saved in the user api preference model.
response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
# get user invoice copy preference.
url_user_invoice_preference = reverse('get_user_invoice_preference',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url_user_invoice_preference, data)
result = json.loads(response.content)
self.assertEqual(result['invoice_copy'], True)
# updating the user invoice copy preference during code generation flow
data['invoice'] = ''
response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
# get user invoice copy preference.
url_user_invoice_preference = reverse('get_user_invoice_preference',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url_user_invoice_preference, data)
result = json.loads(response.content)
self.assertEqual(result['invoice_copy'], False)
def test_generate_course_registration_codes_csv(self):
"""
Test to generate a response of all the generated course registration codes
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 15, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 17)
def test_generate_course_registration_with_redeem_url_codes_csv(self):
"""
Test to generate a response of all the generated course registration codes
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 15, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 17)
rows = body.split('\n')
index = 1
while index < len(rows):
if rows[index]:
row_data = rows[index].split(',')
code = row_data[0].replace('"', '')
self.assertTrue(row_data[1].startswith('"http')
and row_data[1].endswith('/shoppingcart/register/redeem/{0}/"'.format(code)))
index += 1
@patch.object(instructor.views.api, 'random_code_generator',
Mock(side_effect=['first', 'second', 'third', 'fourth']))
def test_generate_course_registration_codes_matching_existing_coupon_code(self):
"""
Test generating registration codes when a generated code already exists in the Coupon table.
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
coupon = Coupon(code='first', course_id=self.course.id.to_deprecated_string(), created_by=self.instructor)
coupon.save()
data = {
'total_registration_codes': 3, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 5) # 1 for headers, 1 for new line at the end and 3 for the actual data
@patch.object(instructor.views.api, 'random_code_generator',
Mock(side_effect=['first', 'first', 'second', 'third']))
def test_generate_course_registration_codes_integrity_error(self):
"""
Test that an IntegrityError on a duplicate generated code is handled by retrying with a new code.
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 2, 'company_name': 'Test Group', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 4)
def test_spent_course_registration_codes_csv(self):
"""
Test to generate a response of all the spent course registration codes
"""
url = reverse('spent_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'spent_company_name': ''}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 7)
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'unit_price': 122.45, 'company_contact_email': 'Test@company.com', 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
for i in range(9):
order = Order(user=self.instructor, status='purchased')
order.save()
# Redeem the nine newly generated registration codes (IDs 13-21), marking them as spent
for i in range(9):
i += 13
registration_code_redemption = RegistrationCodeRedemption(
registration_code_id=i,
redeemed_by=self.instructor
)
registration_code_redemption.save()
data = {'spent_company_name': 'Group Alpha'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 11)
def test_active_course_registration_codes_csv(self):
"""
Test to generate a response of all the active course registration codes
"""
url = reverse('active_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'active_company_name': ''}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 9)
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
data = {'active_company_name': 'Group Alpha'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 11)
def test_get_all_course_registration_codes_csv(self):
"""
Test to generate a response of all the course registration codes
"""
url = reverse(
'get_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {'download_company_name': ''}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 14)
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
data = {'download_company_name': 'Group Alpha'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 11)
def test_pdf_file_throws_exception(self):
"""
Test that registration code generation still succeeds when the mocked
PDF invoice generation raises an exception.
"""
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
with patch.object(PDFInvoice, 'generate_pdf', side_effect=Exception):
response = self.client.post(generate_code_url, data)
self.assertEqual(response.status_code, 200, response.content)
def test_get_codes_with_sale_invoice(self):
"""
Test downloading registration codes that were generated with a sale invoice.
"""
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 5.5, 'company_name': 'Group Invoice', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': True
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
url = reverse('get_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'download_company_name': 'Group Invoice'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
def test_with_invalid_unit_price(self):
"""
Test that generating registration codes fails with a 400 error when the unit price is not a valid number.
"""
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 10, 'company_name': 'Group Invoice', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 'invalid', 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': True
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 400, response.content)
self.assertIn('Could not parse amount as', response.content)
def test_get_historical_coupon_codes(self):
"""
Test to download a response of all the active coupon codes
"""
get_coupon_code_url = reverse(
'get_coupon_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
for i in range(10):
coupon = Coupon(
code='test_code{0}'.format(i), description='test_description', course_id=self.course.id,
percentage_discount='{0}'.format(i), created_by=self.instructor, is_active=True
)
coupon.save()
# Now create coupons with expiration dates
for i in range(5):
coupon = Coupon(
code='coupon{0}'.format(i), description='test_description', course_id=self.course.id,
percentage_discount='{0}'.format(i), created_by=self.instructor, is_active=True,
expiration_date=datetime.datetime.now(pytz.UTC) + datetime.timedelta(days=2)
)
coupon.save()
response = self.client.get(get_coupon_code_url)
self.assertEqual(response.status_code, 200, response.content)
# filter all the coupons
for coupon in Coupon.objects.all():
self.assertIn(
'"{coupon_code}","{course_id}","{discount}","{description}","{expiration_date}","{is_active}",'
'"{code_redeemed_count}","{total_discounted_seats}","{total_discounted_amount}"'.format(
coupon_code=coupon.code,
course_id=coupon.course_id,
discount=coupon.percentage_discount,
description=coupon.description,
expiration_date=coupon.display_expiry_date,
is_active=coupon.is_active,
code_redeemed_count="0",
total_discounted_seats="0",
total_discounted_amount="0",
), response.content
)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_COUPON_CSV_HEADER))
@attr('shard_1')
class TestBulkCohorting(SharedModuleStoreTestCase):
"""
Test adding users to cohorts in bulk via CSV upload.
"""
@classmethod
def setUpClass(cls):
super(TestBulkCohorting, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestBulkCohorting, self).setUp()
self.staff_user = StaffFactory(course_key=self.course.id)
self.non_staff_user = UserFactory.create()
self.tempdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tempdir)
def call_add_users_to_cohorts(self, csv_data, suffix='.csv', method='POST'):
"""
Call `add_users_to_cohorts` with a file generated from `csv_data`.
"""
# this temporary file is removed by the addCleanup(shutil.rmtree, ...) registered in setUp
__, file_name = tempfile.mkstemp(suffix=suffix, dir=self.tempdir)
with open(file_name, 'w') as file_pointer:
file_pointer.write(csv_data.encode('utf-8'))
with open(file_name, 'r') as file_pointer:
url = reverse('add_users_to_cohorts', kwargs={'course_id': unicode(self.course.id)})
if method == 'POST':
return self.client.post(url, {'uploaded-file': file_pointer})
elif method == 'GET':
return self.client.get(url, {'uploaded-file': file_pointer})
def expect_error_on_file_content(self, file_content, error, file_suffix='.csv'):
"""
Verify that we get the error we expect for a given file input.
"""
self.client.login(username=self.staff_user.username, password='test')
response = self.call_add_users_to_cohorts(file_content, suffix=file_suffix)
self.assertEqual(response.status_code, 400)
result = json.loads(response.content)
self.assertEqual(result['error'], error)
def verify_success_on_file_content(self, file_content, mock_store_upload, mock_cohort_task):
"""
Verify that `add_users_to_cohorts` successfully validates the
file content, uploads the input file, and triggers the
background task.
"""
mock_store_upload.return_value = (None, 'fake_file_name.csv')
self.client.login(username=self.staff_user.username, password='test')
response = self.call_add_users_to_cohorts(file_content)
self.assertEqual(response.status_code, 204)
self.assertTrue(mock_store_upload.called)
self.assertTrue(mock_cohort_task.called)
def test_no_cohort_field(self):
"""
Verify that we get a descriptive verification error when we haven't
included a cohort field in the uploaded CSV.
"""
self.expect_error_on_file_content(
'username,email\n', "The file must contain a 'cohort' column containing cohort names."
)
def test_no_username_or_email_field(self):
"""
Verify that we get a descriptive verification error when we haven't
included a username or email field in the uploaded CSV.
"""
self.expect_error_on_file_content(
'cohort\n', "The file must contain a 'username' column, an 'email' column, or both."
)
def test_empty_csv(self):
"""
Verify that we get a descriptive verification error when we haven't
included any data in the uploaded CSV.
"""
self.expect_error_on_file_content(
'', "The file must contain a 'cohort' column containing cohort names."
)
def test_wrong_extension(self):
"""
Verify that we get a descriptive verification error when we haven't
uploaded a file with a '.csv' extension.
"""
self.expect_error_on_file_content(
'', "The file must end with the extension '.csv'.", file_suffix='.notcsv'
)
def test_non_staff_no_access(self):
"""
Verify that we can't access the view when we aren't a staff user.
"""
self.client.login(username=self.non_staff_user.username, password='test')
response = self.call_add_users_to_cohorts('')
self.assertEqual(response.status_code, 403)
def test_post_only(self):
"""
Verify that we can't call the view when we aren't using POST.
"""
self.client.login(username=self.staff_user.username, password='test')
response = self.call_add_users_to_cohorts('', method='GET')
self.assertEqual(response.status_code, 405)
@patch('instructor.views.api.instructor_task.api.submit_cohort_students')
@patch('instructor.views.api.store_uploaded_file')
def test_success_username(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call a background task when
the CSV has username and cohort columns.
"""
self.verify_success_on_file_content(
'username,cohort\nfoo_username,bar_cohort', mock_store_upload, mock_cohort_task
)
@patch('instructor.views.api.instructor_task.api.submit_cohort_students')
@patch('instructor.views.api.store_uploaded_file')
def test_success_email(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when the CSV has email and cohort columns.
"""
self.verify_success_on_file_content(
'email,cohort\nfoo_email,bar_cohort', mock_store_upload, mock_cohort_task
)
@patch('instructor.views.api.instructor_task.api.submit_cohort_students')
@patch('instructor.views.api.store_uploaded_file')
def test_success_username_and_email(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when the CSV has username, email and cohort columns.
"""
self.verify_success_on_file_content(
'username,email,cohort\nfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
)
@patch('instructor.views.api.instructor_task.api.submit_cohort_students')
@patch('instructor.views.api.store_uploaded_file')
def test_success_carriage_return(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when lines in the CSV are delimited by carriage returns.
"""
self.verify_success_on_file_content(
'username,email,cohort\rfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
)
@patch('instructor.views.api.instructor_task.api.submit_cohort_students')
@patch('instructor.views.api.store_uploaded_file')
def test_success_carriage_return_line_feed(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when lines in the CSV are delimited by carriage returns and line
feeds.
"""
self.verify_success_on_file_content(
'username,email,cohort\r\nfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
)
|
zerobatu/edx-platform
|
lms/djangoapps/instructor/tests/test_api.py
|
Python
|
agpl-3.0
| 196,429
|
[
"VisIt"
] |
cc17eafee7e9589e532327e74e9d68da852c6033f37d649f68cc2af8f273bd3b
|
"""
(c) RIKEN 2015. All rights reserved.
Author: Keitaro Yamashita
This software is released under the new BSD License; see LICENSE.
"""
import re
import os
import numpy
from cctbx import crystal
from cctbx import miller
from cctbx import uctbx
from cctbx.array_family import flex
from libtbx.utils import null_out
from yamtbx.dataproc.xds import re_xds_kwd
def is_xds_ascii(filein):
if not os.path.isfile(filein): return False
line = open(filein).readline()
return "FORMAT=XDS_ASCII" in line
# is_xds_ascii()
class XDS_ASCII:
def __init__(self, filein, log_out=None, read_data=True, i_only=False):
self._log = null_out() if log_out is None else log_out
self._filein = filein
self.indices = flex.miller_index()
self.i_only = i_only
self.iobs, self.sigma_iobs, self.xd, self.yd, self.zd, self.rlp, self.peak, self.corr = [flex.double() for i in xrange(8)]
self.iframe = flex.int()
self.iset = flex.int() # only for XSCALE
self.input_files = {} # only for XSCALE {iset: [filename, wavelength, unit_cell], ...}
self.by_dials = False
self.read_header()
if read_data:
self.read_data()
def read_header(self):
re_item = re.compile("!ITEM_([^=]+)=([0-9]+)")
colindex = {} # {"H":1, "K":2, "L":3, ...}
nitemfound = 0
flag_data_start = False
num_hkl = 0
headers = []
for line in open(self._filein):
if flag_data_start:
if line.startswith("!END_OF_DATA"):
break
num_hkl += 1
continue
if line.startswith('!END_OF_HEADER'):
flag_data_start = True
continue
if line.startswith("!Generated by dials"):
self.by_dials = True
continue
if line.startswith("! ISET="):
pars = dict(re_xds_kwd.findall(line))
iset = int(pars["ISET"])
if iset not in self.input_files: self.input_files[iset] = [None, None, None]
if "INPUT_FILE" in pars:
self.input_files[iset][0] = pars["INPUT_FILE"]
elif "X-RAY_WAVELENGTH" in pars:
tmp = pars["X-RAY_WAVELENGTH"]
if " (" in tmp: tmp = tmp[:tmp.index(" (")]
self.input_files[iset][1] = tmp
elif "UNIT_CELL_CONSTANTS" in pars:
tmp = pars["UNIT_CELL_CONSTANTS"]
self.input_files[iset][2] = tmp
else:
headers.extend(re_xds_kwd.findall(line[line.index("!")+1:]))
self.nx, self.ny, self.anomalous, self.distance, self.wavelength, self.zmin, self.zmax = (None,)*7
for key, val in headers:
if key == "NUMBER_OF_ITEMS_IN_EACH_DATA_RECORD":
nitem = int(val.strip())
print >>self._log, 'number of items according to header is', nitem
elif key == "UNIT_CELL_CONSTANTS":
a, b, c, al, be, ga = map(float, val.strip().split())
elif key == "UNIT_CELL_A-AXIS":
self.a_axis = tuple(map(float, val.split()))
elif key == "UNIT_CELL_B-AXIS":
self.b_axis = tuple(map(float, val.split()))
elif key == "UNIT_CELL_C-AXIS":
self.c_axis = tuple(map(float, val.split()))
elif key.startswith("ITEM_"):
item, ind = key[len("ITEM_"):], int(val)
colindex[item] = ind - 1
nitemfound += 1
elif key == "NX":
self.nx = int(val)
elif key == "NY":
self.ny = int(val)
elif key == "QX":
self.qx = float(val)
elif key == "QY":
self.qy = float(val)
elif key == "ORGX":
self.orgx = float(val)
elif key == "ORGY":
self.orgy = float(val)
elif key == "DATA_RANGE":
self.zmin, self.zmax = map(int, val.strip().split())
elif key == "SPACE_GROUP_NUMBER":
ispgrp = int(val.strip())
elif key == "FRIEDEL'S_LAW":
assert val.strip() in ("TRUE", "FALSE")
self.anomalous = val.strip() == "FALSE"
elif key == "DETECTOR_DISTANCE":
self.distance = float(val)
elif key == "X-RAY_WAVELENGTH":
self.wavelength = float(val.split()[0])
elif key == "INCIDENT_BEAM_DIRECTION":
self.incident_axis = tuple(map(float, val.split()))
elif key == "ROTATION_AXIS":
self.rotation_axis = tuple(map(float, val.split()))
elif key == "OSCILLATION_RANGE":
self.osc_range = float(val.split()[0])
elif key == "VARIANCE_MODEL":
self.variance_model = tuple(map(float, val.split()))
assert nitem == len(colindex)
self._colindex = colindex
self._num_hkl = num_hkl
self.symm = crystal.symmetry(unit_cell=(a, b, c, al, be, ga),
space_group=ispgrp)
self.symm.show_summary(self._log)
print >>self._log, 'data_range=', self.zmin, self.zmax
# read_header()
def read_data(self):
colindex = self._colindex
is_xscale = "RLP" not in colindex
flag_data_start = False
col_H, col_K, col_L = colindex["H"], colindex["K"], colindex["L"]
col_i, col_sig, col_xd, col_yd, col_zd = colindex["IOBS"], colindex["SIGMA(IOBS)"], colindex["XD"], colindex["YD"], colindex["ZD"]
col_rlp, col_peak, col_corr, col_iset = colindex.get("RLP", None), colindex.get("PEAK", None), colindex.get("CORR", None), colindex.get("ISET", None)
self.indices = []
self.xd, self.yd, self.zd = [], [], []
self.iframe, self.rlp, self.peak, self.corr, self.iset = [], [], [], [], []
for line in open(self._filein):
if flag_data_start:
if line.startswith("!END_OF_DATA"):
break
sp = line.split()
h, k, l = int(sp[col_H]), int(sp[col_K]), int(sp[col_L])
self.indices.append([h,k,l])
self.iobs.append(float(sp[col_i]))
self.sigma_iobs.append(float(sp[col_sig]))
if not self.i_only:
self.xd.append(float(sp[col_xd]))
self.yd.append(float(sp[col_yd]))
self.zd.append(float(sp[col_zd]))
self.iframe.append(int(self.zd[-1])+1)
if not is_xscale:
self.rlp.append(float(sp[col_rlp]))
self.peak.append(float(sp[col_peak]))
self.corr.append(float(sp[col_corr]))
else:
self.iset.append(int(sp[col_iset]))
#res = symm.unit_cell().d((h,k,l))
if self.iframe[-1] < 0:
self.iframe[-1] = 0
print >>self._log, 'reflection with surprisingly low z-value:', self.zd[-1]
if line.startswith('!END_OF_HEADER'):
flag_data_start = True
self.indices = flex.miller_index(self.indices)
self.iobs, self.sigma_iobs, self.xd, self.yd, self.zd, self.rlp, self.peak, self.corr = [flex.double(x) for x in (self.iobs, self.sigma_iobs, self.xd, self.yd, self.zd, self.rlp, self.peak, self.corr)]
self.iframe = flex.int(self.iframe)
self.iset = flex.int(self.iset) # only for XSCALE
print >>self._log, "Reading data done.\n"
# read_data()
def get_frame_range(self):
"""quick function only to get frame number range"""
flag_data_start = False
col_zd = self._colindex["ZD"]
min_frame, max_frame = float("inf"), -float("inf")
for line in open(self._filein):
if flag_data_start:
if line.startswith("!END_OF_DATA"):
break
sp = line.split()
iframe = int(float(sp[col_zd]))+1
if iframe > 0 and iframe < min_frame: min_frame = iframe
if iframe > max_frame: max_frame = iframe
if line.startswith('!END_OF_HEADER'):
flag_data_start = True
return min_frame, max_frame
# get_frame_range()
def as_miller_set(self, anomalous_flag=None):
if anomalous_flag is None:
anomalous_flag = self.anomalous
return miller.set(crystal_symmetry=self.symm,
indices=self.indices,
anomalous_flag=anomalous_flag)
# as_miller_set()
def i_obs(self, anomalous_flag=None):
array_info = miller.array_info(source_type="xds_ascii")#, wavelength=)
return miller.array(self.as_miller_set(anomalous_flag),
data=self.iobs, sigmas=self.sigma_iobs).set_info(array_info).set_observation_type_xray_intensity()
# i_obs()
def remove_selection(self, sel):
params = ("indices", "iobs", "sigma_iobs")
if not self.i_only:
params += ("xd", "yd", "zd", "rlp", "peak", "corr", "iframe", "iset")
for p in params:
if not getattr(self, p): continue
setattr(self, p, getattr(self, p).select(~sel))
# remove_selection()
def remove_rejected(self):
sel = self.sigma_iobs <= 0
self.remove_selection(sel)
# remove_rejected()
def write_selected(self, sel, hklout):
ofs = open(hklout, "w")
data_flag = False
count = 0
for line in open(self._filein):
if line.startswith('!END_OF_HEADER'):
ofs.write(line)
data_flag = True
elif line.startswith("!END_OF_DATA"):
ofs.write(line)
break
elif not data_flag:
ofs.write(line)
elif data_flag:
if sel[count]: ofs.write(line)
count += 1
# write_selected()
def write_reindexed(self, op, hklout, space_group=None):
"""
XXX Assuming hkl has 6*3 width!!
"""
ofs = open(hklout, "w")
col_H, col_K, col_L = map(lambda x:self._colindex[x], "HKL")
assert col_H==0 and col_K==1 and col_L==2
tr_mat = numpy.array(op.c_inv().r().as_double()).reshape(3,3).transpose()
transformed = numpy.dot(tr_mat, numpy.array([self.a_axis, self.b_axis, self.c_axis]))
data_flag = False
for line in open(self._filein):
if line.startswith('!UNIT_CELL_CONSTANTS='):
# XXX split by fixed columns
cell = uctbx.unit_cell(line[line.index("=")+1:].strip())
cell_tr = cell.change_basis(op)
if space_group is not None: cell_tr = space_group.average_unit_cell(cell_tr)
ofs.write("!UNIT_CELL_CONSTANTS=%10.3f%10.3f%10.3f%8.3f%8.3f%8.3f\n" % cell_tr.parameters())
elif line.startswith('!SPACE_GROUP_NUMBER=') and space_group is not None:
ofs.write("!SPACE_GROUP_NUMBER=%5d \n" % space_group.type().number())
elif line.startswith("!UNIT_CELL_A-AXIS="):
ofs.write("!UNIT_CELL_A-AXIS=%10.3f%10.3f%10.3f\n" % tuple(transformed[0,:]))
elif line.startswith("!UNIT_CELL_B-AXIS="):
ofs.write("!UNIT_CELL_B-AXIS=%10.3f%10.3f%10.3f\n" % tuple(transformed[1,:]))
elif line.startswith("!UNIT_CELL_C-AXIS="):
ofs.write("!UNIT_CELL_C-AXIS=%10.3f%10.3f%10.3f\n" % tuple(transformed[2,:]))
elif line.startswith('!END_OF_HEADER'):
ofs.write(line)
data_flag = True
elif line.startswith("!END_OF_DATA"):
ofs.write(line)
break
elif not data_flag:
ofs.write(line)
elif data_flag:
if not self.by_dials:
hkl = tuple(map(int, line[:18].split()))
hkl = op.apply(hkl)
ofs.write("%6d%6d%6d"%hkl)
ofs.write(line[18:])
else:
sp = line.split()
hkl = op.apply(tuple(map(int, sp[:3])))
ofs.write(" ".join(map(str, hkl)))
ofs.write(" ")
ofs.write(" ".join(sp[3:]))
ofs.write("\n")
return cell_tr
# write_reindexed()
#class XDS_ASCII
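# Usage sketch (not part of the original module; the file name is illustrative):
#   if is_xds_ascii("XDS_ASCII.HKL"):
#       xac = XDS_ASCII("XDS_ASCII.HKL")
#       xac.remove_rejected()   # drop reflections with sigma <= 0
#       i_obs = xac.i_obs()     # cctbx miller array of observed intensities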
|
keitaroyam/yamtbx
|
yamtbx/dataproc/xds/xds_ascii.py
|
Python
|
bsd-3-clause
| 12,714
|
[
"CRYSTAL"
] |
b9d3955ec895640750964ff533ff9c311041cd47701f4b656699c7491e2dde06
|
import pytest
@pytest.fixture(params=[True, False])
def raw(request):
return request.param
@pytest.fixture(
params=[
"triang",
"blackman",
"hamming",
"bartlett",
"bohman",
"blackmanharris",
"nuttall",
"barthann",
]
)
def win_types(request):
return request.param
@pytest.fixture(params=["kaiser", "gaussian", "general_gaussian", "exponential"])
def win_types_special(request):
return request.param
@pytest.fixture(
params=["sum", "mean", "median", "max", "min", "var", "std", "kurt", "skew"]
)
def arithmetic_win_operators(request):
return request.param
@pytest.fixture(params=["right", "left", "both", "neither"])
def closed(request):
return request.param
@pytest.fixture(params=[True, False])
def center(request):
return request.param
@pytest.fixture(params=[None, 1])
def min_periods(request):
return request.param
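# Hedged usage sketch (hypothetical test, not part of this conftest): a test
# that names a fixture as an argument is run once per fixture parameter.
#   import pandas as pd
#   def test_rolling_mean_length(center, min_periods):
#       ser = pd.Series(range(10))
#       result = ser.rolling(3, center=center, min_periods=min_periods).mean()
#       assert len(result) == len(ser)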
|
toobaz/pandas
|
pandas/tests/window/conftest.py
|
Python
|
bsd-3-clause
| 935
|
[
"Gaussian"
] |
98406e5e247f9f33af0e80c4b065aa05de054bed33dc727fc0fc9796116f086e
|
# coding: utf-8
from __future__ import division, unicode_literals
"""
TODO: Modify unittest doc.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "5/22/14"
import unittest
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.analysis.diffraction.xrd import XRDCalculator
from pymatgen.util.testing import PymatgenTest
class XRDCalculatorTest(PymatgenTest):
def test_get_xrd_data(self):
s = self.get_structure("CsCl")
c = XRDCalculator()
data = c.get_xrd_data(s, two_theta_range=(0, 90))
# Check the first two peaks
self.assertAlmostEqual(data[0][0], 21.107738329639844)
self.assertAlmostEqual(data[0][1], 36.483184003748946)
self.assertEqual(data[0][2], {(1, 0, 0): 6})
self.assertAlmostEqual(data[0][3], 4.2089999999999996)
self.assertAlmostEqual(data[1][0], 30.024695921112777)
self.assertAlmostEqual(data[1][1], 100)
self.assertEqual(data[1][2], {(1, 1, 0): 12})
self.assertAlmostEqual(data[1][3], 2.976212442014178)
s = self.get_structure("LiFePO4")
data = c.get_xrd_data(s, two_theta_range=(0, 90))
self.assertAlmostEqual(data[1][0], 17.03504233621785)
self.assertAlmostEqual(data[1][1], 50.400928948337075)
s = self.get_structure("Li10GeP2S12")
data = c.get_xrd_data(s, two_theta_range=(0, 90))
self.assertAlmostEqual(data[1][0], 14.058274883353876)
self.assertAlmostEqual(data[1][1], 4.4111123641667671)
# Test a hexagonal structure.
s = self.get_structure("Graphite")
data = c.get_xrd_data(s, two_theta_range=(0, 90))
self.assertAlmostEqual(data[0][0], 26.21057350859598)
self.assertAlmostEqual(data[0][1], 100)
self.assertAlmostEqual(len(list(data[0][2].keys())[0]), 4)
# Add a test case with different lengths of coefficients.
# Also test d_hkl.
coords = [[0.25, 0.25, 0.173], [0.75, 0.75, 0.827], [0.75, 0.25, 0],
[0.25, 0.75, 0], [0.25, 0.25, 0.676], [0.75, 0.75, 0.324]]
sp = ["Si", "Si", "Ru", "Ru", "Pr", "Pr"]
s = Structure(Lattice.tetragonal(4.192, 6.88), sp, coords)
data = c.get_xrd_data(s)
self.assertAlmostEqual(data[0][0], 12.86727341476735)
self.assertAlmostEqual(data[0][1], 31.448239816769796)
self.assertAlmostEqual(data[0][3], 6.88)
self.assertEqual(len(data), 42)
data = c.get_xrd_data(s, two_theta_range=[0, 60])
self.assertEqual(len(data), 18)
# Test with and without the Debye-Waller factor
tungsten = Structure(Lattice.cubic(3.1653), ["W"] * 2,
[[0, 0, 0], [0.5, 0.5, 0.5]])
data = c.get_xrd_data(tungsten, scaled=False)
self.assertAlmostEqual(data[0][0], 40.294828554672264)
self.assertAlmostEqual(data[0][1], 2414237.5633093244)
self.assertAlmostEqual(data[0][3], 2.2382050944897789)
c = XRDCalculator(debye_waller_factors={"W": 0.1526})
data = c.get_xrd_data(tungsten, scaled=False)
self.assertAlmostEqual(data[0][0], 40.294828554672264)
self.assertAlmostEqual(data[0][1], 2377745.2296686019)
self.assertAlmostEqual(data[0][3], 2.2382050944897789)
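# Usage sketch outside the test suite (illustrative; `s` stands for any
# pymatgen Structure, and each get_xrd_data entry holds (two_theta,
# intensity, hkls, d_hkl), as the assertions above demonstrate):
#   calc = XRDCalculator()  # default radiation is Cu K-alpha
#   for two_theta, intensity, hkls, d_hkl in calc.get_xrd_data(s):
#       print(two_theta, intensity, d_hkl)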
if __name__ == '__main__':
unittest.main()
|
rousseab/pymatgen
|
pymatgen/analysis/diffraction/tests/test_xrd.py
|
Python
|
mit
| 3,465
|
[
"pymatgen"
] |
4194d7ed94d6e2b66845d54b4557e0c5143ed6a2809bdcd7101851332487c942
|
"""
Implements Autodock Vina's pose-generation in tensorflow.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import warnings
import numpy as np
import tensorflow as tf
from deepchem.models import Model
from deepchem.nn import model_ops
import deepchem.utils.rdkit_util as rdkit_util
def compute_neighbor_list(coords, nbr_cutoff, N, M, n_cells, ndim=3, k=5):
"""Computes a neighbor list from atom coordinates.
Parameters
----------
coords: tf.Tensor
Shape (N, ndim)
N: int
Max number atoms
M: int
Max number neighbors
ndim: int
Dimensionality of space.
k: int
Number of nearest neighbors to pull down.
Returns
-------
nbr_list: tf.Tensor
Shape (N, M) of atom indices
"""
start = tf.to_int32(tf.reduce_min(coords))
stop = tf.to_int32(tf.reduce_max(coords))
cells = get_cells(start, stop, nbr_cutoff, ndim=ndim)
# Associate each atom with the cell it belongs to. O(N*n_cells)
# Shape (n_cells, k)
atoms_in_cells, _ = put_atoms_in_cells(coords, cells, N, n_cells, ndim, k)
# Shape (N, 1)
cells_for_atoms = get_cells_for_atoms(coords, cells, N, n_cells, ndim)
# Associate each cell with its neighbor cells. Assumes periodic boundary
# conditions, so does wraparound. O(constant)
# Shape (n_cells, 26)
neighbor_cells = compute_neighbor_cells(cells, ndim, n_cells)
# Shape (N, 26)
neighbor_cells = tf.squeeze(tf.gather(neighbor_cells, cells_for_atoms))
# coords of shape (N, ndim)
# Shape (N, 26, k, ndim)
tiled_coords = tf.tile(tf.reshape(coords, (N, 1, 1, ndim)), (1, 26, k, 1))
# Shape (N, 26, k)
nbr_inds = tf.gather(atoms_in_cells, neighbor_cells)
# Shape (N, 26, k)
atoms_in_nbr_cells = tf.gather(atoms_in_cells, neighbor_cells)
# Shape (N, 26, k, ndim)
nbr_coords = tf.gather(coords, atoms_in_nbr_cells)
# For smaller systems especially, the periodic boundary conditions can
# result in neighboring cells being seen multiple times. Maybe use tf.unique to
# make sure duplicate neighbors are ignored?
# TODO(rbharath): How does distance need to be modified here to
# account for periodic boundary conditions?
# Shape (N, 26, k)
dists = tf.reduce_sum((tiled_coords - nbr_coords)**2, axis=3)
# Shape (N, 26*k)
dists = tf.reshape(dists, [N, -1])
# TODO(rbharath): This will cause an issue with duplicates!
# Shape (N, M)
closest_nbr_locs = tf.nn.top_k(dists, k=M)[1]
# N elts of size (M,) each
split_closest_nbr_locs = [
tf.squeeze(locs) for locs in tf.split(closest_nbr_locs, N)
]
# Shape (N, 26*k)
nbr_inds = tf.reshape(nbr_inds, [N, -1])
# N elts of size (26*k,) each
split_nbr_inds = [tf.squeeze(split) for split in tf.split(nbr_inds, N)]
# N elts of size (M,) each
neighbor_list = [
tf.gather(nbr_inds, closest_nbr_locs)
for (nbr_inds,
closest_nbr_locs) in zip(split_nbr_inds, split_closest_nbr_locs)
]
# Shape (N, M)
neighbor_list = tf.stack(neighbor_list)
return neighbor_list
def get_cells_for_atoms(coords, cells, N, n_cells, ndim=3):
"""Compute the cells each atom belongs to.
Parameters
----------
coords: tf.Tensor
Shape (N, ndim)
cells: tf.Tensor
(box_size**ndim, ndim) shape.
Returns
-------
cells_for_atoms: tf.Tensor
Shape (N, 1)
"""
n_cells = int(n_cells)
# Tile both cells and coords to form arrays of size (n_cells*N, ndim)
tiled_cells = tf.tile(cells, (N, 1))
# N tensors of shape (n_cells, 1)
tiled_cells = tf.split(tiled_cells, N)
# Shape (N*n_cells, 1) after tile
tiled_coords = tf.reshape(tf.tile(coords, (1, n_cells)), (n_cells * N, ndim))
# List of N tensors of shape (n_cells, 1)
tiled_coords = tf.split(tiled_coords, N)
# Lists of length N
coords_rel = [
tf.to_float(coords) - tf.to_float(cells)
for (coords, cells) in zip(tiled_coords, tiled_cells)
]
coords_norm = [tf.reduce_sum(rel**2, axis=1) for rel in coords_rel]
# Lists of length n_cells
# Get indices of k atoms closest to each cell point
closest_inds = [tf.nn.top_k(-norm, k=1)[1] for norm in coords_norm]
# TODO(rbharath): tf.stack for tf 1.0
return tf.stack(closest_inds)
def compute_closest_neighbors(coords,
cells,
atoms_in_cells,
neighbor_cells,
N,
n_cells,
ndim=3,
k=5):
"""Computes nearest neighbors from neighboring cells.
TODO(rbharath): Make this pass test
Parameters
----------
atoms_in_cells: list
Of length n_cells. Each entry tensor of shape (k, ndim)
neighbor_cells: tf.Tensor
Of shape (n_cells, 26).
N: int
Number atoms
"""
n_cells = int(n_cells)
# Tensor of shape (n_cells, k, ndim)
#atoms_in_cells = tf.stack(atoms_in_cells)
cells_for_atoms = get_cells_for_atoms(coords, cells, N, n_cells, ndim)
all_closest = []
for atom in range(N):
atom_vec = coords[atom]
cell = cells_for_atoms[atom]
nbr_inds = tf.gather(neighbor_cells, tf.to_int32(cell))
# Tensor of shape (26, k, ndim)
nbr_atoms = tf.gather(atoms_in_cells, nbr_inds)
# Reshape to (26*k, ndim)
nbr_atoms = tf.reshape(nbr_atoms, (-1, 3))
# Subtract out atom vector. Still of shape (26*k, ndim) due to broadcast.
nbr_atoms = nbr_atoms - atom_vec
# Dists of shape (26*k, 1)
nbr_dists = tf.reduce_sum(nbr_atoms**2, axis=1)
# Of shape (k, ndim)
closest_inds = tf.nn.top_k(nbr_dists, k=k)[1]
all_closest.append(closest_inds)
return all_closest
def get_cells(start, stop, nbr_cutoff, ndim=3):
"""Returns the locations of all grid points in box.
Suppose start is -10 Angstrom, stop is 10 Angstrom, nbr_cutoff is 1.
Then this would return a list of length 20**3 whose entries would be
[(-10, -10, -10), (-10, -10, -9), ..., (9, 9, 9)]
Returns
-------
cells: tf.Tensor
(box_size**ndim, ndim) shape.
"""
ranges = [tf.range(start, stop, nbr_cutoff) for _ in range(ndim)]
return tf.reshape(tf.transpose(tf.stack(tf.meshgrid(*ranges))), (-1, ndim))
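# Quick sanity check for get_cells (a sketch, not part of the original file;
# assumes a TF1-style Session since this module predates eager execution):
#   with tf.Session() as sess:
#       pts = sess.run(get_cells(-2.0, 2.0, 1.0))
#   # pts has 4**3 = 64 rows, one 3-D grid point per row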
def put_atoms_in_cells(coords, cells, N, n_cells, ndim, k=5):
"""Place each atom into cells. O(N) runtime.
Let N be the number of atoms.
Parameters
----------
coords: tf.Tensor
(N, 3) shape.
cells: tf.Tensor
(n_cells, ndim) shape.
N: int
Number atoms
ndim: int
Dimensionality of input space
k: int
Number of nearest neighbors.
Returns
-------
closest_atoms: tf.Tensor
Of shape (n_cells, k, ndim)
"""
n_cells = int(n_cells)
# Tile both cells and coords to form arrays of size (n_cells*N, ndim)
tiled_cells = tf.reshape(tf.tile(cells, (1, N)), (n_cells * N, ndim))
# TODO(rbharath): Change this for tf 1.0
# n_cells tensors of shape (N, 1)
tiled_cells = tf.split(tiled_cells, n_cells)
# Shape (N*n_cells, 1) after tile
tiled_coords = tf.tile(coords, (n_cells, 1))
# List of n_cells tensors of shape (N, 1)
tiled_coords = tf.split(tiled_coords, n_cells)
# Lists of length n_cells
coords_rel = [
tf.to_float(coords) - tf.to_float(cells)
for (coords, cells) in zip(tiled_coords, tiled_cells)
]
coords_norm = [tf.reduce_sum(rel**2, axis=1) for rel in coords_rel]
# Lists of length n_cells
# Get indices of k atoms closest to each cell point
closest_inds = [tf.nn.top_k(norm, k=k)[1] for norm in coords_norm]
# n_cells tensors of shape (k, ndim)
closest_atoms = tf.stack([tf.gather(coords, inds) for inds in closest_inds])
# Tensor of shape (n_cells, k)
closest_inds = tf.stack(closest_inds)
return closest_inds, closest_atoms
# TODO(rbharath):
# - Need to find neighbors of the cells (+/- 1 in every dimension).
# - Need to group closest atoms amongst cell neighbors
# - Need to do another top_k to find indices of closest neighbors.
# - Return N lists corresponding to neighbors for every atom.
def compute_neighbor_cells(cells, ndim, n_cells):
"""Compute neighbors of cells in grid.
# TODO(rbharath): Do we need to handle periodic boundary conditions
properly here?
# TODO(rbharath): This doesn't handle boundaries well. We hard-code
# looking for 26 neighbors, which isn't right for boundary cells in
# the cube.
Note n_cells is box_size**ndim. 26 is the number of neighbors of a cube in
a grid (including diagonals).
Parameters
----------
cells: tf.Tensor
(n_cells, 26) shape.
"""
n_cells = int(n_cells)
if ndim != 3:
raise ValueError("Not defined for dimensions besides 3")
# Number of neighbors of central cube in 3-space is
# 3^2 (top-face) + 3^2 (bottom-face) + (3^2-1) (middle-band)
# TODO(rbharath)
k = 9 + 9 + 8 # the 26 neighbors of a cell, like the 26 outer cubelets of a Rubik's cube
#n_cells = int(cells.get_shape()[0])
# Tile cells to form arrays of size (n_cells*n_cells, ndim)
# Two tilings (a, b, c, a, b, c, ...) vs. (a, a, a, b, b, b, etc.)
# Tile (a, a, a, b, b, b, etc.)
tiled_centers = tf.reshape(
tf.tile(cells, (1, n_cells)), (n_cells * n_cells, ndim))
# Tile (a, b, c, a, b, c, ...)
tiled_cells = tf.tile(cells, (n_cells, 1))
# Lists of n_cells tensors of shape (N, 1)
tiled_centers = tf.split(tiled_centers, n_cells)
tiled_cells = tf.split(tiled_cells, n_cells)
# Lists of length n_cells
coords_rel = [
tf.to_float(cells) - tf.to_float(centers)
for (cells, centers) in zip(tiled_centers, tiled_cells)
]
coords_norm = [tf.reduce_sum(rel**2, axis=1) for rel in coords_rel]
# Lists of length n_cells
# Get indices of k atoms closest to each cell point
# n_cells tensors of shape (26,)
closest_inds = tf.stack([tf.nn.top_k(norm, k=k)[1] for norm in coords_norm])
return closest_inds
def cutoff(d, x):
"""Truncates interactions that are too far away."""
return tf.where(d < 8, x, tf.zeros_like(x))
def gauss_1(d):
"""Computes first Gaussian interaction term.
Note that d must be in Angstrom
"""
return tf.exp(-(d / 0.5)**2)
def gauss_2(d):
"""Computes second Gaussian interaction term.
Note that d must be in Angstrom.
"""
return tf.exp(-((d - 3) / 2)**2)
def repulsion(d):
"""Computes repulsion interaction term."""
return tf.where(d < 0, d**2, tf.zeros_like(d))
def hydrophobic(d):
"""Compute hydrophobic interaction term."""
where = tf.where(d < 1.5, 1.5 - d, tf.zeros_like(d))
return tf.where(d < 0.5, tf.ones_like(d), where)
def hbond(d):
"""Computes hydrogen bond term."""
where = tf.where(d < 0, (1.0 / 0.7) * (0 - d), tf.zeros_like(d))
return tf.where(d < -0.7, tf.ones_like(d), where)
def g(c, Nrot):
"""Nonlinear function mapping interactions to free energy."""
w = tf.Variable(tf.random_normal([
1,
], stddev=.3))
return c / (1 + w * Nrot)
def h(d):
"""Sum of energy terms used in Autodock Vina.
.. math:: h_{t_i,t_j}(d) = w_1\textrm{gauss}_1(d) + w_2\textrm{gauss}_2(d) + w_3\textrm{repulsion}(d) + w_4\textrm{hydrophobic}(d) + w_5\textrm{hbond}(d)
"""
w_1 = tf.Variable(tf.random_normal([
1,
], stddev=.3))
w_2 = tf.Variable(tf.random_normal([
1,
], stddev=.3))
w_3 = tf.Variable(tf.random_normal([
1,
], stddev=.3))
w_4 = tf.Variable(tf.random_normal([
1,
], stddev=.3))
w_5 = tf.Variable(tf.random_normal([
1,
], stddev=.3))
return w_1 * gauss_1(d) + w_2 * gauss_2(d) + w_3 * repulsion(
d) + w_4 * hydrophobic(d) + w_5 * hbond(d)
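# Sanity-check sketch for h (hypothetical, not in the original module). The
# five weights are freshly created random tf.Variables, so outputs vary
# between runs:
#   energies = h(tf.constant([0.0, 1.0, 4.0]))
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       print(sess.run(energies))  # three combined interaction energies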
class VinaModel(Model):
def __init__(self, logdir=None, batch_size=50):
"""Vina models.
.. math:: c = \sum_{i < j} f_{t_i,t_j}(r_{ij})
Over all pairs of atoms that can move relative to one-another. :math:`t_i` is the
atomtype of atom :math:`i`.
Can view as
.. math:: c = c_\textrm{inter} + c_\textrm{intra}
depending on whether atoms can move relative to one another. Free energy is
predicted only from :math:`c_\textrm{inter}`. Let :math:`R_t` be the Van der Waal's radius of
atom of type t. Then define surface distance
.. math:: d_{ij} = r_{ij} - R_{t_i} - R_{t_j}
Then the energy term is
.. math:: f_{t_i,t_j}(r_{ij}) = \textrm{cutoff}(d_{ij}, h_{t_i,t_j}(d_{ij}))
where
.. math:: \textrm{cutoff}(d, x) = \begin{cases} x & d < 8 \textrm{ Angstrom} \\ 0 & \textrm{otherwise} \end{cases}
The inner function can be further broken down into a sum of terms
.. math:: h_{t_i,t_j}(d) = w_1\textrm{gauss}_1(d) + w_2\textrm{gauss}_2(d) + w_3\textrm{repulsion}(d) + w_4\textrm{hydrophobic}(d) + w_5\textrm{hbond}(d)
these terms are defined as follows (all constants are in Angstroms):
.. math::
\textrm{gauss}_1(d) = \exp(-(d/(0.5))^2)
\textrm{gauss}_2(d) = \exp(-((d-3)/(2))^2)
\textrm{repulsion}(d) = \begin{cases} d^2 & d < 0 \\ 0 & d \geq 0 \end{cases}
\textrm{hydrophobic}(d) = \begin{cases} 1 & d < 0.5 \\ 1.5 - d & \textrm{otherwise} \\ 0 & d > 1.5 \end{cases}
\textrm{hbond}(d) = \begin{cases} 1 & d < -0.7 \\ (1.0/.7)(0 - d) & \textrm{otherwise} \\ 0 & d > 0 \end{cases}
The free energy of binding is computed as a function of the intermolecular interactions
..math:: s = g(c_\textrm{inter})
This function is defined as
..math:: g(c) = \frac{c}{1 + wN_\textrm{rot}}
Where :math:`w` is a weight parameter and :math:`N_\textrm{rot}` is the number of
rotatable bonds between heavy atoms in the ligand.
Gradients are taken backwards through the binding-free energy function with
respect to the position of the ligand and with respect to the torsions of
rotatable bonds and flexible ligands.
TODO(rbharath): It's not clear to me how the effect of the torsions on the :math:`d_{ij}` is
computed. Is there a way to get distances from torsions?
The idea is that mutations are applied to the ligand, and then gradient descent is
used to optimize starting from the initial structure. The code to compute the mutations
is specified
https://github.com/mwojcikowski/smina/blob/master/src/lib/mutate.cpp
Seems to do random quaternion rotations of the ligand. It's not clear to me yet
how the flexible and rotatable bonds are handled for the system.
Need to know an initial search space for the compound. Typically a cubic
binding box.
References
----------
Autodock Vina Paper:
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3041641/
Smina Paper:
http://pubs.acs.org/doi/pdf/10.1021/ci300604z
Omega Paper (ligand conformation generation):
http://www.sciencedirect.com/science/article/pii/S1093326302002048
QuickVina:
http://www.cil.ntu.edu.sg/Courses/papers/journal/QuickVina.pdf
"""
pass
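# NOTE: the second __init__ below silently replaces this documentation-only
# stub, so only the deprecation-warning constructor is ever used.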
def __init__(self, max_local_steps=10, max_mutations=10):
warnings.warn("VinaModel is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.max_local_steps = max_local_steps
self.max_mutations = max_mutations
self.graph, self.input_placeholders, self.output_placeholder = self.construct_graph(
)
self.sess = tf.Session(graph=self.graph)
def construct_graph(self,
N_protein=1000,
N_ligand=100,
M=50,
ndim=3,
k=5,
nbr_cutoff=6):
"""Builds the computational graph for Vina."""
graph = tf.Graph()
with graph.as_default():
n_cells = 64
# TODO(rbharath): Make this handle minibatches
protein_coords_placeholder = tf.placeholder(
tf.float32, shape=(N_protein, 3))
ligand_coords_placeholder = tf.placeholder(
tf.float32, shape=(N_ligand, 3))
protein_Z_placeholder = tf.placeholder(tf.int32, shape=(N_protein,))
ligand_Z_placeholder = tf.placeholder(tf.int32, shape=(N_ligand,))
label_placeholder = tf.placeholder(tf.float32, shape=(1,))
# Shape (N_protein+N_ligand, 3)
coords = tf.concat(
[protein_coords_placeholder, ligand_coords_placeholder], axis=0)
# Shape (N_protein+N_ligand,)
Z = tf.concat([protein_Z_placeholder, ligand_Z_placeholder], axis=0)
# Shape (N_protein+N_ligand, M)
nbr_list = compute_neighbor_list(
coords, nbr_cutoff, N_protein + N_ligand, M, n_cells, ndim=ndim, k=k)
all_interactions = []
# Shape (N_protein+N_ligand,)
all_atoms = tf.range(N_protein + N_ligand)
# Shape (N_protein+N_ligand, 3)
atom_coords = tf.gather(coords, all_atoms)
# Shape (N_protein+N_ligand,)
atom_Z = tf.gather(Z, all_atoms)
# Shape (N_protein+N_ligand, M)
nbrs = tf.squeeze(tf.gather(nbr_list, all_atoms))
# Shape (N_protein+N_ligand, M, 3)
nbr_coords = tf.gather(coords, nbrs)
# Shape (N_protein+N_ligand, M)
nbr_Z = tf.gather(Z, nbrs)
# Shape (N_protein+N_ligand, M, 3)
tiled_atom_coords = tf.tile(
tf.reshape(atom_coords, (N_protein + N_ligand, 1, 3)), (1, M, 1))
# Shape (N_protein+N_ligand, M)
dists = tf.reduce_sum((tiled_atom_coords - nbr_coords)**2, axis=2)
# TODO(rbharath): Need to subtract out Van-der-Waals radii from dists
# Shape (N_protein+N_ligand, M)
atom_interactions = h(dists)
# Shape (N_protein+N_ligand, M)
cutoff_interactions = cutoff(dists, atom_interactions)
# TODO(rbharath): Use RDKit to compute number of rotatable bonds in ligand.
Nrot = 1
# TODO(rbharath): Autodock Vina only uses protein-ligand interactions in
# computing free-energy. This implementation currently uses all interaction
# terms. Not sure if this makes a difference.
# Shape (N_protein+N_ligand, M)
free_energy = g(cutoff_interactions, Nrot)
# Shape () -- scalar
energy = tf.reduce_sum(atom_interactions)
loss = 0.5 * (energy - label_placeholder)**2
return (graph, (protein_coords_placeholder, protein_Z_placeholder,
ligand_coords_placeholder, ligand_Z_placeholder),
label_placeholder)
def fit(self, X_protein, Z_protein, X_ligand, Z_ligand, y):
"""Fit to actual data."""
return
def mutate_conformer(self, protein, ligand):
"""Performs a mutation on the ligand position."""
return
def generate_conformation(self, protein, ligand, max_steps=10):
"""Performs the global search for conformations."""
best_conf = None
best_score = np.inf
conf = self.sample_random_conformation()
for i in range(max_steps):
mut_conf = self.mutate_conformer(conf)
loc_conf = self.gradient_minimize(mut_conf)
if best_conf is None:
best_conf = loc_conf
else:
loc_score = self.score(loc_conf)
if loc_score < best_score:
best_conf = loc_conf
return best_conf
|
Agent007/deepchem
|
contrib/vina_model/vina_model.py
|
Python
|
mit
| 18,976
|
[
"Gaussian",
"RDKit"
] |
9f3cc8afb832feea1c1b12a6bc1f1d4d13315b7a8a316c9f86cf25d4ee72095d
|
"""
Visitor pattern for walking through a BaseNode AST.
"""
from collections import namedtuple
from .membership import AndNode
from .membership import OperatorNode
from .membership import OrNode
from .membership import RolesNode
from .membership import ValueNode
from .membership import XorNode
VisitedNode = namedtuple('VisitedNode', ['node', 'value'])
class NodeVisitor(object):
def visit(self, node, **kwargs):
"""
Visitor method dispatcher based on node type or operator node arity.
Input: BaseNode
"""
if isinstance(node, RolesNode):
return self._visit_roles_node(node, **kwargs)
if isinstance(node, ValueNode):
return self._visit_value_node(node, **kwargs)
if isinstance(node, OperatorNode):
return self._visit_operator_node(node, **kwargs)
raise TypeError('Cannot visit node %r' % node)
def _visit_value_node(self, value_node, **kwargs):
"""Return the value from a ValueNode."""
return value_node.value
def _visit_roles_node(self, roles_node, **kwargs):
"""
Abstract visit method for the leaf node type RolesNode.
Input: RolesNode
"""
raise NotImplementedError(
'Child classes of NodeVisitor must implement _visit_roles_node.')
def _visit_operator_node(self, operator_node, **kwargs):
"""
Dispatcher method for OperatorNode types.
Currently dispatches based on the arity of the operator.
Input: OperatorNode
"""
visited_operands = [VisitedNode(operand, self.visit(operand, **kwargs))
for operand in operator_node._operands]
dispatch_methods = [
self._visit_nullary_node,
self._visit_unary_node,
self._visit_binary_node,
]
return dispatch_methods[operator_node.arity](operator_node,
*visited_operands)
def _visit_nullary_node(self, operator_node):
raise ValueError('There are no nullary operators yet.')
def _visit_unary_node(self, operator_node, visited_operand):
raise NotImplementedError(
'Child classes of NodeVisitor must implement _visit_unary_node.')
def _visit_binary_node(self, operator_node, left_visited_operand,
right_visited_operand):
raise NotImplementedError(
'Child classes of NodeVisitor must implement _visit_binary_node.')
class ExpressionWriter(NodeVisitor):
"""
NodeVisitor concrete class that writes out a pretty-printed expression.
For example, instead of naively interpreting the following node structure
> OrNode(XorNode('A', 'B'), XorNode('C', 'D'))
as '((A ^ B) | (C ^ D))' it will return 'A ^ B | C ^ D'.
But for the following node structure
> AndNode(OrNode('A', 'B'), OrNode('C', 'D'))
it will still print it as '(A | B) & (C | D)'.
All concrete visit methods will return strings.
"""
# Items that appear first have higher precedence.
binary_operator_precedence = [
AndNode,
XorNode,
OrNode,
]
def _visit_value_node(self, value_node, **kwargs):
return str(super(ExpressionWriter, self)._visit_value_node(
value_node, **kwargs))
def _visit_roles_node(self, roles_node, **kwargs):
return str(roles_node)
def _visit_unary_node(self, operator_node, visited_operand):
args = (operator_node.display_name, visited_operand.value)
arity = self._get_arity(visited_operand.node)
if arity is not None and arity > 1:
return '%s(%s)' % args
return '%s%s' % args
def _visit_binary_node(self, operator_node, left_visited_operand,
right_visited_operand):
def get_operand_value(visited_operand):
value = visited_operand.value
if (self._get_arity(visited_operand.node) == 2 and
self._has_precedence_over(operator_node,
visited_operand.node)):
value = '(%s)' % value
return value
return '%s %s %s' % (get_operand_value(left_visited_operand),
operator_node.display_name,
get_operand_value(right_visited_operand))
@staticmethod
def _get_arity(node):
return getattr(node, 'arity', None)
@classmethod
def _has_precedence_over(cls, left_node, right_node):
"""
Helper method that determines which operator node has precedence.
Returns True if left_node has strict precedence over right_node.
"""
return (cls.binary_operator_precedence.index(left_node.__class__) <
cls.binary_operator_precedence.index(right_node.__class__))
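# Usage sketch mirroring the class docstring (whether operands are RolesNodes
# or raw strings follows the docstring's own example; treat as illustrative):
#   expr = OrNode(XorNode(RolesNode('A'), RolesNode('B')),
#                 XorNode(RolesNode('C'), RolesNode('D')))
#   ExpressionWriter().visit(expr)  # -> "A ^ B | C ^ D"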
class PermissionChecker(NodeVisitor):
"""
NodeVisitor concrete class that checks whether the node has permissions.
PermissionChecker is instantiated with a roles set, which is an iterable of
strings which represent role names. This visitor then evaluates the
expression encoded in the node AST. Each node is an operator or a leaf
node. Currently leaf nodes are RolesNodes, which evaluate to True if and
only if their roles set is a subset of the PermissionChecker's roles set.
Then the operator nodes perform operations on the child booleans and
propagate the final value up to the root.
All concrete visit methods will return a boolean.
"""
def __init__(self, roles):
"""Instantiate a PermissionChecker.
Args:
roles: An iterable of strings, representing roles a user has.
Usage:
from baya.utils import group_names
from baya.membership import RolesNode as g
user_groups = group_names(request.user.ldap_user.group_dns)
# user_groups looks like {'group1', 'group2', ...}
checker = PermissionChecker(user_groups)
required_groups = g('req1') & g('req2')
user_has_permissions = checker.visit(required_groups)
"""
self._roles_set = {role.lower() for role in roles}
def _visit_roles_node(self, roles_node, **kwargs):
return roles_node.get_roles_set(**kwargs) <= self._roles_set
def _visit_unary_node(self, operator_node, visited_operand):
return operator_node.operator(visited_operand.value)
def _visit_binary_node(self, operator_node, left_visited_operand,
right_visited_operand):
return operator_node.operator(left_visited_operand.value,
right_visited_operand.value)
|
counsyl/baya
|
baya/visitors.py
|
Python
|
mit
| 6,755
|
[
"VisIt"
] |
c347337179bc8d319c0c86907b2c3ea1ade50b16502f25608466c498e11ea6d3
|
# -*- coding: utf-8 -*-
"""
This script contains the business logic for the sample ArcGIS Python Toolbox.
It shows how to iterate a feature class and update a field.
You could more easily do this using a call to arcpy.CalculateField_management()
but that's not as interesting an example!
@author: Brian Wilson <brian@wildsong.biz>
"""
from __future__ import print_function
from collections import namedtuple
from datetime import datetime
import arcpy
__version__ = "2020-03-29.1"
def set_field_value(input_fc, fieldname, value):
""" Update the named field in every row of the input feature class with the given value. """
arcpy.AddMessage("Version %s" % __version__)
print(fieldname, value)
start = 0
step = 1
maxcount = int(arcpy.GetCount_management(input_fc).getOutput(0))
arcpy.SetProgressor("step", "Doing serious work here.", start, maxcount, step)
# We don't need OID here, just an example
fields = ["OID@", fieldname]
with arcpy.da.UpdateCursor(input_fc, fields) as cursor:
t = 0
for row in cursor:
msg = "Working.. step %d of %d" % (t,maxcount)
arcpy.SetProgressorLabel(msg)
row[1] = value
cursor.updateRow(row)
arcpy.SetProgressorPosition(t)
t += 1
return
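# For comparison, the one-call equivalent mentioned in the module docstring
# (a sketch; CalculateField expects an expression string, so a plain string
# value would need quoting):
#   arcpy.CalculateField_management(input_fc, fieldname, '"%s"' % value)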
def dump_contents(input_fc):
""" Print the contents of the feature class, this is just a namedtuple sample. """
fcrow = namedtuple("fcrow", ["oid", "datestamp"])
with arcpy.da.SearchCursor(input_fc, ["OID@", "datestamp"]) as cursor:
for row in cursor:
feature = fcrow._make(row)
print(feature.oid, feature.datestamp)
return
# ======================================================================
# UNIT TESTING
# You can run this file directly when writing it to aid in debugging.
# For example, "Set as Startup File" when running under Visual Studio.
if __name__ == '__main__':
arcpy.env.workspace = ".\\test_pro\\test_pro.gdb"
input_fc = "testing_data"
fieldname = "datestamp"
    datestring = datetime.today().strftime("%Y/%m/%d %H:%M:%S")
arcpy.AddMessage("starting geoprocessing")
set_field_value(input_fc, fieldname, datestring)
dump_contents(input_fc)
print("Tests successful!")
exit(0)
# That's all
|
brian32768/ArcGIS_Python_Template
|
some_sample_code.py
|
Python
|
mit
| 2,412
|
[
"Brian"
] |
e504f393019960a22b3e286ddcd0a3d522c6058e355569699415870bc7870cea
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import unittest
import espressomd
def _id(x):
return x
def skipIfMissingFeatures(*args):
"""Unittest skipIf decorator for missing Espresso features."""
if not espressomd.has_features(*args):
missing_features = espressomd.missing_features(*args)
return unittest.skip("Skipping test: missing feature{} {}".format(
's' if missing_features else '', ', '.join(missing_features)))
return _id
def skipIfMissingModules(*args):
"""Unittest skipIf decorator for missing Python modules."""
if len(args) == 1 and not isinstance(
args[0], str) and hasattr(args[0], "__iter__"):
args = set(args[0])
else:
args = set(args)
missing_modules = set(args) - set(sys.modules.keys())
if missing_modules:
return unittest.skip("Skipping test: missing python module{} {}".format(
's' if missing_modules else '', ', '.join(missing_modules)))
return _id
def skipIfMissingGPU():
"""Unittest skipIf decorator for missing GPU."""
if not espressomd.gpu_available():
return unittest.skip("Skipping test: no GPU available")
return _id
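# Usage sketch: each helper returns either unittest.skip(...) or the identity
# decorator, so it is applied like unittest.skipIf. The feature and module
# names below are hypothetical examples.
#
#   @skipIfMissingFeatures("ELECTROSTATICS")
#   class MyTest(unittest.TestCase):
#
#       @skipIfMissingModules("numpy")
#       def test_numerics(self):
#           ...
#
#       @skipIfMissingGPU()
#       def test_gpu(self):
#           ...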
|
psci2195/espresso-ffans
|
testsuite/python/unittest_decorators.py
|
Python
|
gpl-3.0
| 1,865
|
[
"ESPResSo"
] |
6157db6767116948bd1b739d1e52b19e16634578d5f6153de0475a94cf3a5e59
|
#!/usr/bin/env python
from ase import *
# Read in the geometry from a xyz file, set the cell, boundary conditions and center
atoms = read('geom.xyz')
atoms.set_cell([7.66348,7.66348,7.66348*2])
atoms.set_pbc((1,1,1))
atoms.center()
# Set initial velocities for hydrogen atoms along the z-direction
p = atoms.get_momenta()
p[0,2]= -1.5
p[1,2]= -1.5
atoms.set_momenta(p)
# Keep some atoms fixed during the simulation
atoms.set_constraint(FixAtoms(indices=range(18,38)))
# Set the calculator and attach it to the system
calc = Siesta('si001+h2',basis='SZ',xc='PBE',meshcutoff=50*Ry)
calc.set_fdf('PAO.EnergyShift', 0.25 * eV)
calc.set_fdf('PAO.SplitNorm', 0.15)
atoms.set_calculator(calc)
# Set the VelocityVerlet algorithm and run it
dyn = VelocityVerlet(atoms,dt=1.0 * fs,trajectory='si001+h2.traj')
dyn.run(steps=100)
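# To inspect the run afterwards (a sketch; 'si001+h2.traj' is the trajectory
# written above):
#   from ase.io import read
#   frames = read('si001+h2.traj', index=':')  # one Atoms object per stored step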
|
freephys/python_ase
|
doc/exercises/siesta2/siesta2.py
|
Python
|
gpl-3.0
| 832
|
[
"ASE",
"SIESTA"
] |
1297d5b8ecf0558d242f6a13bc3bc55ba6fddd83542e1a35c7d580b402835172
|
"""
Test the **drf_ember** ``utils`` modules. Several of the functions in this module are
critical to managing the transformation of data to bridge the Django REST Framework's serializer
approach and the JSON API.
"""
from django import test
from django.test.utils import override_settings
from rest_framework.compat import OrderedDict
from rest_framework.test import APIRequestFactory
import pytest
from drf_ember.utils import api as api_utils
from drf_ember.exceptions import JsonApiException
from .models import load_solar_system, Planet
from . import urlconf
from .fixtures.core import EARTH_INCLUDED, PLANETS_INCLUDED, BULK_NEW_PLANETS, BULK_UPDATED_PLANETS
UTILS_TEST_URLCONF = urlconf.TEST_URLS
@override_settings(ROOT_URLCONF=UTILS_TEST_URLCONF)
class SingleResourceTests(test.TestCase):
"""
Checks that the handling of individual resource requests is compliant
with the JSON API specification.
Presently, there is test coverage for the HTTP GET request calls. POST and PATCH
requests are next.
"""
def setUp(self):
"""
Loads 'solar system' test-only models into in-memory database.
"""
load_solar_system()
def test_valid_jsonapi_name(self):
"""
From the JSON API specification:
All member names used in a JSON API document MUST be treated as
case-sensitive by clients and servers, and they MUST meet all of the following conditions:
Member names MUST contain at least one character.
Member names MUST contain only the allowed characters listed below.
Member names MUST start and end with a "globally allowed character", as defined below.
To enable an easy mapping of member names to URLs, it is RECOMMENDED that
member names use only non-reserved, URL safe characters specified in RFC 3986.
"""
response = self.client.get('/planets/3')
jsonapi_document = api_utils.resource_to_jsonapi_document(response.data)
# TODO globally allowed character test and only allowed character test
for member_name in jsonapi_document:
self.assertTrue(len(member_name))
def test_string_identifier(self):
"""
Ensures string identifiers.
From the JSON API specification:
*"The values of the id and type members MUST be strings."*
"""
response = self.client.get('/planets/3')
jsonapi_document = api_utils.resource_to_jsonapi_document(response.data)
identifier = jsonapi_document['data']['id']
self.assertTrue(isinstance(identifier, str))
relationships = jsonapi_document['data']['relationships']
for relationship_key in relationships:
relationship = relationships[relationship_key]
relationship_data = relationship['data']
# handle list relationship
if isinstance(relationship_data, list):
for resource_identifier in relationship_data:
self.assertTrue(isinstance(resource_identifier['id'], str))
# test single resource object
else:
self.assertTrue(isinstance(relationship_data['id'], str))
def test_valid_resource_format_for_get(self):
"""
Ensures resources contain at least the following
top-level members: ``id`` and ``type``
From the JSON API specification:
A "resource identifier object" is an object that identifies an individual resource.
A "resource identifier object" MUST contain type and id members.
A "resource identifier object" MAY also include a meta member, whose value is a meta object that
contains non-standard meta-information.
This test is only for GET calls - the id member is not required when the resource object originates at
the client and represents a new resource to be created on the server.
"""
response = self.client.get('/planets/3')
jsonapi_document = api_utils.resource_to_jsonapi_document(response.data)
main_data = jsonapi_document['data']
self.assertTrue('id' in main_data and 'type' in main_data,
msg='Does not comply with JSON API '
'resource identifier format {0}:'.format(main_data))
relationships = jsonapi_document['data']['relationships']
for relationship_key in relationships:
relationship = relationships[relationship_key]
relationship_data = relationship['data']
# handle list relationship
if isinstance(relationship_data, list):
for resource_identifier in relationship_data:
self.assertTrue('id' in resource_identifier and 'type' in resource_identifier,
msg='Does not comply with JSON API '
'resource identifier format {0}:'.format(resource_identifier))
# test single resource object
else:
self.assertTrue('id' in relationship_data and 'type' in relationship_data,
msg='Does not comply with JSON API '
'resource identifier format {0}:'.format(relationship_data))
def test_get_one_to_one_pk(self):
"""
Ensures JSON API compliant build of one-to-one primary key relationship field
"""
response = self.client.get('/yellow_dwarf/1')
jsonapi_document = api_utils.resource_to_jsonapi_document(response.data)
relationships = jsonapi_document['data']['relationships']
one_to_one_star = {'star': {'data': {'type': 'stars', 'id': '1'}}}
self.assertEqual(one_to_one_star, relationships)
def test_get_one_to_one_with_included(self):
"""
Ensures JSON API compliant build of one-to-one serialized relationship field
"""
response = self.client.get('/yellow_dwarf_with_included/1')
jsonapi_document = api_utils.resource_to_jsonapi_document(response.data)
relationships = jsonapi_document['data']['relationships']
one_to_one_star = {'star': {'data': {'type': 'stars', 'id': '1'}}}
self.assertEqual(one_to_one_star, relationships)
included_star = [{"type": "stars", "id": "1", "attributes": {"galaxy-name": "Via Lactea", "name": "Sun"}}]
included = jsonapi_document['included']
self.assertEqual(included_star, included)
def test_get_many_to_one_pk(self):
"""
Ensures JSON API compliant build of many-to-one primary key field. In Django,
this type of model relationship is represented by the ``ForeignKey`` field that has
its ``many`` argument set to ``False``, which is the default.
"""
response = self.client.get('/planets/3')
jsonapi_document = api_utils.resource_to_jsonapi_document(response.data)
relationships = jsonapi_document['data']['relationships']
many_to_one_star_data = {'data': {'type': 'stars', 'id': '1'}}
self.assertEqual(many_to_one_star_data, relationships['system-star'])
def test_get_many_to_one_with_included(self):
"""
Ensures JSON API compliant build of many-to-one serialized relationship field.
In Django, this type of model relationship is represented by the ``ForeignKey`` field that has
its ``many`` argument set to ``False``, which is the default.
"""
response = self.client.get('/planets_with_included/1')
jsonapi_document = api_utils.resource_to_jsonapi_document(response.data)
relationships = jsonapi_document['data']['relationships']
many_to_one_star_data = {'data': {'type': 'stars', 'id': '1'}}
self.assertEqual(many_to_one_star_data, relationships['system-star'])
sun_elements = [("id", "1"), ("type", "stars"), ("attributes", {"galaxy-name": "Via Lactea", "name": "Sun"})]
sun = OrderedDict(sun_elements)
included = jsonapi_document['included']
included_star = None
for model in included:
if model['type'] == 'stars':
included_star = model
self.assertEqual(included_star, sun)
def test_get_many_to_many_pk(self):
"""
Ensures JSON API compliant build of many-to-many primary key field. In Django,
this type of model relationship is represented by the ``ForeignKey`` field that has
its ``many`` argument set to ``True``.
"""
response = self.client.get('/planets/3')
jsonapi_document = api_utils.resource_to_jsonapi_document(response.data)
relationships = jsonapi_document['data']['relationships']
many_to_many_planet_neighbors_data = {"data": [
{"type": "planets", "id": "2"}, {"type": "planets", "id": "4"}
]}
self.assertEqual(many_to_many_planet_neighbors_data, relationships['neighbors'])
def test_get_many_to_many_with_included(self):
"""
Ensures JSON API compliant build of many-to-many serialized relationship field.
In Django, this type of model relationship is represented by the ``ForeignKey`` field that has
its ``many`` argument set to ``True``.
"""
response = self.client.get('/planets_with_included/3')
print(response.content.decode())
jsonapi_document = api_utils.resource_to_jsonapi_document(response.data)
relationships = jsonapi_document['data']['relationships']
many_to_many_planet_neighbors_data = {"data": [
{"type": "planets", "id": "2"}, {"type": "planets", "id": "4"}
]}
self.assertEqual(many_to_many_planet_neighbors_data, relationships['neighbors'])
included = jsonapi_document['included']
self.assertEqual(included, EARTH_INCLUDED)
@override_settings(ROOT_URLCONF=UTILS_TEST_URLCONF)
class ListResourceTests(test.TestCase):
"""
List GET with pagination
one-to-one primary key relationship field
one-to-one serializer relationship field
many-to-one primary key relationship field
many-to-one serializer relationship field
many-to-many primary key relationship field
many-to-many serializer relationship field
List GET without pagination
one-to-one primary key relationship field
one-to-one serializer relationship field
many-to-one primary key relationship field
many-to-one serializer relationship field
many-to-many primary key relationship field
many-to-many serializer relationship field
"""
def setUp(self):
"""
Loads 'solar system' models into in-memory database.
"""
load_solar_system()
def test_get_is_list(self):
"""
Checks a list is paired with the ``data`` member
"""
response = self.client.get('/planets')
jsonapi_document = api_utils.list_to_jsonapi_document(response.data)
document_data = jsonapi_document['data']
self.assertTrue(isinstance(document_data, list))
def test_paginated_get_resource_count(self):
"""
Checks list GET resource count matches database count for query
"""
db_count = Planet.objects.all().count()
response = self.client.get('/planets')
jsonapi_document = api_utils.list_to_jsonapi_document(response.data)
data = jsonapi_document['data']
self.assertEqual(db_count, len(data))
def test_paginated_top_level_members(self):
"""
Checks that a list GET for resources returns 'data', 'meta', and
expected 'included' fields
"""
response = self.client.get('/planets_with_included')
jsonapi_document = api_utils.list_to_jsonapi_document(response.data)
self.assertTrue('data' in jsonapi_document)
self.assertTrue('meta' in jsonapi_document)
self.assertTrue('included' in jsonapi_document)
def test_paginated_included_is_list(self):
"""
Checks that a list GET for resources returns a list for expected 'included' fields
"""
response = self.client.get('/planets_with_included')
jsonapi_document = api_utils.list_to_jsonapi_document(response.data)
self.assertTrue(isinstance(jsonapi_document['included'], list))
def test_paginated_get_one_to_one_pk(self):
"""
Ensures JSON API compliant build of one-to-one primary key relationship fields
within a paginated list view
"""
response = self.client.get('/yellow_dwarfs')
jsonapi_document = api_utils.list_to_jsonapi_document(response.data)
data = jsonapi_document['data']
for yellow_dwarf in data:
relationships = yellow_dwarf['relationships']
star = relationships['star']
one_to_one_star_data = {'data': {'type': 'stars', 'id': '1'}}
self.assertEqual(one_to_one_star_data, star)
def test_paginated_get_one_to_one_with_included(self):
"""
Ensures JSON API compliant build of one-to-one serialized relationship fields
within a paginated list view
"""
response = self.client.get('/yellow_dwarfs_with_included')
jsonapi_document = api_utils.list_to_jsonapi_document(response.data)
data = jsonapi_document['data']
for yellow_dwarf in data:
relationships = yellow_dwarf['relationships']
star = relationships['star']
one_to_one_star_data = {'data': {'type': 'stars', 'id': '1'}}
self.assertEqual(one_to_one_star_data, star)
included_star = [{"type": "stars", "id": "1", "attributes": {"galaxy-name": "Via Lactea", "name": "Sun"}}]
included = jsonapi_document['included']
self.assertEqual(included_star, included)
def test_paginated_get_many_to_one_pk(self):
"""
Ensures JSON API compliant build of many-to-one primary key fields within
a paginated list view. In Django, this type of model relationship is represented
by the ``ForeignKey`` field that has its ``many`` argument set to ``False``,
which is the default.
"""
response = self.client.get('/planets')
jsonapi_document = api_utils.list_to_jsonapi_document(response.data)
data = jsonapi_document['data']
for planet in data:
relationships = planet['relationships']
many_to_one_star_data = {'data': {'type': 'stars', 'id': '1'}}
self.assertEqual(many_to_one_star_data, relationships['system-star'])
def test_paginated_get_many_to_one_with_included(self):
"""
Ensures JSON API compliant build of many-to-one serialized relationship fields with
a paginated list view.
In Django, this type of model relationship is represented by the ``ForeignKey`` field that has
its ``many`` argument set to ``False``, which is the default.
"""
response = self.client.get('/planets_with_included')
jsonapi_document = api_utils.list_to_jsonapi_document(response.data)
data = jsonapi_document['data']
for planet in data:
relationships = planet['relationships']
many_to_one_star_data = {'data': {'type': 'stars', 'id': '1'}}
self.assertEqual(many_to_one_star_data, relationships['system-star'])
sun_elements = [("id", "1"), ("type", "stars"), ("attributes", {"galaxy-name": "Via Lactea", "name": "Sun"})]
sun = OrderedDict(sun_elements)
included = jsonapi_document['included']
included_star = None
for model in included:
if model['type'] == 'stars':
included_star = model
self.assertEqual(included_star, sun)
def test_paginated_get_many_to_many_pk(self):
"""
Ensures JSON API compliant build of many-to-many primary key fields within a paginated
list view.
In Django, this type of model relationship is represented by the ``ForeignKey`` field that has
its ``many`` argument set to ``True``.
"""
response = self.client.get('/planets')
jsonapi_document = api_utils.list_to_jsonapi_document(response.data)
data = jsonapi_document['data']
for planet in data:
relationships = planet['relationships']
planet_neighbors = relationships['neighbors']['data']
for neighbor in planet_neighbors:
self.assertTrue('id' in neighbor)
self.assertTrue('type' in neighbor)
self.assertTrue(isinstance(neighbor['id'], str))
self.assertTrue(isinstance(neighbor['type'], str))
def test_paginated_get_many_to_many_with_included(self):
"""
Ensures JSON API compliant build of many-to-many serialized relationship fields within
a paginated list view.
In Django, many-to-many model relationship are represented by a ``ForeignKey`` field that has
its ``many`` argument set to ``True``.
"""
response = self.client.get('/planets_with_included')
print(response.content.decode())
jsonapi_document = api_utils.list_to_jsonapi_document(response.data)
data = jsonapi_document['data']
for planet in data:
relationships = planet['relationships']
planet_neighbors = relationships['neighbors']['data']
for neighbor in planet_neighbors:
self.assertTrue('id' in neighbor)
self.assertTrue('type' in neighbor)
self.assertTrue(isinstance(neighbor['id'], str))
self.assertTrue(isinstance(neighbor['type'], str))
included = jsonapi_document['included']
self.assertEqual(included, PLANETS_INCLUDED)
def test_unpaginated_get_resource_count(self):
"""
Checks unpaginated list GET resource count matches database count for query
"""
db_count = Planet.objects.all().count()
response = self.client.get('/planets_unpaginated')
jsonapi_document = api_utils.list_to_jsonapi_document(response.data, paginated=False)
data = jsonapi_document['data']
self.assertEqual(db_count, len(data))
def test_unpaginated_top_level_members(self):
"""
Checks that an unpaginated list GET for resources returns 'data' member but
not a 'meta' member.
"""
response = self.client.get('/planets_unpaginated')
print(response.content.decode())
jsonapi_document = api_utils.list_to_jsonapi_document(response.data, paginated=False)
self.assertTrue('data' in jsonapi_document)
self.assertFalse('meta' in jsonapi_document)
def test_unpaginated_included_is_list(self):
"""
Checks that an unpaginated list GET for resources returns a list for expected
'included' fields
"""
response = self.client.get('/planets_unpaginated_with_included')
jsonapi_document = api_utils.list_to_jsonapi_document(response.data, paginated=False)
self.assertTrue(isinstance(jsonapi_document['included'], list))
def test_unpaginated_get_many_to_one_pk(self):
"""
Ensures JSON API compliant build of many-to-one primary key fields within
an unpaginated list view. In Django, this type of model relationship is represented
by the ``ForeignKey`` field that has its ``many`` argument set to ``False``,
which is the default.
"""
response = self.client.get('/planets')
jsonapi_document = api_utils.list_to_jsonapi_document(response.data)
data = jsonapi_document['data']
for planet in data:
relationships = planet['relationships']
many_to_one_star_data = {'data': {'type': 'stars', 'id': '1'}}
self.assertEqual(many_to_one_star_data, relationships['system-star'])
def test_unpaginated_get_many_to_one_with_included(self):
"""
Ensures JSON API compliant build of many-to-one serialized relationship fields with
an unpaginated list view.
In Django, this type of model relationship is represented by the ``ForeignKey`` field that has
its ``many`` argument set to ``False``, which is the default.
"""
response = self.client.get('/planets_with_included')
jsonapi_document = api_utils.list_to_jsonapi_document(response.data)
data = jsonapi_document['data']
for planet in data:
relationships = planet['relationships']
many_to_one_star_data = {'data': {'type': 'stars', 'id': '1'}}
self.assertEqual(many_to_one_star_data, relationships['system-star'])
sun_elements = [("id", "1"), ("type", "stars"), ("attributes", {"galaxy-name": "Via Lactea", "name": "Sun"})]
sun = OrderedDict(sun_elements)
included = jsonapi_document['included']
included_star = None
for model in included:
if model['type'] == 'stars':
included_star = model
self.assertEqual(included_star, sun)
def test_unpaginated_get_many_to_many_pk(self):
"""
Ensures JSON API compliant build of many-to-many primary key fields within an unpaginated
list view.
In Django, this type of model relationship is represented by the ``ForeignKey`` field that has
its ``many`` argument set to ``True``.
"""
response = self.client.get('/planets')
jsonapi_document = api_utils.list_to_jsonapi_document(response.data)
data = jsonapi_document['data']
for planet in data:
relationships = planet['relationships']
planet_neighbors = relationships['neighbors']['data']
for neighbor in planet_neighbors:
self.assertTrue('id' in neighbor)
self.assertTrue('type' in neighbor)
self.assertTrue(isinstance(neighbor['id'], str))
self.assertTrue(isinstance(neighbor['type'], str))
def test_unpaginated_get_many_to_many_with_included(self):
"""
Ensures JSON API compliant build of many-to-many serialized relationship fields within
an unpaginated list view.
In Django, many-to-many model relationship are represented by a ``ForeignKey`` field that has
its ``many`` argument set to ``True``.
"""
response = self.client.get('/planets_with_included')
print(response.content.decode())
jsonapi_document = api_utils.list_to_jsonapi_document(response.data)
data = jsonapi_document['data']
for planet in data:
relationships = planet['relationships']
planet_neighbors = relationships['neighbors']['data']
for neighbor in planet_neighbors:
self.assertTrue('id' in neighbor)
self.assertTrue('type' in neighbor)
self.assertTrue(isinstance(neighbor['id'], str))
self.assertTrue(isinstance(neighbor['type'], str))
included = jsonapi_document['included']
self.assertEqual(included, PLANETS_INCLUDED)
@override_settings(ROOT_URLCONF=UTILS_TEST_URLCONF)
class ParsingResourceIdTests(test.TestCase):
def test_missing_id_error(self):
"""
Ensures missing resource id raises exception.
"""
factory = APIRequestFactory()
primary_data = {'no_id': 'missing'}
request = factory.patch('planets/3', primary_data)
parser_context = {
'request': request
}
with pytest.raises(JsonApiException) as exception_info:
api_utils.parse_resource_object(primary_data, parser_context)
assert 'Resource id missing' == exception_info.value.detail
@override_settings(ROOT_URLCONF=UTILS_TEST_URLCONF)
class ParsingResourceAttributesTests(test.TestCase):
"""
Checks the ``attributes`` member of primary data in JSON API requests
is correctly parsed for compatibility with the Django REST Framework.
"""
def test_missing_attributes(self):
"""
Ensures a ``None`` for missing ``attributes`` member in primary data.
"""
primary_data = {'no_attributes': {}}
attributes = api_utils.parse_attributes(primary_data)
self.assertIsNone(attributes)
def test_key_formatting(self):
"""
Ensures attribute keys formatted following underscore convention.
"""
mock_attributes = {
'integer': 1234567890,
'a-float-number': 0.0123456789,
'simple_string': 'hello, world!',
'dashed-string': 'hello-world_!',
'an-Object': {'aFirstKey': 1, 'second-key': '2nd'},
'Array': [1, 2, 3, 4, 5, 6, 7, 8, 9, 0],
'OK': True,
'BadStatus': False,
'noStatus': None
}
primary_data = {
'attributes': mock_attributes
}
attributes = api_utils.parse_attributes(primary_data)
self.assertEqual(attributes['integer'], mock_attributes['integer'])
self.assertEqual(attributes['a_float_number'], mock_attributes['a-float-number'])
self.assertEqual(attributes['simple_string'], mock_attributes['simple_string'])
self.assertEqual(attributes['dashed_string'], mock_attributes['dashed-string'])
self.assertEqual(attributes['an_object'], OrderedDict(mock_attributes['an-Object']))
self.assertEqual(attributes['array'], mock_attributes['Array'])
self.assertEqual(attributes['ok'], mock_attributes['OK'])
self.assertEqual(attributes['bad_status'], mock_attributes['BadStatus'])
self.assertEqual(attributes['no_status'], mock_attributes['noStatus'])
test_object = attributes['an_object']
self.assertEqual(test_object['aFirstKey'], 1)
self.assertEqual(test_object['second-key'], '2nd')
self.assertEqual(attributes['dashed_string'], 'hello-world_!')
@override_settings(ROOT_URLCONF=UTILS_TEST_URLCONF)
class ParsingResourceRelationshipsTests(test.TestCase):
"""
Checks the ``relationships`` member of primary data in JSON API requests
is correctly parsed for compatibility with the Django REST Framework.
"""
def test_missing_attributes(self):
"""
Ensures a ``None`` for missing ``relationships`` member in primary data.
"""
primary_data = {'no_attributes': {}}
relationships = api_utils.parse_relationships(primary_data)
self.assertIsNone(relationships)
def test_missing_data_relationship(self):
"""
        Ensures a ``None`` value when a relationship is missing its ``data`` member.
"""
primary_data = {
'relationships': {
'related-object': {
'no_data': {}
}
}
}
relationships = api_utils.parse_relationships(primary_data)
self.assertIsNone(relationships['related_object'])
def test_to_one_relationships(self):
primary_data = {
'relationships': {
'important-info': {
'data': {
'id': 1
}
},
'more-important-info': {'data': None},
'bad-resource-data': {'pk': 1}
}
}
relationships = api_utils.parse_relationships(primary_data)
self.assertEqual(relationships['important_info'], 1)
self.assertEqual(relationships['more_important_info'], None)
self.assertEqual(relationships['bad_resource_data'], None)
def test_null_relationship_object(self):
primary_data = {
'relationships': {
'important-info': None,
}
}
with pytest.raises(JsonApiException):
api_utils.parse_relationships(primary_data)
def test_invalid_relationship_object(self):
primary_data = {
'relationships': {
                'important-info': 'not-json-api',
}
}
with pytest.raises(JsonApiException):
api_utils.parse_relationships(primary_data)
def test_to_many_relationships(self):
primary_data = {
'relationships': {
'important-info': {
'data': [
{'id': 1}, {'id': 2}, {'id': 3}
]
},
'more-important-info': {
'data': [
{'id': 1}
]
},
'bad-resource-data': {'pk': 1}
}
}
relationships = api_utils.parse_relationships(primary_data)
self.assertEqual(relationships['important_info'], [1, 2, 3])
self.assertEqual(relationships['more_important_info'], [1])
self.assertEqual(relationships['bad_resource_data'], None)
|
symfonico/drf-ember
|
tests/test_utils.py
|
Python
|
mit
| 28,960
|
[
"Galaxy"
] |
0773744c184111891f80e230e8021cc6c49465a156f84a2a51c9511863d1806d
|
import numpy
import os
import subprocess
# from time import sleep
# Generates single-carbon-atom xyz files on a 41x41 grid of positions, along with a
# matching .scan input file and parameters.dat for each, to give as input to mechAFM
def makeIt(output_folder = "randomRotateOutput/"):
fileNumber = 0
for x in range(41):
for y in range(41):
xyzOut = '''1
#C %s %s 0.0''' % (x*0.2, y*0.2)
scanOut = '''xyzfile %s
paramfile parameters.dat
tipatom T
dummyatom X
units kcal/mol
minterm f
etol 0.001
ftol 0.001
dt 0.001
maxsteps 50000
minimiser FIRE
integrator midpoint
coulomb off
rigidgrid off
flexible off
area 8.0 8.0
center %s %s
zhigh 10.0
zlow 6.0
dx 0.2
dy 0.2
dz 0.1
bufsize 10000
gzip off
statistics on''' % (str(fileNumber) + ".xyz", 0.2*x, 0.2*y)
parametersContent = '''# Parameters for a system from a paper
# name | epsilon (kcal/mol) | sigma (A) | mass (amu) | charge (e)
atom C 0.07000 3.55000 12.01100 0.00000
atom H 0.03350 2.42000 1.00800 0.00000
atom O 0.11080 2.98504 15.99940 0.00000
atom N 0.19200 3.31988 14.00670 0.00000
atom S 0.43560 3.63599 32.06500 0.00000
atom F 0.11080 2.90789 18.99840 0.00000
atom B 0.10500 3.63000 10.81000 0.00000
atom X 20.00000 3.55000 12.01100 0.02100
atom T 0.19200 3.15000 15.99900 -0.02100
# Boron parameters guessed from Baowan & Hill, IET Micro & Nano Letters 2:46 (2007)
# Carbon, oxygen and hydrogen parameters from original CHARMM force field
# Pair style to overwrite and default LJ-mixing
# atom1 | atom2 | pair_style | parameters (eps,sig for LJ; De,a,re for Morse)
# pair_ovwrt C T morse 1 2 3
pair_ovwrt X T lj 20.0000 3.5500
# Tip harmonic constraint
# force constant (kcal/mol) | distance (A)
harm 0.72000 0.00
# Additional parameters for making the molecules flexible
# We need to know the topology, so list the possible bonds and their expected length
# atom1 | atom2 | exp. length (A)
# topobond C C 1.430
# topobond C H 1.095
# topobond C B 1.534
# bonds are assumed harmonic and in their equilibrium position (in the xyz file)
# force constant (kcal/mol)
bond 25.000
# angles are assumed harmonic and in their equilibrium position (in the xyz file)
# force constant (kcal/mol)
angle 0.2500
# dihedrals are assumed harmonic and in their equilibrium position (in the xyz file)
# force constant (kcal/mol)
dihedral 0.2500
# substrate support using a 10-4 wall potential
# epsilon (kcal/mol) | sigma (A) | lambda (A) | r_cut (A) | lateral constant (kcal/mol)
substrate 0.100 3.0 3.0 7.5 0.01'''
os.makedirs(output_folder + str(fileNumber))
xyzFile = open(output_folder + str(fileNumber) + "/" + str(fileNumber) + ".xyz", "w+")
xyzFile.write(xyzOut)
scanFile = open(output_folder + str(fileNumber) + "/" + str(fileNumber) + ".scan", "w+")
scanFile.write(scanOut)
paraFile = open(output_folder + str(fileNumber) + "/" + "parameters.dat", "w+")
paraFile.write(parametersContent)
xyzFile.close()
scanFile.close()
paraFile.close()
print("done with file number " + str(fileNumber))
fileNumber += 1
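# Hypothetical entry point (not part of the original script): uncomment to run
# the generator directly with the default output folder.
# if __name__ == '__main__':
#     makeIt()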
|
SINGROUP/readAFM
|
databaseCode/toyDB/oneAtom/makeXYZ.py
|
Python
|
gpl-3.0
| 3,995
|
[
"CHARMM"
] |
321b81404b8899d52a8e70a0354bdc38ce9b079b29bbb6b96799479c47c3aa60
|
# Author: Suyog Dutt Jain <suyog.jain@aero.iitb.ac.in>
# Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2008-2015, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
from os.path import abspath
from io import BytesIO
import copy
import numpy
import unittest
# Local imports.
from mayavi.core.null_engine import NullEngine
# Enthought library imports
from mayavi.sources.array_source import ArraySource
from mayavi.modules.outline import Outline
from mayavi.modules.glyph import Glyph
from mayavi.modules.vector_cut_plane import VectorCutPlane
class TestGlyph(unittest.TestCase):
def make_data(self):
"""Trivial data -- creates an elementatry scalar field and a
constant vector field along the 'x' axis."""
s = numpy.arange(0.0, 10.0, 0.01)
s = numpy.reshape(s, (10,10,10))
s = numpy.transpose(s)
v = numpy.zeros(3000, 'd')
v[1::3] = 1.0
v = numpy.reshape(v, (10,10,10,3))
return s, v
def setUp(self):
"""Initial setting up of test fixture, automatically called by TestCase before any other test method is invoked"""
e = NullEngine()
# Uncomment to see visualization for debugging etc.
#e = Engine()
e.start()
s=e.new_scene()
self.e=e
self.s=s
############################################################
# Create a new scene and set up the visualization.
d = ArraySource()
sc, vec = self.make_data()
d.origin = (-5, -5, -5)
d.scalar_data = sc
d.vector_data = vec
e.add_source(d)
# Create an outline for the data.
o = Outline()
e.add_module(o)
# Glyphs for the scalars
g = Glyph()
e.add_module(g)
g.glyph.glyph_source.glyph_position = 'center'
g.glyph.glyph.vector_mode = 'use_normal'
g.glyph.glyph.scale_factor = 0.5
g.glyph.mask_points.on_ratio = 20
g.actor.property.line_width = 1.0
v = VectorCutPlane()
glyph = v.glyph
gs = glyph.glyph_source
gs.glyph_position = 'tail'
gs.glyph_source = gs.glyph_list[1]
e.add_module(v)
v.implicit_plane.set(normal=(0, 1, 0), origin=(0, 3, 0))
v = VectorCutPlane()
glyph = v.glyph
gs = glyph.glyph_source
gs.glyph_source = gs.glyph_list[2]
gs.glyph_position = 'head'
e.add_module(v)
v.implicit_plane.set(normal=(0, 1, 0), origin=(0, -2, 0))
self.g=g
self.v=v
self.scene = e.current_scene
return
def tearDown(self):
"""For necessary clean up, automatically called by TestCase after the test methods have been invoked"""
self.e.stop()
return
def check(self, mask=False, mask_random_mode=False):
"""Do the actual testing with and without masking. For masking,
both the presence and absence of random mode is also tested.
"""
s = self.scene
src = s.children[0]
g = src.children[0].children[1]
self.assertEqual(g.glyph.glyph_source.glyph_position,'center')
self.assertEqual(g.glyph.glyph.vector_mode,'use_normal')
self.assertEqual(g.glyph.glyph.scale_factor,0.5)
self.assertEqual(g.actor.property.line_width,1.0)
# Test masking
n_output_points = src.outputs[0].number_of_points
n_glyph_input_points = g.glyph.glyph.input.number_of_points
if mask:
self.assertNotEqual(n_glyph_input_points , 0)
if mask_random_mode:
self.assertLessEqual(n_glyph_input_points , n_output_points)
else:
on_ratio = g.glyph.mask_points.on_ratio
self.assertEqual(n_glyph_input_points,
n_output_points / on_ratio)
else:
self.assertEqual(n_glyph_input_points, n_output_points)
v = src.children[0].children[2]
glyph = v.glyph
gs = glyph.glyph_source
self.assertEqual(gs.glyph_position,'tail')
self.assertEqual(gs.glyph_source,gs.glyph_list[1])
self.assertEqual(numpy.allclose(v.implicit_plane.normal,
(0., 1., 0.)),True)
v = src.children[0].children[3]
glyph = v.glyph
gs = glyph.glyph_source
self.assertEqual(gs.glyph_source,gs.glyph_list[2])
self.assertEqual(gs.glyph_position,'head')
self.assertEqual(numpy.allclose(v.implicit_plane.normal,
(0., 1., 0.)),True)
def test_glyph(self):
"Test if the test fixture works"
self.check()
def test_mask_input_points_with_random_mode(self):
"""Test if masking input points works with random mode.
Tests Issue #165"""
s = self.scene
src = s.children[0]
g = src.children[0].children[1]
g.glyph.mask_input_points = True
self.check(mask=True, mask_random_mode=True)
def test_mask_input_points_without_random_mode(self):
"""Test if masking input points works without random mode.
Tests Issue #165"""
s = self.scene
src = s.children[0]
g = src.children[0].children[1]
g.glyph.mask_points.random_mode = 0
g.glyph.mask_input_points = True
self.check(mask=True)
def test_components_changed(self):
""""Test if the modules respond correctly when the components
are changed."""
g=self.g
v=self.v
g.actor = g.actor.__class__()
glyph = g.glyph
g.glyph = glyph.__class__()
g.glyph = glyph
glyph = v.glyph
v.glyph = glyph.__class__()
v.glyph = glyph
v.actor = v.actor.__class__()
v.cutter = v.cutter.__class__()
ip = v.implicit_plane
v.implicit_plane = ip.__class__()
v.implicit_plane = ip
self.check()
def test_save_and_restore(self):
"""Test if saving a visualization and restoring it works."""
engine = self.e
scene = self.scene
# Save visualization.
f = BytesIO()
f.name = abspath('test.mv2') # We simulate a file.
engine.save_visualization(f)
f.seek(0) # So we can read this saved data.
# Remove existing scene.
engine.close_scene(scene)
# Load visualization
engine.load_visualization(f)
self.scene = engine.current_scene
self.check()
def test_deepcopied(self):
"""Test if the MayaVi2 visualization can be deep-copied."""
############################################################
# Test if the MayaVi2 visualization can be deep-copied.
# Pop the source object.
s = self.scene
sources = s.children
s.children = []
# Add it back to see if that works without error.
s.children.extend(sources)
self.check()
# Now deepcopy the source and replace the existing one with
# the copy. This basically simulates cutting/copying the
# object from the UI via the right-click menu on the tree
# view, and pasting the copy back.
sources1 = copy.deepcopy(sources)
s.children[:] = sources1
self.check()
if __name__ == '__main__':
unittest.main()
|
dmsurti/mayavi
|
mayavi/tests/test_glyph.py
|
Python
|
bsd-3-clause
| 7,394
|
[
"Mayavi"
] |
327dc031f8cf1146bde75a97102449bf89b1589cee5324ef2e18effce66fcace
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2018 Miha Purg <miha.purg@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
This module contains some common classes and functions,
including simple statistical methods and data structures.
"""
from __future__ import absolute_import, division, unicode_literals
from six.moves import zip
from io import open
import math
import sys
import os
import shutil
import logging
import gzip
try:
import statistics
except ImportError:
pass
logger = logging.getLogger(__name__)
from Qpyl import __version__
def get_version_full():
try:
gitdir = os.path.join(os.environ["QTOOLS_HOME"], ".git")
head = open(os.path.join(gitdir, "HEAD")).read().split()[1]
branch = head.split("/")[-1]
ref = open(os.path.join(gitdir, head)).read().strip()[:8]
except:
ref, branch = "Unknown", "Unknown"
return "Qtools version: {}, git id: {} ({})"\
"".format(__version__, ref, branch)
class SpecialFormatter(logging.Formatter):
FORMATS = {logging.DEBUG :"DBG: %(module)s: %(lineno)d: %(message)s",
logging.WARNING : "\nWARNING: %(message)s\n",
logging.CRITICAL : "\nCRITICAL: %(message)s\n",
logging.INFO : "# %(message)s",
'DEFAULT' : "%(message)s"}
def __init__(self, *args, **kwargs):
super(SpecialFormatter, self).__init__(*args, **kwargs)
def format(self, record):
# a bit diff in py2 vs py3
# https://stackoverflow.com/questions/14844970/modifying-logging-message-format-based-on-message-logging-level-in-python3
try:
self._style
self._style._fmt = self.FORMATS.get(record.levelno,
self.FORMATS['DEFAULT'])
except AttributeError:
self._fmt = self.FORMATS.get(record.levelno,
self.FORMATS['DEFAULT'])
return logging.Formatter.format(self, record)
def init_logger(name,
level=None,
handler=None,
formatter=None):
"""Helper function for initializing the logger.
Args:
name (string): module name, usually root: 'Qpyl'
level (int, optional): logging level (DEBUG, INFO, WARNING...), \
default is INFO
handler: (logging.Handler, optional): default is \
StreamHandler(sys.stdout) \
formatter: (logging.Formatter, optional): default is \
SpecialFormatter
Returns:
lg (logging.Logger)
"""
lg = logging.getLogger(name)
    if level is None:
        level = logging.INFO
    lg.setLevel(level)
    if handler is None:
        handler = logging.StreamHandler(sys.stdout)
    if formatter is None:
        formatter = SpecialFormatter()
    handler.setFormatter(formatter)
    lg.addHandler(handler)
return lg
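# Usage sketch: initialize once at program start; SpecialFormatter then renders
# INFO records as '# message' and WARNING records with a 'WARNING:' banner.
#   lg = init_logger('Qpyl', level=logging.DEBUG)
#   lg.info('starting up')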
def raise_or_log(message, exception_class, logger, ignore_errors):
"""Method used for raising exceptions or writting them to logger instead
This way one can bypass certain exceptions like non-integer charge groups,
that may occur in weird scenarios (Amber bonding metal model).
Critical level is always used for logging.
"""
if ignore_errors:
logger.critical(message)
else:
raise exception_class(message)
def backup_file(filename):
"""Check if a file exists, make a backup (#filename.1#, #filename.2#...).
Args:
filename (string): name of file to backup
Returns:
backup_filename (string): basename of the new filename or empty
string if the file was not found.
"""
if os.path.lexists(filename):
di = os.path.dirname(filename)
fn = os.path.basename(filename)
backup_filename = fn
i = 1
while os.path.lexists(os.path.join(di, backup_filename)):
backup_filename = "#%s.%d#" % (fn, i)
i += 1
if i > 20:
logger.warning("You have more than 20 backed up files... "
"Cleaning time...")
shutil.copy2(filename, os.path.join(di, backup_filename))
return backup_filename
return ""
# no need for numpy to do these basic stats
class stats(object):
@staticmethod
def mean(vals):
"""Calculate mean.
Args:
vals (list of float): sample values
Wraps statistics.mean() in Py3+.
Returns float('nan') on empty array.
"""
if len(vals) == 0:
return float('nan')
try:
return statistics.mean(vals)
except NameError:
return 1.0 / len(vals) * sum(vals)
@staticmethod
def stdev(vals):
"""Calculate sample standard deviation.
Args:
vals (list of float): sample values
Wraps statistics.stdev() in Py3+.
Returns float('nan') when fewer than two values.
"""
if len(vals) < 2:
return float('nan')
try:
return statistics.stdev(vals)
except NameError:
mean = stats.mean(vals)
variance = [(x - mean)**2 for x in vals]
return math.sqrt(sum(variance)*1.0/(len(vals)-1))
@staticmethod
def sem(vals):
"""Calculates standard error of mean.
Args:
vals (list of float): sample values
Returns float('nan') when fewer than two values.
"""
if len(vals) < 2:
return float('nan')
return stats.stdev(vals) / math.sqrt(len(vals))
@staticmethod
def median(vals):
"""Calculate median
Args:
vals (list of float): sample values
Wraps statistics.median() in Py3+.
Returns float('nan') on empty array.
"""
N = len(vals)
if N == 0:
return float('nan')
try:
return statistics.median(vals)
except NameError:
vals = sorted(vals)
if N % 2 == 0: #even
return stats.mean((vals[N//2-1], vals[N//2]))
else: #odd
return vals[N//2]
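# Quick sanity examples for the helpers above:
#   stats.mean([1.0, 2.0, 3.0])   # -> 2.0
#   stats.stdev([1.0, 2.0, 3.0])  # -> 1.0 (sample standard deviation)
#   stats.median([])              # -> nan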
class DataContainer(object):
"""
Contains a two dimensional array of values:
[ [ row1_column1, row1_column2, row1_column3, ...],
[ row2_column1, row2_column2, row2_column3, ...],
... ]
and column titles.
Args:
coltitles (list): column titles
Examples:
        >>> dg = DataContainer(['Energy_gap', 'dG', 'points'], comment="asd")
>>> dg.add_row([-300.0, 10.0, 2000])
>>> dg.add_row([-200.0, 5.0, 1000])
>>> dg
DataContainer(['Energy_gap', 'dG', 'points'], comment='asd', Nrows=2)
# get all rows
>>> dg.get_rows()
[[-300.0, 10.0, 2000], [-200.0, 5.0, 1000]]
# get rows from specific columns
>>> dg.get_rows(columns=('Energy_gap', 'points'))
[[-300.0, 2000], [-200.0, 1000]]
# get all columns
>>> dg.get_columns()
[(-300.0, -200.0), (10.0, 5.0), (2000, 1000)]
# get specific columns
>>> dg.get_columns(columns=(0, 1))
[(-300.0, -200.0), (10.0, 5.0)]
        >>> dg.get_columns(columns=('Energy_gap', 'dG'))
[(-300.0, -200.0), (10.0, 5.0)]
# clean up
>>> dg.delete_rows()
"""
def __init__(self, coltitles, comment=""):
if not isinstance(coltitles, (list, tuple)):
coltitles = [coltitles,]
self.column_titles = list(coltitles)
# a list containing rows of values
# (each row is a list with length = len(coltitles))
self._rows = []
self.comment = comment
def __repr__(self):
return "DataContainer({}, comment='{}', Nrows={})" \
"".format(self.column_titles, self.comment, len(self._rows))
def get_columns(self, columns=None):
"""
Transposes the array and returns the columns instead of rows.
Args:
columns (list), optional: return only columns with
these indices and/or titles
Returns:
list of columns (list of lists)
"""
if not columns:
columns = []
col_inds = []
for col in columns:
if type(col) == int:
col_inds.append(col)
else:
col_inds.append(self.column_titles.index(str(col)))
cols = list(zip(*self._rows)) # transpose
if col_inds:
return [cols[i] for i in col_inds]
else:
return cols
def get_rows(self, columns=None):
"""Return the rows.
Args:
columns (list), optional: return only columns with
these indices and/or titles
Returns:
list of rows (list of lists)
"""
if columns:
cols = self.get_columns(columns)
return list(zip(*cols))
else:
return self._rows
def add_row(self, row):
"""Add a row.
Args:
row (list): a list of values
Raises:
ValueError: if number of elements in row is not equal to
number of column titles
"""
if len(row) != len(self.column_titles):
raise ValueError("Number of elements is not equal to number "
"of columns, in row:\n{}".format(row))
self._rows.append(list(row))
def delete_rows(self):
"""
Removes the rows.
"""
self._rows = []
def __str__(self):
if self.comment:
outs = "#" + self.comment + "\n"
else:
outs = ""
for name in self.column_titles:
width = len(name)
if width < 10:
width = 10
outs += " {name:{width}} ".format(name=name, width=width)
for row in self._rows:
outs += "\n"
for i, val in enumerate(row):
try:
width = len(self.column_titles[i])
if width < 10:
width = 10
except IndexError:
width = 20
if type(val) == float:
outs += " {val:{width}.2f} ".format(val=val, width=width)
else:
outs += " {val:{width}} ".format(val=str(val), width=width)
return outs
# Shamelessly borrowed from
# http://www.genomearchitecture.com/2014/01/how-to-gunzip-on-the-fly-with-python
class gzopen(object):
"""Generic opener that decompresses gzipped files
if needed. Encapsulates an open file or a GzipFile.
Use the same way you would use 'open()'.
"""
def __init__(self, fname):
        f = open(fname, "rb")  # binary mode so the gzip magic-number check works under io.open
# Read magic number (the first 2 bytes) and rewind.
magic_number = f.read(2)
f.seek(0)
# Encapsulated 'self.f' is a file or a GzipFile.
        if magic_number == b'\x1f\x8b':
self.f = gzip.GzipFile(fileobj=f)
else:
self.f = f
# Define '__enter__' and '__exit__' to use in
# 'with' blocks. Always close the file and the
# GzipFile if applicable.
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
try:
self.f.fileobj.close()
except AttributeError:
pass
finally:
self.f.close()
# Reproduce the interface of an open file
# by encapsulation.
def __getattr__(self, name):
return getattr(self.f, name)
def __iter__(self):
return iter(self.f)
def next(self):
return next(self.f)
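# Usage sketch (hypothetical file name): works for plain and gzipped files alike.
#   with gzopen('md.log.gz') as fh:
#       for line in fh:
#           ...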
|
mpurg/qtools
|
packages/Qpyl/common.py
|
Python
|
mit
| 12,947
|
[
"Amber"
] |
1065e1cb00fa3524c412cbe51b875bbc0a10ed5594f200aedeb9c46950f65068
|
#!/usr/bin/python -tt
# Automated BLAST search of a FASTA file. The search defaults to blastx (protein);
# the blast type, database, e-value threshold and number of hits can be changed
# with command-line inputs.
# Any inputs with - or -- are optional and will default to certain values.
# Blast will be done locally for quicker responses
# Written by: Christopher R. Main, University of Delaware
# Last Updated: 09/26/13
# Versions:
# 0.1 - Open Cluster file and begin searching internet blast
# 0.2 - Search and send output to screen of top result
# 0.3 - Output BLAST results with GI, Length, E-Value, Query Start, Subject Start, Score and Bits
# 0.4 - Setup of for loop to run multiple queries, and output to separate files
# 0.5 - Append data to the file with added sequence name to first column
# 0.6 - Change way of doing inputs
# Future versions:
# 0.7 - Write for local database search, for use on BioHen
# Allow manipulating of FASTA file
from Bio import SeqIO
# Allows for internet blast search
from Bio.Blast import NcbiblastxCommandline
# Parsing of BLAST results
from Bio.Blast import NCBIXML
# Ready arguments from the commandline
import argparse
# Parse BLAST XML output held in memory (Python 2 StringIO)
from StringIO import StringIO
# Read and parse the arguments from the command line
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--version", action="version", version='Version 0.6')
parser.add_argument("filename", help="location of FASTA file")
parser.add_argument("out_file", help="filename for output of BLAST search results")
parser.add_argument("-b", "--blast", help="what type of blast to use (Defaults to blastx)", default='blastx')
parser.add_argument("-t", "--thres", help="e-value threshold, ignores any numbers above this number (Defaults to 0.005)", type=int, default=0.005)
parser.add_argument("-hl", "--hitlist", help="how many alignments do you want parsed into the file (Defaults to 10)", default=10)
parser.add_argument("-d", "--database", help="what database to search against (nr or swissprot) (Defaults to nr)", default='nr')
args = parser.parse_args()
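# Example invocation (a sketch; file names are hypothetical):
#   python fasta_search.py input.fasta results.tsv -b blastx -d swissprot -t 0.001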
# Open file
handle = open(args.filename, "rU")
# Read all sequences from the file; each is BLASTed in turn below
records = list(SeqIO.parse(handle, format="fasta"))
# Open file, this will append the file for each sequence, information is tab delimited and easily imported into Excel or other type software
wfile = open(args.out_file, "a")
# Write headers of columns: Sequence Name, GI #, Title, Length, E-Value, Query Start, Subject Start, Score, Bits
wfile.write("Sequence Name\tGI\tTitle\tLength\te-value\tQuery Start\tQuery End\tSubject Start\tSubject End\tGaps\tScore\tBits\n")
# Begin the loop to search each individual record. Each iteration of the loop will search a new sequence
for i in range(len(records)):
print "Blasting %s..." % (records[i].id)
    # Write the current record to a temporary FASTA file; the BLAST+ command-line
    # wrappers expect a file path for the query, not a raw sequence object.
    SeqIO.write(records[i], "query_tmp.fasta", "fasta")
    blarg = NcbiblastxCommandline(cmd=args.blast, query="query_tmp.fasta", db=args.database, evalue=args.thres, outfmt=5)
    # Run the search; outfmt=5 writes XML to stdout, which NCBIXML can parse
    stdout, stderr = blarg()
    blast_records = NCBIXML.parse(StringIO(stdout))
# Begin the loop, run as many times as there are records in the blast search
for blast_record in blast_records:
print "Writing search results for %s..." % (records[i].id)
# Run as many times as there are alignment sequences set by user input
for alignment in blast_record.alignments:
# Run as many times as there are information stored in the hsp (Has e-value, and other info)
for hsp in alignment.hsps:
# Ignore e-values above what was set by user
if hsp.expect < float(args.thres):
# Strip the ugliness out of the name file, so we can get what we want (ie GI # and Title
giFields = alignment.title.strip().split("|")
gi_num = giFields[1]
gi_title = giFields[4]
wfile.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (records[i].id, gi_num, gi_title, alignment.length, hsp.expect, hsp.query_start, hsp.query_end, hsp.sbjct_start, hsp.sbjct_end, hsp.gaps, hsp.score, hsp.bits))
wfile.close()
print "Writing of %s complete, closing file..." % (args.out_file)
|
calandryll/transcriptome
|
scripts/old/fasta_search.py
|
Python
|
gpl-2.0
| 4,017
|
[
"BLAST"
] |
8ede91bf007b722f091fbfa19c736e28e00d377cd0dcce261de894e7b03a7371
|
'''Noddy output file analysis
Created on 24/03/2014
@author: Florian Wellmann, Sam Thiele
'''
import os
import numpy as np
class NoddyOutput(object):
"""Class definition for Noddy output analysis"""
def __init__(self, output_name):
"""Noddy output analysis
**Arguments**:
- *output_name* = string : (base) name of Noddy output files
"""
print "here"
self.basename = output_name
self.load_model_info()
self.load_geology()
def __add__(self, other):
"""Define addition as addition of grid block values
Note: Check first if model dimensions and settings are the same
"""
# check dimensions
self.compare_dimensions_to(other)
# 1. create copy
import copy
tmp_his = copy.deepcopy(self)
# 2. perform operation
tmp_his.block = self.block + other.block
return tmp_his
def __sub__(self, other):
"""Define subtraction as subtraction of grid block values
Note: Check first if model dimensions and settings are the same
"""
# check dimensions
self.compare_dimensions_to(other)
# 1. create copy
import copy
tmp_his = copy.deepcopy(self)
# 2. perform operation
tmp_his.block = self.block - other.block
return tmp_his
def __iadd__(self, x):
"""Augmented assignment addtition: add value to all grid blocks
**Arguments**:
- *x*: can be either a numerical value (int, float, ...) *or* another
NoddyOutput object! Note that, in both cases, the own block is updated
            and no new object is created (compare to the overloaded addition operator!)
Note: This method is changing the object *in place*!
"""
# if x is another pynoddy output object, then add values to own grid in place!
if isinstance(x, NoddyOutput):
self.block += x.block
else:
self.block += x
# update grid values
return self
def __isub__(self, x):
"""Augmented assignment addtition: add value(s) to all grid blocks
**Arguments**:
- *x*: can be either a numerical value (int, float, ...) *or* another
NoddyOutput object! Note that, in both cases, the own block is updated
and no new object is created (compare to overwritten addition operator!)
Note: This method is changing the object *in place*!
"""
# if x is another pynoddy output object, then add values to own grid in place!
if isinstance(x, NoddyOutput):
self.block -= x.block
else:
self.block -= x
# update grid values
return self
def set_basename(self, name):
"""Set model basename"""
self.basename = name
def compare_dimensions_to(self, other):
"""Compare model dimensions to another model"""
try:
assert((self.nx, self.ny, self.nz) == (other.nx, other.ny, other.nz))
except AssertionError:
raise AssertionError("Model dimensions do not seem to agree, please check!\n")
try:
assert((self.delx, self.dely, self.delz) == (other.delx, other.dely, other.delz))
except AssertionError:
raise AssertionError("Model dimensions do not seem to agree, please check!\n")
try:
assert((self.xmin, self.ymin, self.zmin) == (other.xmin, other.ymin, other.zmin))
except AssertionError:
raise AssertionError("Model dimensions do not seem to agree, please check!\n")
def load_model_info(self):
"""Load information about model discretisation from .g00 file"""
filelines = open(self.basename + ".g00").readlines()
for line in filelines:
if 'NUMBER OF LAYERS' in line:
self.nz = int(line.split("=")[1])
elif 'LAYER 1 DIMENSIONS' in line:
(self.nx, self.ny) = [int(l) for l in line.split("=")[1].split(" ")[1:]]
elif 'UPPER SW CORNER' in line:
l = [float(l) for l in line.split("=")[1].split(" ")[1:]]
(self.xmin, self.ymin, self.zmax) = l
elif 'LOWER NE CORNER' in line:
l = [float(l) for l in line.split("=")[1].split(" ")[1:]]
(self.xmax, self.ymax, self.zmin) = l
elif 'NUM ROCK' in line:
self.n_rocktypes = int(line.split('=')[1])
self.n_total = self.nx * self.ny * self.nz
(self.extent_x, self.extent_y, self.extent_z) = (self.xmax - self.xmin, self.ymax - self.ymin,
self.zmax - self.zmin)
(self.delx, self.dely, self.delz) = (self.extent_x / float(self.nx),
self.extent_y / float(self.ny),
self.extent_z / float(self.nz))
#load lithology colours & relative ages
if os.path.exists(self.basename + ".g20"):
filelines = open(self.basename + ".g20").readlines()
self.n_events = int(filelines[0].split(' ')[2]) #number of events
lithos = filelines[ 3 + self.n_events : len(filelines) - 1] #litho definitions
self.rock_ids = [] #list of litho ids. Will be a list from 1 to n
self.rock_names = [] #the (string) names of each rock type. Note that names including spaces will not be read properly.
self.rock_colors = [] #the colours of each rock type (in Noddy).
self.rock_events = [] #list of the events that created different lithologies
for l in lithos:
data = l.split(' ')
self.rock_ids.append(int(data[0]))
self.rock_events.append(int(data[1]))
self.rock_names.append(data[2])
self.rock_colors.append( (int(data[-3])/255., int(data[-2])/255., int(data[-1])/255.) )
#calculate stratigraphy
self.stratigraphy = [] #litho id's ordered by the age they were created in
for i in range(max(self.rock_events)+1): #loop through events
#create list of lithos created in this event
lithos = []
for n, e in enumerate(self.rock_events):
if e == i: #current event
lithos.append(self.rock_ids[n])
#reverse order... Noddy litho id's are ordered by event, but reverse ordered within depositional events (ie.
#lithologies created in younger events have larger ids, however the youngest unit created in a given event
#will have the smallest id...
for l in reversed(lithos):
self.stratigraphy.append(l)
def load_geology(self):
"""Load block geology ids from .g12 output file"""
f = open(self.basename + ".g12")
method = 'standard' # standard method to read file
# method = 'numpy' # using numpy should be faster - but it messes up the order... possible to fix?
if method == 'standard':
i = 0
j = 0
k = 0
self.block = np.ndarray((self.nx,self.ny,self.nz))
for line in f.readlines():
if line == '\n':
# next z-slice
k += 1
# reset x counter
i = 0
continue
l = [int(l1) for l1 in line.strip().split("\t")]
self.block[i,:,self.nz-k-1] = np.array(l)[::-1]
i += 1
elif method == 'standard_old':
j = 0
j_max = 0
k_max = 0
i_max = 0
self.block = np.ndarray((self.nz,self.ny,self.nx))
for k,line in enumerate(f.readlines()):
if line == '\n':
# next y-slice
j += 1
if j > j_max : j_max = j
continue
for i,l1 in enumerate(line.strip().split("\t")):
if i > i_max: i_max = i
if k/self.nz > k_max : k_max = k/self.nz
self.block[j,i,k/self.nz-1] = int(l1)
print i_max, j_max, k_max
elif method == 'numpy':
# old implementation - didn't work, but why?
self.block = np.loadtxt(f, dtype="int")
# reshape to proper 3-D shape
self.block = self.block.reshape((self.nz,self.ny,self.nx))
self.block = np.swapaxes(self.block, 0, 2)
# self.block = np.swapaxes(self.block, 0, 1)
# print np.shape(self.block)
def determine_unit_volumes(self):
"""Determine volumes of geological units in the discretized block model
"""
#
# Note: for the time being, the following implementation is extremely simple
# and could be optimised, for example to test specifically for units defined
# in stratigraphies, intrusions, etc.!
#
self.block_volume = self.delx * self.dely * self.delz
self.unit_ids = np.unique(self.block)
self.unit_volumes = np.empty(np.shape(self.unit_ids))
for i,unit_id in enumerate(self.unit_ids):
self.unit_volumes[i] = np.sum(self.block == unit_id) * self.block_volume
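    # A minimal usage sketch (hypothetical basename):
    #   out = NoddyOutput("my_model_out")
    #   out.determine_unit_volumes()
    #   for uid, vol in zip(out.unit_ids, out.unit_volumes):
    #       print uid, vol   # unit id and its total volume (model length units cubed)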
def plot_section(self, direction='y', position='center', **kwds):
"""Create a section block through the model
**Arguments**:
- *direction* = 'x', 'y', 'z' : coordinate direction of section plot (default: 'y')
- *position* = int or 'center' : cell position of section as integer value
or identifier (default: 'center')
**Optional Keywords**:
- *ax* = matplotlib.axis : append plot to axis (default: create new plot)
- *figsize* = (x,y) : matplotlib figsize
- *colorbar* = bool : plot colorbar (default: True)
- *colorbar_orientation* = 'horizontal' or 'vertical' : orientation of colorbar
(default: 'vertical')
- *title* = string : plot title
- *savefig* = bool : save figure to file (default: show directly on screen)
- *cmap* = matplotlib.cmap : colormap (default: YlOrRd)
- *fig_filename* = string : figure filename
- *ve* = float : vertical exaggeration
- *layer_labels* = list of strings: labels for each unit in plot
- *layers_from* = noddy history file : get labels automatically from history file
- *data* = np.array : data to plot, if different to block data itself
"""
#try importing matplotlib
try:
import matplotlib.pyplot as plt
except ImportError:
print ("Could not draw image as matplotlib is not installed. Please install matplotlib")
cbar_orientation = kwds.get("colorbar_orientation", 'vertical')
# determine if data are passed - if not, then recompute model
if kwds.has_key("data"):
data = kwds["data"]
ve = kwds.get("ve", 1.)
cmap_type = kwds.get('cmap', 'YlOrRd')
if kwds.has_key('ax'):
# append plot to existing axis
ax = kwds['ax']
return_axis = True
else:
return_axis = False
figsize = kwds.get("figsize", (10,6))
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
savefig = kwds.get("savefig", False)
colorbar = kwds.get("colorbar", True)
# extract slice
if direction == 'x':
if position == 'center':
cell_pos = self.nx / 2
else:
cell_pos = position
if kwds.has_key('data'):
section_slice = data[cell_pos,:,:].transpose()
else:
section_slice = self.block[cell_pos,:,:].transpose()
xlabel = "y"
ylabel = "z"
if direction == 'y':
if position == 'center':
cell_pos = self.ny / 2
else:
cell_pos = position
if kwds.has_key('data'):
section_slice = data[:,cell_pos,:].transpose()
else:
section_slice = self.block[:,cell_pos,:].transpose()
xlabel = "x"
ylabel = "z"
if direction == 'z':
if position == 'center':
cell_pos = self.nz / 2
else:
cell_pos = position
if kwds.has_key('data'):
section_slice = data[:,:,cell_pos].transpose()
else:
section_slice = self.block[:,:,cell_pos].transpose()
xlabel = "x"
ylabel = "y"
title = kwds.get("title", "Section in %s-direction, pos=%d" % (direction, cell_pos))
        im = ax.imshow(section_slice, interpolation='nearest', aspect=ve, cmap=cmap_type, origin='lower')
        if colorbar and not return_axis:  # the colorbar requires the local figure object
            import matplotlib as mpl
            bounds = np.arange(np.min(section_slice), np.max(section_slice) + 1)
            cmap = plt.get_cmap(cmap_type)  # use the same colormap as the section image
            norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
            if cbar_orientation == 'horizontal':
                ax2 = fig.add_axes([0.125, 0.18, 0.775, 0.04])
                cb = mpl.colorbar.ColorbarBase(ax2, cmap=cmap, norm=norm, spacing='proportional',
                                               ticks=bounds - 0.5, boundaries=bounds,
                                               orientation='horizontal')
            else:  # default is vertical
                # create a second axes for the colorbar
                ax2 = fig.add_axes([0.95, 0.165, 0.03, 0.69])
                cb = mpl.colorbar.ColorbarBase(ax2, cmap=cmap, norm=norm, spacing='proportional',
                                               ticks=bounds - 0.5, boundaries=bounds,
                                               orientation='vertical')
# define the bins and normalize
if kwds.has_key("layer_labels"):
cb.set_ticklabels(kwds["layer_labels"])
# invert axis to have "correct" stratigraphic order
cb.ax.invert_yaxis()
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if return_axis:
return ax
elif savefig:
fig_filename = kwds.get("fig_filename", "%s_section_%s_pos_%d" % (self.basename, direction, cell_pos))
plt.savefig(fig_filename, bbox_inches="tight")
else:
plt.show()
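    # A minimal usage sketch (hypothetical basename; assumes a computed block model):
    #   out = NoddyOutput("my_model_out")
    #   out.plot_section(direction='x', ve=2.0, colorbar_orientation='horizontal',
    #                    savefig=True, fig_filename="section_x.png")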
def export_to_vtk(self, **kwds):
"""Export model to VTK
Export the geology blocks to VTK for visualisation of the entire 3-D model in an
external VTK viewer, e.g. Paraview.
..Note:: Requires pyevtk, available for free on: https://github.com/firedrakeproject/firedrake/tree/master/python/evtk
**Optional keywords**:
- *vtk_filename* = string : filename of VTK file (default: output_name)
        - *data* = np.array : data array to export to VTK (default: entire block model)
"""
vtk_filename = kwds.get("vtk_filename", self.basename)
from evtk.hl import gridToVTK
# Coordinates
x = np.arange(0, self.extent_x + 0.1*self.delx, self.delx, dtype='float64')
y = np.arange(0, self.extent_y + 0.1*self.dely, self.dely, dtype='float64')
z = np.arange(0, self.extent_z + 0.1*self.delz, self.delz, dtype='float64')
# self.block = np.swapaxes(self.block, 0, 2)
if kwds.has_key("data"):
gridToVTK(vtk_filename, x, y, z, cellData = {"data" : kwds['data']})
else:
gridToVTK(vtk_filename, x, y, z, cellData = {"geology" : self.block})
class NoddyGeophysics(object):
"""Definition to read, analyse, and visualise calculated geophysical responses"""
def __init__(self, output_name):
"""Methods to read, analyse, and visualise calculated geophysical responses
        .. note:: The geophysical responses can be computed with a keyword in the
function `compute_model`, e.g.:
``pynoddy.compute_model(history_name, output, type = 'GEOPHYSICS')``
"""
self.basename = output_name
self.read_gravity()
self.read_magnetics()
def read_gravity(self):
"""Read calculated gravity response"""
grv_lines = open(self.basename + ".grv", 'r').readlines()
self.grv_header = grv_lines[:8]
# read in data
# print len(grv_lines) - 8
dx = len(grv_lines) - 8
dy = len(grv_lines[8].rstrip().split("\t"))
self.grv_data = np.ndarray((dx, dy))
for i,line in enumerate(grv_lines[8:]):
self.grv_data[i,:] = np.array([float(x) for x in line.rstrip().split("\t")])
def read_magnetics(self):
"""Read caluclated magnetic field response"""
mag_lines = open(self.basename + ".mag", 'r').readlines()
self.mag_header = mag_lines[:8]
# read in data
# print len(mag_lines) - 8
dx = len(mag_lines) - 8
dy = len(mag_lines[8].rstrip().split("\t"))
self.mag_data = np.ndarray((dx, dy))
for i,line in enumerate(mag_lines[8:]):
self.mag_data[i,:] = np.array([float(x) for x in line.rstrip().split("\t")])
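    # A minimal inspection sketch for the loaded grids (hypothetical basename;
    # assumes matplotlib is available):
    #   geophys = NoddyGeophysics("my_model_out")
    #   import matplotlib.pyplot as plt
    #   plt.imshow(geophys.grv_data, origin='lower')
    #   plt.colorbar()
    #   plt.show()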
class NoddyTopology(object):
"""Definition to read, analyse, and visualise calculated voxel topology"""
def __init__(self, output_name, **kwds):
"""Methods to read, analyse, and visualise calculated voxel topology
        .. note:: The voxel topology can be computed with a keyword in the
function `compute_model`, e.g.: ``pynoddy.compute_model(history_name, output, type = 'TOPOLOGY')``
**Arguments**
- *output_name* = the name of the noddy output to run topology on.
**Optional Keywords**
- *load_attributes* = True if nodes and edges in the topology network should be attributed with properties such as volume
and surface area and lithology colour. Default is True.
"""
self.basename = output_name
self.load_attributes = kwds.get("load_attributes",True)
#load network
self.loadNetwork()
def loadNetwork(self):
'''
Loads the topology network into a NetworkX datastructure
'''
        #import networkx
        try:
            import networkx as nx
        except ImportError:
            print "Warning: NetworkX module could not be loaded. Please install NetworkX from https://networkx.github.io/ to perform topological analyses in PyNoddy"
            return
#initialise new networkX graph
self.graph = nx.Graph()
self.graph.name = self.basename
#load lithology properties
self.read_properties()
#load graph
f = open(self.basename + ".g23",'r')
lines = f.readlines() #read lines
for l in lines: #load edges
if '_' in l: #this line contains topology stuff (aka ignore empty lines)
l=l.rstrip()
data=l.split('\t')
#calculate edge colors
topoCode1 = data[0].split('_')[1]
topoCode2 = data[1].split('_')[1]
lithoCode1 = data[0].split('_')[0]
lithoCode2 = data[1].split('_')[0]
                count = int(data[-1]) #number of voxels with this neighbour relationship (proxy of surface area)
                #calculate edge type (dyke, fault etc)
                eCode = 0
                eAge = self.lithology_properties[int(lithoCode1)]['age'] #for original stratigraphy. Default is the age of the first node
eType = 'stratigraphic' #default is stratigraphy
eColour='k' #black
for i in range(0,len(topoCode1) - 1): #-1 removes the trailing character
if (topoCode1[i] != topoCode2[i]): #find the difference
#this is the 'age' of this edge, as the lithologies formed during
#different events
eAge = i
#calculate what the difference means (ie. edge type)
if int(topoCode2[i]) > int(topoCode1[i]):
eCode=topoCode2[i]
else:
eCode=topoCode1[i]
if int(eCode) == 0: #stratigraphic contact
eColour = 'k' #black
eType = 'stratigraphic'
elif int(eCode) == 2 or int(eCode) == 7 or int(eCode) == 8: #various types of faults
eColour = 'r' #red
eType = 'fault'
elif int(eCode) == 3: #unconformity
eColour = 'b' #blue
eType = 'unconformity'
elif int(eCode) == 5: #plug/dyke
eColour = 'y' #yellow
eType = 'intrusive'
else:
eColour = 'g' #green
eType = 'unknown'
#create nodes & associated properties
                self.graph.add_node(data[0], lithology=lithoCode1, name=self.lithology_properties[int(lithoCode1)]['name'], age=self.lithology_properties[int(lithoCode1)]['age'])
                self.graph.add_node(data[1], lithology=lithoCode2, name=self.lithology_properties[int(lithoCode2)]['name'], age=self.lithology_properties[int(lithoCode2)]['age'])
if (self.load_attributes):
self.graph.node[data[0]]['colour']=self.lithology_properties[int(lithoCode1)]['colour']
self.graph.node[data[0]]['centroid']=self.node_properties["%d_%s" % (int(lithoCode1),topoCode1) ]['centroid']
self.graph.node[data[0]]['volume'] = self.node_properties["%d_%s" % (int(lithoCode1),topoCode1) ]['volume']
self.graph.node[data[1]]['colour']=self.lithology_properties[int(lithoCode2)]['colour']
self.graph.node[data[1]]['centroid']=self.node_properties[ "%d_%s" % (int(lithoCode2),topoCode2) ]['centroid']
self.graph.node[data[1]]['volume'] = self.node_properties[ "%d_%s" % (int(lithoCode2),topoCode2) ]['volume']
#add edge
self.graph.add_edge(data[0],data[1],edgeCode=eCode,edgeType=eType, colour=eColour, area=count, weight=1)
    def read_properties(self):
        """Read lithology properties (names, colours, relative ages) from the .g20 file"""
#load lithology colours & relative ages. There is some duplication here
#of the NoddyOutput (sloppy, I know...) - ideally I should implement a base class
#that does this stuff and NoddyOutput and NoddyTopology both inherit from....
if os.path.exists(self.basename + ".g20"):
filelines = open(self.basename + ".g20").readlines()
self.n_events = int(filelines[0].split(' ')[2]) #number of events
lithos = filelines[ 3 + self.n_events : len(filelines) - 1] #litho definitions
self.rock_ids = [] #list of litho ids. Will be a list from 1 to n
self.rock_names = [] #the (string) names of each rock type. Note that names including spaces will not be read properly.
self.rock_colors = [] #the colours of each rock type (in Noddy).
self.rock_events = [] #list of the events that created different lithologies
for l in lithos:
data = l.split(' ')
self.rock_ids.append(int(data[0]))
self.rock_events.append(int(data[1]))
self.rock_names.append(data[2])
self.rock_colors.append( (int(data[-3])/255., int(data[-2])/255., int(data[-1])/255.) )
#load last line (list of names)
self.event_names = (filelines[-1].strip()).split('\t')
#calculate stratigraphy
self.stratigraphy = [] #litho id's ordered by the age they were created in
for i in range(max(self.rock_events)+1): #loop through events
#create list of lithos created in this event
lithos = []
for n, e in enumerate(self.rock_events):
if e == i: #current event
lithos.append(self.rock_ids[n])
#reverse order... Noddy litho id's are ordered by event, but reverse ordered within depositional events (ie.
#lithologies created in younger events have larger ids, however the youngest unit created in a given event
#will have the smallest id...
for l in reversed(lithos):
self.stratigraphy.append(l)
#create property dict for easier access to attributes from node codes
self.lithology_properties = {}
for l in self.rock_ids: #litho codes
params = {}
params['code'] = l
params['name'] = self.rock_names[l - 1]
params['colour'] = self.rock_colors[ l - 1 ]
params['age'] = self.stratigraphy.index(l)
self.lithology_properties[params['code']] = params
#f = open(self.basename + ".g20", 'r')
#lines = f.readlines()
#for i in range(self.n_events + 3,len(lines)-1): #loop through lithology definitions
# l = (lines[i].strip()).split(' ')
# #load lithology parameters
# params = {}
# params['code'] = int(l[0])
# params['name'] = ' '.join(l[2:-3])
# #colours are the last 3 values
# params['colour'] = [ float(l[-3]) / 255.0, float(l[-2]) / 255.0, float(l[-1]) / 255.0 ]
# #store lithology parameters (using lithocode as key)
# self.lithology_properties[params['code']] = params
>>>>>>> refs/remotes/flohorovicic/master
#load node locations from .vs file
if (self.load_attributes):
self.node_properties = {}
f = open(self.basename + "_v.vs", 'r')
lines =f.readlines()
for l in lines:
if "PVRTX" in l: #this is a vertex
data = l.split(' ')
params = {}
params['centroid']=[ float(data[2]), float(data[3]), float(data[4])]
params['litho'] = int(data[5])
params['topo'] = data[6]
                    params['volume'] = int(data[7]) #number of voxels of this type
#save (key = LITHO_TOPO (eg. 2_001a))
self.node_properties[ '%d_%s' % (params['litho'],params['topo']) ] = params
f.close()
def read_adjacency_matrix(self):
"""
*Depreciated*
Reads max number of lithologies aross all models"""
ml_lines = open(self.basename + ".g22", 'r').readlines()
# read in data
for line in ml_lines:
self.maxlitho = line
print "maxlitho =", self.maxlitho
def filter_node_volumes(self,min_volume=50):
'''
Removes all nodes with volumes less than the specified size
**Arguments**:
- *min_volume* = the threshold volume. Nodes with smaller volumes are deleted.
**Returns**
- returns the number of deleted nodes
'''
count = 0
for n in self.graph.nodes():
if self.graph.node[n]['volume'] < min_volume:
self.graph.remove_node(n)
count+=1
return count
def collapse_stratigraphy(self):
'''
Collapses all stratigraphic edges in this network to produce a network that only contains
structurally bound rock volumes. Essentially this is a network built only with Topology codes
and ignoring lithology
**Returns**
- a new NoddyTopology object containing the collapsed graph. The original object is not modified.
'''
#make copy of this object
import copy
topo = copy.deepcopy(self)
topo.type = "structural"
#clear graph
topo.graph.clear()
#rebuild network, but ignoring lithology
for e in self.graph.edges(data=True):
u = e[0].split("_")[1] #topology code of node 1
v = e[1].split("_")[1] #topology code of node 2
            #change the code1 & code2 endings to 'A' (discrete volumes don't mean anything anymore)
            u = u[:-1] + 'A' #retain the last letter for compatibility/consistency...
v = v[:-1] + 'A'
#update the attributes of the nodes
for i,n in enumerate([u,v]):
if not topo.graph.has_node(n):
topo.graph.add_node(n,age_list=[self.graph.node[e[i]]['age']],
colour_list=[self.graph.node[e[i]]['colour']],
name_list=[self.graph.node[e[i]]['name']],
name=n,
volume=self.graph.node[e[i]]['volume'],
lithology_list=[self.graph.node[e[i]]['lithology']],
centroid_list=[self.graph.node[e[i]]['centroid']])
else:
topo.graph.node[n]['age_list'].append(self.graph.node[e[i]]['age']) #append age
topo.graph.node[n]['colour_list'].append(self.graph.node[e[i]]['colour']) #append colour
topo.graph.node[n]['name_list'].append(self.graph.node[e[i]]['name']) #append name
topo.graph.node[n]['lithology_list'].append(self.graph.node[e[i]]['lithology']) #append lithology
topo.graph.node[n]['centroid_list'].append(self.graph.node[e[i]]['centroid']) #append centroid
topo.graph.node[n]['volume'] = topo.graph.node[u]['volume'] + self.graph.node[e[i]]['volume'] #increment volume
#add edge
if topo.graph.has_edge(u,v): #edge already exists
#merge attributes
data = topo.graph.get_edge_data(u,v)
data['area'] = data['area'] + e[2]['area']
else:
#create new edge
topo.graph.add_edge(u,v,attr_dict=e[2]) #copy all edge attributes across
#remove self loops
topo.graph.remove_edges_from( topo.graph.selfloop_edges() )
#calculate node centroids
for n in topo.graph.nodes(data=True):
n[1]['centroid'] = ( np.mean( [c[0] for c in n[1]['centroid_list']] ),
np.mean( [c[1] for c in n[1]['centroid_list']] ),
np.mean( [c[2] for c in n[1]['centroid_list']] ))
n[1]['lithology'] = n[1]['lithology_list'][0] #defined by bottom lithology
n[1]['colour'] = n[1]['colour_list'][0]
n[1]['age'] = np.mean(n[1]['age_list'])
return topo
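    # A minimal usage sketch (hypothetical basename):
    #   topo = NoddyTopology("my_model_out")
    #   structural = topo.collapse_stratigraphy()  # the original object is not modified
    #   print structural.graph.number_of_nodes(), structural.graph.number_of_edges()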
def collapse_structure(self, verbose=False):
'''
Collapses all topology codes down to the last (most recent) difference. Information regarding specific model topology is
        generalised, eg. lithology A has a fault and stratigraphic contact with B (regardless of how many different faults are involved).
**Optional Arguments**:
- *verbose* = True if this function should write to the print buffer. Default is False.
**Returns**
- a new NoddyTopology object containing the collapsed graph. The original object is not modified.
'''
import copy
topo = copy.deepcopy(self)
#clear the graph in topo
topo.graph.clear()
#loop through graph
for e in self.graph.edges(data=True):
#get lithology code
lith1 = e[0].split("_")[0] #lithology code of node1
lith2 = e[1].split("_")[0] #lithology code of node2
#calculate new node tags (based entirely on lithology)
u = "%s" % (lith1)
v = "%s" % (lith2)
#update attributes of u
if not topo.graph.has_node(u): #new node, add
topo.graph.add_node(u,age=self.graph.node[e[0]]['age'],
colour=self.graph.node[e[0]]['colour'],
name=self.graph.node[e[0]]['name'],
volume=self.graph.node[e[0]]['volume'],
lithology=self.graph.node[e[0]]['lithology'])
else:
topo.graph.node[u]['volume'] = topo.graph.node[u]['volume'] + self.graph.node[e[0]]['volume'] #increment volume
#do the same for v
if not topo.graph.has_node(v): #new node, add
topo.graph.add_node(v,age=self.graph.node[e[1]]['age'],
colour=self.graph.node[e[1]]['colour'],
name=self.graph.node[e[1]]['name'],
volume=self.graph.node[e[1]]['volume'],
lithology=self.graph.node[e[1]]['lithology'])
else:
topo.graph.node[v]['volume'] = topo.graph.node[v]['volume'] + self.graph.node[e[1]]['volume'] #increment volume
#generate edges
if topo.graph.has_edge(u,v): #edge already exists
#do our best to append/merge attributes
data = topo.graph.get_edge_data(u,v)
for key in e[2].keys():
try:
try:
data[key] = float(data[key]) + float(e[2][key]) #increment numbers
except (ValueError,TypeError):
try:
data[key].append(e[2][key]) #try appending (for lists)
except AttributeError:
data[key] = [ e[2][key] ] #make list
except KeyError: #key not found, add new key
data[key] = e[2][key]
#maintain that weight = 1
data['weight'] = 1.0
else:
#create new edge
topo.graph.add_edge(u,v,attr_dict=e[2])
if verbose:
print ("Collapsed (%s,%s) to (%s,%s)" % (e[0],e[1],u,v))
return topo
def jaccard_coefficient(self,G2):
'''
        Calculates the Jaccard Coefficient (the ratio between the intersection and union of the edge sets) of the graph representing this NoddyTopology and G2.
**Arguments**
- *G2* = a valid NoddyTopology object or NetworkX graph that this topology is to be compared with
**Returns**
- The jaccard_coefficient
'''
        #intersection is initially zero
        intersection = 0
#ensure G2 is a graph object
if isinstance(G2,NoddyTopology):
G2 = G2.graph #we want the graph bit
        #ensure we are not comparing two empty graphs
        if G2.number_of_edges() == 0 and self.graph.number_of_edges() == 0:
            print "Warning: comparing two empty graphs... %s and %s" % (self.graph.name, G2.name)
            return 1 #two null graphs are considered identical
        #initialise the union with all edges from G2
        union = G2.number_of_edges()
for e in self.graph.edges_iter():
if (G2.has_edge(e[0],e[1])): #edge present in both graphs
intersection+=1 #add this edge to intersection
else:
union += 1 #edge is new, add to union
return intersection / float(union)
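    # A worked example of the coefficient (hypothetical edge sets): if this graph
    # has edges {AB, BC, CD} and G2 has edges {AB, BC, DE}, the intersection holds
    # 2 edges and the union holds 4, so the method returns 2/4 = 0.5; identical
    # topologies return exactly 1.0.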
def is_unique(self, known ):
'''
Returns True if the topology of this model is different (ie. forms a different network) to a list of models.
**Arguments**:
-*known* = a list of valid NoddyTopology objects or NetworkX graphs to compare with.
**Returns**:
- Returns true if this topology is unique, otherwise false
'''
for g2 in known:
if self.jaccard_coefficient(g2) == 1:
return False #the models match
return True
def find_first_match(self,known):
'''
Identical to is_unique, except that the index of the first match is returned if this matches, otherwise
-1 is returned.
**Arguments**:
-*known* = a list of valid NoddyTopology objects or NetworkX graphs to compare with.
**Returns**:
- Returns the index of the first matching topology object, or -1
'''
index=0
for g2 in known:
if self.jaccard_coefficient(g2) == 1:
return index #the models match
index+=1
return -1
@staticmethod
def combine_topologies(topology_list):
'''
        Combines a list of topology networks into a weighted 'super-network'. This is designed for
        estimating the likelihood of a given edge occurring using a series of networks generated in
a Monte-Carlo type analysis.
**Arguments**
- *topology_list* = A list of networkX graphs or NoddyTopology objects to build supernetwork from.
**Returns**
- A NetworkX graph object containing all edges from the input graphs and weighted ('weight' parameter)
according to their observed frequency.
'''
#validate input
if len(topology_list) < 1:
print "Topology list contains no topologies... cannot combine."
return
import networkx as nx
S = nx.Graph()
w_inc = 1. / len(topology_list) #the amount weights go up per edge.
#if an edge is observed in every topology, then
#the weight == 1
#copy nodes from all networks in topology_list into S
import copy
for G in topology_list:
#ensure G is a Graph
if isinstance(G,NoddyTopology):
G = G.graph #we want the graph bit
#loop through nodes and average/append them
for n in G.nodes():
#Node 1
if not S.has_node(n):
S.add_node(n,attr_dict = copy.copy(G.node[n]))
#cast variables to list (or tuple of lists from centroid)
if G.node[n].has_key('volume'):
S.node[n]['volume_list'] = [G.node[n]['volume']]
S.node[n]['volume'] = G.node[n]['volume'] * w_inc
else:
S.node[n]['volume_list'] = [0]
S.node[n]['volume'] = 0
if S.node[n].has_key('centroid'):
S.node[n]['centroid_list'] = ([G.node[n]['centroid'][0]],[G.node[n]['centroid'][1]],[G.node[n]['centroid'][2]])
S.node[n]['centroid'] = (w_inc * S.node[n]['centroid'][0],w_inc * S.node[n]['centroid'][1],w_inc * S.node[n]['centroid'][2])
else: #node already exists, store attributes
#append centroid
if G.node[n].has_key('centroid'):
c1 = G.node[n]['centroid']
#list of all centroids
S.node[n]['centroid_list'][0].append(c1[0])
S.node[n]['centroid_list'][1].append(c1[1])
S.node[n]['centroid_list'][2].append(c1[2])
#average centroid
S.node[n]['centroid'] = (S.node[n]['centroid'][0] + w_inc * c1[0],
S.node[n]['centroid'][1] + w_inc * c1[1],
S.node[n]['centroid'][2] + w_inc * c1[2])
#append volume
if G.node[n].has_key('volume'):
S.node[n]['volume_list'].append(G.node[n]['volume'])
#add to average
S.node[n]['volume'] = S.node[n]['volume'] + w_inc * G.node[n]['volume']
#now copy edges across and average/append them
for G in topology_list:
#ensure G is a Graph
if isinstance(G,NoddyTopology):
G = G.graph #we want the graph bit
#loop through edges
for e in G.edges(data=True):
#average/add edges
if not S.has_edge(e[0],e[1]): #add new edge
#add edge
S.add_edge(e[0],e[1],e[2])
s_e = S.edge[e[0]][e[1]]
s_e['weight'] = w_inc
#cast vars to list
s_e['area_list'] = [s_e['area']]
try:
s_e['area'] = s_e['area'] * w_inc
except TypeError:
print "Type error combining edge %s, %s. List was observed rather than float - %s" % (e[0],e[1],str(s_e['area']))
else: #edge already exists
#append/average attributes
s_e = S.edge[e[0]][e[1]]
s_e['area_list'].append(e[2]['area']) #store area
s_e['area'] = s_e['area'] + e[2]['area'] * w_inc #average area
#increment weight
s_e['weight'] = s_e['weight'] + w_inc
#return the graph
return S
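    # A minimal usage sketch (hypothetical list of topologies from a Monte-Carlo run):
    #   S = NoddyTopology.combine_topologies(topologies)
    #   for u, v, d in S.edges(data=True):
    #       print u, v, d['weight']  # weight == observed frequency of the edge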
@staticmethod
def calculate_unique_topologies(topology_list, **kwds):
'''
Calculates the number of unique topologies in a list of NoddyTopologies
**Arguments**:
- *topology_list* = The list of NoddyTopologies to search through.
**Optional Keywords**:
- *output* = A File or list to write cumulative observed topologies distribution. Default is None (nothing written).
**Returns**:
- Returns a list of unique topologies.
'''
output = kwds.get("output",None)
out_list = []
uTopo = []
for t in topology_list:
if t.is_unique(uTopo):
#t.filter_node_volumes(50)
uTopo.append(t)
#store cumulative output
out_list.append(len(uTopo))
#write output file if necessary
import types
if not output is None:
if type(output) == types.StringType: #path has been given so write file
#check directory exists
if not os.path.exists(os.path.dirname(output)) and not os.path.dirname(output) == '':
os.makedirs(os.path.dirname(output))
f = open(output,'w')
for o in out_list:
f.write("%d\n" % o)
f.close()
elif type(output) == types.ListType:
for o in out_list:
output.append(o)
return uTopo
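    # A minimal usage sketch (hypothetical topology list and output path):
    #   unique = NoddyTopology.calculate_unique_topologies(topologies, output="out/cumulative.txt")
    #   print "%d unique topologies found" % len(unique)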
def calculate_overlap(self, G2):
'''
Calculates the overlap between this NoddyTopology and another NoddyTopology or networkX graph
**Arguments**
- *G2* = a valid NoddyTopology object or NetworkX graph that this topology is to be compared with
**Returns**
- The number of overlapping edges
'''
#ensure G2 is a graph object
if (isinstance(G2,NoddyTopology)):
G2 = G2.graph #we want the graph bit
similarity=0
for e in self.graph.edges_iter():
if (G2.has_edge(e[0],e[1])):
similarity+=1
return similarity
def find_matching(self,known):
'''
Finds the first matching NoddyTopology (or NetworkX graph) in the specified list
**Arguments**:
-*known* = a list of valid NoddyTopology objects or NetworkX graphs to compare with.
**Returns**:
- Returns the first matching object (jaccard coefficient = 1), or otherwise None
'''
for g1 in known:
if self.jaccard_coefficient(g1) == 1.0:
return g1 #return the match
return None #no match
def write_summary_file(self,path,append=True):
'''
Writes summary information about this network to a file
**Optional Arguments**
        - *append* = True if summary information should be appended to the file. If so the file is written as a csv spreadsheet.
          Default is true. If False is passed, a single, detailed summary is written for this network.
'''
        if append: #write summary information in spreadsheet format
exists = os.path.exists(path)
f = open(path,"a")
if not exists: #write header
f.write("name,#nodes,#edges\n") #todo add other stuff here
#write data
f.write("%s,%s,%s\n" % (self.basename,self.graph.number_of_nodes(),self.graph.number_of_edges()))
f.close()
else: #write detailed information
import networkx as nx
f = open(path,"w")
f.write("Summary:")
f.write("Name: %s\n" % self.basename)
f.write("#nodes: %s\n" % self.graph.number_of_nodes())
f.write("#edges: %s\n" % self.graph.number_of_edges())
f.write("Detail")
f.write("Degree sequence: %s" % str(nx.degree(self.graph).values()))
f.write("Node list: %s" % str(self.graph.nodes(data=False)))
f.write("Edge list: %s" % str(self.graph.edges(data=False)))
f.write("Node attributes: %s" % str(self.graph.nodes(data=True)))
f.write("Edge attributes: %s" % str(self.graph.edges(data=True)))
f.close()
@staticmethod
def draw_graph_matrix(G,**kwds):
'''
Draws an adjacency matrix representing the specified graph object. Equivalent to
NoddyTopology.draw_matrix_image() but for a networkX graph object.
**Keywords**:
- *strat* = A dictionary linking node names to stratigraphic heights and names. Should be as follows { node_name : (height,name) }.
- *path* = The path to save this image to. If not provided, the image is drawn to the screen
- *dpi* = The resolution to save this image. Default is 300
- *size* = The size of the image to save (in inches). This value will be used as the width and the height
'''
try:
import matplotlib.pyplot as plt
import matplotlib.patches as patches
except ImportError:
print "Could not draw image as matplotlib is not installed. Please install matplotlib."
return
n = G.number_of_nodes()
#retrieve data from network
nodes=G.nodes(data=True)
#sort node list alphabetically first
nodes = sorted(nodes,key=lambda node: str.lower( node[0] ))
#now sort by age, if we know it
if nodes[0][1].has_key('age'):
nodes = sorted(nodes,key=lambda node: node[1]['age'])
#build node id dictionary mapping
ids = {}
for i in range(len(nodes)):
node = nodes[i][0]
ids[node] = i
#build matrix
mat = [[('',0) for i in range(n)] for j in range(n)]
labels = {}
dots=np.zeros( (n,n) )
for e in G.edges(data=True):
#calculate alpha
alpha = e[2].get('weight',0.4) #super networks will have a weight
#otherwise use 0.4
#store colours (nb. matrix is symmetric, so operations are repeated)
mat[ids[e[0]]][ids[e[1]]] = (e[2]['colour'],alpha)
mat[ids[e[1]]][ids[e[0]]] = (e[2]['colour'],alpha)
#label info
if type(e[2]['colour']) is list: #add from list
for i in range( len(e[2]['colour']) ):
labels[e[2]['colour'][i]] = e[2]['edgeType'][i]
else: #add directly
labels[e[2]['colour']] = e[2]['edgeType']
#save dots (for comparison matrices)
dots[ids[e[0]]][ids[e[1]]] = e[2].get('comp_id',0) == 1 #default is no dot
dots[ids[e[1]]][ids[e[0]]] = e[2].get('comp_id',0) == 1
f, ax = plt.subplots()
for x in range(len(mat)):
for y in range(len(mat[0])):
c = mat[x][y][0] #colour (single colour or list of colours if this is a lithological topology)
a = mat[x][y][1] #alpha
if (a > 1 ): #catch floating point errors
a = 0.99999
if type(c) is list: #multiple relationships...
#find unique relationships, in case they are repeated (though they should not be)
unique = []
for i in c:
if not i in unique:
unique.append(i)
#draw unique
if len(unique) == 1:
if c != '':
#draw patch
patch = ax.add_patch( patches.Rectangle(
(x,y),
1,1,color=c[0],alpha=a))
patch.set_label( labels[c[0]] )
labels[c[0]] = '_nolegend_' #so we don't show labels multiple times
elif len(unique) == 2: #draw two triangles
#upper triangle
upper = ax.add_patch( patches.Polygon(
xy=[[x,y],[x+1,y],[x,y+1]],
color=c[0],alpha=a))
upper.set_label( labels[c[0]] )
labels[c[0]] = '_nolegend_' #so we don't show labels multiple times
#lower triangle
                            lower = ax.add_patch( patches.Polygon(
                                    xy=[[x+1,y+1],[x+1,y],[x,y+1]],
                                    color=c[1],alpha=a))
                            lower.set_label( labels[c[1]] )
                            labels[c[1]] = '_nolegend_' #so we don't show labels multiple times
elif len(unique) == 3: #draw two triangles with circle
#upper triangle
upper = ax.add_patch( patches.Polygon(
xy=[[x,y],[x+1,y],[x,y+1]],
color=c[0],alpha=a))
upper.set_label( labels[c[0]] )
labels[c[0]] = '_nolegend_' #so we don't show labels multiple times
#lower triangle
lower = ax.add_patch( patches.Polygon(
xy=[[x+1,y+1],[x+1,y],[x,y+1]],
color=c[1],alpha=a))
lower.set_label( labels[c[1]] )
labels[c[1]] = '_nolegend_' #so we don't show labels multiple times
#circle
circle = ax.add_patch( patches.Circle(
(x+0.5,y+0.5), 0.25,
color=c[2],alpha=1))
circle.set_label( labels[c[2]] )
labels[c[2]] = '_nolegend_' #so we don't show labels multiple times
elif len(unique) == 4: #draw 4 boxes
#upper left
patch = ax.add_patch( patches.Rectangle(
(x,y),
.5,.5,color=c[0],alpha=a))
patch.set_label( labels[c[0]] )
labels[c[0]] = '_nolegend_' #so we don't show labels multiple times
#upper right
patch = ax.add_patch( patches.Rectangle(
(x+.5,y),
.5,.5,color=c[1],alpha=a))
patch.set_label( labels[c[1]] )
labels[c[1]] = '_nolegend_' #so we don't show labels multiple times
#lower left
patch = ax.add_patch( patches.Rectangle(
(x,y+.5),
.5,.5,color=c[2],alpha=a))
patch.set_label( labels[c[2]] )
labels[c[2]] = '_nolegend_' #so we don't show labels multiple times
#lower right
patch = ax.add_patch( patches.Rectangle(
(x+.5,y+.5),
.5,.5,color=c[3],alpha=a))
patch.set_label( labels[c[3]] )
labels[c[3]] = '_nolegend_' #so we don't show labels multiple times
else: #uh oh - though tbh this *should* never happen.... (though Murphy would disagree)
print "Error: more than 4 relationship types! This cannot be drawn on adjacency matrix"
print c
break
else: #only one relationship, rectangular patch
if c != '':
#draw patch
patch = ax.add_patch( patches.Rectangle(
(x,y),
1,1,facecolor=c,alpha=a))
if a < 0.05: #dot hatch
patch = ax.add_patch( patches.Rectangle(
(x,y),
1,1, facecolor='w',edgecolor=c,alpha=0.4,hatch='.'))
elif a < 0.1: #cross hatch
patch = ax.add_patch( patches.Rectangle(
(x,y),
1,1, facecolor='w', edgecolor=c,alpha=0.4,hatch='x'))
patch.set_label( labels[c] )
labels[c] = '_nolegend_' #so we don't show labels multiple times
#draw dots
if dots[x][y] == 1: #draw dot
ax.scatter(x+0.5,y+0.5,c='k',alpha=0.6)
#print "dot %d, %d" % (x,y)
#plot grid
#ax.grid()
#plot legend
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
#set limits & flip y
ax.set_ylim(0,n)
ax.set_xlim(0,n)
#ax.invert_yaxis()
#set ticks
ax.set_xticks([ x + .5 for x in range(n)])
ax.set_yticks([ y + .5 for y in range(n)])
#build node name mapping
name_list = [] #order list containing node names from 0 to n
for node in nodes:
if node[1].has_key('name'):
name = node[1]['name']
#name+=node[0].split('_')[-1]
else:
name = node[0]
name_list.append(name)
ax.xaxis.set_ticklabels(name_list,rotation=90)
ax.yaxis.set_ticklabels(name_list)
#set figure size
size = kwds.get('size',5.)
f.set_figwidth(size)
f.set_figheight(size)
#save/show
if kwds.has_key('path'):
f.savefig(kwds['path'],dpi=kwds.get('dpi',300))
else:
f.show()
def draw_adjacency_matrix(self, **kwds):
'''
Draws an adjacency matrix representing this topology object.
**Keywords**:
- *path* = The path to save this image to. If not provided, the image is drawn to the screen
- *dpi* = The resolution to save this image. Default is 300
- *size* = The size of the image to save (in inches). This value will be used as the width and the height
'''
NoddyTopology.draw_graph_matrix(self.graph,**kwds)
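    # A minimal usage sketch (hypothetical basename and output path):
    #   topo = NoddyTopology("my_model_out")
    #   topo.draw_adjacency_matrix(path="out/matrix.png", dpi=150, size=6.)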
def draw_difference_matrix(self, G2, **kwds):
'''
Draws an adjacency matrix containing the difference between this topology and the provided topology
**Arguments**:
- *G2* = A different NoddyTopology or NetworkX Graph to compare to
**Optional Keywords**:
- *strat* = A dictionary linking node names to stratigraphic heights and names. Should be as follows { node_name : (height,name) }.
- *path* = The path to save this image to. If not provided, the image is drawn to the screen
- *dpi* = The resolution to save this image. Default is 300
- *size* = The size of the image to save (in inches). This value will be used as the width and the height
'''
#ensure G2 is a graph object
#if (isinstance(G2,NoddyTopology)):
# G2 = G2.graph #we want the graph bit
#get difference
n, edge_list = self.calculate_difference(G2,data=True)
#make graph of difference
import networkx as nx
D = nx.Graph()
D.add_edges_from(edge_list)
#plot
        NoddyTopology.draw_graph_matrix(D, **kwds)
def _dep_draw_matrix_image( self, outputname="" ):
'''
Draws an (adjacency) matrix representing this NoddyTopology object.
**Arguments**
- *outputname* = the path of the image to be written. If left as '' the image is written to the same directory as the basename.
'''
        #try importing matplotlib
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            print ("Could not draw image as matplotlib is not installed. Please install matplotlib")
            return
#get output path
if outputname == "":
outputname = self.basename + "_matrix.jpg"
#open the matrix file
f = open(self.basename + '.g25','r')
lines = f.readlines()
rows = []
for l in lines:
l = l.rstrip()
row = []
for e in l.split('\t'):
row.append(int(e))
rows.append(row)
#draw & save
print "Saving matrix image to... " + outputname
cmap=plt.get_cmap('Paired')
cmap.set_under('white') # Color for values less than vmin
plt.imshow(rows, interpolation="nearest", vmin=1, cmap=cmap)
plt.savefig(outputname)
plt.clf()
def draw_network_image(self, outputname="", **kwds ):
'''
Draws a network diagram of this NoddyTopology to the specified image
        **Arguments**:
- *outputname* = the path of the image being written. If left as '' the image is written to the same directory as the basename.
**Optional Keywords**:
- *dimension* = '2D' for a 2D network diagram or '3D' for a 3D network diagram. Default is '2D'.
- *axis* = the axis to view on for 3D network diagrams
- *perspective* = True to use perspective projection, or False for orthographic projection. Default is False.
- *node_size* = The size that nodes are drawn. Default is 1500.
        - *layout* = The layout algorithm used in 2D. Options are 'spring_layout' (default), 'shell_layout', 'circular_layout' and 'spectral_layout'.
        - *verbose* = True if this function is allowed to write to the print buffer, otherwise false. Default is False.
'''
#import networkx
import networkx as nx
        #try importing matplotlib
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            print ("Could not draw image as matplotlib is not installed. Please install matplotlib")
            return
#get args
dims=kwds.get("dimension",'2D')
view_axis=kwds.get("axis",'y') #default view along y axis
perspective=kwds.get("perspective",False)
node_size = kwds.get("node_size",1500)
layout = kwds.get("layout",'spring_layout')
        verbose = kwds.get("verbose", False)
#get output path
if outputname == "":
outputname = self.basename + "_graph.jpg"
#setup node colours (by lithologies)
#nCols = map(int,[G.node[n]['lithology'] for n in G.nodes()])
nCols = []
for n in self.graph.nodes():
nCols.append(self.graph.node[n]['colour'])
#setup colors (by type)
eCols = []#map(int,[G.edge[e[0]][e[1]]['edgeType'] for e in G.edges()])
for e in self.graph.edges():
eCols.append(self.graph.edge[e[0]][e[1]]['colour'])
#calculate node positions & sizes
size = [node_size] * nx.number_of_nodes(self.graph)
pos = {}
if '3D' in dims: #3D layout
size_dict = {}
for n in self.graph.nodes():
#initialise size array
size_dict[n] = node_size
dz=1 #z buffer
#calculate 2D location (orthographic)
if view_axis == 'x' or view_axis == 'side': #side view
pos[n]=[self.graph.node[n]['centroid'][1],self.graph.node[n]['centroid'][2]]
dz=self.graph.node[n]['centroid'][0]
elif view_axis == 'y' or view_axis == 'front': #front view
pos[n]=[self.graph.node[n]['centroid'][0],self.graph.node[n]['centroid'][2]]
dz=self.graph.node[n]['centroid'][1]
elif view_axis == 'z' or view_axis == 'top': #top view
pos[n]=[self.graph.node[n]['centroid'][0],self.graph.node[n]['centroid'][1]]
dz=self.graph.node[n]['centroid'][2]
#apply perspective correction if necessary
if perspective==True:
pos[n][0] = pos[n][0] / (dz)
pos[n][1] = pos[n][1] / (dz)
size_dict[n] = (size_dict[n] / dz) * 500
#store size array
size = size_dict.values()
else: #2D layout
            #layouts: spring_layout (default), shell_layout, circular_layout, spectral_layout
            if 'shell_layout' in layout:
                pos = nx.shell_layout(self.graph)
            elif 'circular_layout' in layout:
                pos = nx.circular_layout(self.graph)
            elif 'spectral_layout' in layout:
                pos = nx.spectral_layout(self.graph)
            else:
                pos = nx.spring_layout(self.graph)
#print "Position = " + str(pos)
#draw & save
if verbose:
print "Saving network image to..." + outputname
nx.draw(self.graph,pos,node_color=nCols,node_size=size, edge_color=eCols) #cmap=cm
#nx.draw_networkx_labels(G,pos,font_size=8)
plt.savefig(outputname)
plt.clf()
def draw_network_hive( self, **kwds ):
'''
Draws a network hive plot (see https://github.com/ericmjl/hiveplot).
        The axes of the hive are: node lithology, edge age & edge area.
        ie. the top axis lists the nodes in stratigraphic order. The second axis
        lists edges by structural age & the third axis lists edges by surface area.
Nodes are joined to edge-nodes by lines on the graph if they are topologically linked
(ie. if an edge has that node as an end point).
**Optional Keywords**:
- *path* = the path to save this figure
- *dpi* = the resolution of the figure
- *bg* = the background color. Default is black.
- *axes* = The color of the axes and labels.
'''
#make axes
axes = [[],[],[]]
#nb. was lithology
axes[0] = [(n,int(d['age'])) for n, d in self.graph.nodes(data=True)] #nodes
axes[1] = [(u,v,d['age']) for u,v,d in self.graph.edges(data=True)] #edges treated as nodes on these axes
axes[2] = [(u,v,d['area']) for u,v,d in self.graph.edges(data=True)]
#calculate node positions
node_positions = [{},{},{}]
for ax in range(3): #axes
for n in axes[ax]: #nodes
node_id = n[:-1]
if len(node_id) == 1:
                    node_id = n[0] #change from tuple to value
node_positions[ax][node_id] = n[-1] #use node parameter
#drop attributes from node ids
axes[0] = [ n for n, d in axes[0]]
axes[1] = [ (u,v) for u, v, d in axes[1]] #string contains edge type
axes[2] = [ (u,v) for u,v,d in axes[2]]
#calculate edges
edges = {}
edge_vals = {}
for u,v,d in self.graph.edges(data=True):
if not edges.has_key(d['edgeType']):
edges[d['edgeType']] = [] #init list
edge_vals[d['edgeType']] = {}#'cm' : 'alpha', 'color' : d['colour']}
e1 = (u,v) #inter group edge
e2 = (u,(u,v)) #between group edges
e3 = (v,(u,v))
e4 = ((u,v),(u,v))
edges[d['edgeType']].append(e1)
edges[d['edgeType']].append(e2)
edges[d['edgeType']].append(e3)
edges[d['edgeType']].append(e4)
edge_vals[d['edgeType']][e1] = d['colour'] #set edge color
edge_vals[d['edgeType']][e2] = d['colour'] #set edge color
edge_vals[d['edgeType']][e3] = d['colour'] #set edge color
edge_vals[d['edgeType']][e4] = d['colour'] #set edge color
#make plot
axis_cols = kwds.get('axes',['white','white','white'])
if not type(axis_cols) is list:
axis_cols = [axis_cols] * 3
from pynoddy.experiment.util.hive_plot import HivePlot
h = HivePlot(axes,edges,node_positions=node_positions, node_size=0.2,
edge_colormap=edge_vals,lbl_axes=['Stratigraphic Age',
'Structural Age',
'Surface Area'],
axis_cols=axis_cols)
h.draw(**kwds)
@staticmethod
def draw_mayavi_graph( G, **kwds ):
'''
Draws the provided network with mayavi. This requires the Mayavi python library
(mayavi.mlab)
**Optional Keywords**:
        - *node_size* = The size of the nodes. Default is 250.
        - *edge_thickness* = The thickness of the edges. Default is 10.
- *show* = If true, the model is displayed in the mayavi viewer after exporting. Default is True
- *path* = A path to save the mayavi vtk file to after generating it.
'''
import networkx as nx
import numpy as np
try:
from mayavi import mlab
except ImportError:
print("Error loading mayavi package: mayavi is not installed or is not on the python path. To install with pip, use 'pip install mayavi' (or 'conda install mayavi'")
return
node_size = kwds.get('node_size',250)
edge_thickness = kwds.get('edge_thickness',10)
#convert node labels to integers
G2 = nx.convert_node_labels_to_integers(G)
#load positions
x = []
y = []
z = []
nCols = [] #node colours
for n in G2.nodes():
assert G2.node[n].has_key('centroid'), "Error: node centroids are not defined."
centroid = G2.node[n]['centroid']
x.append(centroid[0])
y.append(centroid[1])
z.append(centroid[2])
nCols.append(int(G2.node[n]['lithology']))
#get edges of different types
edge_groups = {} #keys: 'type' : (edge,edge_colour,weight_list)
from matplotlib.colors import ColorConverter
cc = ColorConverter()
for e in G2.edges(data=True):
e_type = e[2]['edgeType']
if not edge_groups.has_key(e_type):
col = e[2].get('colour',(0.3,0.3,0.3))
#convert matplotlib colours to rgb
if not type( col ) is tuple:
col= cc.to_rgb( col )
#edges are stored as follows: ((x_coords,y_coords,zcoords),edge_pairs,colour,values)
edge_groups[e_type] = (([],[],[]),[],col,[]) #Initialise edge type
#append start coordinates
id_start = len(edge_groups[e_type][0][0])
edge_groups[e_type][0][0].append(x[e[0]])
edge_groups[e_type][0][1].append(y[e[0]])
edge_groups[e_type][0][2].append(z[e[0]])
edge_groups[e_type][3].append(e[2].get('weight',1.0) * edge_thickness)
#append end coordinates
id_end = len(edge_groups[e_type][0][0])
edge_groups[e_type][0][0].append(x[e[1]])
edge_groups[e_type][0][1].append(y[e[1]])
edge_groups[e_type][0][2].append(z[e[1]])
edge_groups[e_type][3].append(e[2].get('weight',1.0) * edge_thickness)
#append edge pair
edge_groups[e_type][1].append( (id_start,id_end) )
#make figure
mlab.figure(1,bgcolor=(1,1,1))
mlab.clf()
#make nodes
pts = mlab.points3d(x,y,z,nCols,scale_factor=node_size,scale_mode='none',resolution=20)
#make edges
for k in edge_groups.keys():
e = edge_groups[k]
#make start & end points
pts2 = mlab.points3d(e[0][0],e[0][1],e[0][2],e[3],scale_factor=edge_thickness,scale_mode='none',resolution=5)
##pts2.mlab_source.set(edge_groups[k][3])
#bind lines
pts2.mlab_source.dataset.lines = np.array(e[1])
#build geometry
tube = mlab.pipeline.tube(pts2,tube_radius=edge_thickness)
tube.filter.vary_radius = 'vary_radius_by_scalar'
tube.filter.radius_factor = 5
#tube.mlab_source.set(edge_groups[k][3] * edge_thickness)
mlab.pipeline.surface(tube,color=e[2])#color=(0.3,0.3,0.3))
#ends = mlab.points3d(e_x,e_y,e_z,np_c,scale_factor=edge_thickness,scale_mode='none',resolution=10)
#ends.mlab_source.dataset.lines = np.array(lines)
#tube = mlab.pipeline.tube(ends,tube_radius=edge_thickness)
#mlab.pipeline.surface(tube)
#pts.mlab_source.dataset.lines = np.array(G2.edges())
#tube = mlab.pipeline.tube(pts,tube_radius=edge_thickness)
#mlab.pipeline.surface(tube,color=np.array(eCols))#color=(0.3,0.3,0.3))
#write
if kwds.has_key('path'):
try:
from tvtk.api import write_data
            except ImportError:
                print("Warning: tvtk not installed - cannot write vtk file.")
                return
            write_data(pts.mlab_source.dataset, kwds['path'])
#show, if asked
if kwds.get('show',True):
mlab.show()
def draw_mayavi( self, **kwds ):
'''
Draws this network with mayavi. This requires the Mayavi python library
(mayavi.mlab)
**Optional Keywords**:
        - *node_size* = The size of the nodes. Default is 250.
        - *edge_thickness* = The thickness of the edges. Default is 10.
- *show* = If true, the model is displayed in the mayavi viewer after exporting. Default is True
- *path* = A path to save the mayavi vtk file to after generating it.
'''
NoddyTopology.draw_mayavi_graph(self.graph,**kwds)
def draw_3d_network( self, **kwds ):
'''
Draws a 3D network using Mayavi.
**Optional Keywords**:
        - *show* = If True, the 3D network is displayed immediately in an interactive Mayavi viewer. Default is True.
- *output* = If defined an image of the network is saved to this location.
        - *vtk* = A path to save a .vtk model of the network (for later viewing). If
          undefined a vtk is not saved (default)
'''
#import mayavi & networkx
import networkx as nx
try:
from mayavi import mlab
import numpy as np
        except ImportError:
print("Error drawing interactive network: Mayavi is not installed")
return
show = kwds.get("show",True)
outputname = kwds.get("output",'')
vtk = kwds.get("vtk",'')
#convert node labels to integers
G2 = nx.convert_node_labels_to_integers(self.graph)
#load positions
x = []
y = []
z = []
nCols = []
for n in G2.nodes():
x.append(G2.node[n]['centroid'][0])
y.append(G2.node[n]['centroid'][1])
z.append(G2.node[n]['centroid'][2])
nCols.append(int(G2.node[n]['lithology']))
#make figure
mlab.figure(1, bgcolor=(1,1,1))
mlab.clf()
pts = mlab.points3d(x,y,z,nCols, scale_factor=250, scale_mode='none',resolution=20)
pts.mlab_source.dataset.lines = np.array(G2.edges())
tube = mlab.pipeline.tube(pts,tube_radius=10)
mlab.pipeline.surface(tube,color=(0.3,0.3,0.3))
#show
if show:
mlab.show()
#save
if outputname != '':
mlab.savefig(outputname)
        if vtk != '':
            try:
                from tvtk.api import write_data
            except ImportError:
                print("Warning: tvtk not installed - cannot write vtk file.")
                return
            write_data(pts.mlab_source.dataset, vtk)
if __name__ == '__main__':
# some testing and debugging functions...
# os.chdir(r'/Users/Florian/git/pynoddy/sandbox')
# NO = NoddyOutput("strike_slip_out")
#os.chdir(r'C:\Users\Sam\Documents\Temporary Model Files')
os.chdir(r'C:\Users\Sam\OneDrive\Documents\Masters\Models\Primitive\Fold+Unconformity+Intrusion+Fault')
import cPickle as pk
st = pk.load(open('super_topology.pkl'))
NoddyTopology.draw_mayavi_graph(st)
#NO = "NFault/NFault"
#NO = 'Fold/Fold_Fault/fold_fault'
#NO = 'GBasin'
#create NoddyTopology
#geo = NoddyOutput(NO)
#topo = NoddyTopology(NO,load_attributes=True)
#topo.export_vtk(show=True)
#topo.draw_mayavi()
#topo_c = topo.collapse_topology()
#print len( topo_c.graph.edges() )
#print len( topo.graph.edges() )
#draw network
#topo.draw_network_image(dimension='3D',perspective=False,axis='x')
#topo.draw_3d_network(geology=geo,show=True,horizons=[4])
# topo.draw_adjacency_matrix()
# topo.draw_network_hive()
#struct = topo.collapse_stratigraphy()
#struct.draw_matrix_image()
#litho = topo.collapse_topology()
#litho.draw_matrix_image()
>>>>>>> refs/remotes/flohorovicic/master
#draw matrix
#topo.draw_matrix_image()
#draw 3D network
#topo.draw_3d_network()
|
Leguark/pynoddy
|
pynoddy/output.py
|
Python
|
gpl-2.0
| 86,360
|
[
"Mayavi",
"ParaView",
"VTK"
] |
3d72c4117dd18b6884251750746d809a6de710491f6ceebac8002cf0793d5654
|
from rdkit import RDConfig,RDRandom
import unittest
from rdkit.ML.InfoTheory import rdInfoTheory as rdit
from rdkit import DataStructs
import numpy
import os,cPickle
def feq(a,b,tol=1e-4):
return abs(a-b)<tol
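# e.g. feq(0.94029, 0.9403) is True; feq(0.94, 0.9403) is False (tol=1e-4)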
class TestCase(unittest.TestCase):
def setUp(self) :
pass
def test0GainFuns(self):
arr = numpy.array([9,5])
self.failUnless(feq(rdit.InfoEntropy(arr),0.9403))
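# 0.9403 bits = -(9/14)*log2(9/14) - (5/14)*log2(5/14), the Shannon entropy of the [9,5] split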
arr = numpy.array([9,9])
self.failUnless(feq(rdit.InfoEntropy(arr),1.0000))
arr = numpy.array([5,5])
self.failUnless(feq(rdit.InfoEntropy(arr),1.0000))
arr = numpy.array([5,0])
self.failUnless(feq(rdit.InfoEntropy(arr),0.0000))
arr = numpy.array([5,5,5])
self.failUnless(feq(rdit.InfoEntropy(arr),1.5850))
arr = numpy.array([2,5,5])
self.failUnless(feq(rdit.InfoEntropy(arr),1.4834))
mat2 = numpy.array([[6,2],[3,3]])
self.failUnless(feq(rdit.InfoGain(mat2),0.0481))
self.failUnless(feq(rdit.ChiSquare(mat2),0.9333))
mat3 = numpy.array([[1,1],[2,1]])
self.failUnless(feq(rdit.InfoGain(mat3),0.0200))
mat4 = numpy.array([[2,0],[1,2]])
self.failUnless(feq(rdit.InfoGain(mat4),0.4200))
mat5 = numpy.array([[0,0],[0,0]])
self.failUnless(feq(rdit.InfoGain(mat5),0.0000))
mat6 = numpy.array([[1,0],[1,0]])
self.failUnless(feq(rdit.InfoGain(mat6),0.0000))
def test1ranker(self) :
nbits = 100
ninst = 100
dm = 50
nact = 10
nc = 2
rn = rdit.InfoBitRanker(nbits, nc, rdit.InfoType.ENTROPY)
fps = []
na = 0
ni = 0
for i in range(ninst) :
v = DataStructs.SparseBitVect(nbits)
for j in range(dm):
v.SetBit(RDRandom.randrange(0,nbits))
if (RDRandom.randrange(0,ninst) < nact) :
na += 1
rn.AccumulateVotes(v, 1)
fps.append((v,1))
else:
ni += 1
rn.AccumulateVotes(v, 0)
fps.append((v,0))
res = rn.GetTopN(50)
rn2 = rdit.InfoBitRanker(nbits, nc)
for fp in fps:
rn2.AccumulateVotes(fp[0], fp[1])
res2 = rn2.GetTopN(50)
self.failUnless((res==res2).all())
rn3 = rdit.InfoBitRanker(nbits, nc, rdit.InfoType.BIASENTROPY)
#rn3.SetBiasList([0])
for fp in fps:
rn3.AccumulateVotes(fp[0], fp[1])
res3 = rn3.GetTopN(50)
for i in range(50) :
fan = res3[i,2]/na
fin = res3[i,3]/ni
self.failUnless(fan > fin)
def test2ranker(self) :
nbits = 100
ninst = 100
dm = 50
nact = 10
nc = 2
RDRandom.seed(23)
rn = rdit.InfoBitRanker(nbits, nc, rdit.InfoType.ENTROPY)
rn.SetMaskBits([63,70,15,25,10])
fps = []
na = 0
ni = 0
for i in range(ninst) :
v = DataStructs.SparseBitVect(nbits)
for j in range(dm):
v.SetBit(RDRandom.randrange(0,nbits))
if (RDRandom.randrange(0,ninst) < nact) :
na += 1
rn.AccumulateVotes(v, 1)
fps.append((v,1))
else:
ni += 1
rn.AccumulateVotes(v, 0)
fps.append((v,0))
res = rn.GetTopN(5)
ids = [int(x[0]) for x in res]
ids.sort()
self.failUnless(ids==[10,15,25,63,70])
try:
res = rn.GetTopN(10)
except:
ok = 1
else:
ok = 0
self.failUnless(ok)
def test3Issue140(self) :
nbits = 2
examples = [[0,0,0],[1,1,0],[0,0,1],[1,1,1]]
rn = rdit.InfoBitRanker(2,2,rdit.InfoType.ENTROPY)
for example in examples:
act = example.pop(-1)
bv = DataStructs.ExplicitBitVect(2)
for i in range(2):
bv[i] = example[i]
rn.AccumulateVotes(bv,act)
try:
res = rn.GetTopN(1)
except:
res = None
self.failUnless(res is not None)
def test4Issue237(self) :
inF = open(os.path.join(RDConfig.RDBaseDir,'Code','ML','InfoTheory','Wrap','testData','Issue237.pkl'),'rb')
examples,avail,bias,nB,nPoss = cPickle.load(inF)
ranker = rdit.InfoBitRanker(nB,nPoss,rdit.InfoType.BIASENTROPY)
ranker.SetMaskBits(avail)
for ex in examples:
ranker.AccumulateVotes(ex[1],ex[-1])
# this dumps core on linux if the bug isn't fixed:
v=ranker.GetTopN(1)
self.failUnless(int(v[0][0])==12)
if __name__ == '__main__':
unittest.main()
|
rdkit/rdkit-orig
|
Code/ML/InfoTheory/Wrap/testRanker.py
|
Python
|
bsd-3-clause
| 4,870
|
[
"RDKit"
] |
95bbf4caa46eb4e8feb30a35372497df680b053c7fc6c27d1566b18f03230328
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: systemd
author:
- "Ansible Core Team"
version_added: "2.2"
short_description: Manage services.
description:
- Controls systemd services on remote hosts.
options:
name:
required: true
description:
- Name of the service.
aliases: ['unit', 'service']
state:
required: false
default: null
choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
description:
- C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
C(restarted) will always bounce the service. C(reloaded) will always reload.
enabled:
required: false
choices: [ "yes", "no" ]
default: null
description:
- Whether the service should start on boot. B(At least one of state and enabled is required.)
masked:
required: false
choices: [ "yes", "no" ]
default: null
description:
- Whether the unit should be masked or not. A masked unit is impossible to start.
daemon_reload:
required: false
default: no
choices: [ "yes", "no" ]
description:
- Run daemon-reload before doing any other operations, to make sure systemd has read any changes.
aliases: ['daemon-reload']
notes:
- One option other than name is required.
requirements:
- A system managed by systemd
'''
EXAMPLES = '''
# Example action to start service httpd, if not running
- systemd: state=started name=httpd
# Example action to stop service cron on debian, if running
- systemd: name=cron state=stopped
# Example action to restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes
- systemd: state=restarted daemon_reload=yes name=crond
# Example action to reload service httpd, in all cases
- systemd: name=httpd state=reloaded
# Example action to enable service httpd and ensure it is not masked
- systemd:
name: httpd
enabled: yes
masked: no
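# Example action (a sketch grounded in the daemon_reload option above) to only
# force systemd to re-read its unit files, without changing any service state
- systemd:
name: httpd
daemon_reload: yes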
'''
RETURN = '''
status:
description: A dictionary with the key=value pairs returned from `systemctl show`
returned: success
type: complex
sample: {
"ActiveEnterTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ActiveEnterTimestampMonotonic": "8135942",
"ActiveExitTimestampMonotonic": "0",
"ActiveState": "active",
"After": "auditd.service systemd-user-sessions.service time-sync.target systemd-journald.socket basic.target system.slice",
"AllowIsolate": "no",
"Before": "shutdown.target multi-user.target",
"BlockIOAccounting": "no",
"BlockIOWeight": "1000",
"CPUAccounting": "no",
"CPUSchedulingPolicy": "0",
"CPUSchedulingPriority": "0",
"CPUSchedulingResetOnFork": "no",
"CPUShares": "1024",
"CanIsolate": "no",
"CanReload": "yes",
"CanStart": "yes",
"CanStop": "yes",
"CapabilityBoundingSet": "18446744073709551615",
"ConditionResult": "yes",
"ConditionTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ConditionTimestampMonotonic": "7902742",
"Conflicts": "shutdown.target",
"ControlGroup": "/system.slice/crond.service",
"ControlPID": "0",
"DefaultDependencies": "yes",
"Delegate": "no",
"Description": "Command Scheduler",
"DevicePolicy": "auto",
"EnvironmentFile": "/etc/sysconfig/crond (ignore_errors=no)",
"ExecMainCode": "0",
"ExecMainExitTimestampMonotonic": "0",
"ExecMainPID": "595",
"ExecMainStartTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ExecMainStartTimestampMonotonic": "8134990",
"ExecMainStatus": "0",
"ExecReload": "{ path=/bin/kill ; argv[]=/bin/kill -HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"ExecStart": "{ path=/usr/sbin/crond ; argv[]=/usr/sbin/crond -n $CRONDARGS ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"FragmentPath": "/usr/lib/systemd/system/crond.service",
"GuessMainPID": "yes",
"IOScheduling": "0",
"Id": "crond.service",
"IgnoreOnIsolate": "no",
"IgnoreOnSnapshot": "no",
"IgnoreSIGPIPE": "yes",
"InactiveEnterTimestampMonotonic": "0",
"InactiveExitTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"InactiveExitTimestampMonotonic": "8135942",
"JobTimeoutUSec": "0",
"KillMode": "process",
"KillSignal": "15",
"LimitAS": "18446744073709551615",
"LimitCORE": "18446744073709551615",
"LimitCPU": "18446744073709551615",
"LimitDATA": "18446744073709551615",
"LimitFSIZE": "18446744073709551615",
"LimitLOCKS": "18446744073709551615",
"LimitMEMLOCK": "65536",
"LimitMSGQUEUE": "819200",
"LimitNICE": "0",
"LimitNOFILE": "4096",
"LimitNPROC": "3902",
"LimitRSS": "18446744073709551615",
"LimitRTPRIO": "0",
"LimitRTTIME": "18446744073709551615",
"LimitSIGPENDING": "3902",
"LimitSTACK": "18446744073709551615",
"LoadState": "loaded",
"MainPID": "595",
"MemoryAccounting": "no",
"MemoryLimit": "18446744073709551615",
"MountFlags": "0",
"Names": "crond.service",
"NeedDaemonReload": "no",
"Nice": "0",
"NoNewPrivileges": "no",
"NonBlocking": "no",
"NotifyAccess": "none",
"OOMScoreAdjust": "0",
"OnFailureIsolate": "no",
"PermissionsStartOnly": "no",
"PrivateNetwork": "no",
"PrivateTmp": "no",
"RefuseManualStart": "no",
"RefuseManualStop": "no",
"RemainAfterExit": "no",
"Requires": "basic.target",
"Restart": "no",
"RestartUSec": "100ms",
"Result": "success",
"RootDirectoryStartOnly": "no",
"SameProcessGroup": "no",
"SecureBits": "0",
"SendSIGHUP": "no",
"SendSIGKILL": "yes",
"Slice": "system.slice",
"StandardError": "inherit",
"StandardInput": "null",
"StandardOutput": "journal",
"StartLimitAction": "none",
"StartLimitBurst": "5",
"StartLimitInterval": "10000000",
"StatusErrno": "0",
"StopWhenUnneeded": "no",
"SubState": "running",
"SyslogLevelPrefix": "yes",
"SyslogPriority": "30",
"TTYReset": "no",
"TTYVHangup": "no",
"TTYVTDisallocate": "no",
"TimeoutStartUSec": "1min 30s",
"TimeoutStopUSec": "1min 30s",
"TimerSlackNSec": "50000",
"Transient": "no",
"Type": "simple",
"UMask": "0022",
"UnitFileState": "enabled",
"WantedBy": "multi-user.target",
"Wants": "system.slice",
"WatchdogTimestampMonotonic": "0",
"WatchdogUSec": "0",
}
'''
import os
import glob
from ansible.module_utils.basic import AnsibleModule
# ===========================================
# Main control flow
def main():
# init
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True, type='str', aliases=['unit', 'service']),
state = dict(choices=[ 'started', 'stopped', 'restarted', 'reloaded'], type='str'),
enabled = dict(type='bool'),
masked = dict(type='bool'),
daemon_reload= dict(type='bool', default=False, aliases=['daemon-reload']),
),
supports_check_mode=True,
required_one_of=[['state', 'enabled', 'masked', 'daemon_reload']],
)
# initialize
systemctl = module.get_bin_path('systemctl')
unit = module.params['name']
rc = 0
out = err = ''
result = {
'name': unit,
'changed': False,
'status': {},
}
#TODO: check if service exists
(rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))
if rc != 0:
module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, unit, err))
# load return of systemctl show into dictionary for easy access and return
k = None
multival = []
for line in out.split('\n'): # systemd can have multiline values delimited with {}
if line.strip():
if k is None:
if '=' in line:
k,v = line.split('=', 1)
if v.lstrip().startswith('{'):
if not v.rstrip().endswith('}'):
multival.append(line)
continue
result['status'][k] = v.strip()
k = None
else:
if line.rstrip().endswith('}'):
result['status'][k] = '\n'.join(multival).strip()
multival = []
k = None
else:
multival.append(line)
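# For example, a value that opens with '{' but does not close it on the same
# line (systemd wraps long Exec* entries this way) is collected in multival
# until the line ending in '}' arrives, then stored as a single entry.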
if 'LoadState' in result['status'] and result['status']['LoadState'] == 'not-found':
module.fail_json(msg='Could not find the requested service "%r": %s' % (unit, err))
elif 'LoadError' in result['status']:
module.fail_json(msg="Failed to get the service status '%s': %s" % (unit, result['status']['LoadError']))
# Run daemon-reload first, if requested
if module.params['daemon_reload']:
(rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
if rc != 0:
module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))
# mask/unmask the service, if requested
if module.params['masked'] is not None:
masked = (result['status']['LoadState'] == 'masked')
# Change?
if masked != module.params['masked']:
result['changed'] = True
if module.params['masked']:
action = 'mask'
else:
action = 'unmask'
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
# Enable/disable service startup at boot if requested
if module.params['enabled'] is not None:
# do we need to enable the service?
enabled = False
(rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
# check systemctl result or if it is a init script
initscript = '/etc/init.d/' + unit
if rc == 0 or (os.access(initscript, os.X_OK) and bool(glob.glob('/etc/rc?.d/S??' + unit))):
enabled = True
# default to current state
result['enabled'] = enabled
# Change enable/disable if needed
if enabled != module.params['enabled']:
result['changed'] = True
if module.params['enabled']:
action = 'enable'
else:
action = 'disable'
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
result['enabled'] = not enabled
if module.params['state'] is not None:
# default to desired state
result['state'] = module.params['state']
# What is current service state?
if 'ActiveState' in result['status']:
action = None
if module.params['state'] == 'started':
if result['status']['ActiveState'] != 'active':
action = 'start'
result['changed'] = True
elif module.params['state'] == 'stopped':
if result['status']['ActiveState'] == 'active':
action = 'stop'
result['changed'] = True
else:
action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
result['state'] = 'started'
result['changed'] = True
if action:
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
else:
# this should not happen?
module.fail_json(msg="Service is in unknown state", status=result['status'])
module.exit_json(**result)
if __name__ == '__main__':
main()
|
bsmr-ansible/ansible-modules-core
|
system/systemd.py
|
Python
|
gpl-3.0
| 13,997
|
[
"Brian"
] |
dbb0fc93131a9b660d51fd94289ce27b9ed3eb384c5dda4f74baef93725b0a69
|
"""
Takes blast output and reformats it to prepare for iTOL
"""
import os,sys
import argparse
import re
from collections import Counter
from accessionMap import AccessionGG
accReg = re.compile(r"([A-Z]+_\d+\.\d)")
def bacteriocinLengths(blastFile,lengthsOut,accTable):
lenCnts = Counter()
with open(blastFile,'r') as handle:
for ln in handle:
ln = ln.rstrip()
toks = ln.split('\t')
bacID,refID,st,end,start,gtype,seq = toks
st,end = int(st),int(end)
seq = seq.replace('-','')
accession = accReg.findall(refID)[0]
#gg = accTable.lookupGenbank(accession)
gg = accession
if gg is None: continue
if gg in lenCnts: #calculate average length
num,denom = lenCnts[gg]
num+=len(seq)
denom +=1
lenCnts[gg] = (num,denom)
else:
lenCnts[gg] = (len(seq),1)
lenHandle = open(lengthsOut,'w')
for accum,frac in lenCnts.items():
num,denom = frac
average = num/denom
lenHandle.write("%s\t%d\n"%(accum,average))
lenHandle.close()
def bacteriocinCounts(blastFile,countsOut,accTable):
bacCnts = Counter()
with open(blastFile,'r') as handle:
for ln in handle:
ln = ln.rstrip()
toks = ln.split('\t')
bacID,refID,st,end,start,gtype,seq = toks
st,end = int(st),int(end)
seq = seq.replace('-','')
accession = accReg.findall(refID)[0]
#gg = accTable.lookupGenbank(accession)
gg = accession
if gg is None: continue
bacCnts[gg] +=1
bacHandle = open(countsOut,'w')
for accum,count in bacCnts.items():
bacHandle.write("%s\t%d\n"%(accum,count))
bacHandle.close()
def anchorgeneCounts(anchorGeneFile,countsOut,accTable):
anchorCnts = Counter()
with open(anchorGeneFile,'r') as handle:
for ln in handle:
ln = ln.rstrip()
toks = ln.split('\t')
refID = toks[1]
accession = accReg.findall(refID)[0]
#gg = accTable.lookupGenbank(accession)
gg = accession
if gg is None: continue
anchorCnts[gg] +=1
anchorHandle = open(countsOut,'w')
for accum,count in anchorCnts.items():
anchorHandle.write("%s\t%d\n"%(accum,count))
anchorHandle.close()
if __name__=="__main__":
parser = argparse.ArgumentParser(description=\
'Format Blast output for iTOL visualization ')
parser.add_argument(\
'--accession-table', type=str, required=True,
help='A table for converting accession IDs into green gene ids')
parser.add_argument(\
'--blast-input', type=str, required=True,
help='Blast tab output from bacteriocin.py')
parser.add_argument(\
'--anchor-genes', type=str, required=False,
help='Anchor genes from genbank files')
parser.add_argument(\
'--lengths', type=str, required=False,
help='Average bacteriocin length for each species')
parser.add_argument(\
'--bacteriocin-counts', type=str, required=False,
help='Number of bacteriocin for each species')
parser.add_argument(\
'--anchor-gene-counts', type=str, required=False,
help='Number of bacteriocin for each species')
args = parser.parse_args()
accTable = AccessionGG(args.accession_table)#maps accession ids to green gene ids
bacteriocinLengths(args.blast_input,args.lengths,accTable)
bacteriocinCounts(args.blast_input,args.bacteriocin_counts,accTable)
anchorgeneCounts(args.anchor_genes,args.anchor_gene_counts,accTable)
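#Example invocation (sketch; the file names here are hypothetical):
# python phyloformat.py --accession-table gg_accessions.txt \
#   --blast-input bacteriocins.blast.tab --lengths lengths.txt \
#   --bacteriocin-counts bac_counts.txt --anchor-genes anchors.tab \
#   --anchor-gene-counts anchor_counts.txt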
|
idoerg/BOA
|
src/format/phyloformat.py
|
Python
|
gpl-3.0
| 3,843
|
[
"BLAST"
] |
023ec0f813491ce0a86727ffd85a662a867ff914b4d8a7c48280a235de708740
|
"""
Test the command line script dials.scale for successful completion.
"""
from __future__ import annotations
import json
import platform
import sys
import procrunner
import pytest
import iotbx.merging_statistics
import iotbx.mtz
from cctbx import uctbx
from dxtbx.model import Beam, Crystal, Detector, Experiment, Goniometer, Scan
from dxtbx.model.experiment_list import ExperimentList
from dxtbx.serialize import load
from libtbx import phil
from dials.algorithms.scaling.algorithm import ScalingAlgorithm, prepare_input
from dials.array_family import flex
from dials.command_line import merge, report, scale
from dials.util.options import ArgumentParser
def run_one_scaling(working_directory, argument_list):
"""Run the dials.scale algorithm."""
command = ["dials.scale"] + argument_list
print(command)
result = procrunner.run(command, working_directory=working_directory)
print(result.stderr)
assert not result.returncode and not result.stderr
assert (working_directory / "scaled.expt").is_file()
assert (working_directory / "scaled.refl").is_file()
assert (working_directory / "dials.scale.html").is_file()
table = flex.reflection_table.from_file(working_directory / "scaled.refl")
assert "inverse_scale_factor" in table
assert "inverse_scale_factor_variance" in table
def get_merging_stats(
scaled_unmerged_mtz,
anomalous=False,
n_bins=20,
use_internal_variance=False,
eliminate_sys_absent=False,
data_labels=None,
):
"""Return a merging statistics result from an mtz file."""
i_obs = iotbx.merging_statistics.select_data(
str(scaled_unmerged_mtz), data_labels=data_labels
)
i_obs = i_obs.customized_copy(anomalous_flag=False, info=i_obs.info())
result = iotbx.merging_statistics.dataset_statistics(
i_obs=i_obs,
n_bins=n_bins,
anomalous=anomalous,
use_internal_variance=use_internal_variance,
eliminate_sys_absent=eliminate_sys_absent,
)
return result
def generated_exp(n=1):
"""Generate an experiment list with two experiments."""
experiments = ExperimentList()
exp_dict = {
"__id__": "crystal",
"real_space_a": [1.0, 0.0, 0.0],
"real_space_b": [0.0, 1.0, 0.0],
"real_space_c": [0.0, 0.0, 2.0],
"space_group_hall_symbol": " C 2y",
}
crystal = Crystal.from_dict(exp_dict)
scan = Scan(image_range=[0, 90], oscillation=[0.0, 1.0])
beam = Beam(s0=(0.0, 0.0, 1.01))
goniometer = Goniometer((1.0, 0.0, 0.0))
detector = Detector()
experiments.append(
Experiment(
beam=beam,
scan=scan,
goniometer=goniometer,
detector=detector,
crystal=crystal,
)
)
if n > 1:
for _ in range(n - 1):
experiments.append(
Experiment(
beam=beam,
scan=scan,
goniometer=goniometer,
detector=detector,
crystal=crystal,
)
)
return experiments
def generated_param():
"""Generate the default scaling parameters object."""
phil_scope = phil.parse(
"""
include scope dials.command_line.scale.phil_scope
""",
process_includes=True,
)
parser = ArgumentParser(phil=phil_scope, check_format=False)
parameters, _ = parser.parse_args(args=[], quick_parse=True, show_diff_phil=False)
return parameters
def generate_test_reflections():
"""Generate a small reflection table."""
reflections = flex.reflection_table()
reflections["intensity.sum.value"] = flex.double([1.0, 2.0, 3.0, 4.0])
reflections["intensity.sum.variance"] = flex.double([1.0, 2.0, 3.0, 4.0])
reflections["miller_index"] = flex.miller_index(
[(0, 0, 1), (0, 0, 1), (0, 0, 2), (0, 0, 2)]
)
reflections["xyzobs.px.value"] = flex.vec3_double(
[(0, 0, 1), (0, 0, 2), (0, 0, 3), (0, 0, 4)]
)
reflections["id"] = flex.int([0, 0, 0, 0])
return reflections
def generate_test_input(n=1):
"""Generate params, exps and refls."""
reflections = []
for _ in range(n):
reflections.append(generate_test_reflections())
return generated_param(), generated_exp(n), reflections
def test_scale_script_prepare_input():
"""Test prepare_input method of scaling script."""
# test the components of the scaling script directly with a test reflection
# table, experiments list and params.
params, exp, reflections = generate_test_input()
# try to pass in unequal number of reflections and experiments
reflections.append(generate_test_reflections())
with pytest.raises(ValueError):
_ = ScalingAlgorithm(params, exp, reflections)
params, exp, reflections = generate_test_input()
# Try to use use_datasets when identifiers are not set
params.dataset_selection.use_datasets = [0]
with pytest.raises(ValueError):
_ = ScalingAlgorithm(params, exp, reflections)
# Try to use exclude_datasets when identifiers are not set
params.dataset_selection.use_datasets = None
params.dataset_selection.exclude_datasets = [0]
with pytest.raises(ValueError):
_ = ScalingAlgorithm(params, exp, reflections)
# Now make two experiments with identifiers and select on them
params, exp, reflections = generate_test_input(n=2)
exp[0].identifier = "0"
reflections[0].experiment_identifiers()[0] = "0"
exp[1].identifier = "1"
reflections[1].experiment_identifiers()[0] = "1"
list1 = ExperimentList().append(exp[0])
list2 = ExperimentList().append(exp[1])
reflections[0].assert_experiment_identifiers_are_consistent(list1)
reflections[1].assert_experiment_identifiers_are_consistent(list2)
params.dataset_selection.use_datasets = [0]
params, exp, script_reflections = prepare_input(params, exp, reflections)
assert len(script_reflections) == 1
# Try again, this time excluding
params, exp, reflections = generate_test_input(n=2)
exp[0].identifier = "0"
reflections[0].experiment_identifiers()[0] = "0"
exp[1].identifier = "1"
reflections[1].experiment_identifiers()[1] = "1"
params.dataset_selection.exclude_datasets = [0]
params, exp, script_reflections = prepare_input(params, exp, reflections)
assert len(script_reflections) == 1
assert script_reflections[0] is reflections[1]
# Try having two unequal space groups
params, exp, reflections = generate_test_input(n=2)
exp_dict = {
"__id__": "crystal",
"real_space_a": [1.0, 0.0, 0.0],
"real_space_b": [0.0, 1.0, 0.0],
"real_space_c": [0.0, 0.0, 2.0],
"space_group_hall_symbol": " P 1",
}
crystal = Crystal.from_dict(exp_dict)
exp[0].crystal = crystal
with pytest.raises(ValueError):
_ = prepare_input(params, exp, reflections)
# Test cutting data
params, exp, reflections = generate_test_input(n=1)
params.cut_data.d_min = 1.5
params, _, script_reflections = prepare_input(params, exp, reflections)
r = script_reflections[0]
assert list(r.get_flags(r.flags.user_excluded_in_scaling)) == [
False,
False,
True,
True,
]
# Ensure that the user_excluded_in_scaling flags are reset before applying any new
# cutoffs by re-passing script_reflections to prepare_input
params.cut_data.d_min = None
params, _, script_reflections = prepare_input(params, exp, script_reflections)
r = script_reflections[0]
assert list(r.get_flags(r.flags.user_excluded_in_scaling)) == [
False,
False,
False,
False,
]
params.cut_data.d_max = 1.25
params, _, script_reflections = prepare_input(params, exp, reflections)
r = script_reflections[0]
assert list(r.get_flags(r.flags.user_excluded_in_scaling)) == [
True,
True,
False,
False,
]
params, exp, reflections = generate_test_input(n=1)
reflections[0]["partiality"] = flex.double([0.5, 0.8, 1.0, 1.0])
params.cut_data.partiality_cutoff = 0.75
_, __, script_reflections = prepare_input(params, exp, reflections)
r = script_reflections[0]
assert list(r.get_flags(r.flags.user_excluded_in_scaling)) == [
True,
False,
False,
False,
]
def test_targeted_scaling_against_mtz(dials_data, tmp_path):
"""Test targeted scaling against an mtz generated with dials.scale."""
location = dials_data("l_cysteine_4_sweeps_scaled", pathlib=True)
refl = location / "scaled_35.refl"
expt = location / "scaled_35.expt"
command = ["dials.scale", refl, expt, "unmerged_mtz=unmerged.mtz"]
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
assert (tmp_path / "scaled.expt").is_file()
assert (tmp_path / "scaled.refl").is_file()
assert (tmp_path / "unmerged.mtz").is_file()
refl = location / "scaled_30.refl"
expt = location / "scaled_30.expt"
target_mtz = tmp_path / "unmerged.mtz"
command = ["dials.scale", refl, expt, f"target_mtz={target_mtz}"]
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
assert (tmp_path / "scaled.expt").is_file()
assert (tmp_path / "scaled.refl").is_file()
expts = load.experiment_list(tmp_path / "scaled.expt", check_format=False)
assert len(expts) == 1
@pytest.mark.parametrize(
"option",
[
None,
"reflection_selection.method=random",
"reflection_selection.method=intensity_ranges",
"reflection_selection.method=use_all",
"intensity_choice=sum",
"intensity=profile",
],
)
def test_scale_single_dataset_with_options(dials_data, tmp_path, option):
"""Test different non-default command-line options with a single dataset."""
data_dir = dials_data("l_cysteine_dials_output", pathlib=True)
refl_1 = data_dir / "20_integrated.pickle"
expt_1 = data_dir / "20_integrated_experiments.json"
args = [refl_1, expt_1]
if option:
args.append(option)
run_one_scaling(tmp_path, args)
@pytest.fixture(scope="session")
def vmxi_protk_reindexed(dials_data, tmp_path_factory):
"""Reindex the protk data to be in the correct space group."""
location = dials_data("vmxi_proteinase_k_sweeps", pathlib=True)
command = [
"dials.reindex",
location / "experiments_0.json",
location / "reflections_0.pickle",
"space_group=P422",
]
tmp_path = tmp_path_factory.mktemp("vmxi_protk_reindexed")
procrunner.run(command, working_directory=tmp_path)
return tmp_path / "reindexed.expt", tmp_path / "reindexed.refl"
@pytest.mark.parametrize(
("options", "expected", "tolerances"),
[
(["error_model=None"], None, None),
(
["error_model=basic", "basic.minimisation=individual"],
(0.61, 0.049),
(0.05, 0.005),
),
(["error_model.basic.a=0.61"], (0.61, 0.049), (1e-6, 0.005)),
(["error_model.basic.b=0.049"], (0.61, 0.049), (0.05, 1e-6)),
(
["error_model.basic.b=0.02", "error_model.basic.a=1.5"],
(1.50, 0.02),
(1e-6, 1e-6),
),
(
["error_model=basic", "basic.minimisation=regression"],
(0.995, 0.051),
(0.05, 0.005),
),
(
["error_model.basic.a=0.99", "basic.minimisation=regression"],
(0.99, 0.051),
(1e-6, 0.005),
),
(
["error_model.basic.b=0.051", "basic.minimisation=regression"],
(1.0, 0.051),
(0.05, 1e-6),
),
],
)
def test_error_model_options(
vmxi_protk_reindexed, tmp_path, options, expected, tolerances
):
"""Test different non-default command-line options with a single dataset.
Current values taken at 14.11.19"""
expt_1, refl_1 = vmxi_protk_reindexed
args = [refl_1, expt_1] + list(options)
run_one_scaling(tmp_path, args)
expts = load.experiment_list(tmp_path / "scaled.expt", check_format=False)
config = expts[0].scaling_model.configdict
if not expected:
assert "error_model_parameters" not in config
else:
params = expts[0].scaling_model.configdict["error_model_parameters"]
print(list(params))
assert params[0] == pytest.approx(expected[0], abs=tolerances[0])
assert params[1] == pytest.approx(expected[1], abs=tolerances[1])
@pytest.mark.parametrize(
"option",
[
None,
"combine.joint_analysis=False",
"reflection_selection.method=quasi_random",
"reflection_selection.method=random",
"reflection_selection.method=intensity_ranges",
"reflection_selection.method=use_all",
"anomalous=True",
],
)
def test_scale_multiple_datasets_with_options(dials_data, tmp_path, option):
"""Test different non-defaul command-line options with multiple datasets."""
data_dir = dials_data("l_cysteine_dials_output", pathlib=True)
refl_1 = data_dir / "20_integrated.pickle"
expt_1 = data_dir / "20_integrated_experiments.json"
refl_2 = data_dir / "25_integrated.pickle"
expt_2 = data_dir / "25_integrated_experiments.json"
args = [refl_1, expt_1, refl_2, expt_2]
if option:
args.append(option)
run_one_scaling(tmp_path, args)
def test_scale_physical(dials_data, tmp_path):
"""Test standard scaling of one dataset."""
data_dir = dials_data("l_cysteine_dials_output", pathlib=True)
refl_1 = data_dir / "20_integrated.pickle"
expt_1 = data_dir / "20_integrated_experiments.json"
extra_args = [
"model=physical",
"merged_mtz=merged.mtz",
"error_model=None",
"intensity_choice=profile",
"unmerged_mtz=unmerged.mtz",
"crystal_name=foo",
"project_name=bar",
"use_free_set=1",
"outlier_rejection=simple",
"json=scaling.json",
]
run_one_scaling(tmp_path, [refl_1, expt_1] + extra_args)
unmerged_mtz = tmp_path / "unmerged.mtz"
merged_mtz = tmp_path / "merged.mtz"
assert unmerged_mtz.is_file()
assert merged_mtz.is_file()
assert (tmp_path / "scaling.json").is_file()
for f in (unmerged_mtz, merged_mtz):
mtz_obj = iotbx.mtz.object(str(f))
assert mtz_obj.crystals()[1].name() == "foo"
assert mtz_obj.crystals()[1].project_name() == "bar"
# Now inspect output, check it hasn't changed drastically, or if so verify
# that the new behaviour is more correct and update test accordingly.
result = get_merging_stats(unmerged_mtz)
print(result.overall.r_pim, result.overall.cc_one_half, result.overall.n_obs)
assert result.overall.r_pim < 0.0255 # at 30/01/19, value was 0.02410
assert result.overall.cc_one_half > 0.9955 # at 30/01/19, value was 0.9960
assert result.overall.n_obs > 2300 # at 30/01/19, was 2320
refls = flex.reflection_table.from_file(tmp_path / "scaled.refl")
n_scaled = refls.get_flags(refls.flags.scaled).count(True)
assert n_scaled == result.overall.n_obs
assert n_scaled == refls.get_flags(refls.flags.bad_for_scaling, all=False).count(
False
)
# Try running again with the merged.mtz as a target, to trigger the
# target_mtz option
extra_args.append("target_mtz=merged.mtz")
run_one_scaling(tmp_path, [refl_1, expt_1] + extra_args)
result = get_merging_stats(tmp_path / "unmerged.mtz")
assert (
result.overall.r_pim < 0.026
) # at 17/07/19 was 0.0256 after updates to merged mtz export
assert (
result.overall.cc_one_half > 0.9955
) # at 14/08/18, value was 0.999, at 07/02/19 was 0.9961
assert result.overall.n_obs > 2300 # at 07/01/19, was 2321, at 07/02/19 was 2321
# run again with the concurrent scaling option turned off and the 'standard'
# outlier rejection
extra_args = [
"model=physical",
"merged_mtz=merged.mtz",
"unmerged_mtz=unmerged.mtz",
"use_free_set=1",
"outlier_rejection=standard",
"refinement_order=consecutive",
"intensity_choice=combine",
]
run_one_scaling(tmp_path, [refl_1, expt_1] + extra_args)
# Now inspect output, check it hasn't changed drastically, or if so verify
# that the new behaviour is more correct and update test accordingly.
result = get_merging_stats(tmp_path / "unmerged.mtz")
assert (
result.overall.r_pim < 0.024
) # at 07/01/19, value was 0.02372, at 30/01/19 was 0.021498
assert (
result.overall.cc_one_half > 0.995
) # at 07/01/19, value was 0.99568, at 30/01/19 was 0.9961
assert result.overall.n_obs > 2300 # at 07/01/19, was 2336, at 22/05/19 was 2311
def test_scale_set_absorption_level(dials_data, tmp_path):
"""Test that the absorption parameters are correctly set for the absorption option."""
location = dials_data("l_cysteine_dials_output", pathlib=True)
refl = location / "20_integrated.pickle"
expt = location / "20_integrated_experiments.json"
# exclude a central region of data to force the failure of the full matrix
# minimiser due to indeterminate solution of the normal equations. In this
# case, the error should be caught and scaling can proceed.
command = [
"dials.scale",
refl,
expt,
"absorption_level=medium",
"unmerged_mtz=unmerged.mtz",
]
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
assert (tmp_path / "scaled.refl").is_file()
assert (tmp_path / "scaled.expt").is_file()
expts = load.experiment_list(tmp_path / "scaled.expt", check_format=False)
assert expts[0].scaling_model.configdict["lmax"] == 6
assert expts[0].scaling_model.configdict["abs_surface_weight"] == 5e4
abs_params = expts[0].scaling_model.components["absorption"].parameters
result = get_merging_stats(tmp_path / "unmerged.mtz")
assert result.overall.r_pim < 0.024
assert result.overall.cc_one_half > 0.995
assert result.overall.n_obs > 2300
## now scale again with different options, but fix the absorption surface to
# test the correction.fix option.
command = [
"dials.scale",
tmp_path / "scaled.refl",
tmp_path / "scaled.expt",
"error_model=None",
"physical.correction.fix=absorption",
]
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
assert (tmp_path / "scaled.refl").is_file()
assert (tmp_path / "scaled.expt").is_file()
expts = load.experiment_list(tmp_path / "scaled.expt", check_format=False)
new_abs_params = expts[0].scaling_model.components["absorption"].parameters
assert abs_params == new_abs_params
def test_scale_normal_equations_failure(dials_data, tmp_path):
location = dials_data("l_cysteine_dials_output", pathlib=True)
refl = location / "20_integrated.pickle"
expt = location / "20_integrated_experiments.json"
# exclude a central region of data to force the failure of the full matrix
# minimiser due to indeterminate solution of the normal equations. In this
# case, the error should be caught and scaling can proceed.
command = ["dials.scale", refl, expt, "exclude_images=800:1400"]
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
assert (tmp_path / "scaled.refl").is_file()
assert (tmp_path / "scaled.expt").is_file()
@pytest.mark.xfail(
sys.platform == "darwin" and platform.machine() == "arm64",
reason="CC1/2 somewhat differs for unknown reasons",
)
def test_scale_and_filter_image_group_mode(dials_data, tmp_path):
"""Test the scale and filter command line program."""
location = dials_data("multi_crystal_proteinase_k", pathlib=True)
command = [
"dials.scale",
"filtering.method=deltacchalf",
"stdcutoff=3.0",
"mode=image_group",
"max_percent_removed=6.0",
"max_cycles=6",
"d_min=2.0",
"group_size=5",
"unmerged_mtz=unmerged.mtz",
"scale_and_filter_results=analysis_results.json",
"error_model=None",
]
for i in [1, 2, 3, 4, 5, 7, 10]:
command.append(location / f"experiments_{i}.json")
command.append(location / f"reflections_{i}.pickle")
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
assert (tmp_path / "scaled.refl").is_file()
assert (tmp_path / "scaled.expt").is_file()
assert (tmp_path / "analysis_results.json").is_file()
result = get_merging_stats(tmp_path / "unmerged.mtz")
assert result.overall.r_pim < 0.135 # 12/07/21 was 0.129,
assert result.overall.cc_one_half > 0.94 # 12/07/21 was 0.953
assert result.overall.n_obs > 19000 # 12/07/21 was 19579
analysis_results = json.load((tmp_path / "analysis_results.json").open())
assert analysis_results["cycle_results"]["1"]["image_ranges_removed"] == [
[[16, 24], 4]
]
assert analysis_results["cycle_results"]["2"]["image_ranges_removed"] == [
[[21, 25], 5]
]
assert analysis_results["termination_reason"] == "max_percent_removed"
def test_scale_and_filter_termination(dials_data, tmp_path):
"""Test the scale and filter command line program,
when it terminates with a cycle of no reflections removed."""
location = dials_data("multi_crystal_proteinase_k", pathlib=True)
command = [
"dials.scale",
"filtering.method=deltacchalf",
"stdcutoff=2.0",
"max_percent_removed=40.0",
"max_cycles=8",
"d_min=2.0",
"unmerged_mtz=unmerged.mtz",
"scale_and_filter_results=analysis_results.json",
"error_model=None",
"full_matrix=False",
]
for i in [1, 2, 3, 4, 5, 7, 10]:
command.append(location / f"experiments_{i}.json")
command.append(location / f"reflections_{i}.pickle")
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
assert (tmp_path / "scaled.refl").is_file()
assert (tmp_path / "scaled.expt").is_file()
assert (tmp_path / "analysis_results.json").is_file()
analysis_results = json.load((tmp_path / "analysis_results.json").open())
assert analysis_results["termination_reason"] == "no_more_removed"
assert len(analysis_results["cycle_results"]["1"]["removed_datasets"]) == 1
assert analysis_results["cycle_results"]["2"]["removed_datasets"] == []
refls = flex.reflection_table.from_file(tmp_path / "scaled.refl")
assert len(set(refls["id"])) == 6
assert len(set(refls["imageset_id"])) == 6
def test_scale_and_filter_image_group_single_dataset(dials_data, tmp_path):
"""Test the scale and filter deltacchalf.mode=image_group on a
single data set."""
data_dir = dials_data("l_cysteine_dials_output", pathlib=True)
command = [
"dials.scale",
data_dir / "20_integrated.pickle",
data_dir / "20_integrated_experiments.json",
"filtering.method=deltacchalf",
"stdcutoff=3.0",
"mode=image_group",
"max_cycles=1",
"scale_and_filter_results=analysis_results.json",
"error_model=None",
]
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
assert (tmp_path / "scaled.refl").is_file()
assert (tmp_path / "scaled.expt").is_file()
assert (tmp_path / "analysis_results.json").is_file()
analysis_results = json.load((tmp_path / "analysis_results.json").open())
assert analysis_results["cycle_results"]["1"]["image_ranges_removed"] == []
assert len(analysis_results["cycle_results"].keys()) == 1
assert analysis_results["termination_reason"] == "no_more_removed"
def test_scale_dose_decay_model(dials_data, tmp_path):
"""Test the scale and filter command line program."""
location = dials_data("multi_crystal_proteinase_k", pathlib=True)
command = ["dials.scale", "d_min=2.0", "model=dose_decay"]
for i in [1, 2, 3, 4, 5, 7, 10]:
command.append(location / f"experiments_{i}.json")
command.append(location / f"reflections_{i}.pickle")
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
assert (tmp_path / "scaled.refl").is_file()
assert (tmp_path / "scaled.expt").is_file()
assert (tmp_path / "dials.scale.html").is_file()
expts = load.experiment_list(tmp_path / "scaled.expt", check_format=False)
assert expts[0].scaling_model.id_ == "dose_decay"
def test_scale_best_unit_cell_d_min(dials_data, tmp_path):
location = dials_data("multi_crystal_proteinase_k", pathlib=True)
best_unit_cell = uctbx.unit_cell((42, 42, 39, 90, 90, 90))
d_min = 2
command = [
"dials.scale",
"best_unit_cell=%g,%g,%g,%g,%g,%g" % best_unit_cell.parameters(),
f"d_min={d_min:g}",
"unmerged_mtz=unmerged.mtz",
"merged_mtz=merged.mtz",
]
for i in [1, 2, 3, 4, 5, 7, 10]:
command.append(location / f"experiments_{i}.json")
command.append(location / f"reflections_{i}.pickle")
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
assert (tmp_path / "scaled.refl").is_file()
assert (tmp_path / "scaled.expt").is_file()
assert (tmp_path / "unmerged.mtz").is_file()
assert (tmp_path / "merged.mtz").is_file()
stats = get_merging_stats(tmp_path / "unmerged.mtz")
assert stats.overall.d_min >= d_min
assert stats.crystal_symmetry.unit_cell().parameters() == pytest.approx(
best_unit_cell.parameters()
)
m = iotbx.mtz.object(str(tmp_path / "merged.mtz"))
for ma in m.as_miller_arrays():
assert best_unit_cell.parameters() == pytest.approx(ma.unit_cell().parameters())
def test_scale_and_filter_dataset_mode(dials_data, tmp_path):
"""Test the scale and filter command line program."""
location = dials_data("multi_crystal_proteinase_k", pathlib=True)
command = [
"dials.scale",
"filtering.method=deltacchalf",
"stdcutoff=1.0",
"mode=dataset",
"max_cycles=2",
"d_min=1.4",
"output.reflections=filtered.refl",
"scale_and_filter_results=analysis_results.json",
"unmerged_mtz=unmerged.mtz",
"error_model=None",
]
for i in [1, 2, 3, 4, 5, 7, 10]:
command.append(location / f"experiments_{i}.json")
command.append(location / f"reflections_{i}.pickle")
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
assert (tmp_path / "filtered.refl").is_file()
assert (tmp_path / "scaled.expt").is_file()
assert (tmp_path / "analysis_results.json").is_file()
analysis_results = json.load((tmp_path / "analysis_results.json").open())
assert analysis_results["cycle_results"]["1"]["removed_datasets"] == [
analysis_results["initial_expids_and_image_ranges"][4][0]
]
assert "expids_and_image_ranges" in analysis_results
def test_scale_array(dials_data, tmp_path):
"""Test a standard dataset - ideally needs a large dataset or full matrix
round may fail. Currently turning off absorption term to avoid
overparameterisation and failure of full matrix minimisation."""
data_dir = dials_data("l_cysteine_dials_output", pathlib=True)
refl = data_dir / "20_integrated.pickle"
expt = data_dir / "20_integrated_experiments.json"
extra_args = ["model=array", "array.absorption_correction=0", "full_matrix=0"]
run_one_scaling(tmp_path, [refl, expt] + extra_args)
def test_multi_scale(dials_data, tmp_path):
"""Test standard scaling of two datasets."""
data_dir = dials_data("l_cysteine_dials_output", pathlib=True)
refl_1 = data_dir / "20_integrated.pickle"
expt_1 = data_dir / "20_integrated_experiments.json"
refl_2 = data_dir / "25_integrated.pickle"
expt_2 = data_dir / "25_integrated_experiments.json"
extra_args = [
"unmerged_mtz=unmerged.mtz",
"error_model=None",
"intensity_choice=profile",
"outlier_rejection=simple",
]
run_one_scaling(tmp_path, [refl_1, refl_2, expt_1, expt_2] + extra_args)
# Now inspect output, check it hasn't changed drastically, or if so verify
# that the new behaviour is more correct and update test accordingly.
result = get_merging_stats(tmp_path / "unmerged.mtz")
expected_nobs = 5526 # 19/06/20
assert abs(result.overall.n_obs - expected_nobs) < 30
assert result.overall.r_pim < 0.0221 # at 22/10/18, value was 0.022037
assert result.overall.cc_one_half > 0.9975 # at 07/08/18, value was 0.99810
print(result.overall.r_pim)
print(result.overall.cc_one_half)
refls = flex.reflection_table.from_file(tmp_path / "scaled.refl")
n_scaled = refls.get_flags(refls.flags.scaled).count(True)
assert n_scaled == result.overall.n_obs
assert n_scaled == refls.get_flags(refls.flags.bad_for_scaling, all=False).count(
False
)
assert len(set(refls["id"])) == 2
assert len(set(refls["imageset_id"])) == 2
for id_ in range(2):
sel = refls["id"] == id_
assert set(refls["imageset_id"].select(sel)) == {id_}
# run again, optimising errors, and continuing from where last run left off.
extra_args = [
"error_model=basic",
"unmerged_mtz=unmerged.mtz",
"check_consistent_indexing=True",
]
run_one_scaling(tmp_path, ["scaled.refl", "scaled.expt"] + extra_args)
# Now inspect output, check it hasn't changed drastically, or if so verify
# that the new behaviour is more correct and update test accordingly.
# Note: error optimisation currently appears to give worse results here!
result = get_merging_stats(tmp_path / "unmerged.mtz")
expected_nobs = 5560 # 22/06/20
print(result.overall.r_pim)
print(result.overall.cc_one_half)
assert abs(result.overall.n_obs - expected_nobs) < 100
assert result.overall.r_pim < 0.016 # at #22/06/20, value was 0.015
assert result.overall.cc_one_half > 0.997 # at #22/06/20, value was 0.999
def test_multi_scale_individual_error_models(dials_data, tmp_path):
"""Test standard scaling of two datasets."""
data_dir = dials_data("l_cysteine_dials_output", pathlib=True)
refl_1 = data_dir / "20_integrated.pickle"
expt_1 = data_dir / "20_integrated_experiments.json"
refl_2 = data_dir / "25_integrated.pickle"
expt_2 = data_dir / "25_integrated_experiments.json"
extra_args = [
"unmerged_mtz=unmerged.mtz",
"error_model.grouping=individual",
"intensity_choice=profile",
]
run_one_scaling(tmp_path, [refl_1, refl_2, expt_1, expt_2] + extra_args)
# Now inspect output, check it hasn't changed drastically, or if so verify
# that the new behaviour is more correct and update test accordingly.
result = get_merging_stats(tmp_path / "unmerged.mtz")
expected_nobs = 5358 # 04/05/21
assert abs(result.overall.n_obs - expected_nobs) < 30
assert result.overall.r_pim < 0.015 # at 04/05/21, value was 0.013
assert result.overall.cc_one_half > 0.9975 # at 04/05/21, value was 0.999
# Test that different error models were determined for each sweep.
scaling_models = load.experiment_list(
tmp_path / "scaled.expt", check_format=False
).scaling_models()
params_1 = scaling_models[0].configdict["error_model_parameters"]
params_2 = scaling_models[1].configdict["error_model_parameters"]
assert params_1[0] != params_2[0]
assert params_1[1] != params_2[1]
def test_multi_scale_exclude_images(dials_data, tmp_path):
"""Test scaling of multiple dataset with image exclusion."""
data_dir = dials_data("l_cysteine_dials_output", pathlib=True)
refl_1 = data_dir / "20_integrated.pickle"
expt_1 = data_dir / "20_integrated_experiments.json"
refl_2 = data_dir / "25_integrated.pickle"
expt_2 = data_dir / "25_integrated_experiments.json"
# Expect this dataset to be given batches 1-1800 and 1901-3600
# Try excluding last two hundred batches
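# (exclude_images syntax: <dataset index>:<first image>:<last image>, inclusive,
# as reflected in the valid_image_range assertions below)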
extra_args = [
"error_model=None",
"intensity_choice=profile",
"outlier_rejection=simple",
"exclude_images=0:1601:1800",
"exclude_images=1:1501:1700",
]
run_one_scaling(tmp_path, [refl_1, refl_2, expt_1, expt_2] + extra_args)
refls = flex.reflection_table.from_file(tmp_path / "scaled.refl")
d1 = refls.select(refls["id"] == 0)
d2 = refls.select(refls["id"] == 1)
nd1_scaled = d1.get_flags(d1.flags.scaled).count(True)
# full sweep would have 2312, expect ~2060
assert nd1_scaled < 2100
assert nd1_scaled > 2000
nd2_scaled = d2.get_flags(d2.flags.scaled).count(True)
# full sweep would have 3210
assert nd2_scaled < 2900
assert nd2_scaled > 2800
scaling_models = load.experiment_list(
tmp_path / "scaled.expt", check_format=False
).scaling_models()
assert scaling_models[0].configdict["valid_image_range"] == [1, 1600]
assert scaling_models[1].configdict["valid_image_range"] == [1, 1500]
assert scaling_models[0].configdict["valid_osc_range"] == pytest.approx([0, 160.0])
assert scaling_models[1].configdict["valid_osc_range"] == pytest.approx([-145.0, 5.0])
# Run again, excluding some more from one run.
extra_args = [
"error_model=None",
"intensity_choice=profile",
"outlier_rejection=simple",
"exclude_images=0:1401:1600",
]
run_one_scaling(tmp_path, ["scaled.refl", "scaled.expt"] + extra_args)
scaling_models = load.experiment_list(
tmp_path / "scaled.expt", check_format=False
).scaling_models()
assert scaling_models[0].configdict["valid_image_range"] == [1, 1400]
assert scaling_models[1].configdict["valid_image_range"] == [1, 1500]
assert scaling_models[0].configdict["valid_osc_range"] == pytest.approx([0, 140.0])
assert scaling_models[1].configdict["valid_osc_range"] == pytest.approx([-145.0, 5.0])
refls = flex.reflection_table.from_file(tmp_path / "scaled.refl")
d1 = refls.select(refls["id"] == 0)
d2 = refls.select(refls["id"] == 1)
nd1_scaled = d1.get_flags(d1.flags.scaled).count(True)
# full sweep would have 2312, expect 1800
assert nd1_scaled < 1850
assert nd1_scaled > 1750
nd2_scaled = d2.get_flags(d2.flags.scaled).count(True)
# full sweep would have 3210, expect ~2850
assert nd2_scaled < 2900
assert nd2_scaled > 2800
def test_scale_handle_bad_dataset(dials_data, tmp_path):
"""Set command line parameters such that one dataset does not meet the
criteria for inclusion in scaling. Check that this is excluded and the
scaling job completes without failure."""
location = dials_data("multi_crystal_proteinase_k", pathlib=True)
command = [
"dials.scale",
"reflection_selection.method=intensity_ranges",
"Isigma_range=90.0,1000",
]
for i in range(1, 6):
command.append(location / f"experiments_{i}.json")
command.append(location / f"reflections_{i}.pickle")
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
reflections = flex.reflection_table.from_file(tmp_path / "scaled.refl")
expts = load.experiment_list(tmp_path / "scaled.expt", check_format=False)
assert len(expts) == 4
assert len(reflections.experiment_identifiers()) == 4
def test_targeted_scaling(dials_data, tmp_path):
"""Test the targeted scaling workflow."""
location = dials_data("l_cysteine_4_sweeps_scaled", pathlib=True)
target_refl = location / "scaled_35.refl"
target_expt = location / "scaled_35.expt"
data_dir = dials_data("l_cysteine_dials_output", pathlib=True)
refl_1 = data_dir / "20_integrated.pickle"
expt_1 = data_dir / "20_integrated_experiments.json"
refl_2 = data_dir / "25_integrated.pickle"
expt_2 = data_dir / "25_integrated_experiments.json"
# Do targeted scaling, use this as a chance to test the KB model as well.
extra_args = ["model=KB"]
run_one_scaling(tmp_path, [target_refl, refl_1, target_expt, expt_1] + extra_args)
scaled_exp = tmp_path / "scaled.expt"
scaled_refl = tmp_path / "scaled.refl"
experiments_list = load.experiment_list(scaled_exp, check_format=False)
assert len(experiments_list.scaling_models()) == 2
assert experiments_list.scaling_models()[0].id_ == "physical"
assert experiments_list.scaling_models()[1].id_ == "KB"
extra_args = ["model=KB", "only_target=True"]
run_one_scaling(tmp_path, [refl_2, scaled_refl, expt_2, scaled_exp] + extra_args)
experiments_list = load.experiment_list(
tmp_path / "scaled.expt", check_format=False
)
assert len(experiments_list.scaling_models()) == 1
assert experiments_list.scaling_models()[0].id_ == "KB"
def test_shared_absorption_surface(dials_data, tmp_path):
data_dir = dials_data("l_cysteine_dials_output", pathlib=True)
refl_1 = data_dir / "20_integrated.pickle"
expt_1 = data_dir / "20_integrated_experiments.json"
refl_2 = data_dir / "25_integrated.pickle"
expt_2 = data_dir / "25_integrated_experiments.json"
# Scale the two datasets with a shared absorption surface.
extra_args = ["share.absorption=True"]
run_one_scaling(tmp_path, [refl_1, expt_1, refl_2, expt_2] + extra_args)
expts = load.experiment_list(tmp_path / "scaled.expt", check_format=False)
assert (
expts.scaling_models()[0].components["absorption"].parameters
== expts.scaling_models()[1].components["absorption"].parameters
)
assert (
expts.scaling_models()[0].components["absorption"].parameter_esds
== expts.scaling_models()[1].components["absorption"].parameter_esds
)
def test_incremental_scale_workflow(dials_data, tmp_path):
"""Try scale, cosym, scale, cosym, scale."""
data_dir = dials_data("l_cysteine_dials_output", pathlib=True)
refl_1 = data_dir / "20_integrated.pickle"
expt_1 = data_dir / "20_integrated_experiments.json"
run_one_scaling(tmp_path, [refl_1, expt_1])
# test order also - first new file before scaled
refl_2 = data_dir / "25_integrated.pickle"
expt_2 = data_dir / "25_integrated_experiments.json"
command = ["dials.cosym", refl_2, expt_2, "scaled.refl", "scaled.expt"]
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
assert (tmp_path / "symmetrized.expt").is_file()
assert (tmp_path / "symmetrized.refl").is_file()
# now try scaling again to check everything is okay
run_one_scaling(tmp_path, ["symmetrized.refl", "symmetrized.expt"])
# test order also - first scaled file then new file
refl_2 = data_dir / "30_integrated.pickle"
expt_2 = data_dir / "30_integrated_experiments.json"
command = [
"dials.cosym",
"scaled.refl",
"scaled.expt",
refl_1,
expt_2,
"output.reflections=symmetrized.refl",
"output.experiments=symmetrized.expt",
]
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
assert (tmp_path / "symmetrized.expt").is_file()
assert (tmp_path / "symmetrized.refl").is_file()
# now try scaling again to check everything is okay
run_one_scaling(tmp_path, ["symmetrized.refl", "symmetrized.expt"])
@pytest.mark.parametrize(
("mode", "parameter", "parameter_values"),
[
("single", None, None),
("multi", "physical.absorption_correction", None),
("multi", "model", "physical array"),
],
)
def test_scale_cross_validate(dials_data, tmp_path, mode, parameter, parameter_values):
"""Test standard scaling of one dataset."""
data_dir = dials_data("l_cysteine_dials_output", pathlib=True)
refl = data_dir / "20_integrated.pickle"
expt = data_dir / "20_integrated_experiments.json"
extra_args = [
f"cross_validation_mode={mode}",
"nfolds=2",
"full_matrix=0",
"error_model=None",
]
if parameter:
extra_args += [f"parameter={parameter}"]
if parameter_values:
extra_args += [f"parameter_values={parameter_values}"]
command = ["dials.scale", refl, expt] + extra_args
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
def test_few_reflections(dials_data):
"""
Test that dials.scale does something sensible if given few reflections.
Use some example integrated data generated from two ten-image 1° sweeps. These
each contain a few dozen integrated reflections.
Also test the behaviour of dials.merge and dials.report on the output.
By suppressing the output from dials.scale and dials.report, we obviate the need to
run in a temporary directory.
Args:
dials_data: DIALS custom Pytest fixture for access to test data.
"""
# Get the input experiment lists and reflection tables.
data_dir = dials_data("l_cysteine_dials_output", pathlib=True)
experiments = ExperimentList.from_file(data_dir / "11_integrated.expt")
experiments.extend(ExperimentList.from_file(data_dir / "23_integrated.expt"))
refls = "11_integrated.refl", "23_integrated.refl"
reflections = [flex.reflection_table.from_file(data_dir / refl) for refl in refls]
# Get and use the default parameters for dials.scale, suppressing HTML output.
scale_params = scale.phil_scope.fetch(
source=phil.parse("output.html=None")
).extract()
# Does what it says on the tin. Run scaling.
scaled_expt, scaled_refl = scale.run_scaling(scale_params, experiments, reflections)
# Get and use the default parameters for dials.merge.
merge_params = merge.phil_scope.fetch(source=phil.parse("")).extract()
# Run dials.merge on the scaling output.
merge.merge_data_to_mtz(merge_params, scaled_expt, [scaled_refl])
# Get and use the default parameters for dials.report, suppressing HTML output.
report_params = report.phil_scope.fetch(
source=phil.parse("output.html=None")
).extract()
# Get an Analyser object, which does the dials.report stuff.
analyse = report.Analyser(
report_params,
grid_size=report_params.grid_size,
centroid_diff_max=report_params.centroid_diff_max,
)
# Run dials.report on scaling output.
analyse(scaled_refl, scaled_expt)
|
dials/dials
|
tests/algorithms/scaling/test_scale.py
|
Python
|
bsd-3-clause
| 43,030
|
[
"CRYSTAL"
] |
4a238730c9486be2c935042864afc898811128695795a549086c86eb8266e5af
|
#!/usr/bin/env python3
"""
A recursive solution to the Pardoner's Puzzle from
http://math-fail.com/2015/02/the-pardoners-puzzle.html
Copyright (c) Vedran Šego <vsego@vsego.org>
"""
import sys
n = 8
pos = (6, 2) # starting position (col, row)
max_steps = 15
# The list of pairs of neighbouring towns that are not directly connected.
# Each missing connection is a set of two neighbour tuples.
no_connection = [ {(7,3),(7,4)} ]
# For testing
#n = 4
#pos = (0, 0) # starting position (col, row)
#max_steps = 7
#no_connection = [ {(0,1),(0,2)} ]
# Remember all the coordinates, each associated with
# the step in which it is visited (zero == "not visited")
towns = { (i, j): 0 for i in range(n) for j in range(n) }
towns[pos] = 1
n2 = n*n
def towns_bad(towns, steps_left):
"""
Returns `True` if a solution is obviously impossible, i.e.,
    1. if there are more zeros than can be visited, no matter how they are arranged, or
2. if the zeros are split into two unconnected regions.
"""
chk = [ pos for pos, steps in towns.items() if steps == 0 ]
# Are there too many zeros? (this could be computed directly)
d = n
max_possible = 0
for k in range(steps_left+1):
max_possible += d
if k % 2 == 0:
d -= 1
if len(chk) > max_possible:
return True
# Are all the zeros connected?
if not chk:
return False
removed = [ chk[0] ]
del chk[0]
while removed:
pos = removed.pop()
for i in range(-1, 2, 2):
for neighbour in [ (pos[0]+i, pos[1]), (pos[0], pos[1]+i) ]:
if neighbour in chk:
removed.append(neighbour)
chk.remove(neighbour)
return bool(chk)
def try_dir(direction, pos, steps_left, towns, no_connection, visited):
"""
    Try to visit all the fields, going from position `pos` in direction `direction`.
"""
org_pos = pos
while True:
new_pos = (pos[0] + direction[0], pos[1] + direction[1])
if 0 <= new_pos[0] < n and 0 <= new_pos[1] < n and towns[new_pos] == 0:
next_step = { pos, new_pos }
if any( nc == next_step for nc in no_connection):
break
pos = new_pos
visited += 1
towns[pos] = visited
if towns_bad(towns, steps_left):
#print("Stopped with %d steps left." % steps_left)
break
else:
break
if pos != org_pos:
#one_step(pos, steps_left, towns, no_connection, visited)
while pos != org_pos:
if pos in towns:
one_step(pos, steps_left, towns, no_connection, visited, not_there=direction)
towns[pos] = 0
visited -= 1
pos = (pos[0] - direction[0], pos[1] - direction[1])
def print_towns(towns):
"""
Print the order in which the towns are visited.
"""
for i in range(n):
for j in range(n):
print("{:3d}".format(towns[(i,j)]), end="")
print()
def one_step(pos, steps_left, towns, no_connection, visited, not_there = None):
"""
    Try to visit everything you can, starting from position `pos`,
    in at most `steps_left` further steps,
    skipping the connections listed in `no_connection`,
    and never moving along `not_there` or its opposite direction.
"""
#print("\nSteps left: %d\n" % steps_left); print_towns(towns); print()
if visited >= n2:
print_towns(towns)
sys.exit(0)
if steps_left <= 0:
#print_towns(towns); print()
return
steps_left -= 1
if not_there is None:
for i in range(-1, 2, 2):
try_dir((i,0), pos, steps_left, towns, no_connection, visited)
try_dir((0,i), pos, steps_left, towns, no_connection, visited)
else:
if not_there[0] == 0:
for i in range(-1, 2, 2):
try_dir((i,0), pos, steps_left, towns, no_connection, visited)
else:
for i in range(-1, 2, 2):
try_dir((0,i), pos, steps_left, towns, no_connection, visited)
one_step(pos, max_steps, towns, no_connection, 1)
|
vsego/misc
|
pardoners_puzzle.py
|
Python
|
mit
| 4,102
|
[
"VisIt"
] |
935b0af88a5f0804673b9d08da87d6a0e0d8e2ffa28a9ccc56f3473d7205957e
|
# smCounter: barcode-aware variant caller
# Chang Xu. 23May2016; online version of 10APR2017
import os
import datetime
import subprocess
import math
import operator
import argparse
import random
import multiprocessing
import traceback
from collections import defaultdict
# 3rd party modules
import pysam
import scipy.stats
# global constants (note that multiprocessing imports this .py file, so do not do much work outside functions)
pcr_error = 1e-6
pcr_no_error = 1.0 - 3e-5
atgc = ('A', 'T', 'G', 'C')
#-------------------------------------------------------------------------------------
# function to calculate posterior probability for each barcode.
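# Input sketch (inferred from how vc() builds bcDict; a hedged illustration):
#   oneBC maps readid -> [[base, errorProb, pairOrder]], e.g.
#   oneBC = {'r1': [['A', 0.001, 'Paired']], 'r2': [['A', 0.010, 'R1']]}
# The returned dict maps each candidate allele to its posterior probability,
# normalised to sum to 1, or all zeros when the MT is dropped (len(oneBC) <= mtDrop).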
#-------------------------------------------------------------------------------------
def calProb(oneBC, mtDrop):
outDict = defaultdict(float)
if len(oneBC) <= mtDrop:
outDict['A'] = 0.0
outDict['T'] = 0.0
outDict['G'] = 0.0
outDict['C'] = 0.0
else:
prodP = defaultdict(float)
cnt = defaultdict(int)
tmpOut = defaultdict(float)
rightP = 1.0
sumP = 0.0
pcrP = defaultdict(float)
# set ATGC count = 0
for char in atgc:
cnt[char] = 0
      # get unique bases. Make sure uniqBaseList contains 4 members, unless the barcode already contains at least 4 bases/indels
# NOTE: existBase contains only the alleles, including indels, with at least 1 read in the MT. uniqBase may contain more.
existBase = set([info[0][0] for info in oneBC.values()])
uniqBase = set([info[0][0] for info in oneBC.values()])
if len(uniqBase) < 4:
for b in atgc:
if b not in uniqBase:
uniqBase.add(b)
if len(uniqBase) == 4:
break
uniqBaseList = list(uniqBase)
# set initial value in prodP to be 1.0
for b in uniqBaseList:
prodP[b] = 1.0
for info in oneBC.values():
base = info[0][0]
# prob is the error probability
prob = info[0][1]
pairOrder = info[0][2]
if pairOrder != 'Paired':
prob = 0.1
# prodP is the probability of no sequencing error for each base
prodP[base] *= 1.0 - prob
cnt[base] += 1
for char in list(uniqBase - set([base])):
prodP[char] *= prob
         # rightP is the probability that there is no sequencing error, hence the alternative alleles come from PCR error
rightP *= 1.0 - prob
for char in uniqBaseList:
ratio = (cnt[char] + 0.5) / (len(oneBC) + 0.5 * len(uniqBaseList))
pcrP[char] = 10.0 ** (-6.0 * ratio)
for key in prodP.keys():
if key in existBase:
# tmpOut[key] is P(BC|key), or the likelihood of all reads in the barcode, given the true allele is *key*.
tmpOut[key] = pcr_no_error * prodP[key] + rightP * min([pcrP[char] for char in pcrP.keys() if char != key])
else:
tmpOut[key] = rightP
for char in existBase:
if char != key:
tmpOut[key] *= pcrP[char]
sumP += tmpOut[key]
      for key in prodP.keys():
outDict[key] = 0.0 if sumP <= 0 else tmpOut[key] / sumP
return outDict
#-------------------------------------------------------------------------------------
# convert variant type, reference base, variant base to output format
#-------------------------------------------------------------------------------------
def convertToVcf(origRef,origAlt):
vtype = '.'
ref = origRef
alt = origAlt
if len(origAlt) == 1:
vtype = 'SNP'
elif origAlt == 'DEL':
vtype = 'SDEL'
else:
vals = origAlt.split('|')
if vals[0] in ('DEL', 'INS'):
vtype = 'INDEL'
ref = vals[1]
alt = vals[2]
return (ref, alt, vtype)
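# Example mappings, traced from the branches above (values are illustrative):
#   convertToVcf('A', 'T')        -> ('A', 'T', 'SNP')
#   convertToVcf('A', 'DEL')      -> ('A', 'DEL', 'SDEL')
#   convertToVcf('A', 'INS|A|AT') -> ('A', 'AT', 'INDEL')   # insertion of T
#   convertToVcf('A', 'DEL|AT|A') -> ('AT', 'A', 'INDEL')   # deletion of T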
#-------------------------------------------------------------------------------------
# check if a locus is within or flanked by homopolymer region and/or low complexity region
#-------------------------------------------------------------------------------------
def isHPorLowComp(chrom, pos, length, refb, altb, refGenome):
# get reference bases for interval [pos-length, pos+length]
refs = pysam.FastaFile(refGenome)
chromLength = refs.get_reference_length(chrom)
pos0 = int(pos) - 1
Lseq = refs.fetch(reference=chrom, start=max(0,pos0-length) , end=pos0).upper()
Rseq_ref = refs.fetch(reference=chrom, start= pos0+len(refb) , end=min(pos0+len(refb)+length,chromLength)).upper()
Rseq_alt = refs.fetch(reference=chrom, start= pos0+len(altb) , end=min(pos0+len(altb)+length,chromLength)).upper()
refSeq = Lseq + refb + Rseq_ref
altSeq = Lseq + altb + Rseq_alt
# check homopolymer
homoA = refSeq.find('A'*length) >= 0 or altSeq.find('A'*length) >= 0
homoT = refSeq.find('T'*length) >= 0 or altSeq.find('T'*length) >= 0
homoG = refSeq.find('G'*length) >= 0 or altSeq.find('G'*length) >= 0
homoC = refSeq.find('C'*length) >= 0 or altSeq.find('C'*length) >= 0
homop = homoA or homoT or homoG or homoC
   # check low complexity -- window length is 2 * homopolymer length; flag if any 2 nucleotides make up >= 99% of the window
len2 = 2 * length
LseqLC = refs.fetch(reference=chrom, start=max(0,pos0-len2) , end=pos0).upper()
Rseq_refLC = refs.fetch(reference=chrom, start= pos0+len(refb), end=min(pos0+len(refb)+len2,chromLength)).upper() # ref seq
Rseq_altLC = refs.fetch(reference=chrom, start= pos0+len(altb), end=min(pos0+len(altb)+len2,chromLength)).upper() # alt seq
refSeqLC = LseqLC + refb + Rseq_refLC
altSeqLC = LseqLC + altb + Rseq_altLC
lowcomp = False
# Ref seq
totalLen = len(refSeqLC)
for i in range(totalLen-len2):
subseq = refSeqLC[i:(i+len2)]
countA = subseq.count('A')
countT = subseq.count('T')
countG = subseq.count('G')
countC = subseq.count('C')
sortedCounts = sorted([countA, countT, countG, countC], reverse=True)
top2Freq = 1.0 * (sortedCounts[0] + sortedCounts[1]) / len2
if top2Freq >= 0.99:
lowcomp = True
break
# If ref seq is not LC, check alt seq
if not lowcomp:
totalLen = len(altSeqLC)
for i in range(totalLen-len2):
subseq = altSeqLC[i:(i+len2)]
countA = subseq.count('A')
countT = subseq.count('T')
countG = subseq.count('G')
countC = subseq.count('C')
sortedCounts = sorted([countA, countT, countG, countC], reverse=True)
top2Freq = 1.0 * (sortedCounts[0] + sortedCounts[1]) / len2
if top2Freq >= 0.99:
lowcomp = True
break
return (homop, lowcomp)
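# Illustrative behaviour (a hedged sketch; sequences are made up): with
# length = 8, a reference context containing 'AAAAAAAA' sets homop = True,
# and a 16-base window such as 'ATATATATATATATAT', where two nucleotides
# account for >= 99% of the bases, sets lowcomp = True.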
#-------------------------------------------------------------------------------------
# filter variants
#-------------------------------------------------------------------------------------
def filterVariants(ref,alt,vtype,origAlt,origRef,usedMT,strongMTCnt,chrom,pos,hpLen,refGenome,MTCnt,alleleCnt,cvg,discordPairCnt,concordPairCnt,reverseCnt,forwardCnt,lowQReads,r1BcEndPos,r2BcEndPos,r2PrimerEndPos,primerDist):
# init output string
fltr = ';'
# low coverage filter
if usedMT < 5:
fltr += 'LM;'
# low number of strong MTs filter
if strongMTCnt[origAlt] < 2 :
fltr += 'LSM;'
# check region for homopolymer or low complexity
(isHomopolymer,isLowComplexity) = isHPorLowComp(chrom, pos, hpLen, ref, alt, refGenome)
# homopolymer filter
if isHomopolymer and 1.0 * MTCnt[origAlt] / usedMT < 0.99:
fltr += 'HP;'
# low complexity filter
if isLowComplexity and 1.0 * MTCnt[origAlt] / usedMT < 0.99:
fltr += 'LowC;'
# strand bias and discordant pairs filter
af_alt = 100.0 * alleleCnt[origAlt] / cvg
pairs = discordPairCnt[origAlt] + concordPairCnt[origAlt] # total number of paired reads covering the pos
if pairs >= 1000 and 1.0 * discordPairCnt[origAlt] / pairs >= 0.5:
fltr += 'DP;'
elif af_alt <= 60.0:
refR = reverseCnt[origRef]
refF = forwardCnt[origRef]
altR = reverseCnt[origAlt]
altF = forwardCnt[origAlt]
fisher = scipy.stats.fisher_exact([[refR, refF], [altR, altF]])
oddsRatio = fisher[0]
pvalue = fisher[1]
if pvalue < 0.00001 and (oddsRatio >= 50 or oddsRatio <= 1.0/50):
fltr += 'SB;'
# base quality filter. Reject if more than 40% reads are lowQ
if vtype == 'SNP' and origAlt in alleleCnt.keys() and origAlt in lowQReads.keys():
bqAlt = 1.0 * lowQReads[origAlt] / alleleCnt[origAlt]
else:
bqAlt = 0.0
if bqAlt > 0.4:
fltr += 'LowQ;'
# random end and fixed end position filters
if vtype == 'SNP':
# random end position filter
endBase = 20 # distance to barcode end of the read
# R1
      refLeEnd = sum(d <= endBase for d in r1BcEndPos[origRef]) # number of REF R1 reads with distance <= endBase
      refGtEnd = len(r1BcEndPos[origRef]) - refLeEnd # number of REF R1 reads with distance > endBase
      altLeEnd = sum(d <= endBase for d in r1BcEndPos[origAlt]) # number of ALT R1 reads with distance <= endBase
      altGtEnd = len(r1BcEndPos[origAlt]) - altLeEnd # number of ALT R1 reads with distance > endBase
fisher = scipy.stats.fisher_exact([[refLeEnd, refGtEnd], [altLeEnd, altGtEnd]])
oddsRatio = fisher[0]
pvalue = fisher[1]
if pvalue < 0.001 and oddsRatio < 0.05 and af_alt <= 60.0:
fltr += 'R1CP;'
# R2
refLeEnd = sum(d <= endBase for d in r2BcEndPos[origRef]) # number of REF R2 reads with distance <= endBase
refGtEnd = len(r2BcEndPos[origRef]) - refLeEnd # number of REF R2 reads with distance > endBase
altLeEnd = sum(d <= endBase for d in r2BcEndPos[origAlt]) # number of ALT R2 reads with distance <= endBase
altGtEnd = len(r2BcEndPos[origAlt]) - altLeEnd # number of ALT R2 reads with distance > endBase
fisher = scipy.stats.fisher_exact([[refLeEnd, refGtEnd], [altLeEnd, altGtEnd]])
oddsRatio = fisher[0]
pvalue = fisher[1]
if pvalue < 0.001 and oddsRatio < 0.05 and af_alt <= 60.0:
fltr += 'R2CP;'
# fixed end position filter
endBase = primerDist # distance to primer end of the read
refLeEnd = sum(d <= endBase for d in r2PrimerEndPos[origRef]) # number of REF R2 reads with distance <= endBase
refGtEnd = len(r2PrimerEndPos[origRef]) - refLeEnd # number of REF R2 reads with distance > endBase
altLeEnd = sum(d <= endBase for d in r2PrimerEndPos[origAlt]) # number of ALT R2 reads with distance <= endBase
altGtEnd = len(r2PrimerEndPos[origAlt]) - altLeEnd # number of ALT R2 reads with distance > endBase
fisher = scipy.stats.fisher_exact([[refLeEnd, refGtEnd], [altLeEnd, altGtEnd]])
oddsRatio = fisher[0]
pvalue = fisher[1]
# reject if variant is clustered within 2 bases from primer sequence due to possible enzyme initiation error
if altLeEnd + altGtEnd > 0:
if 1.0 * altLeEnd / (altLeEnd + altGtEnd) >= 0.98 or (pvalue < 0.001 and oddsRatio < 1.0/20):
fltr += 'PrimerCP;'
# done
return fltr
#-------------------------------------------------------------------------------------
# function to call variants
#-------------------------------------------------------------------------------------
def vc(bamFile, chrom, pos, minBQ, minMQ, mtDepth, rpb, hpLen, mismatchThr, mtDrop, maxMT, primerDist, refGenome):
samfile = pysam.AlignmentFile(bamFile, 'rb')
idx = 0
cvg = 0
bcDict = defaultdict(lambda: defaultdict(list))
allBcDict = defaultdict(list)
alleleCnt = defaultdict(int)
MTCnt = defaultdict(int)
r1BcEndPos = defaultdict(list)
r2BcEndPos = defaultdict(list)
r2PrimerEndPos = defaultdict(list)
MT3Cnt = 0
MT5Cnt = 0
MT7Cnt = 0
MT10Cnt = 0
strongMTCnt = defaultdict(int)
predIndex = defaultdict(lambda: defaultdict(float))
finalDict = defaultdict(float)
r1Cnt = defaultdict(int)
r2Cnt = defaultdict(int)
forwardCnt = defaultdict(int)
reverseCnt = defaultdict(int)
concordPairCnt = defaultdict(int)
discordPairCnt = defaultdict(int)
mismatchCnt = defaultdict(float)
bqSum = defaultdict(int)
lowQReads = defaultdict(int)
   # set threshold for strongMT based on rpb (mean read pairs per MT)
if rpb < 1.5:
smt = 2.0
elif rpb < 3.0:
smt = 3.0
else:
smt = 4.0
# get reference base
refseq = pysam.FastaFile(refGenome)
origRef = refseq.fetch(reference=chrom, start=int(pos)-1, end=int(pos))
origRef = origRef.upper()
# pile up reads
for read in samfile.pileup(region = chrom + ':' + pos + ':' + pos, truncate=True, max_depth=1000000, stepper='nofilter'):
for pileupRead in read.pileups:
# read ID
qname = pileupRead.alignment.query_name
qnameSplit = qname.split(":")
readid = ':'.join(qnameSplit[:-2])
# barcode sequence
BC = qnameSplit[-2]
# duplex tag - temporary hack from end of readid - should be CC, TT, or NN for duplex runs
duplexTag = qnameSplit[-3]
# mapping quality
mq = pileupRead.alignment.mapping_quality
# get NM tag
NM = 0
allTags = pileupRead.alignment.tags
for (tag, value) in allTags:
if tag == 'NM':
NM = value
break
# count number of INDELs in the read sequence
nIndel = 0
cigar = pileupRead.alignment.cigar
cigarOrder = 1
leftSP = 0 # soft clipped bases on the left
rightSP = 0 # soft clipped bases on the right
for (op, value) in cigar:
# 1 for insertion
if op == 1 or op == 2:
nIndel += value
if cigarOrder == 1 and op == 4:
leftSP = value
if cigarOrder > 1 and op == 4:
rightSP += value
cigarOrder += 1
         # Number of mismatches except INDEL, including softclipped sequences
mismatch = max(0, NM - nIndel)
# read length, including softclip
readLen = pileupRead.alignment.query_length
# calculate mismatch per 100 bases
mismatchPer100b = 100.0 * mismatch / readLen if readLen > 0 else 0.0
# paired read
if pileupRead.alignment.is_read1:
pairOrder = 'R1'
if pileupRead.alignment.is_read2:
pairOrder = 'R2'
# +/- strand
strand = 'Reverse' if pileupRead.alignment.is_reverse else 'Forward'
# coverage -- read, not fragment
cvg += 1
# check if the site is the beginning of insertion
if pileupRead.indel > 0:
site = pileupRead.alignment.query_sequence[pileupRead.query_position]
inserted = pileupRead.alignment.query_sequence[(pileupRead.query_position + 1) : (pileupRead.query_position + 1 + pileupRead.indel)]
base = 'INS|' + site + '|' + site + inserted
bq = pileupRead.alignment.query_qualities[pileupRead.query_position]
bqSum[base] += bq
# inclusion condition
incCond = bq >= minBQ and mq >= minMQ and mismatchPer100b <= mismatchThr
alleleCnt[base] += 1
mismatchCnt[base] += mismatchPer100b
if pairOrder == 'R1':
r1Cnt[base] += 1
if pairOrder == 'R2':
r2Cnt[base] += 1
if strand == 'Reverse':
reverseCnt[base] += 1
else:
forwardCnt[base] += 1
# check if the site is the beginning of deletion
elif pileupRead.indel < 0:
site = pileupRead.alignment.query_sequence[pileupRead.query_position]
deleted = refseq.fetch(reference=chrom, start=int(pos), end=int(pos)+abs(pileupRead.indel))
deleted = deleted.upper()
base = 'DEL|' + site + deleted + '|' + site
bq = pileupRead.alignment.query_qualities[pileupRead.query_position]
bqSum[base] += bq
# inclusion condition
incCond = bq >= minBQ and mq >= minMQ and mismatchPer100b <= mismatchThr
alleleCnt[base] += 1
mismatchCnt[base] += mismatchPer100b
if pairOrder == 'R1':
r1Cnt[base] += 1
if pairOrder == 'R2':
r2Cnt[base] += 1
if strand == 'Reverse':
reverseCnt[base] += 1
else:
forwardCnt[base] += 1
# site is not beginning of any INDEL
else:
            # If the site itself is a deletion, set quality = minBQ
if pileupRead.is_del:
base = 'DEL'
bq = minBQ
bqSum[base] += bq
# inclusion condition
incCond = bq >= minBQ and mq >= minMQ and mismatchPer100b <= mismatchThr
# if the site is a regular locus,
else:
base = pileupRead.alignment.query_sequence[pileupRead.query_position] # note: query_sequence includes soft clipped bases
bq = pileupRead.alignment.query_qualities[pileupRead.query_position]
bqSum[base] += bq
# count the number of low quality reads (less than Q20 by default) for each base
if bq < minBQ:
lowQReads[base] += 1
# inclusion condition
incCond = bq >= minBQ and mq >= minMQ and mismatchPer100b <= mismatchThr
if pairOrder == 'R1':
# distance to the barcode end in R1;
if pileupRead.alignment.is_reverse:
distToBcEnd = pileupRead.alignment.query_alignment_length - (pileupRead.query_position - leftSP)
else:
distToBcEnd = pileupRead.query_position - leftSP
if incCond:
r1BcEndPos[base].append(distToBcEnd)
r1Cnt[base] += 1
if pairOrder == 'R2':
# distance to the barcode and/or primer end in R2. Different cases for forward and reverse strand
if pileupRead.alignment.is_reverse:
distToBcEnd = pileupRead.query_position - leftSP
distToPrimerEnd = pileupRead.alignment.query_alignment_length - (pileupRead.query_position - leftSP)
else:
distToBcEnd = pileupRead.alignment.query_alignment_length - (pileupRead.query_position - leftSP)
distToPrimerEnd = pileupRead.query_position - leftSP
if incCond:
r2BcEndPos[base].append(distToBcEnd)
r2PrimerEndPos[base].append(distToPrimerEnd)
r2Cnt[base] += 1
if strand == 'Reverse':
reverseCnt[base] += 1
else:
forwardCnt[base] += 1
alleleCnt[base] += 1
mismatchCnt[base] += mismatchPer100b
# count total number of fragments and MTs
if readid not in allBcDict[BC]:
allBcDict[BC].append(readid)
# decide which read goes into analysis
if incCond:
if readid not in bcDict[BC]:
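               # convert Phred base quality to an error probability (e.g. Q30 -> 1e-3)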
prob = pow(10.0, -bq / 10.0)
readinfo = [base, prob, pairOrder]
bcDict[BC][readid].append(readinfo)
elif base == bcDict[BC][readid][0][0] or base in ['N', '*']:
bcDict[BC][readid][0][1] = max((pow(10.0, -bq / 10.0) , bcDict[BC][readid][0][1]))
bcDict[BC][readid][0][2] = 'Paired'
if base == bcDict[BC][readid][0][0]:
concordPairCnt[base] += 1
else:
del bcDict[BC][readid]
discordPairCnt[base] += 1
# total number of MT, fragments, reads, including those dropped from analysis
allMT = len(allBcDict)
allFrag = sum([len(allBcDict[bc]) for bc in allBcDict])
# downsampling MTs (not dropped) to args.maxMT
ds = maxMT if maxMT > 0 else int(round(2.0 * mtDepth))
# MTs used
usedMT = min(ds, len(bcDict))
# done if zero coverage (note hack for 41 blank output fields!)
if usedMT == 0:
out_long = '\t'.join([chrom, pos, origRef] + ['']*41 + ['Zero_Coverage'])
return out_long
   if len(bcDict) > ds:
      random.seed(pos)
      bcKeys = random.sample(list(bcDict.keys()), ds)  # list() so random.sample works under Python 3
   else:
      bcKeys = list(bcDict.keys())
usedFrag = sum([len(bcDict[bc]) for bc in bcKeys])
totalR1 = sum(r1Cnt.values())
totalR2 = sum(r2Cnt.values())
for bc in bcKeys:
bcProb = calProb(bcDict[bc], mtDrop)
      for char in bcProb.keys():
x = 1.0 - bcProb[char]
log10P = -math.log10(x) if x > 0.0 else 16.0
predIndex[bc][char] = log10P
finalDict[char] += log10P
max_base = [x for x in predIndex[bc].keys() if predIndex[bc][x] == max(predIndex[bc].values())]
if len(max_base) == 1:
cons = max_base[0]
MTCnt[cons] += 1
if predIndex[bc][cons] > smt:
strongMTCnt[cons] += 1
      # A tie in max predIndex is most likely due to a single-read MT.
      elif len(bcDict[bc]) == 1:
         cons = list(bcDict[bc].values())[0][0][0]
MTCnt[cons] += 1
if len(bcDict[bc]) >= 3:
MT3Cnt += 1
if len(bcDict[bc]) >= 5:
MT5Cnt += 1
if len(bcDict[bc]) >= 7:
MT7Cnt += 1
if len(bcDict[bc]) >= 10:
MT10Cnt += 1
sortedList = sorted(finalDict.items(), key=operator.itemgetter(1), reverse=True)
maxBase = sortedList[0][0]
maxPI = sortedList[0][1]
secondMaxBase = sortedList[1][0]
secondMaxPI = sortedList[1][1]
# call variants
origAlt = secondMaxBase if maxBase == origRef else maxBase
altPI = secondMaxPI if maxBase == origRef else maxPI
# convert from internal smCounter format to format needed for output
(ref, alt, vtype) = convertToVcf(origRef,origAlt)
# apply filters if PI >= 5 (at least 2 MTs), and locus not in a deletion
fltr = ';'
if altPI >= 5 and vtype in ('SNP', 'INDEL'):
fltr = filterVariants(ref,alt,vtype,origAlt,origRef,usedMT,strongMTCnt,chrom,pos,hpLen,refGenome,MTCnt,alleleCnt,cvg,discordPairCnt,concordPairCnt,reverseCnt,forwardCnt,lowQReads,r1BcEndPos,r2BcEndPos,r2PrimerEndPos,primerDist)
# identify possible bi-allelic variants - top 2 alleles are non-reference and both VMFs >= 45%. Not necessarily passing the filters.
mfAlt = 1.0 * MTCnt[maxBase] / usedMT # MT fraction of the base with the highest PI
mfAlt2 = 1.0 * MTCnt[secondMaxBase] / usedMT # MT fraction of the base with the second highest PI
if maxBase != origRef and secondMaxBase != origRef and mfAlt >= 0.45 and mfAlt2 >= 0.45: # conditions to be considered bi-allelic
# convert from internal smCounter format to format needed for output
origAlt2 = secondMaxBase
(ref2, alt2, vtype2) = convertToVcf(origRef,origAlt2)
# apply filters to 2nd variant if PI2 >= 5 (at least 2 MTs), and locus not in a deletion
fltr2 = ';'
if secondMaxPI >= 5 and vtype2 in ('SNP', 'INDEL'):
fltr2 = filterVariants(ref2,alt2,vtype2,origAlt2,origRef,usedMT,strongMTCnt,chrom,pos,hpLen,refGenome,MTCnt,alleleCnt,cvg,discordPairCnt,concordPairCnt,reverseCnt,forwardCnt,lowQReads,r1BcEndPos,r2BcEndPos,r2PrimerEndPos,primerDist)
# prepare output for bi-allelic variants (if var2 is filtered, regardless of var1, do nothing. output var1 only)
if fltr == ';' and fltr2 == ';': # both var1 and var2 pass the filters -- this is a bi-allelic variant. var1's statistics (MT, DP, etc) are reported
alt = alt + ',' + alt2
vtype = vtype.lower() + ',' + vtype2.lower()
elif fltr != ';' and fltr2 == ';': # if var1 is filtered and the var2 passes, then it's a single variant of var2
alt = alt2
fltr = fltr2
origAlt = origAlt2
# build detailed output vector
frac_alt = round((1.0 * alleleCnt[origAlt] / cvg),4) # based on all reads, including the excluded reads
frac_A = round((1.0 * alleleCnt['A'] / cvg),4)
frac_T = round((1.0 * alleleCnt['T'] / cvg),4)
frac_G = round((1.0 * alleleCnt['G'] / cvg),4)
frac_C = round((1.0 * alleleCnt['C'] / cvg),4)
fracs = (alleleCnt['A'], alleleCnt['T'], alleleCnt['G'], alleleCnt['C'], frac_A, frac_T, frac_G, frac_C)
MT_f_alt = round((1.0 * MTCnt[origAlt] / usedMT),4) # based on only used MTs
MT_f_A = round((1.0 * MTCnt['A'] / usedMT),4)
MT_f_T = round((1.0 * MTCnt['T'] / usedMT),4)
MT_f_G = round((1.0 * MTCnt['G'] / usedMT),4)
MT_f_C = round((1.0 * MTCnt['C'] / usedMT),4)
MTs = (MT3Cnt, MT5Cnt, MT7Cnt, MT10Cnt, MTCnt['A'], MTCnt['T'], MTCnt['G'], MTCnt['C'], MT_f_A, MT_f_T, MT_f_G, MT_f_C)
strongMT = (strongMTCnt['A'], strongMTCnt['T'], strongMTCnt['G'], strongMTCnt['C'])
predIdx = (round(finalDict['A'], 2), round(finalDict['T'], 2), round(finalDict['G'], 2), round(finalDict['C'], 2))
outvec = [chrom, pos, ref, alt, vtype, cvg, allFrag, allMT, usedFrag, usedMT, round(finalDict[origAlt], 2), alleleCnt[origAlt], frac_alt, MTCnt[origAlt], MT_f_alt, strongMTCnt[origAlt]]
outvec.extend(fracs)
outvec.extend(MTs)
outvec.extend(strongMT)
outvec.extend(predIdx)
outvec.append(fltr)
out_long = '\t'.join((str(x) for x in outvec))
return out_long
#------------------------------------------------------------------------------------------------
# wrapper function for "vc()" - because Python multiprocessing module does not pass stack trace
#------------------------------------------------------------------------------------------------
def vc_wrapper(*args):
try:
output = vc(*args)
   except Exception:
print("Exception thrown in vc() function at genome location:", args[1], args[2])
output = "Exception thrown!\n" + traceback.format_exc()
return output
#------------------------------------------------------------------------------------------------
# global for argument parsing (hack that works when calling from either command line or pipeline)
#------------------------------------------------------------------------------------------------
parser = None
def argParseInit(): # this is done inside a function because multiprocessing module imports the script
global parser
parser = argparse.ArgumentParser(description='Variant calling using molecular barcodes', fromfile_prefix_chars='@')
parser.add_argument('--outPrefix', default=None, required=True, help='prefix for output files')
parser.add_argument('--bamFile' , default=None, required=True, help='BAM file')
parser.add_argument('--bedTarget', default=None, required=True, help='BED file for target region')
parser.add_argument('--mtDepth' , default=None, required=True, type=int, help='Mean MT depth')
parser.add_argument('--rpb' , default=None, required=True, type=float, help='Mean read pairs per MT')
parser.add_argument('--nCPU' , type=int, default=1 , help='number of CPUs to use in parallel')
parser.add_argument('--minBQ' , type=int, default=20, help='minimum base quality allowed for analysis')
parser.add_argument('--minMQ' , type=int, default=30, help='minimum mapping quality allowed for analysis')
parser.add_argument('--hpLen' , type=int, default=10, help='Minimum length for homopolymers')
parser.add_argument('--mismatchThr', type=float, default=6.0, help='average number of mismatches per 100 bases allowed')
parser.add_argument('--mtDrop' , type=int, default=0, help='Drop MTs with lower than or equal to X reads.')
parser.add_argument('--maxMT' , type=int, default=0, help='Randomly downsample to X MTs (max number of MTs at any position). If set to 0 (default), maxMT = 2.0 * mean MT depth')
parser.add_argument('--primerDist' , type=int, default=2, help='filter variants that are within X bases to primer')
parser.add_argument('--threshold' , type=int, default=0, help='Minimum prediction index for a variant to be called. Must be non-negative. Typically ranges from 10 to 60. If set to 0 (default), smCounter will choose the appropriate cutoff based on the mean MT depth.')
parser.add_argument('--refGenome' , default = '/qgen/home/rvijaya/downloads/alt_hap_masked_ref/ucsc.hg19.fasta')
parser.add_argument('--bedTandemRepeats' , default = '/qgen/home/xuc/UCSC/simpleRepeat.bed', help = 'bed for UCSC tandem repeats')
parser.add_argument('--bedRepeatMaskerSubset', default = '/qgen/home/xuc/UCSC/SR_LC_SL.nochr.bed', help = 'bed for RepeatMasker simple repeats, low complexity, microsatellite regions')
parser.add_argument('--bedtoolsPath' , default = '/qgen/bin/bedtools-2.25.0/bin/', help = 'path to bedtools')
parser.add_argument('--runPath' , default=None, help='path to working directory')
parser.add_argument('--logFile' , default=None, help='log file')
   parser.add_argument('--paramFile', default=None, help='optional parameter file that contains the above parameters. if specified, this must be the only parameter, except for --logFile.')
#--------------------------------------------------------------------------------------
# main function
#--------------------------------------------------------------------------------------
def main(args):
# log run start
timeStart = datetime.datetime.now()
print("smCounter started at " + str(timeStart))
# if argument parser global not assigned yet, initialize it
   if parser is None:
argParseInit()
# get arguments passed in via a lambda object (e.g. from upstream pipeline)
if type(args) is not argparse.Namespace:
argsList = []
      for argName, argVal in args.items():
argsList.append("--{0}={1}".format(argName, argVal))
args = parser.parse_args(argsList)
# get arguments from disk file specified on command line (warning: this silently deletes all actual command line parameters)
   elif args.paramFile is not None:
args = parser.parse_args(("@" + args.paramFile,))
# echo all parameters to the log file
   for argName, argVal in vars(args).items():
print(argName, argVal)
# change working directory to runDir
   if args.runPath is not None:
os.chdir(args.runPath)
# make list of loci to call variants
locList = []
for line in open(args.bedTarget, 'r'):
if not line.startswith("track "):
(chrom, regionStart, regionEnd) = line.strip().split('\t')[0:3]
for pos in range(int(regionStart),int(regionEnd)):
locList.append((chrom, str(pos+1)))
# call variants in parallel
pool = multiprocessing.Pool(processes=args.nCPU)
results = [pool.apply_async(vc_wrapper, args=(args.bamFile, x[0], x[1], args.minBQ, args.minMQ, args.mtDepth, args.rpb, args.hpLen, args.mismatchThr, args.mtDrop, args.maxMT, args.primerDist, args.refGenome)) for x in locList]
output = [p.get() for p in results]
pool.close()
pool.join()
# check for exceptions thrown by vc()
for idx in range(len(output)):
line = output[idx]
if line.startswith("Exception thrown!"):
print(line)
raise Exception("Exception thrown in vc() at location: " + str(locList[idx]))
# report start of variant filtering
print("begin variant filtering and output")
# merge and sort RepeatMasker tracks (could be done prior to run) Note: assuming TRF repeat already merged and sorted!!
bedExe = args.bedtoolsPath + 'bedtools'
bedRepeatMasker = args.outPrefix + '.tmp.repeatMasker.bed'
subprocess.check_call(bedExe + ' merge -c 4 -o distinct -i ' + args.bedRepeatMaskerSubset + ' | ' + bedExe + ' sort -i - > ' + bedRepeatMasker, shell=True)
# merge and sort target region
bedTarget = args.outPrefix + '.tmp.target.bed'
subprocess.check_call(bedExe + ' merge -i ' + args.bedTarget + ' | ' + bedExe + ' sort -i - > ' + bedTarget, shell=True)
# intersect 2 repeats tracks with target region
subprocess.check_call(bedExe + ' intersect -a ' + args.bedTandemRepeats + ' -b ' + bedTarget + ' | ' + bedExe + ' sort -i - > ' + args.outPrefix + '.tmp.target.repeats1.bed', shell=True)
subprocess.check_call(bedExe + ' intersect -a ' + bedRepeatMasker + ' -b ' + bedTarget + ' | ' + bedExe + ' sort -i - > ' + args.outPrefix + '.tmp.target.repeats2.bed', shell=True)
# read in tandem repeat list
trfRegions = defaultdict(list)
for line in open(args.outPrefix + '.tmp.target.repeats1.bed', 'r'):
vals = line.strip().split()
(chrom, regionStart, regionEnd) = vals[0:3]
trfRegions[chrom].append((int(regionStart), int(regionEnd), "RepT;"))
   # read in simple repeat, low complexity, satellite list
rmRegions = defaultdict(list)
for line in open(args.outPrefix + '.tmp.target.repeats2.bed', 'r'):
(chrom, regionStart, regionEnd, typeCodes) = line.strip().split()
repTypes = []
for typeCode in typeCodes.split(","):
if typeCode == 'Simple_repeat':
repTypes.append('RepS')
elif typeCode == 'Low_complexity':
repTypes.append('LowC')
elif typeCode == 'Satellite':
repTypes.append('SL')
else:
repTypes.append('Other_Repeat')
repType = ";".join(repTypes) + ";"
rmRegions[chrom].append((int(regionStart), int(regionEnd), repType))
# remove intermediate files
os.remove(args.outPrefix + '.tmp.target.bed')
os.remove(args.outPrefix + '.tmp.repeatMasker.bed')
os.remove(args.outPrefix + '.tmp.target.repeats1.bed')
os.remove(args.outPrefix + '.tmp.target.repeats2.bed')
# set up header columns (Note: "headerAll" must parallel the output of the vc() function.)
headerAll = ('CHROM', 'POS', 'REF', 'ALT', 'TYPE', 'DP', 'FR' , 'MT', 'UFR', 'UMT', 'PI', 'VDP', 'VAF', 'VMT', 'VMF', 'VSM', 'DP_A', 'DP_T', 'DP_G', 'DP_C', 'AF_A', 'AF_T', 'AF_G', 'AF_C', 'MT_3RPM', 'MT_5RPM', 'MT_7RPM', 'MT_10RPM', 'UMT_A', 'UMT_T', 'UMT_G', 'UMT_C', 'UMF_A', 'UMF_T', 'UMF_G', 'UMF_C', 'VSM_A', 'VSM_T', 'VSM_G', 'VSM_C', 'PI_A', 'PI_T', 'PI_G', 'PI_C', 'FILTER')
headerVariants = ('CHROM', 'POS', 'REF', 'ALT', 'TYPE', 'DP', 'MT', 'UMT', 'PI', 'THR', 'VMT', 'VMF', 'VSM', 'FILTER')
# set up hash of variable fields
headerAllIndex = {}
for i in range(len(headerAll)):
headerAllIndex[headerAll[i]] = i
   # repeat region filters: flag TRF tandem repeats when the MT fraction is below 40%; flag RepeatMasker repeat classes regardless of MT fraction
for i in range(len(output)):
outline = output[i]
lineList = outline.split('\t')
chromTr = lineList[headerAllIndex['CHROM']]
altTr = lineList[headerAllIndex['ALT']]
try:
posTr = int(lineList[headerAllIndex['POS']])
except ValueError:
continue
try:
altMtFracTr = float(lineList[headerAllIndex['VMF']])
except ValueError:
continue
try:
pred = int(float(lineList[headerAllIndex['PI']]))
except ValueError:
pred = 0
if pred >= 5 and altTr != 'DEL':
# check tandem repeat from TRF if MT fraction < 40%
         if altMtFracTr < 0.4:  # VMF is a fraction in [0, 1], so the 40% cut is 0.4
for (locL, locR, repType) in trfRegions[chromTr]:
if locL < posTr <= locR:
lineList[-1] += repType
break
# check simple repeat, lc, sl from RepeatMasker
for (locL, locR, repType) in rmRegions[chromTr]:
if locL < posTr <= locR:
lineList[-1] += repType
break
lineList[-1] = 'PASS' if lineList[-1] == ';' else lineList[-1].strip(';')
output[i] = '\t'.join(lineList)
# VCF header
header_vcf = \
'##fileformat=VCFv4.2\n' + \
'##reference=GRCh37\n' + \
'##INFO=<ID=TYPE,Number=1,Type=String,Description="Variant type: SNP or INDEL">\n' + \
'##INFO=<ID=DP,Number=1,Type=Integer,Description="Total read depth">\n' + \
'##INFO=<ID=MT,Number=1,Type=Integer,Description="Total MT depth">\n' + \
'##INFO=<ID=UMT,Number=1,Type=Integer,Description="Filtered MT depth">\n' + \
'##INFO=<ID=PI,Number=1,Type=Float,Description="Variant prediction index">\n' + \
'##INFO=<ID=THR,Number=1,Type=Integer,Description="Variant prediction index minimum threshold">\n' + \
'##INFO=<ID=VMT,Number=1,Type=Integer,Description="Variant MT depth">\n' + \
'##INFO=<ID=VMF,Number=1,Type=Float,Description="Variant MT fraction">\n' + \
'##INFO=<ID=VSM,Number=1,Type=Integer,Description="Variant strong MT depth">\n' + \
'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n' + \
'##FORMAT=<ID=AD,Number=.,Type=Integer,Description="Filtered allelic MT depths for the ref and alt alleles">\n' + \
'##FORMAT=<ID=VF,Number=1,Type=Float,Description="Variant MT fraction, same as VMF">\n' + \
'##FILTER=<ID=RepT,Description="Variant in simple tandem repeat region, as defined by Tandem Repeats Finder">\n' + \
'##FILTER=<ID=RepS,Description="Variant in simple repeat region, as defined by RepeatMasker">\n' + \
'##FILTER=<ID=LowC,Description="Variant in low complexity region, as defined by RepeatMasker">\n' + \
      '##FILTER=<ID=SL,Description="Variant in micro-satellite region, as defined by RepeatMasker">\n' + \
'##FILTER=<ID=HP,Description="Inside or flanked by homopolymer region">\n' + \
'##FILTER=<ID=LM,Description="Low coverage (fewer than 5 MTs)">\n' + \
'##FILTER=<ID=LSM,Description="Fewer than 2 strong MTs">\n' + \
'##FILTER=<ID=SB,Description="Strand bias">\n' + \
'##FILTER=<ID=LowQ,Description="Low base quality (mean < 22)">\n' + \
'##FILTER=<ID=MM,Description="Too many genome reference mismatches in reads (default threshold is 6.5 per 100 bases)">\n' + \
'##FILTER=<ID=DP,Description="Too many discordant read pairs">\n' + \
'##FILTER=<ID=R1CP,Description="Variants are clustered at the end of R1 reads">\n' + \
'##FILTER=<ID=R2CP,Description="Variants are clustered at the end of R2 reads">\n' + \
'##FILTER=<ID=PrimerCP,Description="Variants are clustered immediately after the primer, possible enzyme initiation error">\n' + \
'\t'.join(('#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', args.outPrefix)) + '\n'
# set cutoff value for about 20 FP/Mb
threshold = int(math.ceil(14.0 + 0.012 * args.mtDepth)) if args.threshold == 0 else args.threshold
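   # worked example of the cutoff (illustrative, not from the original source):
   # mtDepth = 1000 gives ceil(14.0 + 0.012 * 1000) = 26, so PI >= 26 is required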
# open output files
outAll = open(args.outPrefix + '.smCounter.all.txt', 'w')
outVariants = open(args.outPrefix + '.smCounter.cut.txt', 'w')
outVcf = open(args.outPrefix + '.smCounter.cut.vcf', 'w')
# write column headers
outAll.write('\t'.join(headerAll) + '\n')
outVariants.write('\t'.join(headerVariants) + '\n')
outVcf.write(header_vcf)
   numCalledSnps = 0
   numCalledIndels = 0
   for line in output:
# write to the detailed output
outAll.write(line)
outAll.write("\n")
# unpack text fields
fields = line.split('\t')
# skip if no PI
PI = fields[headerAllIndex['PI']]
if len(PI) == 0:
continue
# get ALT and prediction index
ALT = fields[headerAllIndex['ALT']]
QUAL = str(int(float(PI))) # truncate PI to conform to VCF phred-like tradition
# write to vcf file and short output
if int(QUAL) >= threshold and ALT != 'DEL': # if PI > threshold, write to vcf (regardless of filters)
# parse fields needed from main data vector
CHROM = fields[headerAllIndex['CHROM']]
POS = fields[headerAllIndex['POS']]
REF = fields[headerAllIndex['REF']]
TYPE = fields[headerAllIndex['TYPE']]
DP = fields[headerAllIndex['DP']]
MT = fields[headerAllIndex['MT']]
UMT = fields[headerAllIndex['UMT']]
VMT = fields[headerAllIndex['VMT']]
VMF = fields[headerAllIndex['VMF']]
VSM = fields[headerAllIndex['VSM']]
FILTER= fields[headerAllIndex['FILTER']]
THR = str(threshold)
INFO = ';'.join(('TYPE='+TYPE, 'DP='+DP, 'MT='+MT, 'UMT='+UMT, 'PI='+PI, 'THR='+THR, 'VMT='+VMT, 'VMF='+VMF, 'VSM='+VSM))
# hack attempt to satisfy downstream software - not correct for germline heterozygous, male X, etc, etc, etc
alts = ALT.split(",")
if len(alts) == 2:
genotype = '1/2'
elif len(alts) != 1:
raise Exception("error hacking genotype field for " + alts)
elif CHROM == "chrY" or CHROM == "chrM":
genotype = '1'
elif float(VMF) > 0.95:
genotype = '1/1'
else:
genotype = '0/1'
REFMT = str(int(UMT) - int(VMT))
AD = REFMT + "," + VMT
if len(alts) == 2:
AD = AD + ",1" # horrific hack for the 2nd alt
# output
FORMAT = 'GT:AD:VF'
SAMPLE = ":".join((genotype,AD,VMF))
ID = '.'
vcfLine = '\t'.join((CHROM, POS, ID, REF, ALT, QUAL, FILTER, INFO, FORMAT, SAMPLE)) + '\n'
shortLine = '\t'.join((CHROM, POS, REF, ALT, TYPE, DP, MT, UMT, PI, THR, VMT, VMF, VSM, FILTER)) + '\n'
outVcf.write(vcfLine)
outVariants.write(shortLine)
         # debug counters for summary
         if TYPE == 'SNP':
            numCalledSnps += 1
         else:
            numCalledIndels += 1
outVcf.close()
outAll.close()
outVariants.close()
# log run completion
timeEnd = datetime.datetime.now()
print("smCounter completed running at " + str(timeEnd))
print("smCounter total time: "+ str(timeEnd-timeStart))
# pass threshold back to caller
return threshold
#----------------------------------------------------------------------------------------------
# pythonism to run from the command line
#----------------------------------------------------------------------------------------------
if __name__ == "__main__":
   # init the argument parser
argParseInit()
# get command line arguments
args = parser.parse_args()
# initialize logger
import run_log
run_log.init(args.logFile)
# call main program
main(args)
|
xuchang116/smCounter
|
smCounter.py
|
Python
|
mit
| 42,453
|
[
"pysam"
] |
40a1ecba3d1a7ae90eebaeee291d24c65c7aa53e84b72261aef010a70fcfe497
|
"""
Filter factories and their associated functions for mlab.
Module functions meant to be applied to a data source object should take
only one positional argument, the data object, to be easily used in
helper functions.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Prabhu Ramachandran
# Copyright (c) 2007-2015, Enthought, Inc.
# License: BSD Style.
from traits.api import Instance, CFloat, CInt, CArray, Trait, \
Enum, Property, Any, String
from tvtk.common import camel2enthought
from tvtk.api import tvtk
import mayavi.filters.api as filters
from mayavi.core.registry import registry
from .pipe_base import PipeFactory, make_function
# This list is dynamically populated further down, at the end of the module.
__all__ = ['tube', 'warp_scalar', 'threshold', 'elevation_filter',
'set_active_attribute', 'user_defined'
]
def new_class(name, bases, dict_):
try:
import new
return new.classobj(name, bases, dict_)
except ImportError:
return type(name, bases, dict_)
##############################################################################
class TubeFactory(PipeFactory):
"""Applies the Tube mayavi filter to the given VTK object."""
_target = Instance(filters.Tube, ())
tube_sides = CInt(6, adapts='filter.number_of_sides',
desc="""number of sides of the tubes used to
represent the lines.""")
tube_radius = CFloat(0.05, adapts='filter.radius',
desc="""radius of the tubes used to represent the
lines.""")
tube = make_function(TubeFactory)
##############################################################################
class WarpScalarFactory(PipeFactory):
"""Applies the WarpScalar mayavi filter to the given VTK object."""
_target = Instance(filters.WarpScalar, ())
warp_scale = CFloat(1.0, adapts="filter.scale_factor",
help="scale of the warp scalar")
warp_scalar = make_function(WarpScalarFactory)
##############################################################################
class ThresholdFactory(PipeFactory):
"""Applies the Threshold mayavi filter to the given VTK object."""
_target = Instance(filters.Threshold, ())
    filter_type = Enum('cells', 'points', adapts='filter_type',
                       help="whether the threshold is applied to cells or points")
low = Trait(None, None, CFloat, help="The lower threshold")
def _low_changed(self):
if self.low is None:
pass
else:
self._target.lower_threshold = self.low
up = Trait(None, None, CFloat, help="The upper threshold")
def _up_changed(self):
if self.up is None:
pass
else:
self._target.upper_threshold = self.up
threshold = make_function(ThresholdFactory)
##############################################################################
class ElevationFilterFactory(PipeFactory):
"""Applies the Elevation Filter mayavi filter to the given VTK object."""
high_point = CArray(default=[0, 0, 1], shape=(3,),
adapts="filter.high_point",
help="The end point of the projection line")
low_point = CArray(default=[0, 0, 0], shape=(3,),
adapts="filter.low_point",
help="The start point of the projection line")
_target = Instance(filters.ElevationFilter, ())
elevation_filter = make_function(ElevationFilterFactory)
##############################################################################
class SetActiveAttributeFactory(PipeFactory):
""" Applies the SetActiveAttribute Filter mayavi filter to the given
VTK object.
"""
point_scalars = String(
adapts="point_scalars_name",
help="The name of the active point scalars")
point_vectors = String(
adapts="point_vectors_name",
help="The name of the active point vectors")
point_tensors = String(
adapts="point_tensors_name",
help="The name of the active point tensors")
cell_scalars = String(
adapts="cell_scalars_name",
help="The name of the active cell scalars")
cell_vectors = String(
adapts="cell_vectors_name",
help="The name of the active cell vectors")
cell_tensors = String(
adapts="cell_tensors_name",
help="The name of the active cell tensors")
_target = Instance(filters.SetActiveAttribute, ())
set_active_attribute = make_function(SetActiveAttributeFactory)
##############################################################################
class UserDefinedFactory(PipeFactory):
"""Applies the UserDefined mayavi filter to the given TVTK object."""
_target = Instance(filters.UserDefined, ())
    filter = Instance(tvtk.Object, adapts="filter",
                      help="the tvtk filter to adapt. This can "
                           "be either an instance of the filter, or the "
                           "name of this filter.")
def __init__(self, parent, **kwargs):
if 'filter' in kwargs:
filter = kwargs['filter']
if not isinstance(filter, tvtk.Object):
try:
filter = getattr(tvtk, filter)
except AttributeError:
raise Exception('Filter %s unknown to TVTK' % filter)
kwargs['filter'] = filter()
self._target.filter = kwargs['filter']
self._target.setup_filter()
else:
self._target.filter = kwargs['filter']
        if 'name' not in kwargs:
kwargs['name'] = 'UserDefined(%s)' % \
kwargs['filter'].__class__.__name__
super(UserDefinedFactory, self).__init__(parent, **kwargs)
user_defined = make_function(UserDefinedFactory)
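# Usage sketch (a hedged illustration, not part of the original module): the
# filter may be given by TVTK class name, which __init__ above resolves via
# getattr(tvtk, name), e.g.
#   from mayavi import mlab
#   src = mlab.pipeline.scalar_field(s)  # s: some 3D numpy array (hypothetical)
#   gf = mlab.pipeline.user_defined(src, filter='GeometryFilter')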
############################################################################
# Automatically generated filters from registry.
############################################################################
class _AutomaticFilterFactory(PipeFactory):
"""The base class for any auto-generated factory classes.
NOTE: This class requires that the `_metadata` trait be set to
the metadata object for the object for which this is a factory.
"""
# The target.
_target = Property
# The saved target that is created once and then always returned.
_saved_target = Any(None)
def _get__target(self):
"""Getter for the _target trait."""
if self._saved_target is None:
self._saved_target = self._metadata.get_callable()()
return self._saved_target
def _make_functions(namespace):
"""Make the functions for adding filters and add them to the
namespace automatically.
"""
for fil in registry.filters:
func_name = camel2enthought(fil.id)
class_name = fil.id
if func_name.endswith('_filter'):
func_name = func_name[:-7]
class_name = class_name[:-6]
class_name = class_name + 'Factory'
# Don't create any that are already defined.
if class_name in namespace:
continue
# The class to wrap.
klass = new_class(
class_name, (_AutomaticFilterFactory,), {'__doc__': fil.help, }
)
klass._metadata = fil
# The mlab helper function.
func = make_function(klass)
# Inject class/function into the namespace and __all__.
namespace[class_name] = klass
namespace[func_name] = func
__all__.append(func_name)
# Create the module related functions.
_make_functions(locals())
|
dmsurti/mayavi
|
mayavi/tools/filters.py
|
Python
|
bsd-3-clause
| 7,862
|
[
"Mayavi",
"VTK"
] |
58e6d06cc98eb27be5fbe3c04087505f0fc2ca9d92ed9612ec922446d91b9ad3
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Qiming Sun <osirpt.sun@gmail.com>
# Junzi Liu <latrix1247@gmail.com>
#
import copy
from functools import reduce
import numpy
import scipy.linalg
from pyscf import lib
from pyscf.gto import mole
from pyscf.lib import logger
from pyscf.scf import hf
from pyscf import __config__
LINEAR_DEP_THRESHOLD = getattr(__config__, 'scf_addons_remove_linear_dep_threshold', 1e-8)
LINEAR_DEP_TRIGGER = getattr(__config__, 'scf_addons_remove_linear_dep_trigger', 1e-10)
def frac_occ_(mf, tol=1e-3):
from pyscf.scf import uhf, rohf
old_get_occ = mf.get_occ
mol = mf.mol
def guess_occ(mo_energy, nocc):
sorted_idx = numpy.argsort(mo_energy)
homo = mo_energy[sorted_idx[nocc-1]]
lumo = mo_energy[sorted_idx[nocc]]
frac_occ_lst = abs(mo_energy - homo) < tol
integer_occ_lst = (mo_energy <= homo) & (~frac_occ_lst)
mo_occ = numpy.zeros_like(mo_energy)
mo_occ[integer_occ_lst] = 1
degen = numpy.count_nonzero(frac_occ_lst)
frac = nocc - numpy.count_nonzero(integer_occ_lst)
mo_occ[frac_occ_lst] = float(frac) / degen
return mo_occ, numpy.where(frac_occ_lst)[0], homo, lumo
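    # Worked example (an illustrative sketch): for mo_energy = [0., 1., 1., 2.]
    # and nocc = 2, homo = lumo = 1.0; one orbital is fully occupied and the two
    # degenerate frontier orbitals share the remaining electron, so
    # guess_occ(...) returns mo_occ = [1, 0.5, 0.5, 0].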
get_grad = None
if isinstance(mf, uhf.UHF):
def get_occ(mo_energy, mo_coeff=None):
nocca, noccb = mol.nelec
mo_occa, frac_lsta, homoa, lumoa = guess_occ(mo_energy[0], nocca)
mo_occb, frac_lstb, homob, lumob = guess_occ(mo_energy[1], noccb)
if abs(homoa - lumoa) < tol or abs(homob - lumob) < tol:
mo_occ = numpy.array([mo_occa, mo_occb])
logger.warn(mf, 'fraction occ = %6g for alpha orbitals %s '
'%6g for beta orbitals %s',
mo_occa[frac_lsta[0]], frac_lsta,
mo_occb[frac_lstb[0]], frac_lstb)
logger.info(mf, ' alpha HOMO = %.12g LUMO = %.12g', homoa, lumoa)
logger.info(mf, ' beta HOMO = %.12g LUMO = %.12g', homob, lumob)
logger.debug(mf, ' alpha mo_energy = %s', mo_energy[0])
logger.debug(mf, ' beta mo_energy = %s', mo_energy[1])
else:
mo_occ = old_get_occ(mo_energy, mo_coeff)
return mo_occ
elif isinstance(mf, rohf.ROHF):
def get_occ(mo_energy, mo_coeff=None):
nocca, noccb = mol.nelec
mo_occa, frac_lsta, homoa, lumoa = guess_occ(mo_energy, nocca)
mo_occb, frac_lstb, homob, lumob = guess_occ(mo_energy, noccb)
if abs(homoa - lumoa) < tol or abs(homob - lumob) < tol:
mo_occ = mo_occa + mo_occb
logger.warn(mf, 'fraction occ = %6g for alpha orbitals %s '
'%6g for beta orbitals %s',
mo_occa[frac_lsta[0]], frac_lsta,
mo_occb[frac_lstb[0]], frac_lstb)
logger.info(mf, ' HOMO = %.12g LUMO = %.12g', homoa, lumoa)
logger.debug(mf, ' mo_energy = %s', mo_energy)
else:
mo_occ = old_get_occ(mo_energy, mo_coeff)
return mo_occ
def get_grad(mo_coeff, mo_occ, fock):
occidxa = mo_occ > 0
occidxb = mo_occ > 1
viridxa = ~occidxa
viridxb = ~occidxb
uniq_var_a = viridxa.reshape(-1,1) & occidxa
uniq_var_b = viridxb.reshape(-1,1) & occidxb
if getattr(fock, 'focka', None) is not None:
focka = fock.focka
fockb = fock.fockb
elif getattr(fock, 'ndim', None) == 3:
focka, fockb = fock
else:
focka = fockb = fock
focka = reduce(numpy.dot, (mo_coeff.T.conj(), focka, mo_coeff))
fockb = reduce(numpy.dot, (mo_coeff.T.conj(), fockb, mo_coeff))
g = numpy.zeros_like(focka)
g[uniq_var_a] = focka[uniq_var_a]
g[uniq_var_b] += fockb[uniq_var_b]
return g[uniq_var_a | uniq_var_b]
else: # RHF
def get_occ(mo_energy, mo_coeff=None):
nocc = (mol.nelectron+1) // 2 # n_docc + n_socc
mo_occ, frac_lst, homo, lumo = guess_occ(mo_energy, nocc)
n_docc = mol.nelectron // 2
n_socc = nocc - n_docc
if abs(homo - lumo) < tol or n_socc:
mo_occ *= 2
degen = len(frac_lst)
mo_occ[frac_lst] -= float(n_socc) / degen
logger.warn(mf, 'fraction occ = %6g for orbitals %s',
mo_occ[frac_lst[0]], frac_lst)
logger.info(mf, 'HOMO = %.12g LUMO = %.12g', homo, lumo)
logger.debug(mf, ' mo_energy = %s', mo_energy)
else:
mo_occ = old_get_occ(mo_energy, mo_coeff)
return mo_occ
mf.get_occ = get_occ
if get_grad is not None:
mf.get_grad = get_grad
return mf
frac_occ = frac_occ_
def dynamic_occ_(mf, tol=1e-3):
assert(isinstance(mf, hf.RHF))
old_get_occ = mf.get_occ
def get_occ(mo_energy, mo_coeff=None):
mol = mf.mol
nocc = mol.nelectron // 2
sort_mo_energy = numpy.sort(mo_energy)
lumo = sort_mo_energy[nocc]
if abs(sort_mo_energy[nocc-1] - lumo) < tol:
mo_occ = numpy.zeros_like(mo_energy)
mo_occ[mo_energy<lumo] = 2
lst = abs(mo_energy - lumo) < tol
mo_occ[lst] = 0
logger.warn(mf, 'set charge = %d', mol.charge+int(lst.sum())*2)
logger.info(mf, 'HOMO = %.12g LUMO = %.12g',
sort_mo_energy[nocc-1], sort_mo_energy[nocc])
logger.debug(mf, ' mo_energy = %s', sort_mo_energy)
else:
mo_occ = old_get_occ(mo_energy, mo_coeff)
return mo_occ
mf.get_occ = get_occ
return mf
dynamic_occ = dynamic_occ_
def dynamic_level_shift_(mf, factor=1.):
'''Dynamically change the level shift in each SCF cycle. The level shift
value is set to (HF energy change * factor)
'''
old_get_fock = mf.get_fock
last_e = [None]
def get_fock(h1e, s1e, vhf, dm, cycle=-1, diis=None,
diis_start_cycle=None, level_shift_factor=None, damp_factor=None):
if cycle >= 0 or diis is not None:
ehf =(numpy.einsum('ij,ji', h1e, dm) +
numpy.einsum('ij,ji', vhf, dm) * .5)
if last_e[0] is not None:
level_shift_factor = abs(ehf-last_e[0]) * factor
logger.info(mf, 'Set level shift to %g', level_shift_factor)
last_e[0] = ehf
return old_get_fock(h1e, s1e, vhf, dm, cycle, diis, diis_start_cycle,
level_shift_factor, damp_factor)
mf.get_fock = get_fock
return mf
dynamic_level_shift = dynamic_level_shift_
def float_occ_(mf):
'''
    For UHF, allow the Sz value to change during the SCF iterations.
Determine occupation of alpha and beta electrons based on energy spectrum
'''
from pyscf.scf import uhf
assert(isinstance(mf, uhf.UHF))
def get_occ(mo_energy, mo_coeff=None):
mol = mf.mol
ee = numpy.sort(numpy.hstack(mo_energy))
n_a = numpy.count_nonzero(mo_energy[0]<(ee[mol.nelectron-1]+1e-3))
n_b = mol.nelectron - n_a
if mf.nelec is None:
nelec = mf.mol.nelec
else:
nelec = mf.nelec
if n_a != nelec[0]:
logger.info(mf, 'change num. alpha/beta electrons '
' %d / %d -> %d / %d',
nelec[0], nelec[1], n_a, n_b)
mf.nelec = (n_a, n_b)
return uhf.UHF.get_occ(mf, mo_energy, mo_coeff)
mf.get_occ = get_occ
return mf
dynamic_sz_ = float_occ = float_occ_
def follow_state_(mf, occorb=None):
occstat = [occorb]
old_get_occ = mf.get_occ
def get_occ(mo_energy, mo_coeff=None):
if occstat[0] is None:
mo_occ = old_get_occ(mo_energy, mo_coeff)
else:
mo_occ = numpy.zeros_like(mo_energy)
s = reduce(numpy.dot, (occstat[0].T, mf.get_ovlp(), mo_coeff))
nocc = mf.mol.nelectron // 2
#choose a subset of mo_coeff, which maximizes <old|now>
idx = numpy.argsort(numpy.einsum('ij,ij->j', s, s))
mo_occ[idx[-nocc:]] = 2
logger.debug(mf, ' mo_occ = %s', mo_occ)
logger.debug(mf, ' mo_energy = %s', mo_energy)
occstat[0] = mo_coeff[:,mo_occ>0]
return mo_occ
mf.get_occ = get_occ
return mf
follow_state = follow_state_
def mom_occ_(mf, occorb, setocc):
'''Use maximum overlap method to determine occupation number for each orbital in every
iteration. It can be applied to unrestricted HF/KS and restricted open-shell
HF/KS.'''
from pyscf.scf import uhf, rohf
if isinstance(mf, uhf.UHF):
coef_occ_a = occorb[0][:, setocc[0]>0]
coef_occ_b = occorb[1][:, setocc[1]>0]
elif isinstance(mf, rohf.ROHF):
if mf.mol.spin != (numpy.sum(setocc[0]) - numpy.sum(setocc[1])):
raise ValueError('Wrong occupation setting for restricted open-shell calculation.')
coef_occ_a = occorb[:, setocc[0]>0]
coef_occ_b = occorb[:, setocc[1]>0]
else:
raise RuntimeError('Cannot support this class of instance %s' % mf)
log = logger.Logger(mf.stdout, mf.verbose)
def get_occ(mo_energy=None, mo_coeff=None):
if mo_energy is None: mo_energy = mf.mo_energy
if mo_coeff is None: mo_coeff = mf.mo_coeff
if isinstance(mf, rohf.ROHF): mo_coeff = numpy.array([mo_coeff, mo_coeff])
mo_occ = numpy.zeros_like(setocc)
nocc_a = int(numpy.sum(setocc[0]))
nocc_b = int(numpy.sum(setocc[1]))
s_a = reduce(numpy.dot, (coef_occ_a.T, mf.get_ovlp(), mo_coeff[0]))
s_b = reduce(numpy.dot, (coef_occ_b.T, mf.get_ovlp(), mo_coeff[1]))
#choose a subset of mo_coeff, which maximizes <old|now>
idx_a = numpy.argsort(numpy.einsum('ij,ij->j', s_a, s_a))[::-1]
idx_b = numpy.argsort(numpy.einsum('ij,ij->j', s_b, s_b))[::-1]
mo_occ[0][idx_a[:nocc_a]] = 1.
mo_occ[1][idx_b[:nocc_b]] = 1.
log.debug(' New alpha occ pattern: %s', mo_occ[0])
log.debug(' New beta occ pattern: %s', mo_occ[1])
if isinstance(mf.mo_energy, numpy.ndarray) and mf.mo_energy.ndim == 1:
log.debug1(' Current mo_energy(sorted) = %s', mo_energy)
else:
log.debug1(' Current alpha mo_energy(sorted) = %s', mo_energy[0])
log.debug1(' Current beta mo_energy(sorted) = %s', mo_energy[1])
if (int(numpy.sum(mo_occ[0])) != nocc_a):
log.error('mom alpha electron occupation numbers do not match: %d, %d',
nocc_a, int(numpy.sum(mo_occ[0])))
if (int(numpy.sum(mo_occ[1])) != nocc_b):
            log.error('mom beta electron occupation numbers do not match: %d, %d',
                      nocc_b, int(numpy.sum(mo_occ[1])))
#output 1-dimension occupation number for restricted open-shell
if isinstance(mf, rohf.ROHF): mo_occ = mo_occ[0, :] + mo_occ[1, :]
return mo_occ
mf.get_occ = get_occ
return mf
mom_occ = mom_occ_
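# Delta-SCF usage sketch (hedged; assumes `from pyscf import scf` at the call
# site and a converged ground state; the orbital indices are illustrative):
#   mf0 = scf.UHF(mol).run()
#   occ = mf0.mo_occ.copy()
#   occ[0][i_homo], occ[0][i_lumo] = 0, 1      # promote one alpha electron
#   mf1 = mom_occ_(scf.UHF(mol), mf0.mo_coeff, occ)
#   mf1.scf(dm0=mf1.make_rdm1(mf0.mo_coeff, occ))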
def project_mo_nr2nr(mol1, mo1, mol2):
r''' Project orbital coefficients from basis set 1 (C1 for mol1) to basis
set 2 (C2 for mol2).
.. math::
|\psi1\rangle = |AO1\rangle C1
        |\psi2\rangle = P |\psi1\rangle = |AO2\rangle S^{-1}\langle AO2|AO1\rangle C1 = |AO2\rangle C2
C2 = S^{-1}\langle AO2|AO1\rangle C1
There are three relevant functions:
:func:`project_mo_nr2nr` is the projection for non-relativistic (scalar) basis.
:func:`project_mo_nr2r` projects from non-relativistic to relativistic basis.
:func:`project_mo_r2r` is the projection between relativistic (spinor) basis.
'''
s22 = mol2.intor_symmetric('int1e_ovlp')
s21 = mole.intor_cross('int1e_ovlp', mol2, mol1)
if isinstance(mo1, numpy.ndarray) and mo1.ndim == 2:
return lib.cho_solve(s22, numpy.dot(s21, mo1), strict_sym_pos=False)
else:
return [lib.cho_solve(s22, numpy.dot(s21, x), strict_sym_pos=False)
for x in mo1]
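# A minimal usage sketch for project_mo_nr2nr (the geometry and basis sets are
# illustrative): project converged MOs from a small basis onto a larger one,
# e.g. to build an initial guess in the big basis.
#
#     from pyscf import gto, scf
#     mol1 = gto.M(atom='Ne 0 0 0', basis='sto-3g')
#     mol2 = gto.M(atom='Ne 0 0 0', basis='cc-pvdz')
#     mf1 = scf.RHF(mol1).run()
#     mo2 = scf.addons.project_mo_nr2nr(mol1, mf1.mo_coeff, mol2)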
def project_mo_nr2r(mol1, mo1, mol2):
__doc__ = project_mo_nr2nr.__doc__
assert(not mol1.cart)
s22 = mol2.intor_symmetric('int1e_ovlp_spinor')
s21 = mole.intor_cross('int1e_ovlp_sph', mol2, mol1)
ua, ub = mol2.sph2spinor_coeff()
s21 = numpy.dot(ua.T.conj(), s21) + numpy.dot(ub.T.conj(), s21) # (*)
# mo2: alpha, beta have been summed in Eq. (*)
# so DM = mo2[:,:nocc] * 1 * mo2[:,:nocc].H
if isinstance(mo1, numpy.ndarray) and mo1.ndim == 2:
mo2 = numpy.dot(s21, mo1)
return lib.cho_solve(s22, mo2, strict_sym_pos=False)
else:
return [lib.cho_solve(s22, numpy.dot(s21, x), strict_sym_pos=False)
for x in mo1]
def project_mo_r2r(mol1, mo1, mol2):
__doc__ = project_mo_nr2nr.__doc__
s22 = mol2.intor_symmetric('int1e_ovlp_spinor')
t22 = mol2.intor_symmetric('int1e_spsp_spinor')
s21 = mole.intor_cross('int1e_ovlp_spinor', mol2, mol1)
t21 = mole.intor_cross('int1e_spsp_spinor', mol2, mol1)
n2c = s21.shape[1]
pl = lib.cho_solve(s22, s21, strict_sym_pos=False)
ps = lib.cho_solve(t22, t21, strict_sym_pos=False)
if isinstance(mo1, numpy.ndarray) and mo1.ndim == 2:
return numpy.vstack((numpy.dot(pl, mo1[:n2c]),
numpy.dot(ps, mo1[n2c:])))
else:
return [numpy.vstack((numpy.dot(pl, x[:n2c]),
numpy.dot(ps, x[n2c:]))) for x in mo1]
def project_dm_nr2nr(mol1, dm1, mol2):
r''' Project density matrix representation from basis set 1 (mol1) to basis
set 2 (mol2).
.. math::
|AO2\rangle DM_AO2 \langle AO2|
= |AO2\rangle P DM_AO1 P \langle AO2|
DM_AO2 = P DM_AO1 P
P = S_{AO2}^{-1}\langle AO2|AO1\rangle
There are three relevant functions:
:func:`project_dm_nr2nr` is the projection for non-relativistic (scalar) basis.
:func:`project_dm_nr2r` projects from non-relativistic to relativistic basis.
:func:`project_dm_r2r` is the projection between relativistic (spinor) basis.
'''
s22 = mol2.intor_symmetric('int1e_ovlp')
s21 = mole.intor_cross('int1e_ovlp', mol2, mol1)
p21 = lib.cho_solve(s22, s21, strict_sym_pos=False)
if isinstance(dm1, numpy.ndarray) and dm1.ndim == 2:
return reduce(numpy.dot, (p21, dm1, p21.conj().T))
else:
return lib.einsum('pi,nij,qj->npq', p21, dm1, p21.conj())
def project_dm_nr2r(mol1, dm1, mol2):
__doc__ = project_dm_nr2nr.__doc__
assert(not mol1.cart)
s22 = mol2.intor_symmetric('int1e_ovlp_spinor')
s21 = mole.intor_cross('int1e_ovlp_sph', mol2, mol1)
ua, ub = mol2.sph2spinor_coeff()
s21 = numpy.dot(ua.T.conj(), s21) + numpy.dot(ub.T.conj(), s21) # (*)
# mo2: alpha, beta have been summed in Eq. (*)
# so DM = mo2[:,:nocc] * 1 * mo2[:,:nocc].H
p21 = lib.cho_solve(s22, s21, strict_sym_pos=False)
if isinstance(dm1, numpy.ndarray) and dm1.ndim == 2:
return reduce(numpy.dot, (p21, dm1, p21.conj().T))
else:
return lib.einsum('pi,nij,qj->npq', p21, dm1, p21.conj())
def project_dm_r2r(mol1, dm1, mol2):
__doc__ = project_dm_nr2nr.__doc__
s22 = mol2.intor_symmetric('int1e_ovlp_spinor')
t22 = mol2.intor_symmetric('int1e_spsp_spinor')
s21 = mole.intor_cross('int1e_ovlp_spinor', mol2, mol1)
t21 = mole.intor_cross('int1e_spsp_spinor', mol2, mol1)
n2c = s21.shape[1]
pl = lib.cho_solve(s22, s21, strict_sym_pos=False)
ps = lib.cho_solve(t22, t21, strict_sym_pos=False)
p21 = scipy.linalg.block_diag(pl, ps)
if isinstance(dm1, numpy.ndarray) and dm1.ndim == 2:
return reduce(numpy.dot, (p21, dm1, p21.conj().T))
else:
return lib.einsum('pi,nij,qj->npq', p21, dm1, p21.conj())
def remove_linear_dep_(mf, threshold=LINEAR_DEP_THRESHOLD,
lindep=LINEAR_DEP_TRIGGER):
'''
Args:
threshold : float
The threshold under which the eigenvalues of the overlap matrix are
discarded to avoid numerical instability.
lindep : float
The threshold that triggers the special treatment of the linear
dependence issue.
'''
s = mf.get_ovlp()
cond = numpy.max(lib.cond(s))
if cond < 1./lindep:
return mf
    logger.info(mf, 'Applying remove_linear_dep_ on SCF object.')
logger.debug(mf, 'Overlap condition number %g', cond)
def eigh(h, s):
d, t = numpy.linalg.eigh(s)
x = t[:,d>threshold] / numpy.sqrt(d[d>threshold])
xhx = reduce(numpy.dot, (x.T.conj(), h, x))
e, c = numpy.linalg.eigh(xhx)
c = numpy.dot(x, c)
return e, c
mf._eigh = eigh
return mf
remove_linear_dep = remove_linear_dep_
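# A minimal usage sketch for remove_linear_dep_ (the system and basis are
# illustrative): useful when diffuse functions make the overlap matrix nearly
# singular.
#
#     from pyscf import gto, scf
#     mol = gto.M(atom='He 0 0 0; He 0 0 0.5', basis='aug-cc-pvqz')
#     mf = scf.RHF(mol)
#     scf.addons.remove_linear_dep_(mf)   # installs the thresholded eigh
#     mf.kernel()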
def convert_to_uhf(mf, out=None, remove_df=False):
'''Convert the given mean-field object to the unrestricted HF/KS object
Note this conversion only changes the class of the mean-field object.
    The total energy and wave-function are the same as those of the input
    mf object. If mf is a second-order SCF (SOSCF) object, the SOSCF layer
    will be discarded and its underlying SCF object mf._scf will be converted.
Args:
mf : SCF object
Kwargs
remove_df : bool
Whether to convert the DF-SCF object to the normal SCF object.
This conversion is not applied by default.
Returns:
An unrestricted SCF object
'''
from pyscf import scf
from pyscf import dft
from pyscf.soscf import newton_ah
assert(isinstance(mf, hf.SCF))
logger.debug(mf, 'Converting %s to UHF', mf.__class__)
def update_mo_(mf, mf1):
if mf.mo_energy is not None:
if isinstance(mf, scf.uhf.UHF):
mf1.mo_occ = mf.mo_occ
mf1.mo_coeff = mf.mo_coeff
mf1.mo_energy = mf.mo_energy
            elif getattr(mf, 'kpts', None) is None:  # RHF
mf1.mo_occ = numpy.array((mf.mo_occ>0, mf.mo_occ==2), dtype=numpy.double)
mf1.mo_energy = (mf.mo_energy, mf.mo_energy)
mf1.mo_coeff = (mf.mo_coeff, mf.mo_coeff)
            else:  # This handles KRHF objects
mf1.mo_occ = ([numpy.asarray(occ> 0, dtype=numpy.double)
for occ in mf.mo_occ],
[numpy.asarray(occ==2, dtype=numpy.double)
for occ in mf.mo_occ])
mf1.mo_energy = (mf.mo_energy, mf.mo_energy)
mf1.mo_coeff = (mf.mo_coeff, mf.mo_coeff)
return mf1
if isinstance(mf, scf.ghf.GHF):
raise NotImplementedError
elif out is not None:
assert(isinstance(out, scf.uhf.UHF))
out = _update_mf_without_soscf(mf, out, remove_df)
elif isinstance(mf, scf.uhf.UHF):
# Remove with_df for SOSCF method because the post-HF code checks the
# attribute .with_df to identify whether an SCF object is DF-SCF method.
# with_df in SOSCF is used in orbital hessian approximation only. For the
        # returned SCF object, whether with_df exists in SOSCF has no effect on the
# mean-field energy and other properties.
if getattr(mf, '_scf', None):
return _update_mf_without_soscf(mf, copy.copy(mf._scf), remove_df)
else:
return copy.copy(mf)
else:
known_cls = {scf.hf.RHF : scf.uhf.UHF,
scf.rohf.ROHF : scf.uhf.UHF,
scf.hf_symm.RHF : scf.uhf_symm.UHF,
scf.hf_symm.ROHF : scf.uhf_symm.UHF,
dft.rks.RKS : dft.uks.UKS,
dft.roks.ROKS : dft.uks.UKS,
dft.rks_symm.RKS : dft.uks_symm.UKS,
dft.rks_symm.ROKS : dft.uks_symm.UKS}
out = _object_without_soscf(mf, known_cls, remove_df)
return update_mo_(mf, out)
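# A minimal usage sketch for convert_to_uhf (the system is illustrative):
# reuse a converged RHF solution as the starting point of an unrestricted
# treatment.
#
#     from pyscf import gto, scf
#     mol = gto.M(atom='H 0 0 0; H 0 0 0.74', basis='sto-3g')
#     mf_rhf = scf.RHF(mol).run()
#     mf_uhf = scf.addons.convert_to_uhf(mf_rhf)  # same energy, UHF class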
def _object_without_soscf(mf, known_class, remove_df=False):
from pyscf.soscf import newton_ah
sub_classes = []
obj = None
for i, cls in enumerate(mf.__class__.__mro__):
if cls in known_class:
obj = known_class[cls](mf.mol)
break
else:
sub_classes.append(cls)
if obj is None:
raise NotImplementedError(
"Incompatible object types. Mean-field `mf` class not found in "
"`known_class` type.\n\nmf = '%s'\n\nknown_class = '%s'" %
(mf.__class__.__mro__, known_class))
if isinstance(mf, newton_ah._CIAH_SOSCF):
remove_df = (remove_df or
# The main SCF object is not a DFHF object
not getattr(mf._scf, 'with_df', None))
# Mimic the initialization procedure to restore the Hamiltonian
for cls in reversed(sub_classes):
class_name = cls.__name__
if '_DFHF' in class_name:
if not remove_df:
obj = obj.density_fit()
elif '_SGXHF' in class_name:
if not remove_df:
obj = obj.COSX()
elif '_X2C_SCF' in class_name:
obj = obj.x2c()
elif 'WithSolvent' in class_name:
obj = obj.ddCOSMO(mf.with_solvent)
elif 'QMMM' in class_name and getattr(mf, 'mm_mol', None):
from pyscf.qmmm.itrf import qmmm_for_scf
obj = qmmm_for_scf(obj, mf.mm_mol)
elif '_DFTD3' in class_name:
from pyscf.dftd3.itrf import dftd3
obj = dftd3(obj)
return _update_mf_without_soscf(mf, obj, remove_df)
def _update_mf_without_soscf(mf, out, remove_df=False):
from pyscf.soscf import newton_ah
mf_dic = dict(mf.__dict__)
    # if mf is an SOSCF object, avoid overwriting the with_df attribute
# FIXME: it causes bug when converting pbc-SOSCF.
if isinstance(mf, newton_ah._CIAH_SOSCF):
mf_dic.pop('with_df', None)
out.__dict__.update(mf_dic)
if remove_df and getattr(out, 'with_df', None):
delattr(out, 'with_df')
return out
def convert_to_rhf(mf, out=None, remove_df=False):
'''Convert the given mean-field object to the restricted HF/KS object
Note this conversion only changes the class of the mean-field object.
    The total energy and wave-function are the same as those of the input
    mf object. If mf is a second-order SCF (SOSCF) object, the SOSCF layer
    will be discarded and its underlying SCF object mf._scf will be converted.
Args:
mf : SCF object
Kwargs
remove_df : bool
Whether to convert the DF-SCF object to the normal SCF object.
This conversion is not applied by default.
Returns:
        A restricted SCF object
'''
from pyscf import scf
from pyscf import dft
from pyscf.soscf import newton_ah
assert(isinstance(mf, hf.SCF))
logger.debug(mf, 'Converting %s to RHF', mf.__class__)
def update_mo_(mf, mf1):
if mf.mo_energy is not None:
if isinstance(mf, scf.hf.RHF): # RHF/ROHF/KRHF/KROHF
mf1.mo_occ = mf.mo_occ
mf1.mo_coeff = mf.mo_coeff
mf1.mo_energy = mf.mo_energy
elif getattr(mf, 'kpts', None) is None: # UHF
mf1.mo_occ = mf.mo_occ[0] + mf.mo_occ[1]
mf1.mo_energy = mf.mo_energy[0]
mf1.mo_coeff = mf.mo_coeff[0]
if getattr(mf.mo_coeff[0], 'orbsym', None) is not None:
mf1.mo_coeff = lib.tag_array(mf1.mo_coeff, orbsym=mf.mo_coeff[0].orbsym)
else: # KUHF
mf1.mo_occ = [occa+occb for occa, occb in zip(*mf.mo_occ)]
mf1.mo_energy = mf.mo_energy[0]
mf1.mo_coeff = mf.mo_coeff[0]
return mf1
if getattr(mf, 'nelec', None) is None:
nelec = mf.mol.nelec
else:
nelec = mf.nelec
if isinstance(mf, scf.ghf.GHF):
raise NotImplementedError
elif out is not None:
assert(isinstance(out, scf.hf.RHF))
out = _update_mf_without_soscf(mf, out, remove_df)
elif (isinstance(mf, scf.hf.RHF) or
(nelec[0] != nelec[1] and isinstance(mf, scf.rohf.ROHF))):
if getattr(mf, '_scf', None):
return _update_mf_without_soscf(mf, copy.copy(mf._scf), remove_df)
else:
return copy.copy(mf)
else:
if nelec[0] == nelec[1]:
known_cls = {scf.uhf.UHF : scf.hf.RHF ,
scf.uhf_symm.UHF : scf.hf_symm.RHF ,
dft.uks.UKS : dft.rks.RKS ,
dft.uks_symm.UKS : dft.rks_symm.RKS,
scf.rohf.ROHF : scf.hf.RHF ,
scf.hf_symm.ROHF : scf.hf_symm.RHF ,
dft.roks.ROKS : dft.rks.RKS ,
dft.rks_symm.ROKS: dft.rks_symm.RKS}
else:
known_cls = {scf.uhf.UHF : scf.rohf.ROHF ,
scf.uhf_symm.UHF : scf.hf_symm.ROHF ,
dft.uks.UKS : dft.roks.ROKS ,
dft.uks_symm.UKS : dft.rks_symm.ROKS}
out = _object_without_soscf(mf, known_cls, remove_df)
return update_mo_(mf, out)
def convert_to_ghf(mf, out=None, remove_df=False):
'''Convert the given mean-field object to the generalized HF/KS object
Note this conversion only changes the class of the mean-field object.
    The total energy and wave-function are the same as those of the input
    mf object. If mf is a second-order SCF (SOSCF) object, the SOSCF layer
    will be discarded and its underlying SCF object mf._scf will be converted.
Args:
mf : SCF object
Kwargs
remove_df : bool
Whether to convert the DF-SCF object to the normal SCF object.
This conversion is not applied by default.
Returns:
        A generalized SCF object
'''
from pyscf import scf
from pyscf import dft
from pyscf.soscf import newton_ah
assert(isinstance(mf, hf.SCF))
logger.debug(mf, 'Converting %s to GHF', mf.__class__)
def update_mo_(mf, mf1):
if mf.mo_energy is not None:
if isinstance(mf, scf.hf.RHF): # RHF
nao, nmo = mf.mo_coeff.shape
orbspin = get_ghf_orbspin(mf.mo_energy, mf.mo_occ, True)
mf1.mo_energy = numpy.empty(nmo*2)
mf1.mo_energy[orbspin==0] = mf.mo_energy
mf1.mo_energy[orbspin==1] = mf.mo_energy
mf1.mo_occ = numpy.empty(nmo*2)
mf1.mo_occ[orbspin==0] = mf.mo_occ > 0
mf1.mo_occ[orbspin==1] = mf.mo_occ == 2
mo_coeff = numpy.zeros((nao*2,nmo*2), dtype=mf.mo_coeff.dtype)
mo_coeff[:nao,orbspin==0] = mf.mo_coeff
mo_coeff[nao:,orbspin==1] = mf.mo_coeff
if getattr(mf.mo_coeff, 'orbsym', None) is not None:
orbsym = numpy.zeros_like(orbspin)
orbsym[orbspin==0] = mf.mo_coeff.orbsym
orbsym[orbspin==1] = mf.mo_coeff.orbsym
mo_coeff = lib.tag_array(mo_coeff, orbsym=orbsym)
mf1.mo_coeff = lib.tag_array(mo_coeff, orbspin=orbspin)
else: # UHF
nao, nmo = mf.mo_coeff[0].shape
orbspin = get_ghf_orbspin(mf.mo_energy, mf.mo_occ, False)
mf1.mo_energy = numpy.empty(nmo*2)
mf1.mo_energy[orbspin==0] = mf.mo_energy[0]
mf1.mo_energy[orbspin==1] = mf.mo_energy[1]
mf1.mo_occ = numpy.empty(nmo*2)
mf1.mo_occ[orbspin==0] = mf.mo_occ[0]
mf1.mo_occ[orbspin==1] = mf.mo_occ[1]
mo_coeff = numpy.zeros((nao*2,nmo*2), dtype=mf.mo_coeff[0].dtype)
mo_coeff[:nao,orbspin==0] = mf.mo_coeff[0]
mo_coeff[nao:,orbspin==1] = mf.mo_coeff[1]
if getattr(mf.mo_coeff[0], 'orbsym', None) is not None:
orbsym = numpy.zeros_like(orbspin)
orbsym[orbspin==0] = mf.mo_coeff[0].orbsym
orbsym[orbspin==1] = mf.mo_coeff[1].orbsym
mo_coeff = lib.tag_array(mo_coeff, orbsym=orbsym)
mf1.mo_coeff = lib.tag_array(mo_coeff, orbspin=orbspin)
return mf1
if out is not None:
assert(isinstance(out, scf.ghf.GHF))
out = _update_mf_without_soscf(mf, out, remove_df)
elif isinstance(mf, scf.ghf.GHF):
if getattr(mf, '_scf', None):
return _update_mf_without_soscf(mf, copy.copy(mf._scf), remove_df)
else:
return copy.copy(mf)
else:
known_cls = {scf.hf.RHF : scf.ghf.GHF,
scf.rohf.ROHF : scf.ghf.GHF,
scf.uhf.UHF : scf.ghf.GHF,
scf.hf_symm.RHF : scf.ghf_symm.GHF,
scf.hf_symm.ROHF : scf.ghf_symm.GHF,
scf.uhf_symm.UHF : scf.ghf_symm.GHF,
dft.rks.RKS : None,
dft.roks.ROKS : None,
dft.uks.UKS : None,
dft.rks_symm.RKS : None,
dft.rks_symm.ROKS : None,
dft.uks_symm.UKS : None}
out = _object_without_soscf(mf, known_cls, remove_df)
return update_mo_(mf, out)
def get_ghf_orbspin(mo_energy, mo_occ, is_rhf=None):
'''Spin of each GHF orbital when the GHF orbitals are converted from
RHF/UHF orbitals
    For RHF orbitals, the orbspin lists occupied orbitals first, then
    unoccupied orbitals. Within the occupied space, degenerate pairs are
    ordered alpha then beta, with the (open-shell) singly occupied (alpha)
    orbitals last. Within the unoccupied space, the (open-shell) unoccupied
    (beta) orbitals come first if applicable, followed by alpha and beta
    orbitals.
For UHF orbitals, the orbspin corresponds to first occupied orbitals then
unoccupied orbitals.
'''
if is_rhf is None: # guess whether the orbitals are RHF orbitals
is_rhf = mo_energy[0].ndim == 0
if is_rhf:
nmo = mo_energy.size
nocc = numpy.count_nonzero(mo_occ >0)
nvir = nmo - nocc
ndocc = numpy.count_nonzero(mo_occ==2)
nsocc = nocc - ndocc
orbspin = numpy.array([0,1]*ndocc + [0]*nsocc + [1]*nsocc + [0,1]*nvir)
else:
nmo = mo_energy[0].size
nocca = numpy.count_nonzero(mo_occ[0]>0)
nvira = nmo - nocca
noccb = numpy.count_nonzero(mo_occ[1]>0)
nvirb = nmo - noccb
# round(6) to avoid numerical uncertainty in degeneracy
es = numpy.append(mo_energy[0][mo_occ[0] >0],
mo_energy[1][mo_occ[1] >0])
oidx = numpy.argsort(es.round(6))
es = numpy.append(mo_energy[0][mo_occ[0]==0],
mo_energy[1][mo_occ[1]==0])
vidx = numpy.argsort(es.round(6))
orbspin = numpy.append(numpy.array([0]*nocca+[1]*noccb)[oidx],
numpy.array([0]*nvira+[1]*nvirb)[vidx])
return orbspin
del LINEAR_DEP_THRESHOLD, LINEAR_DEP_TRIGGER
def fast_newton(mf, mo_coeff=None, mo_occ=None, dm0=None,
auxbasis=None, dual_basis=None, **newton_kwargs):
    '''This is a wrapper function which combines several operations. It
    first sets up an initial guess from a density-fitting calculation,
    then feeds the guess to the Newton solver and calls the solver.
    Newton solver attributes [max_cycle_inner, max_stepsize, ah_start_tol,
    ah_conv_tol, ah_grad_trust_region, ...] can be passed through **newton_kwargs.
'''
import copy
from pyscf.lib import logger
from pyscf import df
from pyscf.soscf import newton_ah
if auxbasis is None:
auxbasis = df.addons.aug_etb_for_dfbasis(mf.mol, 'ahlrichs', beta=2.5)
if dual_basis:
mf1 = mf.newton()
pmol = mf1.mol = newton_ah.project_mol(mf.mol, dual_basis)
mf1 = mf1.density_fit(auxbasis)
else:
mf1 = mf.newton().density_fit(auxbasis)
mf1.with_df._compatible_format = False
mf1.direct_scf_tol = 1e-7
if getattr(mf, 'grids', None):
from pyscf.dft import gen_grid
approx_grids = gen_grid.Grids(mf.mol)
approx_grids.verbose = 0
approx_grids.level = max(0, mf.grids.level-3)
mf1.grids = approx_grids
approx_numint = copy.copy(mf._numint)
mf1._numint = approx_numint
for key in newton_kwargs:
setattr(mf1, key, newton_kwargs[key])
if mo_coeff is None or mo_occ is None:
mo_coeff, mo_occ = mf.mo_coeff, mf.mo_occ
if dm0 is not None:
mo_coeff, mo_occ = mf1.from_dm(dm0)
elif mo_coeff is None or mo_occ is None:
logger.note(mf, '========================================================')
logger.note(mf, 'Generating initial guess with DIIS-SCF for newton solver')
logger.note(mf, '========================================================')
if dual_basis:
mf0 = copy.copy(mf)
mf0.mol = pmol
mf0 = mf0.density_fit(auxbasis)
else:
mf0 = mf.density_fit(auxbasis)
mf0.direct_scf_tol = 1e-7
mf0.conv_tol = 3.
mf0.conv_tol_grad = 1.
if mf0.level_shift == 0:
mf0.level_shift = .2
if getattr(mf, 'grids', None):
mf0.grids = approx_grids
mf0._numint = approx_numint
        # Note: by setting small_rho_cutoff, the dft.get_veff function may
        # overwrite approx_grids and approx_numint, which further changes the
        # corresponding mf1 grids and _numint. If an initial guess dm0 or
        # mo_coeff/mo_occ was given, dft.get_veff is not executed, so more
        # grid points may be found in approx_grids.
mf0.small_rho_cutoff = mf.small_rho_cutoff * 10
mf0.kernel()
mf1.with_df = mf0.with_df
mo_coeff, mo_occ = mf0.mo_coeff, mf0.mo_occ
if dual_basis:
if mo_occ.ndim == 2:
mo_coeff =(project_mo_nr2nr(pmol, mo_coeff[0], mf.mol),
project_mo_nr2nr(pmol, mo_coeff[1], mf.mol))
else:
mo_coeff = project_mo_nr2nr(pmol, mo_coeff, mf.mol)
mo_coeff, mo_occ = mf1.from_dm(mf.make_rdm1(mo_coeff,mo_occ))
mf0 = None
logger.note(mf, '============================')
logger.note(mf, 'Generating initial guess end')
logger.note(mf, '============================')
mf1.kernel(mo_coeff, mo_occ)
mf.converged = mf1.converged
mf.e_tot = mf1.e_tot
mf.mo_energy = mf1.mo_energy
mf.mo_coeff = mf1.mo_coeff
mf.mo_occ = mf1.mo_occ
# mf = copy.copy(mf)
# def mf_kernel(*args, **kwargs):
# logger.warn(mf, "fast_newton is a wrap function to quickly setup and call Newton solver. "
# "There's no need to call kernel function again for fast_newton.")
#        del(mf.kernel) # warn once and remove circular dependence
# return mf.e_tot
# mf.kernel = mf_kernel
return mf
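# A minimal usage sketch for fast_newton (the system is illustrative): a
# cheap density-fitted DIIS run generates the guess, then the Newton (SOSCF)
# solver converges the full calculation.
#
#     from pyscf import gto, scf
#     mol = gto.M(atom='N 0 0 0; N 0 0 1.1', basis='cc-pvdz')
#     mf = scf.RHF(mol)
#     mf = scf.fast_newton(mf)
#     print(mf.e_tot)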
|
gkc1000/pyscf
|
pyscf/scf/addons.py
|
Python
|
apache-2.0
| 35,857
|
[
"PySCF"
] |
4faa3fd1f77ed1dfbeac5a553ebf9af656d33d6719e826d502e3f8951c7fd8e2
|
"""
code to extract a single cell from a set of alignments or reads marked via Valentine's umis
repository:
https://github.com/vals/umis
"""
import regex as re
import sys
from argparse import ArgumentParser
from pysam import AlignmentFile
def extract_barcode(sam, barcode):
parser_re = re.compile('.*:CELL_(?P<CB>.*):UMI_(?P<MB>.*)')
sam_file = AlignmentFile(sam, mode='r')
filter_file = AlignmentFile("-", mode='wh', template=sam_file)
track = sam_file.fetch(until_eof=True)
for i, aln in enumerate(track):
if aln.is_unmapped:
continue
match = parser_re.match(aln.qname)
CB = match.group('CB')
if CB == barcode:
filter_file.write(aln)
def stream_fastq(file_handler):
    ''' Generator which yields all four lines of a fastq read as one string
'''
next_element = ''
for i, line in enumerate(file_handler):
next_element += line
if i % 4 == 3:
yield next_element
next_element = ''
def extract_barcode_fastq(fastq, barcode):
parser_re = re.compile('.*:CELL_(?P<CB>.*):UMI_(?P<MB>.*)')
fastq_file = stream_fastq(open(fastq))
for read in fastq_file:
match = parser_re.match(read)
CB = match.group('CB')
if CB == barcode:
sys.stdout.write(read)
if __name__ == "__main__":
parser = ArgumentParser("extract reads/alignments from a single cell")
parser.add_argument("file", help="A SAM or FASTQ file")
parser.add_argument("barcode", help="barcode of the cell to extract")
args = parser.parse_args()
    extract_fn = extract_barcode if args.file.endswith(".sam") else extract_barcode_fastq
extract_fn(args.file, args.barcode)
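# Example invocation (file names and barcode are illustrative): stream the
# records of one cell barcode to stdout, assuming reads were annotated by
# umis as ...:CELL_<barcode>:UMI_<umi>.
#
#     python extract-barcode.py sample.sam AAACCTGAG > cell.sam
#     python extract-barcode.py sample.fastq AAACCTGAG > cell.fastq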
|
roryk/junkdrawer
|
extract-barcode.py
|
Python
|
mit
| 1,714
|
[
"pysam"
] |
b3b5183010243a461ce4f8b655deafd088934afe9893eb9200b4f65e2e46d881
|
from time import sleep
from lettuce import *
from rapidsms.contrib.locations.models import Location
from survey.features.page_objects.question import BatchQuestionsListPage, AddQuestionPage, ListAllQuestionsPage, CreateNewQuestionPage, CreateNewSubQuestionPage, EditQuestionPage
from survey.models import Batch, QuestionModule, BatchQuestionOrder
from survey.models.question import Question, QuestionOption
from survey.models.householdgroups import HouseholdMemberGroup
from survey.models.answer_rule import AnswerRule
@step(u'And I have 100 questions under the batch')
def and_i_have_100_questions_under_the_batch(step):
for i in xrange(100):
        q = Question.objects.create(
            text="some questions %d" % i,
            answer_type=Question.NUMBER,
            identifier='ID %d' % i,
            order=i)
        q.batches.add(world.batch)
        BatchQuestionOrder.objects.create(
            batch=world.batch, question=q, order=i)
@step(u'And I visit questions listing page of the batch')
def and_i_visit_questions_listing_page_of_the_batch(step):
world.page = BatchQuestionsListPage(world.browser, world.batch)
world.page.visit()
@step(u'Then I should see the questions list paginated')
def then_i_should_see_the_questions_list_paginated(step):
world.page.validate_fields()
world.page.validate_pagination()
world.page.validate_fields()
@step(u'When I change to 100 questions per page')
def when_i_change_to_100_questions_per_page(step):
world.page.fill_valid_values({'number_of_questions_per_page': 100})
world.page.click_by_css('#a-question-list')
@step(u'Then I should not see pagination')
def then_i_should_not_see_pagination(step):
world.page.validate_pagination(False)
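# A sketch of the kind of feature-file scenario these pagination steps back
# (the scenario text is illustrative; the project's real .feature files are
# not shown here):
#
#     Scenario: Paginating the batch questions list
#         Given I have 100 questions under the batch
#         And I visit questions listing page of the batch
#         Then I should see the questions list paginated
#         When I change to 100 questions per page
#         Then I should not see pagination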
@step(u'And I have no questions under the batch')
def and_i_have_no_questions_under_the_batch(step):
Question.objects.filter(batches=world.batch).delete()
@step(u'Then I should see error message on the page')
def then_i_should_see_error_message_on_the_page(step):
world.page.is_text_present(
"There are no questions associated with this batch yet.")
@step(u'And I click add question button')
def and_i_click_add_question_button(step):
world.page.click_link_by_text("Select Question")
@step(u'Then I should see a add question page')
def then_i_should_see_a_add_question_page(step):
world.page = AddQuestionPage(world.browser, world.batch)
world.page.validate_url()
@step(u'When I fill the details for add question form')
def when_i_fill_the_details_for_add_question_form(step):
data = {'module': world.module.id,
'text': 'hritik question',
'answer_type': Question.NUMBER,
'identifier': 'ID 1'}
world.page.fill_valid_values(data)
@step(u'Then I should go back to questions listing page')
def then_i_should_go_back_to_questions_listing_page(step):
world.page = BatchQuestionsListPage(world.browser, world.batch)
world.page.validate_url()
@step(u'And I should see question successfully added message')
def and_i_should_see_question_successfully_added_message(step):
world.page.is_text_present("Question successfully added.")
@step(u'And I have a member group')
def and_i_have_a_member_group(step):
world.household_member_group = HouseholdMemberGroup.objects.create(
name='Age 4-5', order=1)
@step(u'And I visit add new question page of the batch')
def and_i_visit_add_new_question_page_of_the_batch(step):
world.page = AddQuestionPage(world.browser, world.batch)
world.page.visit()
@step(u'And I fill the details for question')
def and_i_fill_the_details_for_question(step):
world.page.fill_valid_values(
{'identifier': 'ID 1', 'module': world.module.id, 'text': 'hritik question'})
world.page.select('group', [world.household_member_group.pk])
@step(u'When I select multichoice for answer type')
def when_i_select_multichoice_for_answer_type(step):
world.page.select('answer_type', [Question.MULTICHOICE])
@step(u'Then I should see one option field')
def then_i_should_see_one_option_field(step):
world.page.see_one_option_field("Option 1")
world.page.see_option_add_and_remove_buttons(1)
@step(u'When I click add-option icon')
def when_i_click_add_option_icon(step):
world.page.click_by_css(".icon-plus")
@step(u'Then I should see two options field')
def then_i_should_see_two_options_field(step):
world.page.see_one_option_field("Option 1")
world.page.see_one_option_field("Option 2")
world.page.see_option_add_and_remove_buttons(2)
@step(u'When I click remove-option icon')
def when_i_click_remove_option_icon(step):
world.page.click_by_css(".icon-remove")
@step(u'Then I should see only one option field')
def then_i_should_see_only_one_option_field(step):
world.page.see_one_option_field("Option 1")
world.page.see_option_add_and_remove_buttons(1)
world.page.option_not_present("Option 2")
@step(u'And I fill an option question')
def and_i_fill_an_option_question(step):
world.option = {'options': 'some option question text'}
world.page.fill_valid_values(world.option)
@step(u'And I have more than 50 questions')
def and_i_have_100_questions(step):
for i in xrange(100):
        Question.objects.create(
            text="some questions %d" % i,
            answer_type=Question.NUMBER,
            identifier='ID %d' % i,
            order=i)
@step(u'And I visit questions list page')
def and_i_visit_questions_list_page(step):
world.page = ListAllQuestionsPage(world.browser)
world.page.visit()
@step(u'And If I click create new question link')
def and_if_i_click_create_new_question_link(step):
world.page.click_link_by_text("Create New Question")
@step(u'Then I should see create new question page')
def then_i_should_see_create_new_question_page(step):
world.page = CreateNewQuestionPage(world.browser)
world.page.validate_url()
@step(u'And I visit create new question page')
def and_i_visit_create_new_question_page(step):
world.page = CreateNewQuestionPage(world.browser)
world.page.visit()
@step(u'And I have a multichoice question')
def and_i_have_a_multichoice_question(step):
world.multi_choice_question = Question.objects.create(
module=world.module,
text="Are these insecticide?",
answer_type=Question.MULTICHOICE,
order=6,
group=world.household_member_group,
identifier='ID 1')
world.option1 = QuestionOption.objects.create(
question=world.multi_choice_question, text="Yes", order=1)
world.option2 = QuestionOption.objects.create(
question=world.multi_choice_question, text="No", order=2)
world.option3 = QuestionOption.objects.create(
question=world.multi_choice_question, text="Dont Know", order=3)
@step(u'And I click on view options link')
def and_i_click_on_view_options_link(step):
world.page.click_link_by_partial_href(
"#view_options_%d" % world.multi_choice_question.id)
@step(u'Then I should see the question options in a modal')
def then_i_should_see_the_question_options_in_a_modal(step):
world.page.validate_fields_present(
[world.multi_choice_question.text, "Text", "Order"])
@step(u'And when I click the close button')
def and_when_i_click_the_close_button(step):
world.page.click_link_by_text("Close")
@step(u'Then I should be back to questions list page')
def then_i_should_see_questions_list_page(step):
sleep(2)
world.page.validate_fields()
@step(u'And I click on view add subquestion link')
def and_i_click_on_view_add_subquestion_link(step):
world.browser.click_link_by_text("Add Subquestion")
@step(u'Then I should go to add subquestion page')
def then_i_should_go_to_add_subquestion_page(step):
world.page = CreateNewSubQuestionPage(
world.browser, question=world.multi_choice_question)
world.page.validate_url()
@step(u'When I fill in subquestion details')
def when_i_fill_in_subquestion_details(step):
world.page.fill_valid_values(
{'module': world.module.id, 'text': 'hritik question', 'identifier': 'Q001'})
world.page.select('group', [world.household_member_group.pk])
world.page.select('answer_type', [Question.NUMBER])
@step(u'And I should see subquestion successfully added message')
def and_i_should_see_subquestion_successfully_added_message(step):
world.page.see_success_message('Sub question', 'added')
@step(u'And I fill the invalid details details for question')
def and_i_fill_the_invalid_details_for_question(step):
a_very_long_text = "Is there something here I'm missing? Is uni_form " \
"overriding the setting somehow? If not, any advice as " \
"to what I might look for in debug to see where/why this is happening?"
world.page.fill_valid_values({'text': a_very_long_text})
@step(u'And I should see question was not added')
def and_i_should_see_question_was_not_added(step):
world.page.see_message("Question was not added.")
@step(u'And I should see that option in the form')
def and_i_should_see_that_option_in_the_form(step):
world.page.see_option_text(world.option['options'], 'options')
@step(u'And I visit question listing page')
def and_i_visit_question_listing_page(step):
world.page = ListAllQuestionsPage(world.browser)
world.page.visit()
@step(u'And I click the edit question link')
def and_i_click_the_edit_question_link(step):
world.page.click_link_by_text(" Edit")
@step(u'Then I should see the edit question page')
def then_i_should_see_the_edit_question_page(step):
world.page = EditQuestionPage(world.browser, world.multi_choice_question)
world.page.validate_url()
@step(u'And I see the question form with values')
def and_i_see_the_question_form_with_values(step):
world.form = {'module': 'Module',
'text': 'Text',
'group': 'Group',
'answer_type': 'Answer type'}
form_values = {'module': world.module.id,
'text': world.multi_choice_question.text,
'group': world.multi_choice_question.group.id,
'answer_type': world.multi_choice_question.answer_type}
world.page.validate_form_present(world.form)
world.page.validate_form_values(form_values)
@step(u'When I fill in edited question details')
def when_i_fill_in_edited_question_details(step):
world.edited_question_details = {
'module': world.module.id,
'text': 'edited question',
'group': world.multi_choice_question.group.id}
world.page.see_select_option(['Number'], 'answer_type')
world.page.fill_valid_values(world.edited_question_details)
@step(u'Then I should see the question successfully edited')
def then_i_should_see_the_question_successfully_edited(step):
world.page.see_success_message("Question", "edited")
@step(u'And I click on delete question link')
def and_i_click_on_delete_question_link(step):
world.page.click_link_by_partial_href(
"#delete_question_%d" % world.multi_choice_question.id)
@step(u'Then I should see a delete question confirmation modal')
def then_i_should_see_a_delete_question_confirmation_modal(step):
world.page.see_confirm_modal_message(world.multi_choice_question.text)
@step(u'Then I should see that the question was deleted successfully')
def then_i_should_see_that_the_question_was_deleted_successfully(step):
world.page.see_success_message("Question", "deleted")
@step(u'And I have a sub question for that question')
def and_i_have_a_sub_question_for_that_question(step):
world.sub_question = Question.objects.create(
module=world.module,
parent=world.multi_choice_question,
text="Sub Question 2?",
answer_type=Question.NUMBER,
subquestion=True,
identifier='Q101')
@step(u'Then I should not see the sub question')
def then_i_should_not_see_the_sub_question(step):
world.page.is_text_present(world.sub_question.text, False)
@step(u'And I have a non multichoice question')
def and_i_have_a_non_multi_choice_question(step):
world.multi_choice_question = Question.objects.create(
module=world.module,
text="Are these insecticide?",
answer_type=Question.NUMBER,
order=7,
group=world.household_member_group,
identifier='Q921')
world.multi_choice_question.batches.add(world.batch)
BatchQuestionOrder.objects.create(
batch=world.batch, question=world.multi_choice_question, order=1)
@step(u'When I click on the question')
def and_i_click_on_the_question(step):
world.page.click_link_by_text(world.multi_choice_question.text)
@step(u'Then I should see the sub question below the question')
def then_i_should_see_the_sub_question_below_the_question(step):
world.page.is_text_present("Subquestion")
world.page.is_text_present(world.sub_question.text)
@step(u'And I have a rule linking one option with that subquestion')
def and_i_have_a_rule_linking_one_option_with_that_subquestion(step):
world.answer_rule = AnswerRule.objects.create(
question=world.multi_choice_question,
action=AnswerRule.ACTIONS['ASK_SUBQUESTION'],
condition=AnswerRule.CONDITIONS['EQUALS_OPTION'],
validate_with_option=world.option3,
next_question=world.sub_question)
@step(u'And I have a subquestion under that question')
def and_i_have_a_subquestion_under_that_question(step):
world.sub_question = Question.objects.create(
module=world.module,
subquestion=True,
parent=world.multi_choice_question,
text="this is a subquestion",
identifier='Q022')
@step(u'When I fill in duplicate subquestion details')
def when_i_fill_in_duplicate_subquestion_details(step):
world.page.fill_valid_values(
{'module': world.module.id, 'identifier': 'ID 1', 'text': world.sub_question.text})
world.page.select('group', [world.household_member_group.pk])
world.page.select('answer_type', [Question.NUMBER])
@step(u'And I should see subquestion not added message')
def and_i_should_see_subquestion_not_added_message(step):
world.page.is_text_present("Sub question not saved.")
@step(u'And I have a rule on value with that subquestion')
def and_i_have_a_rule_on_value_with_that_subquestion(step):
world.answer_rule = AnswerRule.objects.create(
question=world.multi_choice_question,
validate_with_value=1,
condition=AnswerRule.CONDITIONS['EQUALS'],
action=AnswerRule.ACTIONS['ASK_SUBQUESTION'],
next_question=world.sub_question,
batch=world.batch)
@step(u'And I click on view logic link')
def and_i_click_on_view_logic_link(step):
world.page.click_link_by_partial_href(
"#view_logic_%d" % world.multi_choice_question.id)
@step(u'Then I should see the logic in a modal')
def then_i_should_see_the_logic_in_a_modal(step):
world.page.validate_fields_present(
[world.multi_choice_question.text, "Eligibility Criteria", "Question/Value/Option", "Action"])
@step(u'Then I should see delete logic icon')
def then_i_should_delete_logic_icon(step):
world.browser.find_by_css('.icon-trash')
@step(u'When I click delete logic icon')
def when_i_click_delete_logic_icon(step):
world.page.click_by_css('#delete-icon-%s' % world.answer_rule.id)
@step(u'And I click confirm delete')
def and_i_click_confirm_delete(step):
world.page.click_by_css('#delete-logic-%s' % world.answer_rule.id)
@step(u'Then I should redirected to batch question page')
def then_i_should_redirected_to_batch_question_page(step):
world.page = BatchQuestionsListPage(world.browser, world.batch)
world.page.validate_url()
@step(u'Then I should see special characters message')
def and_i_should_see_special_characters_message(step):
special_characters = "Please note that the following special characters will be removed ["
for character in Question.IGNORED_CHARACTERS:
special_characters = special_characters + character + " "
special_characters = special_characters.strip() + "]"
world.page.is_text_present(special_characters)
@step(u'And I click delete sub question link')
def and_i_click_delete_sub_question_link(step):
sleep(3)
world.page.click_delete_subquestion()
@step(u'Then I should see a confirm delete subqestion modal')
def then_i_should_see_a_confirm_delete_subquestion_modal(step):
world.page.see_confirm_modal_message(world.sub_question.text)
@step(u'Then I should see the sub question deleted successfully')
def then_i_should_see_the_sub_question_deleted_successfully(step):
world.page.see_success_message("Sub question", "deleted")
@step(u'When I click confirm delete')
def when_i_click_confirm_delete(step):
world.page.click_by_css("#delete-subquestion-%s" % world.sub_question.id)
@step(u'And I click edit sub question link')
def and_i_click_edit_sub_question_link(step):
sleep(3)
world.page.click_by_css("#edit_subquestion_%s" % world.sub_question.id)
@step(u'Then I see the sub question form with values')
def then_i_see_the_sub_question_form_with_values(step):
form_values = {'module': world.module.id, 'text': world.sub_question.text,
'group': world.multi_choice_question.group.id,
'identifier': "Q101",
'answer_type': world.sub_question.answer_type}
world.page.validate_form_values(form_values)
@step(u'When I fill in edited sub question details')
def when_i_fill_in_edited_sub_question_details(step):
world.edited_sub_question_details = {
'identifier': 'Q101',
'module': world.module.id,
'text': 'edited question',
'group': world.multi_choice_question.group.id}
world.page.see_select_option(['Number'], 'answer_type')
world.page.fill_valid_values(world.edited_sub_question_details)
@step(u'Then I should see the sub question successfully edited')
def then_i_should_see_the_sub_question_successfully_edited(step):
world.page.see_success_message("Sub question", "edited")
@step(u'And I click delete question rule')
def and_i_click_delete_question_rule(step):
sleep(2)
world.page.click_by_css('#delete-icon-%s' % world.answer_rule.id)
@step(u'And I should see that the logic was deleted successfully')
def and_i_should_see_that_the_logic_was_deleted_successfully(step):
world.page.see_success_message("Logic", "deleted")
@step(u'And I select multichoice question in batch')
def and_i_select_multichoice_question_in_batch(step):
world.batch = Batch.objects.create(
order=1,
name="Batch A",
description='description',
survey=world.survey)
world.multi_choice_question.batches.add(world.batch)
BatchQuestionOrder.objects.create(
batch=world.batch, question=world.multi_choice_question, order=1)
@step(u'And I have a module')
def and_i_have_a_module(step):
world.module = QuestionModule.objects.create(name="Education")
@step(u'And I have a location')
def and_i_have_a_location(step):
world.kampala = Location.objects.create(name="Kampala")
@step(u'And I have an open batch in that location')
def and_i_have_an_open_batch_in_that_location(step):
world.batch = Batch.objects.create(
order=1,
name="Batch A",
description='description',
survey=world.survey)
world.batch.open_for_location(world.kampala)
@step(u'Then I should see question list with only view options action')
def then_i_should_see_question_list_with_only_view_options_action(step):
world.page.validate_only_view_options_action_exists()
@step(u'And I have a multichoice and numeric questions with logics')
def and_i_have_a_multichoice_and_numeric_questions(step):
world.numeric_question = Question.objects.create(
text="some questions", answer_type=Question.NUMBER, identifier='ID', order=1)
world.multi_choice_question = Question.objects.create(
text="Are these insecticide?",
answer_type=Question.MULTICHOICE,
order=6,
identifier='ID 1')
world.option3 = QuestionOption.objects.create(
text="haha", order=1, question=world.multi_choice_question)
world.numeric_question.batches.add(world.batch)
world.multi_choice_question.batches.add(world.batch)
BatchQuestionOrder.objects.create(
batch=world.batch, question=world.numeric_question, order=1)
BatchQuestionOrder.objects.create(
batch=world.batch, question=world.multi_choice_question, order=2)
AnswerRule.objects.create(
batch=world.batch,
question=world.multi_choice_question,
action=AnswerRule.ACTIONS['END_INTERVIEW'],
condition=AnswerRule.CONDITIONS['EQUALS_OPTION'],
validate_with_option=world.option3)
    AnswerRule.objects.create(
        batch=world.batch,
        question=world.numeric_question,
        action=AnswerRule.ACTIONS['END_INTERVIEW'],
        condition=AnswerRule.CONDITIONS['EQUALS'],
        validate_with_value=2)
@step(u'Then I should see field required error message')
def then_i_should_see_field_required_error_message(step):
world.page.is_text_present("This field is required.")
@step(u'And I should be able to export questions')
def and_i_should_be_able_to_export_questions(step):
world.page.find_by_css("#export_question", "Export Questions")
|
unicefuganda/uSurvey
|
survey/features/Question-steps.py
|
Python
|
bsd-3-clause
| 21,446
|
[
"VisIt"
] |
26dc046374916fa9f9e31be80f4dd6df4ed066b89f1500492dcc77ae300476dd
|
"""
##########################################################################
#
# QGIS-meshing plugins.
#
# Copyright (C) 2012-2013 Imperial College London and others.
#
# Please see the AUTHORS file in the main source directory for a
# full list of copyright holders.
#
# Dr Adam S. Candy, adam.candy@imperial.ac.uk
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation,
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
##########################################################################
This plugin so far calculates the minimum of multiple NetCDF file layers using gdrmath. It needs to be extended to mesh the resulting NetCDF file or the user-specified file. In the UI, none of the radio buttons or check boxes are functional other than those relating to Calculate Minimum, as getting the min function to work took so long.
"""
# -*- coding: utf-8 -*-
"""
/***************************************************************************
MeshNetCDF
A QGIS plugin
Create Gmsh mesh from NetCDF (.nc) file where the z-coordinate is a metric for the mesh size.
-------------------
begin : 2012-07-25
copyright : (C) 2012 by AMCG
email : shaun.lee10@imperial.ac.uk
***************************************************************************/
/***************************************************************************
* *
* *
***************************************************************************/
"""
from StandardModules import *
import UserInterfaceSetup
import PreMeshingFunctions
import MeshOperations
import os
class MeshNetCDF(UserInterfaceSetup.UsIntSetup, PreMeshingFunctions.PreMesh, MeshOperations.MeshOp):
def __init__(self, iface):
UserInterfaceSetup.UsIntSetup.__init__(self, iface)
def initGui(self):
UserInterfaceSetup.UsIntSetup.initGui(self)
def openSingleNetCDFFiles(self):
UserInterfaceSetup.UsIntSetup.openSingleNetCDFFiles(self)
def openGeo(self):
UserInterfaceSetup.UsIntSetup.openGeo(self)
def getActiveLayers(self):
UserInterfaceSetup.UsIntSetup.getActiveLayers(self)
def setDropDownOptions(self):
UserInterfaceSetup.UsIntSetup.setDropDownOptions(self)
def getNetCDFDropDownOptions(self):
PreMeshingFunctions.PreMesh.getNetCDFDropDownOptions(self)
def getShapeDropDownOptions(self):
PreMeshingFunctions.PreMesh.getShapeDropDownOptions(self)
def convertShape(self):
PreMeshingFunctions.PreMesh.convertShape(self)
def runIdDef(self):
PreMeshingFunctions.PreMesh.runIdDef(self)
def getFiles(self):
PreMeshingFunctions.PreMesh.getFiles(self)
def writePosFile(self):
PreMeshingFunctions.PreMesh.writePosFile(self)
def calculateMinimum(self):
PreMeshingFunctions.PreMesh.calculateMinimum(self)
def appendGeo(self):
MeshOperations.MeshOp.appendGeo(self)
def generateMesh(self):
MeshOperations.MeshOp.generateMesh(self)
def functionOfBathymetry(self):
MeshOperations.MeshOp.functionOfBathymetry(self)
def importMsh(self):
MeshOperations.MeshOp.importMsh(self)
def meshNetCDF(self):
MeshOperations.MeshOp.meshNetCDF(self)
def unload(self):
# Remove the plugin menu item and icon
self.iface.removePluginMenu(u"&Mesh NetCDF",self.action)
self.iface.removeToolBarIcon(self.action)
"""
    Using AssertionError because it is not used anywhere else in the code, thereby avoiding catching unrelated exceptions.
"""
def __checkForErrors(self):
msgBox = QtGui.QMessageBox.critical
ui = self.dlg.ui
if ui.grpNCDF.isChecked():
if not (ui.singleNetCDFRadioButton.isChecked() or ui.multipleNetCDFFilesRadioButton.isChecked()):
msgBox(None,"Error: Invalid Input","Please check if given input is correct. Some radio button might not have been checked.")
raise AssertionError ("Error: Invalid Input.")
if ui.singleNetCDFRadioButton.isChecked():
if not (ui.singleNetCDFLayersRadioButton.isChecked() or ui.singleNetCDFChooseFilesRadioButton.isChecked()):
msgBox(None,"Error: Invalid Input","Please check if given input is correct. Some radio button might not have been checked.")
raise AssertionError ("Error: Invalid Input.")
if ui.domainShapefileLayerRadioButton.isChecked() == False and ui.chooseGeoFileRadioButton.isChecked() == False:
msgBox(None,"Error: Invalid Input","Neither a domain Shapefile layer or Geo file was specified.")
raise AssertionError ("Error: Invalid Input.")
if ui.grpDefID.isChecked():
def_Id = ui.Default_Id.text()
if def_Id == "":
msgBox(None,"Error: No Default Id specified","Please enter the Default ID.")
raise AssertionError ("Error: No Default ID specified.")
try :
int(def_Id)
except ValueError:
msgBox(None,"Error: Invalid Default ID","Please enter a valid integer for default ID.")
raise AssertionError ("Error: Invalid Default ID.")
if int(def_Id) < 0 :
msgBox(None,"Error: Invalid Default ID", "Please enter a positive number for the default ID.")
raise AssertionError ("Error: Invalid Default ID.")
if ui.grpNCDF.isChecked() and ui.singleNetCDFRadioButton.isChecked() and ui.singleNetCDFChooseFilesRadioButton.isChecked():
try :
test = open(str(ui.singleNetCDFChooseFilesLineEdit.text()),"r")
test.close()
except IOError:
msgBox(None, "Error: Invalid File Path","Please enter a valid filepath for the NetCDF file.")
raise AssertionError ("Error: Invalid File Path.")
if ui.chooseGeoFileRadioButton.isChecked():
try :
test = open(str(ui.chooseGeoFileLineEdit.text()),"r")
test.close()
except IOError:
msgBox(None, "Error: Invalid File Path","Please enter a valid filepath for the geo file.")
raise AssertionError ("Error: Invalid File Path.")
self.getShapeDropDownOptions()
if ui.domainShapefileLayerRadioButton.isChecked():
try:
filepath = self.domainShapefileLayerFileName + "test"
test = open(filepath,"w")
test.close()
os.remove(filepath)
except IOError:
msgBox(None,"Error: Permission Denied","The current domain shapefile layer is in a directory for which you do not have write permissions. Please move it to a suitable directory.")
raise AssertionError ("Error: Permission Denied.")
try:
sf = shapefile.Reader(str(self.domainShapefileLayerFileName))
sf.records()
except ValueError:
msgBox(None,"Error: Invalid Shapefile Records","The records for the Shapefile supplied is invalid. Ensure the polygon's ID is a positive integer.")
return False
# run method that performs all the real work
def run(self):
# show the dialog
try :
self.getActiveLayers()
if len(self.activeLayers)==0:
QtGui.QMessageBox.critical(None,"Error: No Active Layer","There are no active layers. Please load a layer.")
raise AssertionError ("Error: No Active Layer.")
self.setDropDownOptions()
layers = self.iface.mapCanvas().layers()
self.dlg.ui.IdDropdown.clear()
for n in layers:
layer_n = str(n.name())
if ".shp" in str(n.source()):
self.dlg.ui.IdDropdown.addItem(layer_n, QVariant(str(n.source())))
self.dlg.show()
self.dlg.ui.singleNetCDFLayersRadioButton.setChecked(True)
# Run the dialog event loop
result = self.dlg.exec_()
# See if OK was pressed
if result == 1:
startTime = datetime.datetime.now()
print "Operation Started: " + str(strftime("%Y-%m-%d %H:%M:%S", gmtime()))
self.__checkForErrors()
self.meshNetCDF()
print "Operation Stopped: " + str(strftime("%Y-%m-%d %H:%M:%S", gmtime()))
timePassed = datetime.datetime.now() - startTime
print "Time Elapsed: " + str(timePassed.seconds) + " seconds."
except AssertionError as e:
print e.message
|
adamcandy/qgis-plugins-meshing
|
release/mesh_netcdf/meshnetcdf.py
|
Python
|
lgpl-2.1
| 8,544
|
[
"NetCDF"
] |
fc7c9e80a33dc1a00dd53e3f4dd4185534dba8d6648d4c8c32a7f820749e53d7
|
"""
Miscellaneous utility functions and classes.
**Mathematical functions**
* :func:`~fatiando.utils.normal`
* :func:`~fatiando.utils.gaussian`
* :func:`~fatiando.utils.gaussian2d`
* :func:`~fatiando.utils.safe_solve`
* :func:`~fatiando.utils.safe_dot`
* :func:`~fatiando.utils.safe_diagonal`
* :func:`~fatiando.utils.safe_inverse`
**Point scatter generation**
* :func:`~fatiando.utils.random_points`
* :func:`~fatiando.utils.circular_points`
* :func:`~fatiando.utils.connect_points`
**Unit conversion**
* :func:`~fatiando.utils.si2mgal`
* :func:`~fatiando.utils.mgal2si`
* :func:`~fatiando.utils.si2eotvos`
* :func:`~fatiando.utils.eotvos2si`
* :func:`~fatiando.utils.si2nt`
* :func:`~fatiando.utils.nt2si`
**Coordinate system conversions**
* :func:`~fatiando.utils.sph2cart`
**Others**
* :func:`~fatiando.utils.fromimage`: Load a matrix from an image file
* :func:`~fatiando.utils.contaminate`: Contaminate a vector with pseudo-random
Gaussian noise
* :func:`~fatiando.utils.dircos`: Get the 3 coordinates of a unit vector
* :func:`~fatiando.utils.ang2vec`: Convert intensity, inclination and
declination to a 3-component vector
* :func:`~fatiando.utils.vecnorm`: Get the norm of a vector or list of vectors
* :func:`~fatiando.utils.vecmean`: Take the mean array out of a list of arrays
* :func:`~fatiando.utils.vecstd`: Take the standard deviation array out of a
list of arrays
* :class:`~fatiando.utils.SparseList`: Store only non-zero elements on an
immutable list
* :func:`~fatiando.utils.sec2hms`: Convert seconds to hours, minutes, and
seconds
* :func:`~fatiando.utils.sec2year`: Convert seconds to Julian years
* :func:`~fatiando.utils.year2sec`: Convert Julian years to seconds
----
"""
import math
import numpy
import scipy.sparse
import scipy.sparse.linalg
import scipy.misc
import PIL.Image
from . import constants, gridder
def fromimage(fname, ranges=None, shape=None):
"""
Load an array of normalized gray-scale values from an image file.
The values will be in the range [0, 1]. The shape of the array is the shape
of the image (ny, nx), i.e., number of pixels in vertical (height) and
horizontal (width) dimensions.
Parameters:
* fname : str
Name of the image file
    * ranges : [vmin, vmax] = floats
If not ``None``, will set the gray-scale values to this range.
* shape : (ny, nx)
If not ``None``, will interpolate the array to match this new shape
Returns:
* values : 2d-array
The array of gray-scale values
"""
image = scipy.misc.fromimage(PIL.Image.open(fname), flatten=True)
# Invert the color scale and normalize
values = (image.max() - image) / numpy.abs(image).max()
if ranges is not None:
vmin, vmax = ranges
values *= vmax - vmin
values += vmin
if shape is not None and tuple(shape) != values.shape:
ny, nx = values.shape
X, Y = numpy.meshgrid(range(nx), range(ny))
values = gridder.interp(X.ravel(), Y.ravel(), values.ravel(),
shape)[2].reshape(shape)
return values
def safe_inverse(matrix):
"""
    Calculate the inverse of a matrix using an appropriate algorithm.
Uses the standard :func:`numpy.linalg.inv` if *matrix* is dense.
If it is sparse (from :mod:`scipy.sparse`) then will use
:func:`scipy.sparse.linalg.inv`.
Parameters:
* matrix : 2d-array
The matrix
Returns:
* inverse : 2d-array
The inverse of *matrix*
"""
if scipy.sparse.issparse(matrix):
return scipy.sparse.linalg.inv(matrix)
else:
return numpy.linalg.inv(matrix)
def safe_solve(matrix, vector):
"""
    Solve a linear system using an appropriate algorithm.
Uses the standard :func:`numpy.linalg.solve` if both *matrix* and *vector*
are dense.
    If either of the two is sparse (from :mod:`scipy.sparse`) then will use
    the Conjugate Gradient Method (:func:`scipy.sparse.linalg.cgs`).
Parameters:
* matrix : 2d-array
The matrix defining the linear system
* vector : 1d or 2d-array
The right-side vector of the system
Returns:
* solution : 1d or 2d-array
The solution of the linear system
"""
if scipy.sparse.issparse(matrix) or scipy.sparse.issparse(vector):
estimate, status = scipy.sparse.linalg.cgs(matrix, vector)
if status >= 0:
return estimate
else:
raise ValueError('CGS exited with input error')
else:
return numpy.linalg.solve(matrix, vector)
def safe_dot(a, b):
"""
Make the dot product using the appropriate method.
If *a* and *b* are dense, will use :func:`numpy.dot`. If either is sparse
(from :mod:`scipy.sparse`) will use the multiplication operator (i.e., \*).
Parameters:
* a, b : array or matrix
The vectors/matrices to take the dot product of.
Returns:
* prod : array or matrix
The dot product of *a* and *b*
"""
if scipy.sparse.issparse(a) or scipy.sparse.issparse(b):
return a * b
else:
return numpy.dot(a, b)
def safe_diagonal(matrix):
"""
Get the diagonal of a matrix using the appropriate method.
Parameters:
* matrix : 2d-array, matrix, sparse matrix
        The matrix whose diagonal will be extracted
Returns:
* diag : 1d-array
A numpy array with the diagonal of the matrix
"""
if scipy.sparse.issparse(matrix):
return numpy.array(matrix.diagonal())
else:
return numpy.diagonal(matrix).copy()
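# A minimal sketch tying the safe_* helpers together (the system is
# illustrative): the same call sites work for dense and sparse inputs.
#
#     import numpy
#     import scipy.sparse
#     A = scipy.sparse.diags([2.0, 3.0, 4.0]).tocsr()
#     b = numpy.array([2.0, 6.0, 12.0])
#     x = safe_solve(A, b)          # uses the sparse CGS branch
#     r = safe_dot(A, x) - b        # sparse-aware dot product
#     d = safe_diagonal(A)          # array([2., 3., 4.])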
def vecnorm(vectors):
"""
Get the l2 norm of each vector in a list.
Use this to get, for example, the magnetization intensity from a list of
magnetization vectors.
Parameters:
* vectors : list of arrays
The vector
Returns:
* norms : list
The norms of the vectors
Examples::
>>> v = [[1, 1, 1], [2, 2, 2], [3, 3, 3]]
>>> print vecnorm(v)
[ 1.73205081 3.46410162 5.19615242]
"""
norm = numpy.sqrt(sum(i ** 2 for i in numpy.transpose(vectors)))
return norm
def sph2cart(lon, lat, height):
"""
Convert spherical coordinates to Cartesian geocentric coordinates.
Parameters:
* lon, lat, height : floats
Spherical coordinates. lon and lat in degrees, height in meters. height
is the height above mean Earth radius.
Returns:
* x, y, z : floats
Converted Cartesian coordinates
"""
d2r = numpy.pi / 180.0
radius = constants.MEAN_EARTH_RADIUS + height
x = numpy.cos(d2r * lat) * numpy.cos(d2r * lon) * radius
y = numpy.cos(d2r * lat) * numpy.sin(d2r * lon) * radius
z = numpy.sin(d2r * lat) * radius
return x, y, z
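# A minimal usage sketch for sph2cart (the coordinates are illustrative): a
# point on the equator at the prime meridian with zero height maps onto the
# x axis at the mean Earth radius.
#
#     x, y, z = sph2cart(0.0, 0.0, 0.0)
#     # x == constants.MEAN_EARTH_RADIUS, y == 0, z == 0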
def si2nt(value):
"""
Convert a value from SI units to nanoTesla.
Parameters:
* value : number or array
The value in SI
Returns:
* value : number or array
The value in nanoTesla
"""
return value * constants.T2NT
def nt2si(value):
"""
Convert a value from nanoTesla to SI units.
Parameters:
* value : number or array
The value in nanoTesla
Returns:
* value : number or array
The value in SI
"""
return value / constants.T2NT
def si2eotvos(value):
"""
Convert a value from SI units to Eotvos.
Parameters:
* value : number or array
The value in SI
Returns:
* value : number or array
The value in Eotvos
"""
return value * constants.SI2EOTVOS
def eotvos2si(value):
"""
Convert a value from Eotvos to SI units.
Parameters:
* value : number or array
The value in Eotvos
Returns:
* value : number or array
The value in SI
"""
return value / constants.SI2EOTVOS
def si2mgal(value):
"""
Convert a value from SI units to mGal.
Parameters:
* value : number or array
The value in SI
Returns:
* value : number or array
The value in mGal
"""
return value * constants.SI2MGAL
def mgal2si(value):
"""
Convert a value from mGal to SI units.
Parameters:
* value : number or array
The value in mGal
Returns:
* value : number or array
The value in SI
"""
return value / constants.SI2MGAL
def vec2ang(vector):
"""
Convert a 3-component vector to intensity, inclination and declination.
.. note:: Coordinate system is assumed to be x->North, y->East, z->Down.
Inclination is positive down and declination is measured with respect
to x (North).
Parameter:
* vector : array = [x, y, z]
The vector
Returns:
* [intensity, inclination, declination] : floats
The intensity, inclination and declination (in degrees)
Examples::
>>> s = vec2ang([1.5, 1.5, 2.121320343559643])
>>> print "%.3f %.3f %.3f" % tuple(s)
3.000 45.000 45.000
"""
intensity = numpy.linalg.norm(vector)
r2d = 180. / numpy.pi
x, y, z = vector
declination = r2d * numpy.arctan2(y, x)
inclination = r2d * numpy.arcsin(z / intensity)
return [intensity, inclination, declination]
def ang2vec(intensity, inc, dec):
"""
Convert intensity, inclination and declination to a 3-component vector
.. note:: Coordinate system is assumed to be x->North, y->East, z->Down.
Inclination is positive down and declination is measured with respect
to x (North).
Parameter:
* intensity : float or array
The intensity (norm) of the vector
* inc : float
The inclination of the vector (in degrees)
* dec : float
The declination of the vector (in degrees)
Returns:
* vec : array = [x, y, z]
The vector
Examples::
>>> import numpy
>>> print ang2vec(3, 45, 45)
[ 1.5 1.5 2.12132034]
>>> print ang2vec(numpy.arange(4), 45, 45)
[[ 0. 0. 0. ]
[ 0.5 0.5 0.70710678]
[ 1. 1. 1.41421356]
[ 1.5 1.5 2.12132034]]
"""
return numpy.transpose([intensity * i for i in dircos(inc, dec)])
def dircos(inc, dec):
"""
Returns the 3 coordinates of a unit vector given its inclination and
declination.
.. note:: Coordinate system is assumed to be x->North, y->East, z->Down.
Inclination is positive down and declination is measured with respect
to x (North).
Parameters:
* inc : float
The inclination of the vector (in degrees)
* dec : float
The declination of the vector (in degrees)
Returns:
* vect : list = [x, y, z]
The unit vector
"""
d2r = numpy.pi / 180.
vect = [numpy.cos(d2r * inc) * numpy.cos(d2r * dec),
numpy.cos(d2r * inc) * numpy.sin(d2r * dec),
numpy.sin(d2r * inc)]
return vect
def vecmean(arrays):
"""
Take the mean array out of a list of arrays.
Parameter:
* arrays : list
List of arrays
Returns:
* mean : array
The mean of each element in the arrays
Example::
>>> print vecmean([[1, 1, 2], [2, 3, 5]])
[ 1.5 2. 3.5]
"""
return numpy.mean(arrays, axis=0)
def vecstd(arrays):
"""
Take the standard deviation array out of a list of arrays.
Parameter:
* arrays : list
List of arrays
Returns:
* std : array
Standard deviation of each element in the arrays
Example::
>>> print vecstd([[1, 1, 2], [2, 3, 5]])
[ 0.5 1. 1.5]
"""
return numpy.std(arrays, axis=0)
class SparseList(object):
"""
Store only the non-zero elements of a fixed-size list.
Can be iterated over, indexed, and assigned to just like a regular list.
Parameters:
* size : int
Size of the list.
* elements : dict
Dictionary used to initialize the list. Keys are the index of the
elements and values are their respective values.
Example::
>>> l = SparseList(5)
>>> l[3] = 42.0
>>> print len(l)
5
>>> print l[1], l[3]
0.0 42.0
>>> l[1] += 3.0
>>> for i in l:
... print i,
0.0 3.0 0.0 42.0 0.0
>>> l2 = SparseList(4, elements={1:3.2, 3:2.8})
>>> for i in l2:
... print i,
0.0 3.2 0.0 2.8
"""
def __init__(self, size, elements=None):
self.size = size
self.i = 0
if elements is None:
self.elements = {}
else:
self.elements = elements
def __str__(self):
return str(self.elements)
def __len__(self):
return self.size
def __iter__(self):
self.i = 0
return self
def __getitem__(self, index):
if index < 0:
index = self.size + index
if index >= self.size or index < 0:
raise IndexError('index out of range')
return self.elements.get(index, 0.)
def __setitem__(self, key, value):
if key >= self.size:
raise IndexError('index out of range')
self.elements[key] = value
def next(self):
if self.i == self.size:
raise StopIteration()
res = self.__getitem__(self.i)
self.i += 1
return res
def sec2hms(seconds):
"""
Convert seconds into a string with hours, minutes and seconds.
Parameters:
* seconds : float
Time in seconds
Returns:
* time : str
String in the format ``'%dh %dm %2.5fs'``
Example::
>>> print sec2hms(62.2)
0h 1m 2.20000s
>>> print sec2hms(3862.12345678)
1h 4m 22.12346s
"""
h = int(seconds / 3600)
m = int((seconds - h * 3600) / 60)
s = seconds - h * 3600 - m * 60
return '%dh %dm %2.5fs' % (h, m, s)
def sec2year(seconds):
"""
Convert seconds into decimal Julian years.
Julian years have 365.25 days.
Parameters:
* seconds : float
Time in seconds
Returns:
* years : float
Time in years
Example::
>>> print sec2year(31557600)
1.0
"""
return float(seconds) / 31557600.0
def year2sec(years):
"""
Convert decimal Julian years into seconds.
Julian years have 365.25 days.
Parameters:
* years : float
Time in years
Returns:
* seconds : float
Time in seconds
Example::
>>> print year2sec(1)
31557600.0
"""
return 31557600.0 * float(years)
def contaminate(data, stddev, percent=False, return_stddev=False, seed=None):
r"""
Add pseudorandom gaussian noise to an array.
Noise added is normally distributed with zero mean.
Parameters:
* data : array or list of arrays
Data to contaminate
* stddev : float or list of floats
Standard deviation of the Gaussian noise that will be added to *data*
* percent : True or False
If ``True``, will consider *stddev* as a decimal percentage and the
standard deviation of the Gaussian noise will be this percentage of
the maximum absolute value of *data*
* return_stddev : True or False
If ``True``, will return also the standard deviation used to
contaminate *data*
* seed : None or int
Seed used to generate the pseudo-random numbers. If `None`, will use a
different seed every time. Use the same seed to generate the same
random sequence to contaminate the data.
Returns:
if *return_stddev* is ``False``:
* contam : array or list of arrays
The contaminated data array
else:
* results : list = [contam, stddev]
The contaminated data array and the standard deviation used to
contaminate it.
Examples:
>>> import numpy as np
>>> data = np.ones(5)
>>> noisy = contaminate(data, 0.1, seed=0)
>>> print noisy
[ 1.03137726 0.89498775 0.95284582 1.07906135 1.04172782]
>>> noisy, std = contaminate(data, 0.05, seed=0, percent=True,
... return_stddev=True)
>>> print std
0.05
>>> print noisy
[ 1.01568863 0.94749387 0.97642291 1.03953067 1.02086391]
>>> data = [np.zeros(5), np.ones(3)]
>>> noisy = contaminate(data, [0.1, 0.2], seed=0)
>>> print noisy[0]
[ 0.03137726 -0.10501225 -0.04715418 0.07906135 0.04172782]
>>> print noisy[1]
[ 0.81644754 1.20192079 0.98163167]
"""
numpy.random.seed(seed)
# Check if dealing with an array or list of arrays
if not isinstance(stddev, list):
stddev = [stddev]
data = [data]
contam = []
for i in xrange(len(stddev)):
if stddev[i] == 0.:
contam.append(data[i])
continue
if percent:
stddev[i] = stddev[i] * max(abs(data[i]))
noise = numpy.random.normal(scale=stddev[i], size=len(data[i]))
# Subtract the mean so that the noise doesn't introduce a systematic
# shift in the data
noise -= noise.mean()
contam.append(numpy.array(data[i]) + noise)
numpy.random.seed()
if len(contam) == 1:
contam = contam[0]
stddev = stddev[0]
if return_stddev:
return [contam, stddev]
else:
return contam
def normal(x, mean, std):
"""
Normal distribution.
.. math::
N(x,\\bar{x},\sigma) = \\frac{1}{\sigma\sqrt{2 \pi}}
\exp\\left(-\\frac{(x-\\bar{x})^2}{\sigma^2}\\right)
Parameters:
* x : float or array
Value at which to calculate the normal distribution
* mean : float
The mean of the distribution :math:`\\bar{x}`
* std : float
The standard deviation of the distribution :math:`\sigma`
Returns:
* normal : array
Normal distribution evaluated at *x*
"""
factor = (std * numpy.sqrt(2 * numpy.pi))
return numpy.exp(-1 * ((mean - x) / std) ** 2) / factor
def gaussian(x, mean, std):
"""
Non-normalized Gaussian function
.. math::
G(x,\\bar{x},\sigma) = \exp\\left(-\\frac{(x-\\bar{x})^2}{\sigma^2}
\\right)
Parameters:
* x : float or array
Values at which to calculate the Gaussian function
* mean : float
The mean of the distribution :math:`\\bar{x}`
* std : float
The standard deviation of the distribution :math:`\sigma`
Returns:
* gauss : array
Gaussian function evaluated at *x*
"""
return numpy.exp(-1 * ((mean - x) / std) ** 2)
def gaussian2d(x, y, sigma_x, sigma_y, x0=0, y0=0, angle=0.0):
"""
Non-normalized 2D Gaussian function
Parameters:
* x, y : float or arrays
Coordinates at which to calculate the Gaussian function
* sigma_x, sigma_y : float
Standard deviation in the x and y directions
* x0, y0 : float
Coordinates of the center of the distribution
* angle : float
Rotation angle of the Gaussian, measured from the x axis (north) and
growing positive towards the east (positive y axis)
Returns:
* gauss : array
Gaussian function evaluated at *x*, *y*
"""
theta = -1 * angle * numpy.pi / 180.
tmpx = 1. / sigma_x ** 2
tmpy = 1. / sigma_y ** 2
sintheta = numpy.sin(theta)
costheta = numpy.cos(theta)
a = tmpx * costheta ** 2 + tmpy * sintheta ** 2  # cosine must be squared in the rotated quadratic form
b = (tmpy - tmpx) * costheta * sintheta
c = tmpx * sintheta ** 2 + tmpy * costheta ** 2
xhat = x - x0
yhat = y - y0
return numpy.exp(-(a * xhat ** 2 + 2. * b * xhat * yhat + c * yhat ** 2))
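# Illustrative check (a sketch, not part of the original module): an isotropic
# Gaussian (sigma_x == sigma_y, so the angle is irrelevant) peaks at 1 in the
# center and decays with distance from it:
# >>> gaussian2d(0., 0., 1., 1.)
# 1.0
# >>> gaussian2d(1., 0., 1., 1.) == numpy.exp(-1.)
# True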
def random_points(area, n, seed=None):
"""
Generate a set of n random points.
Parameters:
* area : list = [x1, x2, y1, y2]
Area inside of which the points are contained
* n : int
Number of points
* seed : None or int
Seed used to generate the pseudo-random numbers. If `None`, will use a
different seed every time. Use the same seed to generate the same
random sequence.
Result:
* points : list
List of (x, y) coordinates of the points
"""
x1, x2, y1, y2 = area
numpy.random.seed(seed)
xs = numpy.random.uniform(x1, x2, n)
ys = numpy.random.uniform(y1, y2, n)
numpy.random.seed()
return numpy.array([xs, ys]).T
def circular_points(area, n, random=False, seed=None):
"""
Generate a set of n points positioned in a circular array.
The diameter of the circle is equal to the smallest dimension of the area.
Parameters:
* area : list = [x1, x2, y1, y2]
Area inside of which the points are contained
* n : int
Number of points
* random : True or False
If True, positions of the points on the circle will be chosen at random
* seed : None or int
Seed used to generate the pseudo-random numbers if `random==True`.
If `None`, will use a different seed every time.
Use the same seed to generate the same random sequence.
Result:
* points : list
List of (x, y) coordinates of the points
"""
x1, x2, y1, y2 = area
radius = 0.5 * min(x2 - x1, y2 - y1)
if random:
numpy.random.seed(seed)
angles = numpy.random.uniform(0, 2 * math.pi, n)
numpy.random.seed()
else:
da = 2. * math.pi / float(n)
angles = numpy.arange(0., 2. * math.pi, da)
xs = 0.5 * (x1 + x2) + radius * numpy.cos(angles)
ys = 0.5 * (y1 + y2) + radius * numpy.sin(angles)
return numpy.array([xs, ys]).T
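# Illustrative check (a sketch, not part of the original module): four evenly
# spaced points on a circle inscribed in the square [0, 2] x [0, 2] fall on
# the mid-points of the square's sides, up to floating-point fuzz:
# >>> circular_points([0, 2, 0, 2], 4)  # doctest: +SKIP
# array([[ 2.,  1.],
#        [ 1.,  2.],
#        [ 0.,  1.],
#        [ 1.,  0.]])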
def connect_points(pts1, pts2):
"""
Connects each point in the first list with all points in the second.
If the first list has N points and the second has M, the result is two lists
with N*M points each, representing the connections.
Parameters:
* pts1 : list
List of (x, y) coordinates of the points.
* pts2 : list
List of (x, y) coordinates of the points.
Returns:
* results : lists of lists = [connect1, connect2]
2 lists with the connected points
"""
connect1 = []
append1 = connect1.append
connect2 = []
append2 = connect2.append
for p1 in pts1:
for p2 in pts2:
append1(p1)
append2(p2)
return [connect1, connect2]
|
eusoubrasileiro/fatiando_seismic
|
fatiando/utils.py
|
Python
|
bsd-3-clause
| 22,382
|
[
"Gaussian"
] |
b01dbcacabbcfa1cfe383d043110249f37db45ad86ebcf1dd5bc47999d47c224
|
# Version: 0.18
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy
* [![Latest Version][pypi-image]][pypi-url]
* [![Build Status][travis-image]][travis-url]
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
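A minimal `[versioneer]` section might look like this (the style and the file
paths below are placeholders for your own project layout, not values that
Versioneer mandates):
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-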
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
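For instance, a `setup.py` might do the following (the values shown are
illustrative, not real output):
import versioneer
versions = versioneer.get_versions()
# e.g. {'version': '0.11+2.g1076c97', 'full-revisionid': '1076c97...',
#       'dirty': False, 'error': None, 'date': '2018-03-01T12:00:00+0100'}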
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
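As a concrete comparison (illustrative values), a checkout two commits past
the "0.11" tag with uncommitted changes would render as:
* "pep440": 0.11+2.g1076c97.dirty
* "pep440-post": 0.11.post2.dev0+g1076c97
* "git-describe": 0.11-2-g1076c97-dirty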
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This section
details the most significant ones. More can be found on Github
[issues page](https://github.com/warner/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
### Unicode version strings
While Versioneer works (and is continually tested) with both Python 2 and
Python 3, it is not entirely consistent with bytes-vs-unicode distinctions.
Newer releases probably generate unicode version strings on py2. It's not
clear that this is wrong, but it may be surprising for applications when they
write these strings to a network connection or include them in bytes-oriented
APIs like cryptographic checksums.
[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
this question.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to be easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
[pypi-image]: https://img.shields.io/pypi/v/versioneer.svg
[pypi-url]: https://pypi.python.org/pypi/versioneer/
[travis-image]:
https://img.shields.io/travis/warner/python-versioneer/master.svg
[travis-url]: https://travis-ci.org/warner/python-versioneer
"""
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = (
"Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND')."
)
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print(
"Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py)
)
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
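# Usage sketch (mirroring the decorated functions later in this file):
#
#   @register_vcs_handler("git", "get_keywords")
#   def git_get_keywords(versionfile_abs):
#       ...
#
# After decoration, HANDLERS["git"]["get_keywords"] refers to the function.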
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
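# Example call (the same pattern the git handlers below use): try each
# candidate command name in turn and capture stdout plus the return code:
#
#   stdout, rc = run_command(["git"], ["rev-parse", "--git-dir"],
#                            cwd=root, hide_stderr=True)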
LONG_VERSION_PY[
"git"
] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
from __future__ import absolute_import
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(
r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S
)
if not mo:
mo = re.search(
r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S
)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
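# Worked example (illustrative values): pieces such as
#   {"closest-tag": "0.11", "distance": 2, "short": "1076c97", "dirty": True}
# render to "0.11+2.g1076c97.dirty", matching the docstring at the top.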
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert (
cfg.versionfile_source is not None
), "please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass(cmdclass=None):
"""Get the custom setuptools/distutils subclasses used by Versioneer.
If the package uses a different cmdclass (e.g. one from numpy), it
should be provided as an argument.
"""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
# sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {} if cmdclass is None else cmdclass.copy()
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "build_py" in cmds:
_build_py = cmds["build_py"]
elif "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if "py2exe" in sys.modules: # py2exe enabled?
try:
from py2exe.distutils_buildexe import py2exe as _py2exe # py3
except ImportError:
from py2exe.build_exe import py2exe as _py2exe # py2
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "sdist" in cmds:
_sdist = cmds["sdist"]
elif "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(
target_versionfile, self._versioneer_generated_versions
)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Do main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (
EnvironmentError,
configparser.NoSectionError,
configparser.NoOptionError,
) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg", file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(
" appending versionfile_source ('%s') to MANIFEST.in"
% cfg.versionfile_source
)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
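# --- Editor's note (not in the original source) --------------------------
# Typical flow, following the conventions established above:
#   $ python versioneer.py setup   # writes _version.py, updates MANIFEST.in
#   $ python setup.py version      # runs cmd_version and prints the result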
|
FabioRosado/opsdroid
|
versioneer.py
|
Python
|
apache-2.0
| 69,159
|
[
"Brian"
] |
f8b81d9c4089007fa2c9905e1036e819daee743efe11761725a0824acd2c362c
|
from collections import OrderedDict, defaultdict, namedtuple, Counter
from collections.abc import Iterable
from copy import deepcopy
from numbers import Real
from pathlib import Path
import re
import warnings
from xml.etree import ElementTree as ET
import numpy as np
import openmc
import openmc.data
import openmc.checkvalue as cv
from ._xml import clean_indentation, reorder_attributes
from .mixin import IDManagerMixin
# Units for density supported by OpenMC
DENSITY_UNITS = ('g/cm3', 'g/cc', 'kg/m3', 'atom/b-cm', 'atom/cm3', 'sum',
'macro')
NuclideTuple = namedtuple('NuclideTuple', ['name', 'percent', 'percent_type'])
class Material(IDManagerMixin):
"""A material composed of a collection of nuclides/elements.
To create a material, one should create an instance of this class, add
nuclides or elements with :meth:`Material.add_nuclide` or
:meth:`Material.add_element`, respectively, and set the total material density
with :meth:`Material.set_density`. The material can then be assigned to a cell
using the :attr:`Cell.fill` attribute.
Parameters
----------
material_id : int, optional
Unique identifier for the material. If not specified, an identifier will
automatically be assigned.
name : str, optional
Name of the material. If not specified, the name will be the empty
string.
temperature : float, optional
Temperature of the material in Kelvin. If not specified, the material
inherits the default temperature applied to the model.
Attributes
----------
id : int
Unique identifier for the material
temperature : float
Temperature of the material in Kelvin.
density : float
Density of the material (units defined separately)
density_units : str
Units used for `density`. Can be one of 'g/cm3', 'g/cc', 'kg/m3',
'atom/b-cm', 'atom/cm3', 'sum', or 'macro'. The 'macro' unit only
applies in the case of a multi-group calculation.
depletable : bool
Indicate whether the material is depletable.
nuclides : list of namedtuple
List in which each item is a namedtuple consisting of a nuclide string,
the percent density, and the percent type ('ao' or 'wo'). The namedtuple
has field names ``name``, ``percent``, and ``percent_type``.
isotropic : list of str
Nuclides for which elastic scattering should be treated as though it
were isotropic in the laboratory system.
average_molar_mass : float
The average molar mass of nuclides in the material in units of grams per
mol. For example, UO2 with 3 nuclides will have an average molar mass
of 270 / 3 = 90 g / mol.
volume : float
Volume of the material in cm^3. This can either be set manually or
calculated in a stochastic volume calculation and added via the
:meth:`Material.add_volume_information` method.
paths : list of str
The paths traversed through the CSG tree to reach each material
instance. This property is initialized by calling the
:meth:`Geometry.determine_paths` method.
num_instances : int
The number of instances of this material throughout the geometry. This
property is initialized by calling the :meth:`Geometry.determine_paths`
method.
fissionable_mass : float
Mass of fissionable nuclides in the material in [g]. Requires that the
:attr:`volume` attribute is set.
"""
next_id = 1
used_ids = set()
def __init__(self, material_id=None, name='', temperature=None):
# Initialize class attributes
self.id = material_id
self.name = name
self.temperature = temperature
self._density = None
self._density_units = 'sum'
self._depletable = False
self._paths = None
self._num_instances = None
self._volume = None
self._atoms = {}
self._isotropic = []
# A list of tuples (nuclide, percent, percent type)
self._nuclides = []
# The single instance of Macroscopic data present in this material
# (only one is allowed, hence this is different than _nuclides, etc)
self._macroscopic = None
# If specified, a list of table names
self._sab = []
def __repr__(self):
string = 'Material\n'
string += '{: <16}=\t{}\n'.format('\tID', self._id)
string += '{: <16}=\t{}\n'.format('\tName', self._name)
string += '{: <16}=\t{}\n'.format('\tTemperature', self._temperature)
string += '{: <16}=\t{}'.format('\tDensity', self._density)
string += f' [{self._density_units}]\n'
string += '{: <16}\n'.format('\tS(a,b) Tables')
for sab in self._sab:
string += '{: <16}=\t{}\n'.format('\tS(a,b)', sab)
string += '{: <16}\n'.format('\tNuclides')
for nuclide, percent, percent_type in self._nuclides:
string += '{: <16}'.format('\t{}'.format(nuclide))
string += '=\t{: <12} [{}]\n'.format(percent, percent_type)
if self._macroscopic is not None:
string += '{: <16}\n'.format('\tMacroscopic Data')
string += '{: <16}'.format('\t{}'.format(self._macroscopic))
return string
@property
def name(self):
return self._name
@property
def temperature(self):
return self._temperature
@property
def density(self):
return self._density
@property
def density_units(self):
return self._density_units
@property
def depletable(self):
return self._depletable
@property
def paths(self):
if self._paths is None:
raise ValueError('Material instance paths have not been determined. '
'Call the Geometry.determine_paths() method.')
return self._paths
@property
def num_instances(self):
if self._num_instances is None:
raise ValueError(
'The number of material instances has not been determined. Call '
'the Geometry.determine_paths() method.')
return self._num_instances
@property
def nuclides(self):
return self._nuclides
@property
def isotropic(self):
return self._isotropic
@property
def average_molar_mass(self):
# Using the sum of specified atomic or weight amounts as a basis, sum
# the mass and moles of the material
mass = 0.
moles = 0.
for nuc in self.nuclides:
if nuc.percent_type == 'ao':
mass += nuc.percent * openmc.data.atomic_mass(nuc.name)
moles += nuc.percent
else:
moles += nuc.percent / openmc.data.atomic_mass(nuc.name)
mass += nuc.percent
# Compute and return the molar mass
return mass / moles
@property
def volume(self):
return self._volume
@name.setter
def name(self, name):
if name is not None:
cv.check_type(f'name for Material ID="{self._id}"',
name, str)
self._name = name
else:
self._name = ''
@temperature.setter
def temperature(self, temperature):
cv.check_type(f'Temperature for Material ID="{self._id}"',
temperature, (Real, type(None)))
self._temperature = temperature
@depletable.setter
def depletable(self, depletable):
cv.check_type(f'Depletable flag for Material ID="{self._id}"',
depletable, bool)
self._depletable = depletable
@volume.setter
def volume(self, volume):
if volume is not None:
cv.check_type('material volume', volume, Real)
self._volume = volume
@isotropic.setter
def isotropic(self, isotropic):
cv.check_iterable_type('Isotropic scattering nuclides', isotropic,
str)
self._isotropic = list(isotropic)
@property
def fissionable_mass(self):
if self.volume is None:
raise ValueError("Volume must be set in order to determine mass.")
density = 0.0
for nuc, atoms_per_cc in self.get_nuclide_atom_densities().values():
Z = openmc.data.zam(nuc)[0]
if Z >= 90:
density += 1e24 * atoms_per_cc * openmc.data.atomic_mass(nuc) \
/ openmc.data.AVOGADRO
return density*self.volume
@classmethod
def from_hdf5(cls, group):
"""Create material from HDF5 group
Parameters
----------
group : h5py.Group
Group in HDF5 file
Returns
-------
openmc.Material
Material instance
"""
mat_id = int(group.name.split('/')[-1].lstrip('material '))
name = group['name'][()].decode() if 'name' in group else ''
density = group['atom_density'][()]
if 'nuclide_densities' in group:
nuc_densities = group['nuclide_densities'][()]
# Create the Material
material = cls(mat_id, name)
material.depletable = bool(group.attrs['depletable'])
if 'volume' in group.attrs:
material.volume = group.attrs['volume']
if "temperature" in group.attrs:
material.temperature = group.attrs["temperature"]
# Read the names of the S(a,b) tables for this Material and add them
if 'sab_names' in group:
sab_tables = group['sab_names'][()]
for sab_table in sab_tables:
name = sab_table.decode()
material.add_s_alpha_beta(name)
# Set the Material's density to atom/b-cm as used by OpenMC
material.set_density(density=density, units='atom/b-cm')
if 'nuclides' in group:
nuclides = group['nuclides'][()]
# Add all nuclides to the Material
for fullname, density in zip(nuclides, nuc_densities):
name = fullname.decode().strip()
material.add_nuclide(name, percent=density, percent_type='ao')
if 'macroscopics' in group:
macroscopics = group['macroscopics'][()]
# Add all macroscopics to the Material
for fullname in macroscopics:
name = fullname.decode().strip()
material.add_macroscopic(name)
return material
def add_volume_information(self, volume_calc):
"""Add volume information to a material.
Parameters
----------
volume_calc : openmc.VolumeCalculation
Results from a stochastic volume calculation
"""
if volume_calc.domain_type == 'material':
if self.id in volume_calc.volumes:
self._volume = volume_calc.volumes[self.id].n
self._atoms = volume_calc.atoms[self.id]
else:
raise ValueError('No volume information found for material ID={}.'
.format(self.id))
else:
raise ValueError('No volume information found for material ID={}.'
.format(self.id))
def set_density(self, units, density=None):
"""Set the density of the material
Parameters
----------
units : {'g/cm3', 'g/cc', 'kg/m3', 'atom/b-cm', 'atom/cm3', 'sum', 'macro'}
Physical units of density.
density : float, optional
Value of the density. Must be specified unless units is given as
'sum'.
"""
cv.check_value('density units', units, DENSITY_UNITS)
self._density_units = units
if units == 'sum':
if density is not None:
msg = 'Density "{}" for Material ID="{}" is ignored ' \
'because the unit is "sum"'.format(density, self.id)
warnings.warn(msg)
else:
if density is None:
msg = 'Unable to set the density for Material ID="{}" ' \
'because a density value must be given when not using ' \
'"sum" unit'.format(self.id)
raise ValueError(msg)
cv.check_type('the density for Material ID="{}"'.format(self.id),
density, Real)
self._density = density
def add_nuclide(self, nuclide, percent, percent_type='ao'):
"""Add a nuclide to the material
Parameters
----------
nuclide : str
Nuclide to add, e.g., 'Mo95'
percent : float
Atom or weight percent
percent_type : {'ao', 'wo'}
'ao' for atom percent and 'wo' for weight percent
"""
cv.check_type('nuclide', nuclide, str)
cv.check_type('percent', percent, Real)
cv.check_value('percent type', percent_type, {'ao', 'wo'})
if self._macroscopic is not None:
msg = 'Unable to add a Nuclide to Material ID="{}" as a ' \
'macroscopic data-set has already been added'.format(self._id)
raise ValueError(msg)
# If nuclide name doesn't look valid, give a warning
try:
Z, _, _ = openmc.data.zam(nuclide)
except ValueError as e:
warnings.warn(str(e))
else:
# For actinides, have the material be depletable by default
if Z >= 89:
self.depletable = True
self._nuclides.append(NuclideTuple(nuclide, percent, percent_type))
def remove_nuclide(self, nuclide):
"""Remove a nuclide from the material
Parameters
----------
nuclide : str
Nuclide to remove
"""
cv.check_type('nuclide', nuclide, str)
# If the Material contains the Nuclide, delete it
for nuc in reversed(self.nuclides):
if nuclide == nuc.name:
self.nuclides.remove(nuc)
def add_macroscopic(self, macroscopic):
"""Add a macroscopic to the material. This will also set the
density of the material to 1.0, unless it has been otherwise set,
as a default for Macroscopic cross sections.
Parameters
----------
macroscopic : str
Macroscopic to add
"""
# Ensure no nuclides, elements, or sab are added since these would be
# incompatible with macroscopics
if self._nuclides or self._sab:
msg = 'Unable to add a Macroscopic data set to Material ID="{}" ' \
'with a macroscopic value "{}" as an incompatible data ' \
'member (i.e., nuclide or S(a,b) table) ' \
'has already been added'.format(self._id, macroscopic)
raise ValueError(msg)
if not isinstance(macroscopic, str):
msg = 'Unable to add a Macroscopic to Material ID="{}" with a ' \
'non-string value "{}"'.format(self._id, macroscopic)
raise ValueError(msg)
if self._macroscopic is None:
self._macroscopic = macroscopic
else:
msg = 'Unable to add a Macroscopic to Material ID="{}". ' \
'Only one Macroscopic allowed per ' \
'Material.'.format(self._id)
raise ValueError(msg)
# Generally speaking, the density for a macroscopic object will
# be 1.0. Therefore, let's set the density to 1.0 so that the user
# doesn't need to set it unless needed. Of course, if the user has
# already set a density, we will not override it.
if self._density is None:
self.set_density('macro', 1.0)
def remove_macroscopic(self, macroscopic):
"""Remove a macroscopic from the material
Parameters
----------
macroscopic : str
Macroscopic to remove
"""
if not isinstance(macroscopic, str):
msg = 'Unable to remove a Macroscopic "{}" in Material ID="{}" ' \
'since it is not a string'.format(macroscopic, self._id)
raise ValueError(msg)
# If the Material contains the Macroscopic, delete it
if macroscopic == self._macroscopic:
self._macroscopic = None
def add_element(self, element, percent, percent_type='ao', enrichment=None,
enrichment_target=None, enrichment_type=None):
"""Add a natural element to the material
Parameters
----------
element : str
Element to add, e.g., 'Zr' or 'Zirconium'
percent : float
Atom or weight percent
percent_type : {'ao', 'wo'}, optional
'ao' for atom percent and 'wo' for weight percent. Defaults to atom
percent.
enrichment : float, optional
Enrichment of an enrichment_target nuclide in percent (ao or wo).
If enrichment_target is not supplied then it is enrichment for U235
in weight percent. For example, input 4.95 for 4.95 weight percent
enriched U.
Default is None (natural composition).
enrichment_target : str, optional
Single nuclide name to enrich from a natural composition (e.g., 'O16')
.. versionadded:: 0.12
enrichment_type : {'ao', 'wo'}, optional
'ao' for enrichment as atom percent and 'wo' for weight percent.
Default is: 'ao' for two-isotope enrichment; 'wo' for U enrichment
.. versionadded:: 0.12
Notes
-----
General enrichment procedure is allowed only for elements composed of
two isotopes. If `enrichment_target` is given without `enrichment`
natural composition is added to the material.
"""
cv.check_type('nuclide', element, str)
cv.check_type('percent', percent, Real)
cv.check_value('percent type', percent_type, {'ao', 'wo'})
# Make sure element name is just that
if not element.isalpha():
raise ValueError("Element name should be given by the "
"element's symbol or name, e.g., 'Zr', 'zirconium'")
# Allow for element identifier to be given as a symbol or name
if len(element) > 2:
el = element.lower()
element = openmc.data.ELEMENT_SYMBOL.get(el)
if element is None:
msg = 'Element name "{}" not recognised'.format(el)
raise ValueError(msg)
else:
if element[0].islower():
msg = 'Element name "{}" should start with an uppercase ' \
'letter'.format(element)
raise ValueError(msg)
if len(element) == 2 and element[1].isupper():
msg = 'Element name "{}" should end with a lowercase ' \
'letter'.format(element)
raise ValueError(msg)
# skips the first entry of ATOMIC_SYMBOL which is n for neutron
if element not in list(openmc.data.ATOMIC_SYMBOL.values())[1:]:
msg = 'Element name "{}" not recognised'.format(element)
raise ValueError(msg)
if self._macroscopic is not None:
msg = 'Unable to add an Element to Material ID="{}" as a ' \
'macroscopic data-set has already been added'.format(self._id)
raise ValueError(msg)
if enrichment is not None and enrichment_target is None:
if not isinstance(enrichment, Real):
msg = 'Unable to add an Element to Material ID="{}" with a ' \
'non-floating point enrichment value "{}"'\
.format(self._id, enrichment)
raise ValueError(msg)
elif element != 'U':
msg = 'Unable to use enrichment for element {} which is not ' \
'uranium for Material ID="{}"'.format(element, self._id)
raise ValueError(msg)
# Check that the enrichment is in the valid range
cv.check_less_than('enrichment', enrichment, 100./1.008)
cv.check_greater_than('enrichment', enrichment, 0., equality=True)
if enrichment > 5.0:
msg = 'A uranium enrichment of {} was given for Material ID='\
'"{}". OpenMC assumes the U234/U235 mass ratio is '\
'constant at 0.008, which is only valid at low ' \
'enrichments. Consider setting the isotopic ' \
'composition manually for enrichments over 5%.'.\
format(enrichment, self._id)
warnings.warn(msg)
# Add naturally-occurring isotopes
element = openmc.Element(element)
for nuclide in element.expand(percent,
percent_type,
enrichment,
enrichment_target,
enrichment_type):
self.add_nuclide(*nuclide)
def add_elements_from_formula(self, formula, percent_type='ao', enrichment=None,
enrichment_target=None, enrichment_type=None):
"""Add a elements from a chemical formula to the material.
.. versionadded:: 0.12
Parameters
----------
formula : str
Formula to add, e.g., 'C2O', 'C6H12O6', or (NH4)2SO4.
Note this is case sensitive, elements must start with an uppercase
character. Multiplier numbers must be integers.
percent_type : {'ao', 'wo'}, optional
'ao' for atom percent and 'wo' for weight percent. Defaults to atom
percent.
enrichment : float, optional
Enrichment of an enrichment_target nuclide in percent (ao or wo).
If enrichment_target is not supplied then it is enrichment for U235
in weight percent. For example, input 4.95 for 4.95 weight percent
enriched U. Default is None (natural composition).
enrichment_target : str, optional
Single nuclide name to enrich from a natural composition (e.g., 'O16')
enrichment_type : {'ao', 'wo'}, optional
'ao' for enrichment as atom percent and 'wo' for weight percent.
Default is: 'ao' for two-isotope enrichment; 'wo' for U enrichment
Notes
-----
General enrichment procedure is allowed only for elements composed of
two isotopes. If `enrichment_target` is given without `enrichment`
natural composition is added to the material.
"""
cv.check_type('formula', formula, str)
if '.' in formula:
msg = 'Non-integer multiplier values are not accepted. The ' \
'input formula {} contains a "." character.'.format(formula)
raise ValueError(msg)
# Tokenizes the formula and check validity of tokens
tokens = re.findall(r"([A-Z][a-z]*)(\d*)|(\()|(\))(\d*)", formula)
for row in tokens:
for token in row:
if token.isalpha():
if token == "n" or token not in openmc.data.ATOMIC_NUMBER:
msg = 'Formula entry {} not an element symbol.' \
.format(token)
raise ValueError(msg)
elif token not in ['(', ')', ''] and not token.isdigit():
msg = 'Formula must be made from a sequence of ' \
'element symbols, integers, and brackets. ' \
'{} is not an allowable entry.'.format(token)
raise ValueError(msg)
# Checks that the number of opening and closing brackets are equal
if formula.count('(') != formula.count(')'):
msg = 'Number of opening and closing brackets is not equal ' \
'in the input formula {}.'.format(formula)
raise ValueError(msg)
# Checks that every part of the original formula has been tokenized
for row in tokens:
for token in row:
formula = formula.replace(token, '', 1)
if len(formula) != 0:
msg = 'Part of formula was not successfully parsed as an ' \
'element symbol, bracket or integer. {} was not parsed.' \
.format(formula)
raise ValueError(msg)
# Works through the tokens building a stack
mat_stack = [Counter()]
for symbol, multi1, opening_bracket, closing_bracket, multi2 in tokens:
if symbol:
mat_stack[-1][symbol] += int(multi1 or 1)
if opening_bracket:
mat_stack.append(Counter())
if closing_bracket:
stack_top = mat_stack.pop()
for symbol, value in stack_top.items():
mat_stack[-1][symbol] += int(multi2 or 1) * value
# Normalizing percentages
percents = mat_stack[0].values()
norm_percents = [float(i) / sum(percents) for i in percents]
elements = mat_stack[0].keys()
# Adds each element and percent to the material
for element, percent in zip(elements, norm_percents):
if enrichment_target is not None and element == re.sub(r'\d+$', '', enrichment_target):
self.add_element(element, percent, percent_type, enrichment,
enrichment_target, enrichment_type)
elif enrichment is not None and enrichment_target is None and element == 'U':
self.add_element(element, percent, percent_type, enrichment)
else:
self.add_element(element, percent, percent_type)
def add_s_alpha_beta(self, name, fraction=1.0):
r"""Add an :math:`S(\alpha,\beta)` table to the material
Parameters
----------
name : str
Name of the :math:`S(\alpha,\beta)` table
fraction : float
The fraction of relevant nuclei that are affected by the
:math:`S(\alpha,\beta)` table. For example, if the material is a
block of carbon that is 60% graphite and 40% amorphous then add a
graphite :math:`S(\alpha,\beta)` table with fraction=0.6.
"""
if self._macroscopic is not None:
msg = 'Unable to add an S(a,b) table to Material ID="{}" as a ' \
'macroscopic data-set has already been added'.format(self._id)
raise ValueError(msg)
if not isinstance(name, str):
msg = 'Unable to add an S(a,b) table to Material ID="{}" with a ' \
'non-string table name "{}"'.format(self._id, name)
raise ValueError(msg)
cv.check_type('S(a,b) fraction', fraction, Real)
cv.check_greater_than('S(a,b) fraction', fraction, 0.0, True)
cv.check_less_than('S(a,b) fraction', fraction, 1.0, True)
new_name = openmc.data.get_thermal_name(name)
if new_name != name:
msg = 'OpenMC S(a,b) tables follow the GND naming convention. ' \
'Table "{}" is being renamed as "{}".'.format(name, new_name)
warnings.warn(msg)
self._sab.append((new_name, fraction))
def make_isotropic_in_lab(self):
self.isotropic = [x.name for x in self._nuclides]
def get_elements(self):
"""Returns all elements in the material
.. versionadded:: 0.12
Returns
-------
elements : list of str
List of element names
"""
return sorted({re.split(r'(\d+)', i)[0] for i in self.get_nuclides()})
def get_nuclides(self):
"""Returns all nuclides in the material
Returns
-------
nuclides : list of str
List of nuclide names
"""
return [x.name for x in self._nuclides]
def get_nuclide_densities(self):
"""Returns all nuclides in the material and their densities
Returns
-------
nuclides : dict
Dictionary whose keys are nuclide names and values are 3-tuples of
(nuclide, density percent, density percent type)
"""
# keep ordered dictionary for testing purposes
nuclides = OrderedDict()
for nuclide in self._nuclides:
nuclides[nuclide.name] = nuclide
return nuclides
def get_nuclide_atom_densities(self):
"""Returns all nuclides in the material and their atomic densities in
units of atom/b-cm
Returns
-------
nuclides : dict
Dictionary whose keys are nuclide names and values are tuples of
(nuclide, density in atom/b-cm)
"""
sum_density = False
if self.density_units == 'sum':
sum_density = True
density = 0.
elif self.density_units == 'macro':
density = self.density
elif self.density_units == 'g/cc' or self.density_units == 'g/cm3':
density = -self.density
elif self.density_units == 'kg/m3':
density = -0.001 * self.density
elif self.density_units == 'atom/b-cm':
density = self.density
elif self.density_units == 'atom/cm3' or self.density_units == 'atom/cc':
density = 1.E-24 * self.density
# For ease of processing split out nuc, nuc_density,
# and nuc_density_type into separate arrays
nucs = []
nuc_densities = []
nuc_density_types = []
for nuclide in self.nuclides:
nucs.append(nuclide.name)
nuc_densities.append(nuclide.percent)
nuc_density_types.append(nuclide.percent_type)
nucs = np.array(nucs)
nuc_densities = np.array(nuc_densities)
nuc_density_types = np.array(nuc_density_types)
if sum_density:
density = np.sum(nuc_densities)
percent_in_atom = np.all(nuc_density_types == 'ao')
density_in_atom = density > 0.
sum_percent = 0.
# Convert the weight amounts to atomic amounts
if not percent_in_atom:
for n, nuc in enumerate(nucs):
nuc_densities[n] *= self.average_molar_mass / \
openmc.data.atomic_mass(nuc)
# Now that we have the atomic amounts, let's finish calculating densities
sum_percent = np.sum(nuc_densities)
nuc_densities = nuc_densities / sum_percent
# Convert the mass density to an atom density
if not density_in_atom:
density = -density / self.average_molar_mass * 1.E-24 \
* openmc.data.AVOGADRO
nuc_densities = density * nuc_densities
nuclides = OrderedDict()
for n, nuc in enumerate(nucs):
nuclides[nuc] = (nuc, nuc_densities[n])
return nuclides
def get_mass_density(self, nuclide=None):
"""Return mass density of one or all nuclides
Parameters
----------
nuclide : str, optional
Nuclide for which density is desired. If not specified, the density
for the entire material is given.
Returns
-------
float
Density of the nuclide/material in [g/cm^3]
"""
mass_density = 0.0
for nuc, atoms_per_cc in self.get_nuclide_atom_densities().values():
if nuclide is None or nuclide == nuc:
density_i = 1e24 * atoms_per_cc * openmc.data.atomic_mass(nuc) \
/ openmc.data.AVOGADRO
mass_density += density_i
return mass_density
def get_mass(self, nuclide=None):
"""Return mass of one or all nuclides.
Note that this method requires that the :attr:`Material.volume` has
already been set.
Parameters
----------
nuclide : str, optional
Nuclide for which mass is desired. If not specified, the mass
of the entire material is given.
Returns
-------
float
Mass of the nuclide/material in [g]
"""
if self.volume is None:
raise ValueError("Volume must be set in order to determine mass.")
return self.volume*self.get_mass_density(nuclide)
def clone(self, memo=None):
"""Create a copy of this material with a new unique ID.
Parameters
----------
memo : dict or None
A nested dictionary of previously cloned objects. This parameter
is used internally and should not be specified by the user.
Returns
-------
clone : openmc.Material
The clone of this material
"""
if memo is None:
memo = {}
# If no memoized clone exists, instantiate one
if self not in memo:
# Temporarily remove paths -- this is done so that when the clone is
# made, it doesn't create a copy of the paths (which are specific to
# an instance)
paths = self._paths
self._paths = None
clone = deepcopy(self)
clone.id = None
clone._num_instances = None
# Restore paths on original instance
self._paths = paths
# Memoize the clone
memo[self] = clone
return memo[self]
def _get_nuclide_xml(self, nuclide):
xml_element = ET.Element("nuclide")
xml_element.set("name", nuclide.name)
if nuclide.percent_type == 'ao':
xml_element.set("ao", str(nuclide.percent))
else:
xml_element.set("wo", str(nuclide.percent))
return xml_element
def _get_macroscopic_xml(self, macroscopic):
xml_element = ET.Element("macroscopic")
xml_element.set("name", macroscopic)
return xml_element
def _get_nuclides_xml(self, nuclides):
xml_elements = []
for nuclide in nuclides:
xml_elements.append(self._get_nuclide_xml(nuclide))
return xml_elements
def to_xml_element(self):
"""Return XML representation of the material
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing material data
"""
# Create Material XML element
element = ET.Element("material")
element.set("id", str(self._id))
if len(self._name) > 0:
element.set("name", str(self._name))
if self._depletable:
element.set("depletable", "true")
if self._volume:
element.set("volume", str(self._volume))
# Create temperature XML subelement
if self.temperature is not None:
element.set("temperature", str(self.temperature))
# Create density XML subelement
if self._density is not None or self._density_units == 'sum':
subelement = ET.SubElement(element, "density")
if self._density_units != 'sum':
subelement.set("value", str(self._density))
subelement.set("units", self._density_units)
else:
raise ValueError('Density has not been set for material {}!'
.format(self.id))
if self._macroscopic is None:
# Create nuclide XML subelements
subelements = self._get_nuclides_xml(self._nuclides)
for subelement in subelements:
element.append(subelement)
else:
# Create macroscopic XML subelements
subelement = self._get_macroscopic_xml(self._macroscopic)
element.append(subelement)
if self._sab:
for sab in self._sab:
subelement = ET.SubElement(element, "sab")
subelement.set("name", sab[0])
if sab[1] != 1.0:
subelement.set("fraction", str(sab[1]))
if self._isotropic:
subelement = ET.SubElement(element, "isotropic")
subelement.text = ' '.join(self._isotropic)
return element
@classmethod
def mix_materials(cls, materials, fracs, percent_type='ao', name=None):
"""Mix materials together based on atom, weight, or volume fractions
.. versionadded:: 0.12
Parameters
----------
materials : Iterable of openmc.Material
Materials to combine
fracs : Iterable of float
Fractions of each material to be combined
percent_type : {'ao', 'wo', 'vo'}
Type of percentage, must be one of 'ao', 'wo', or 'vo', to signify atom
percent (molar percent), weight percent, or volume percent,
optional. Defaults to 'ao'
name : str
The name for the new material, optional. Defaults to concatenated
names of input materials with percentages indicated inside
parentheses.
Returns
-------
openmc.Material
Mixture of the materials
"""
cv.check_type('materials', materials, Iterable, Material)
cv.check_type('fracs', fracs, Iterable, Real)
cv.check_value('percent type', percent_type, {'ao', 'wo', 'vo'})
fracs = np.asarray(fracs)
void_frac = 1. - np.sum(fracs)
# If the fractions don't sum to 1: raise an error for 'ao'/'wo',
# or warn and treat the remainder as void for 'vo'
if not np.isclose(void_frac, 0.):
if percent_type in ('ao', 'wo'):
msg = ('A non-zero void fraction is not acceptable for '
'percent_type: {}'.format(percent_type))
raise ValueError(msg)
else:
msg = ('Warning: sum of fractions does not equal 1; void '
'fraction set to {}'.format(void_frac))
warnings.warn(msg)
# Calculate appropriate weights which are how many cc's of each
# material are found in 1cc of the composite material
amms = np.asarray([mat.average_molar_mass for mat in materials])
mass_dens = np.asarray([mat.get_mass_density() for mat in materials])
if percent_type == 'ao':
wgts = fracs * amms / mass_dens
wgts /= np.sum(wgts)
elif percent_type == 'wo':
wgts = fracs / mass_dens
wgts /= np.sum(wgts)
elif percent_type == 'vo':
wgts = fracs
# If any of the involved materials contain S(a,b) tables raise an error
sab_names = set(sab[0] for mat in materials for sab in mat._sab)
if sab_names:
msg = ('Currently we do not support mixing materials containing '
'S(a,b) tables')
raise NotImplementedError(msg)
# Add nuclide densities weighted by appropriate fractions
nuclides_per_cc = defaultdict(float)
mass_per_cc = defaultdict(float)
for mat, wgt in zip(materials, wgts):
for nuc, atoms_per_bcm in mat.get_nuclide_atom_densities().values():
nuc_per_cc = wgt*1.e24*atoms_per_bcm
nuclides_per_cc[nuc] += nuc_per_cc
mass_per_cc[nuc] += nuc_per_cc*openmc.data.atomic_mass(nuc) / \
openmc.data.AVOGADRO
# Create the new material with the desired name
if name is None:
name = '-'.join(['{}({})'.format(m.name, f) for m, f in
zip(materials, fracs)])
new_mat = openmc.Material(name=name)
# Compute atom fractions of nuclides and add them to the new material
tot_nuclides_per_cc = np.sum([dens for dens in nuclides_per_cc.values()])
for nuc, atom_dens in nuclides_per_cc.items():
new_mat.add_nuclide(nuc, atom_dens/tot_nuclides_per_cc, 'ao')
# Compute mass density for the new material and set it
new_density = np.sum([dens for dens in mass_per_cc.values()])
new_mat.set_density('g/cm3', new_density)
# If any of the involved materials is depletable, the new material is
# depletable
new_mat.depletable = any(mat.depletable for mat in materials)
return new_mat
@classmethod
def from_xml_element(cls, elem):
"""Generate material from an XML element
Parameters
----------
elem : xml.etree.ElementTree.Element
XML element
Returns
-------
openmc.Material
Material generated from XML element
"""
mat_id = int(elem.get('id'))
mat = cls(mat_id)
mat.name = elem.get('name')
if "temperature" in elem.attrib:
mat.temperature = float(elem.get("temperature"))
if 'volume' in elem.attrib:
mat.volume = float(elem.get('volume'))
mat.depletable = bool(elem.get('depletable'))
# Get each nuclide
for nuclide in elem.findall('nuclide'):
name = nuclide.attrib['name']
if 'ao' in nuclide.attrib:
mat.add_nuclide(name, float(nuclide.attrib['ao']))
elif 'wo' in nuclide.attrib:
mat.add_nuclide(name, float(nuclide.attrib['wo']), 'wo')
# Get each S(a,b) table
for sab in elem.findall('sab'):
fraction = float(sab.get('fraction', 1.0))
mat.add_s_alpha_beta(sab.get('name'), fraction)
# Get total material density
density = elem.find('density')
units = density.get('units')
if units == 'sum':
mat.set_density(units)
else:
value = float(density.get('value'))
mat.set_density(units, value)
# Check for isotropic scattering nuclides
isotropic = elem.find('isotropic')
if isotropic is not None:
mat.isotropic = isotropic.text.split()
return mat
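# --- Editor's usage sketch (not in the original source) ------------------
# A minimal, hedged example of the Material API defined above; the nuclide
# names, densities, and fractions are illustrative only.
def _material_usage_example():
    fuel = Material(name='UO2 fuel')
    fuel.add_element('U', 1.0, percent_type='ao', enrichment=4.95)
    fuel.add_nuclide('O16', 2.0, percent_type='ao')
    fuel.set_density('g/cm3', 10.4)

    water = Material(name='water')
    water.add_elements_from_formula('H2O')
    water.set_density('g/cm3', 1.0)

    # Volume-fraction mixing; 'ao'/'wo' would require fractions summing to 1.
    mix = Material.mix_materials([fuel, water], [0.6, 0.4], percent_type='vo')
    return fuel, water, mix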
class Materials(cv.CheckedList):
"""Collection of Materials used for an OpenMC simulation.
This class corresponds directly to the materials.xml input file. It can be
thought of as a normal Python list where each member is a
:class:`Material`. It behaves like a list as the following example
demonstrates:
>>> fuel = openmc.Material()
>>> clad = openmc.Material()
>>> water = openmc.Material()
>>> m = openmc.Materials([fuel])
>>> m.append(water)
>>> m += [clad]
Parameters
----------
materials : Iterable of openmc.Material
Materials to add to the collection
Attributes
----------
cross_sections : str or path-like
Indicates the path to an XML cross section listing file (usually named
cross_sections.xml). If it is not set, the
:envvar:`OPENMC_CROSS_SECTIONS` environment variable will be used for
continuous-energy calculations and
:envvar:`OPENMC_MG_CROSS_SECTIONS` will be used for multi-group
calculations to find the path to the HDF5 cross section file.
"""
def __init__(self, materials=None):
super().__init__(Material, 'materials collection')
self._cross_sections = None
if materials is not None:
self += materials
@property
def cross_sections(self):
return self._cross_sections
@cross_sections.setter
def cross_sections(self, cross_sections):
if cross_sections is not None:
self._cross_sections = Path(cross_sections)
def append(self, material):
"""Append material to collection
Parameters
----------
material : openmc.Material
Material to append
"""
super().append(material)
def insert(self, index, material):
"""Insert material before index
Parameters
----------
index : int
Index in list
material : openmc.Material
Material to insert
"""
super().insert(index, material)
def make_isotropic_in_lab(self):
for material in self:
material.make_isotropic_in_lab()
def export_to_xml(self, path='materials.xml'):
"""Export material collection to an XML file.
Parameters
----------
path : str
Path to file to write. Defaults to 'materials.xml'.
"""
# Check if path is a directory
p = Path(path)
if p.is_dir():
p /= 'materials.xml'
# Write materials to the file one-at-a-time. This significantly reduces
# memory demand over allocating a complete ElementTree and writing it in
# one go.
with open(str(p), 'w', encoding='utf-8',
errors='xmlcharrefreplace') as fh:
# Write the header and the opening tag for the root element.
fh.write("<?xml version='1.0' encoding='utf-8'?>\n")
fh.write('<materials>\n')
# Write the <cross_sections> element.
if self.cross_sections is not None:
element = ET.Element('cross_sections')
element.text = str(self.cross_sections)
clean_indentation(element, level=1)
element.tail = element.tail.strip(' ')
fh.write(' ')
reorder_attributes(element) # TODO: Remove when support is Python 3.8+
ET.ElementTree(element).write(fh, encoding='unicode')
# Write the <material> elements.
for material in sorted(self, key=lambda x: x.id):
element = material.to_xml_element()
clean_indentation(element, level=1)
element.tail = element.tail.strip(' ')
fh.write(' ')
reorder_attributes(element) # TODO: Remove when support is Python 3.8+
ET.ElementTree(element).write(fh, encoding='unicode')
# Write the closing tag for the root element.
fh.write('</materials>\n')
@classmethod
def from_xml(cls, path='materials.xml'):
"""Generate materials collection from XML file
Parameters
----------
path : str, optional
Path to materials XML file
Returns
-------
openmc.Materials
Materials collection
"""
tree = ET.parse(path)
root = tree.getroot()
# Generate each material
materials = cls()
for material in root.findall('material'):
materials.append(Material.from_xml_element(material))
# Check for cross sections settings
xs = tree.find('cross_sections')
if xs is not None:
materials.cross_sections = xs.text
return materials
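# --- Editor's usage sketch (not in the original source) ------------------
# Round-tripping a collection through materials.xml; the path and material
# definition are illustrative only.
def _materials_roundtrip_example(path='materials.xml'):
    fuel = Material(name='fuel')
    fuel.add_nuclide('U235', 1.0)
    fuel.set_density('g/cm3', 10.0)
    Materials([fuel]).export_to_xml(path)
    return Materials.from_xml(path)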
|
nelsonag/openmc
|
openmc/material.py
|
Python
|
mit
| 46,990
|
[
"Avogadro"
] |
d6bc65982d8441c94068cb23962ab364aa4fdc469c2f3238396a92238e94b0d2
|
"""Element support in foyer."""
import openmm.app.element as elem
class Element(elem.Element):
"""An Element represents a chemical element.
The openmm.app.element module contains objects for all the standard chemical elements,
such as element.hydrogen or element.carbon. You can also call the static method Element.getBySymbol() to
look up the Element with a particular chemical symbol.
Element objects should be considered immutable.
Canonical periodic-table elements use openmm's Element objects,
while custom elements use this foyer Element subclass.
"""
def __init__(self, number, name, symbol, mass):
"""Create a new element.
Parameters
----------
number : int
The atomic number of the element
name : string
The name of the element
symbol : string
The chemical symbol of the element
mass : float
The atomic mass of the element
"""
## The atomic number of the element
self._atomic_number = number
## The name of the element
self._name = name
## The chemical symbol of the element
self._symbol = symbol
## The atomic mass of the element
self._mass = mass
# Index this element in a global table
s = symbol.strip().upper()
## If we add a new element, we need to re-hash elements by mass
Element._elements_by_mass = None
if s in Element._elements_by_symbol:
raise ValueError("Duplicate element symbol %s" % s)
|
mosdef-hub/foyer
|
foyer/element.py
|
Python
|
mit
| 1,589
|
[
"OpenMM"
] |
4cb6898ee5d808a4bb0673b7f46aef2ff0d708cdfe4698961b326d681269dda0
|
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
import numpy as np
import pandas as pd
from scipy import optimize
from scipy.stats import boxcox
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.externals import six
from sklearn.externals.joblib import Parallel, delayed
from sklearn.preprocessing import StandardScaler
from sklearn.utils.validation import check_is_fitted
from skutil.base import *
from ..utils import *
from ..utils.fixes import _cols_if_none
__all__ = [
'BoxCoxTransformer',
'FunctionMapper',
'InteractionTermTransformer',
'SelectiveScaler',
'SpatialSignTransformer',
'YeoJohnsonTransformer'
]
# A very small number used to measure differences.
# If the absolute difference between two numbers is
# <= EPS, it is considered equal.
EPS = 1e-12
# A very small number used to represent zero.
ZERO = 1e-16
# Helper functions:
def _eqls(lam, v):
return np.abs(lam - v) <= EPS
def _validate_rows(X):
m, n = X.shape
if m < 2:
raise ValueError('n_samples should be at least two, but got %i' % m)
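# --- Editor's example (not in the original source) -----------------------
# _eqls treats two floats as equal when they differ by at most EPS (1e-12):
#   _eqls(1.0 + 5e-13, 1.0)  -> True
#   _eqls(1.0 + 1e-11, 1.0)  -> False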
class FunctionMapper(BaseSkutil, TransformerMixin):
"""Apply a function to a column or set of columns.
Parameters
----------
cols : array_like, shape=(n_features,), optional (default=None)
The names of the columns on which to apply the transformation.
If no column names are provided, the transformer will be ``fit``
on the entire frame. Note that the transformation will also only
apply to the specified columns, and any other non-specified
columns will still be present after transformation.
fun : function, optional (default=None)
The function to apply to the feature(s). This function will be
applied via lambda expression to each column (independent of
one another). Therefore, the callable should accept an array-like
argument.
Attributes
----------
is_fit_ : bool
The ``FunctionMapper`` callable is set in the constructor,
but to remain true to the sklearn API, we need to ensure ``fit``
is called prior to ``transform``. Thus, we set this attribute in
the ``fit`` method, which performs some validation, to ensure the
``fun`` parameter has been validated.
Examples
--------
The following example will apply a cube-root transformation
to the first two columns in the iris dataset.
>>> from skutil.utils import load_iris_df
>>> import pandas as pd
>>> import numpy as np
>>>
>>> X = load_iris_df(include_tgt=False)
>>>
>>> # define the function
>>> def cube_root(x):
... return np.power(x, 0.333)
>>>
>>> # make our transformer
>>> trans = FunctionMapper(cols=X.columns[:2], fun=cube_root)
>>> trans.fit_transform(X).head()
sepal length (cm) sepal width (cm) petal length (cm) petal width (cm)
0 1.720366 1.517661 1.4 0.2
1 1.697600 1.441722 1.4 0.2
2 1.674205 1.473041 1.3 0.2
3 1.662258 1.457550 1.5 0.2
4 1.709059 1.531965 1.4 0.2
"""
def __init__(self, cols=None, fun=None, **kwargs):
super(FunctionMapper, self).__init__(cols=cols)
self.fun = fun
self.kwargs = kwargs
def fit(self, X, y=None):
"""Fit the transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to fit. The frame will only
be fit on the prescribed ``cols`` (see ``__init__``) or
all of them if ``cols`` is None. Furthermore, ``X`` will
not be altered in the process of the fit.
y : None
Passthrough for ``sklearn.pipeline.Pipeline``. Even
if explicitly set, will not change behavior of ``fit``.
Returns
-------
self
"""
# Check this second in this case
X, self.cols = validate_is_pd(X, self.cols)
# validate the function. If none, make it a passthrough
if not self.fun:
def pass_through(x):
return x
self.fun = pass_through
else:
# check whether is function
if not hasattr(self.fun, '__call__'):
raise ValueError('passed fun arg is not a function')
# since we aren't checking is fit, we should set
# an arbitrary value to show validation has already occurred
self.is_fit_ = True
# TODO: this might cause issues in de-pickling, as we're
# going to be pickling a non-instance method... solve this.
return self
def transform(self, X):
"""Transform a test matrix given the already-fit transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to transform. The operation will
be applied to a copy of the input data, and the result
will be returned.
Returns
-------
X : Pandas ``DataFrame``
The operation is applied to a copy of ``X``,
and the result set is returned.
"""
check_is_fitted(self, 'is_fit_')
X, _ = validate_is_pd(X, self.cols)
cols = _cols_if_none(X, self.cols)
# apply the function
# TODO: do we want to change the behavior to where the function
# should accept an entire frame and not a series?
X[cols] = X[cols].apply(lambda x: self.fun(x, **self.kwargs))
return X
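# --- Editor's usage sketch (not in the original source) ------------------
# FunctionMapper forwards its constructor **kwargs to ``fun``, so ufunc
# options pass straight through; ``df`` is a hypothetical numeric DataFrame.
def _function_mapper_kwargs_example(df):
    trans = FunctionMapper(cols=df.columns[:2], fun=np.round, decimals=2)
    return trans.fit_transform(df)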
def _mul(a, b):
"""Multiplies two series objects
(no validation since internally used).
Parameters
----------
a : Pandas ``Series``
One of two Pandas ``Series`` objects that will
be interacted together.
b : Pandas ``Series``
One of two Pandas ``Series`` objects that will
be interacted together.
Returns
-------
product : np.ndarray
"""
return (a * b).values
class InteractionTermTransformer(BaseSkutil, TransformerMixin):
"""A class that will generate interaction terms between selected columns.
An interaction captures some relationship between two independent variables
in the form of In = (xi * xj).
Parameters
----------
cols : array_like, shape=(n_features,), optional (default=None)
The names of the columns on which to apply the transformation.
If no column names are provided, the transformer will be ``fit``
on the entire frame. Note that the transformation will also only
apply to the specified columns, and any other non-specified
columns will still be present after transformation. Note that since
this transformer can only operate on numeric columns, not explicitly
setting the ``cols`` parameter may result in errors for categorical data.
as_df : bool, optional (default=True)
Whether to return a Pandas ``DataFrame`` in the ``transform``
method. If False, will return a Numpy ``ndarray`` instead.
Since most skutil transformers depend on explicitly-named
``DataFrame`` features, the ``as_df`` parameter is True by default.
interaction_function : callable, optional (default=None)
A callable for interactions. Default None will
result in multiplication of two Series objects.
name_suffix : str, optional (default='I')
The suffix to add to the new feature name in the form of
<feature_x>_<feature_y>_<suffix>
only_return_interactions : bool, optional (default=False)
If set to True, will only return the selected columns and
their respective generated interaction terms.
Attributes
----------
fun_ : callable
The interaction term function
Examples
--------
The following example interacts the first two columns of the iris
dataset using the default ``_mul`` function (product).
>>> from skutil.preprocessing import InteractionTermTransformer
>>> from skutil.utils import load_iris_df
>>> import pandas as pd
>>>
>>> X = load_iris_df(include_tgt=False)
>>>
>>> trans = InteractionTermTransformer(cols=X.columns[:2])
>>> X_transform = trans.fit_transform(X)
>>>
>>> assert X_transform.shape[1] == X.shape[1] + 1 # only added one column
>>> X_transform[X_transform.columns[-1]].head()
0 17.85
1 14.70
2 15.04
3 14.26
4 18.00
Name: sepal length (cm)_sepal width (cm)_I, dtype: float64
"""
def __init__(self, cols=None, as_df=True, interaction_function=None,
name_suffix='I', only_return_interactions=False):
super(InteractionTermTransformer, self).__init__(cols=cols, as_df=as_df)
self.interaction_function = interaction_function
self.name_suffix = name_suffix
self.only_return_interactions = only_return_interactions
def fit(self, X, y=None):
"""Fit the transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to fit. The frame will only
be fit on the prescribed ``cols`` (see ``__init__``) or
all of them if ``cols`` is None. Furthermore, ``X`` will
not be altered in the process of the fit.
y : None
Passthrough for ``sklearn.pipeline.Pipeline``. Even
if explicitly set, will not change behavior of ``fit``.
Returns
-------
self
"""
X, self.cols = validate_is_pd(X, self.cols)
cols = _cols_if_none(X, self.cols)
self.fun_ = self.interaction_function if self.interaction_function is not None else _mul
# validate function
if not hasattr(self.fun_, '__call__'):
raise TypeError('require callable for interaction_function')
# validate cols
if len(cols) < 2:
raise ValueError('need at least two columns')
return self
def transform(self, X):
"""Transform a test matrix given the already-fit transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to transform. The operation will
be applied to a copy of the input data, and the result
will be returned.
Returns
-------
X : Pandas ``DataFrame``
The operation is applied to a copy of ``X``,
and the result set is returned.
"""
check_is_fitted(self, 'fun_')
X, _ = validate_is_pd(X, self.cols)
cols = _cols_if_none(X, self.cols)
n_features = len(cols)
suff = self.name_suffix
fun = self.fun_
append_dict = {}
        interaction_names = list(cols)
        # iterate the (N choose 2) unique column pairs rather than the full N^2 grid
for i in range(n_features - 1):
for j in range(i + 1, n_features):
col_i, col_j = cols[i], cols[j]
new_nm = '%s_%s_%s' % (col_i, col_j, suff)
append_dict[new_nm] = fun(X[col_i], X[col_j])
interaction_names.append(new_nm)
# create DF 2:
df2 = pd.DataFrame.from_dict(append_dict)
X = pd.concat([X, df2], axis=1)
# if we only want to keep interaction names, filter now
X = X if not self.only_return_interactions else X[interaction_names]
# return matrix if needed
return X if self.as_df else X.as_matrix()
class SelectiveScaler(BaseSkutil, TransformerMixin):
"""A class that will apply scaling only to a select group
of columns. Useful for data that may contain features that should not
be scaled, such as those that have been dummied, or for any already-in-scale
features. Perhaps, even, there are some features you'd like to scale in
a different manner than others. This, then, allows two back-to-back
``SelectiveScaler`` instances with different columns & strategies in a
pipeline object.
Parameters
----------
cols : array_like, shape=(n_features,), optional (default=None)
The names of the columns on which to apply the transformation.
If no column names are provided, the transformer will be ``fit``
on the entire frame. Note that the transformation will also only
apply to the specified columns, and any other non-specified
columns will still be present after transformation. Note that since
this transformer can only operate on numeric columns, not explicitly
setting the ``cols`` parameter may result in errors for categorical data.
scaler : instance of a sklearn Scaler, optional (default=StandardScaler)
        The scaler to fit against ``cols``. Must implement the sklearn
        transformer API (``fit`` and ``transform``), e.g.
        ``sklearn.preprocessing.StandardScaler``.
as_df : bool, optional (default=True)
Whether to return a Pandas ``DataFrame`` in the ``transform``
method. If False, will return a Numpy ``ndarray`` instead.
Since most skutil transformers depend on explicitly-named
``DataFrame`` features, the ``as_df`` parameter is True by default.
Attributes
----------
is_fit_ : bool
The ``SelectiveScaler`` parameter ``scaler`` is set in the constructor,
but to remain true to the sklearn API, we need to ensure ``fit``
is called prior to ``transform``. Thus, we set this attribute in
the ``fit`` method, which performs some validation, to ensure the
``scaler`` parameter has been validated.
Examples
--------
The following example will scale only the first two features
in the iris dataset:
>>> from skutil.preprocessing import SelectiveScaler
>>> from skutil.utils import load_iris_df
>>> import pandas as pd
>>> import numpy as np
>>>
>>> X = load_iris_df(include_tgt=False)
>>>
>>> trans = SelectiveScaler(cols=X.columns[:2])
>>> X_transform = trans.fit_transform(X)
>>>
>>> X_transform.head()
sepal length (cm) sepal width (cm) petal length (cm) petal width (cm)
0 -0.900681 1.032057 1.4 0.2
1 -1.143017 -0.124958 1.4 0.2
2 -1.385353 0.337848 1.3 0.2
3 -1.506521 0.106445 1.5 0.2
4 -1.021849 1.263460 1.4 0.2
"""
def __init__(self, cols=None, scaler=StandardScaler(), as_df=True):
super(SelectiveScaler, self).__init__(cols=cols, as_df=as_df)
self.scaler = scaler
def fit(self, X, y=None):
"""Fit the transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to fit. The frame will only
be fit on the prescribed ``cols`` (see ``__init__``) or
all of them if ``cols`` is None. Furthermore, ``X`` will
not be altered in the process of the fit.
y : None
Passthrough for ``sklearn.pipeline.Pipeline``. Even
if explicitly set, will not change behavior of ``fit``.
Returns
-------
self
"""
# check on state of X and cols
X, self.cols = validate_is_pd(X, self.cols)
cols = _cols_if_none(X, self.cols)
# throws exception if the cols don't exist
self.scaler.fit(X[cols])
# this is our fit param
self.is_fit_ = True
return self
def transform(self, X):
"""Transform a test matrix given the already-fit transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to transform. The operation will
be applied to a copy of the input data, and the result
will be returned.
Returns
-------
X : Pandas ``DataFrame``
The operation is applied to a copy of ``X``,
and the result set is returned.
"""
# check on state of X and cols
X, _ = validate_is_pd(X, self.cols)
cols = _cols_if_none(X, self.cols)
        # Raises an exception if the cols don't exist or if the scaler isn't fit yet
X[cols] = self.scaler.transform(X[cols])
return X if self.as_df else X.as_matrix()
class BoxCoxTransformer(BaseSkutil, TransformerMixin):
"""Estimate a lambda parameter for each feature, and transform
it to a distribution more-closely resembling a Gaussian bell
using the Box-Cox transformation.
Parameters
----------
cols : array_like, shape=(n_features,), optional (default=None)
The names of the columns on which to apply the transformation.
If no column names are provided, the transformer will be ``fit``
on the entire frame. Note that the transformation will also only
apply to the specified columns, and any other non-specified
columns will still be present after transformation. Note that since
this transformer can only operate on numeric columns, not explicitly
setting the ``cols`` parameter may result in errors for categorical data.
n_jobs : int, 1 by default
The number of jobs to use for the computation. This works by
estimating each of the feature lambdas in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but
one are used.
as_df : bool, optional (default=True)
Whether to return a Pandas ``DataFrame`` in the ``transform``
method. If False, will return a Numpy ``ndarray`` instead.
Since most skutil transformers depend on explicitly-named
``DataFrame`` features, the ``as_df`` parameter is True by default.
shift_amt : float, optional (default=1e-6)
Since the Box-Cox transformation requires that all values be positive
(above zero), any features that contain sub-zero elements will be shifted
up by the absolute value of the minimum element plus this amount in the ``fit``
method. In the ``transform`` method, if any of the test data is less than zero
after shifting, it will be truncated at the ``shift_amt`` value.
Attributes
----------
shift_ : dict
The shifts for each feature needed to shift the min value in
the feature up to at least 0.0, as every element must be positive
lambda_ : dict
The lambda values corresponding to each feature
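    Examples
    --------
    A minimal usage sketch (output omitted, since the transformed
    values depend on the estimated lambdas):
    >>> from skutil.preprocessing import BoxCoxTransformer
    >>> from skutil.utils import load_iris_df
    >>>
    >>> X = load_iris_df(include_tgt=False)
    >>> trans = BoxCoxTransformer(cols=X.columns[:2])
    >>> X_transform = trans.fit_transform(X)  # doctest: +SKIP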
"""
def __init__(self, cols=None, n_jobs=1, as_df=True, shift_amt=1e-6):
super(BoxCoxTransformer, self).__init__(cols=cols, as_df=as_df)
self.n_jobs = n_jobs
self.shift_amt = shift_amt
def fit(self, X, y=None):
"""Fit the transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to fit. The frame will only
be fit on the prescribed ``cols`` (see ``__init__``) or
all of them if ``cols`` is None. Furthermore, ``X`` will
not be altered in the process of the fit.
y : None
Passthrough for ``sklearn.pipeline.Pipeline``. Even
if explicitly set, will not change behavior of ``fit``.
Returns
-------
self
"""
# check on state of X and cols
X, self.cols = validate_is_pd(X, self.cols, assert_all_finite=True) # creates a copy -- we need all to be finite
cols = _cols_if_none(X, self.cols)
# ensure enough rows
_validate_rows(X)
# First step is to compute all the shifts needed, then add back to X...
min_Xs = X[cols].min(axis=0)
shift = np.array([np.abs(x) + self.shift_amt if x <= 0.0 else 0.0 for x in min_Xs])
X[cols] += shift
# now put shift into a dict
self.shift_ = dict(zip(cols, shift))
# Now estimate the lambdas in parallel
self.lambda_ = dict(zip(cols,
Parallel(n_jobs=self.n_jobs)(
delayed(_estimate_lambda_single_y)
(X[i].tolist()) for i in cols)))
return self
def transform(self, X):
"""Transform a test matrix given the already-fit transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to transform. The operation will
be applied to a copy of the input data, and the result
will be returned.
Returns
-------
X : Pandas ``DataFrame``
The operation is applied to a copy of ``X``,
and the result set is returned.
"""
check_is_fitted(self, 'shift_')
# check on state of X and cols
X, _ = validate_is_pd(X, self.cols, assert_all_finite=True)
cols = _cols_if_none(X, self.cols)
_, n_features = X.shape
lambdas_, shifts_ = self.lambda_, self.shift_
# Add the shifts in, and if they're too low,
# we have to truncate at some low value: 1e-6
for nm in cols:
X[nm] += shifts_[nm]
# If the shifts are too low, truncate...
X = X.apply(lambda x: x.apply(lambda y: np.maximum(self.shift_amt, y)))
# do transformations
for nm in cols:
X[nm] = _transform_y(X[nm].tolist(), lambdas_[nm])
return X if self.as_df else X.as_matrix()
def _transform_y(y, lam):
"""Transform a single y, given a single lambda value.
No validation performed.
Parameters
----------
y : array_like, shape (n_samples,)
The vector being transformed
lam : ndarray, shape (n_lambdas,)
The lambda value used for the transformation
"""
# ensure np array
y = np.array(y)
y_prime = np.array([(np.power(x, lam) - 1) / lam if not _eqls(lam, ZERO) else log(x) for x in y])
# rarely -- very rarely -- we can get a NaN. Why?
return y_prime
def _estimate_lambda_single_y(y):
"""Estimate lambda for a single y, given a range of lambdas
through which to search. No validation performed.
Parameters
----------
y : ndarray, shape (n_samples,)
The vector being estimated against
"""
# ensure is array
y = np.array(y)
# Use scipy's log-likelihood estimator
b = boxcox(y, lmbda=None)
# Return lambda corresponding to maximum P
return b[1]
class YeoJohnsonTransformer(BaseSkutil, TransformerMixin):
"""Estimate a lambda parameter for each feature, and transform
it to a distribution more-closely resembling a Gaussian bell
using the Yeo-Johnson transformation.
Parameters
----------
cols : array_like, shape=(n_features,), optional (default=None)
The names of the columns on which to apply the transformation.
If no column names are provided, the transformer will be ``fit``
on the entire frame. Note that the transformation will also only
apply to the specified columns, and any other non-specified
columns will still be present after transformation. Note that since
this transformer can only operate on numeric columns, not explicitly
setting the ``cols`` parameter may result in errors for categorical data.
n_jobs : int, 1 by default
The number of jobs to use for the computation. This works by
estimating each of the feature lambdas in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but
one are used.
as_df : bool, optional (default=True)
Whether to return a Pandas ``DataFrame`` in the ``transform``
method. If False, will return a Numpy ``ndarray`` instead.
Since most skutil transformers depend on explicitly-named
``DataFrame`` features, the ``as_df`` parameter is True by default.
Attributes
----------
lambda_ : dict
The lambda values corresponding to each feature
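    Examples
    --------
    A minimal usage sketch (output omitted, since the transformed
    values depend on the estimated lambdas):
    >>> from skutil.preprocessing import YeoJohnsonTransformer
    >>> from skutil.utils import load_iris_df
    >>>
    >>> X = load_iris_df(include_tgt=False)
    >>> trans = YeoJohnsonTransformer(cols=X.columns[:2])
    >>> X_transform = trans.fit_transform(X)  # doctest: +SKIP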
"""
def __init__(self, cols=None, n_jobs=1, as_df=True):
super(YeoJohnsonTransformer, self).__init__(cols=cols, as_df=as_df)
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Fit the transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to fit. The frame will only
be fit on the prescribed ``cols`` (see ``__init__``) or
all of them if ``cols`` is None. Furthermore, ``X`` will
not be altered in the process of the fit.
y : None
Passthrough for ``sklearn.pipeline.Pipeline``. Even
if explicitly set, will not change behavior of ``fit``.
Returns
-------
self
"""
# check on state of X and cols
X, self.cols = validate_is_pd(X, self.cols, assert_all_finite=True) # creates a copy -- we need all to be finite
cols = _cols_if_none(X, self.cols)
# ensure enough rows
_validate_rows(X)
# Now estimate the lambdas in parallel
self.lambda_ = dict(zip(cols,
Parallel(n_jobs=self.n_jobs)(
delayed(_yj_estimate_lambda_single_y)
(X[nm]) for nm in cols)))
return self
def transform(self, X):
"""Transform a test matrix given the already-fit transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to transform. The operation will
be applied to a copy of the input data, and the result
will be returned.
Returns
-------
X : Pandas ``DataFrame``
The operation is applied to a copy of ``X``,
and the result set is returned.
"""
check_is_fitted(self, 'lambda_')
# check on state of X and cols
X, cols = validate_is_pd(X, self.cols, assert_all_finite=True) # creates a copy -- we need all to be finite
cols = _cols_if_none(X, self.cols)
lambdas_ = self.lambda_
# do transformations
for nm in cols:
X[nm] = _yj_transform_y(X[nm], lambdas_[nm])
return X if self.as_df else X.as_matrix()
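# Yeo-Johnson transform of a single value x for a given lambda, as
# implemented case-by-case below:
#   x >= 0, lam != 0:  ((x + 1)**lam - 1) / lam
#   x >= 0, lam == 0:  log(x + 1)
#   x <  0, lam != 2:  -(((-x + 1)**(2 - lam)) - 1) / (2 - lam)
#   x <  0, lam == 2:  -log(-x + 1)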
def _yj_trans_single_x(x, lam):
if x >= 0:
# Case 1: x >= 0 and lambda is not 0
if not _eqls(lam, ZERO):
return (np.power(x + 1, lam) - 1.0) / lam
# Case 2: x >= 0 and lambda is zero
return log(x + 1)
else:
        # Case 3: x < 0 and lambda is not two
if not lam == 2.0:
denom = 2.0 - lam
numer = np.power((-x + 1), (2.0 - lam)) - 1.0
return -numer / denom
# Case 4: x < 0 and lambda is two
return -log(-x + 1)
def _yj_transform_y(y, lam):
"""Transform a single y, given a single lambda value.
No validation performed.
Parameters
----------
y : ndarray, shape (n_samples,)
The vector being transformed
lam : ndarray, shape (n_lambdas,)
The lambda value used for the transformation
"""
y = np.array(y)
return np.array([_yj_trans_single_x(x, lam) for x in y])
def _yj_estimate_lambda_single_y(y):
"""Estimate lambda for a single y, given a range of lambdas
through which to search. No validation performed.
Parameters
----------
y : ndarray, shape (n_samples,)
The vector being estimated against
"""
y = np.array(y)
    # Use custom log-likelihood estimator
return _yj_normmax(y)
def _yj_normmax(x, brack=(-2, 2)):
"""Compute optimal YJ transform parameter for input data.
Parameters
----------
x : array_like
Input array.
brack : 2-tuple
The starting interval for a downhill bracket search
"""
# Use MLE to compute the optimal YJ parameter
def _mle_opt(i, brck):
def _eval_mle(lmb, data):
# Function to minimize
return -_yj_llf(data, lmb)
return optimize.brent(_eval_mle, brack=brck, args=(i,))
    return _mle_opt(x, brack)
def _yj_llf(data, lmb):
"""Transform a y vector given a single lambda value,
and compute the log-likelihood function. No validation
is applied to the input.
Parameters
----------
data : array_like
The vector to transform
lmb : scalar
The lambda value
"""
data = np.asarray(data)
N = data.shape[0]
y = _yj_transform_y(data, lmb)
# We can't take the canonical log of data, as there could be
# zeros or negatives. Thus, we need to shift both distributions
    # up by some arbitrary factor just for the LLF computation
min_d, min_y = np.min(data), np.min(y)
if min_d < ZERO:
shift = np.abs(min_d) + 1
data += shift
# Same goes for Y
if min_y < ZERO:
shift = np.abs(min_y) + 1
y += shift
# Compute mean on potentially shifted data
y_mean = np.mean(y, axis=0)
var = np.sum((y - y_mean) ** 2. / N, axis=0)
# If var is 0.0, we'll get a warning. Means all the
# values were nearly identical in y, so we will return
# NaN so we don't optimize for this value of lam
if 0 == var:
return np.nan
# Can't use canonical log due to maybe negatives, so use the truncated log function in utils
llf = (lmb - 1) * np.sum(log(data), axis=0)
llf -= N / 2.0 * log(var)
return llf
class SpatialSignTransformer(BaseSkutil, TransformerMixin):
"""Project the feature space of a matrix into a multi-dimensional sphere
by dividing each feature by its squared norm.
Parameters
----------
cols : array_like, shape=(n_features,), optional (default=None)
The names of the columns on which to apply the transformation.
If no column names are provided, the transformer will be ``fit``
on the entire frame. Note that the transformation will also only
apply to the specified columns, and any other non-specified
columns will still be present after transformation. Note that since
this transformer can only operate on numeric columns, not explicitly
setting the ``cols`` parameter may result in errors for categorical data.
n_jobs : int, 1 by default
        The number of jobs to use for the computation. This works by
        computing each of the feature squared norms in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but
one are used.
as_df : bool, optional (default=True)
Whether to return a Pandas ``DataFrame`` in the ``transform``
method. If False, will return a Numpy ``ndarray`` instead.
Since most skutil transformers depend on explicitly-named
``DataFrame`` features, the ``as_df`` parameter is True by default.
Attributes
----------
sq_nms_ : dict
The squared norms for each feature
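    Examples
    --------
    A minimal usage sketch (output omitted). Each selected column ``x``
    is divided by its squared norm, i.e. ``x / (x . x)``:
    >>> from skutil.preprocessing import SpatialSignTransformer
    >>> from skutil.utils import load_iris_df
    >>>
    >>> X = load_iris_df(include_tgt=False)
    >>> trans = SpatialSignTransformer(cols=X.columns[:2])
    >>> X_transform = trans.fit_transform(X)  # doctest: +SKIP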
"""
def __init__(self, cols=None, n_jobs=1, as_df=True):
super(SpatialSignTransformer, self).__init__(cols=cols, as_df=as_df)
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Fit the transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to fit. The frame will only
be fit on the prescribed ``cols`` (see ``__init__``) or
all of them if ``cols`` is None. Furthermore, ``X`` will
not be altered in the process of the fit.
y : None
Passthrough for ``sklearn.pipeline.Pipeline``. Even
if explicitly set, will not change behavior of ``fit``.
Returns
-------
self
"""
# check on state of X and cols
X, self.cols = validate_is_pd(X, self.cols)
cols = _cols_if_none(X, self.cols)
# Now get sqnms in parallel
self.sq_nms_ = dict(zip(cols,
Parallel(n_jobs=self.n_jobs)(
delayed(_sq_norm_single)
(X[nm]) for nm in cols)))
return self
def transform(self, X):
"""Transform a test matrix given the already-fit transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to transform. The operation will
be applied to a copy of the input data, and the result
will be returned.
Returns
-------
X : Pandas ``DataFrame``
The operation is applied to a copy of ``X``,
and the result set is returned.
"""
check_is_fitted(self, 'sq_nms_')
# check on state of X and cols
X, _ = validate_is_pd(X, self.cols)
sq_nms_ = self.sq_nms_
# scale by norms
for nm, the_norm in six.iteritems(sq_nms_):
X[nm] /= the_norm
return X if self.as_df else X.as_matrix()
def _sq_norm_single(x, zero_action=np.inf):
x = np.asarray(x)
nrm = np.dot(x, x)
# What if a squared norm is zero? We want to
# avoid a divide-by-zero situation...
    return nrm if nrm != 0 else zero_action
|
tgsmith61591/skutil
|
skutil/preprocessing/transform.py
|
Python
|
bsd-3-clause
| 34,160
|
[
"Gaussian"
] |
de1389fd5123e4735b6ad93d30258ea35cf4635ee57f480a88a8b882c458c027
|
from ase import Atoms
from gpaw import FermiDirac, Mixer
from gpaw.transport.calculator import Transport
from gpaw.atom.basis import BasisMaker
from gpaw.poisson import PoissonSolver
import pickle
a = 3.6
L = 7.00
basis = BasisMaker('Na').generate(1, 1, energysplit=0.3)
atoms = Atoms('Na12', pbc=(1, 1, 1), cell=[L, L, 12 * a])
atoms.positions[:12, 2] = [i * a for i in range(12)]
atoms.positions[:, :2] = L / 2.
atoms.center()
pl_atoms1 = range(4)
pl_atoms2 = range(8, 12)
pl_cell1 = (L, L, 4 * a)
pl_cell2 = pl_cell1
t = Transport(h=0.3,
xc='LDA',
basis={'Na': basis},
kpts=(2,2,1),
occupations=FermiDirac(0.1),
mode='lcao',
poissonsolver=PoissonSolver(nn=2, relax='GS'),
txt='Na_lcao.txt',
mixer=Mixer(0.1, 5, weight=100.0),
guess_steps=10,
pl_atoms=[pl_atoms1, pl_atoms2],
pl_cells=[pl_cell1, pl_cell2],
pl_kpts=(2,2,15),
analysis_data_list=['tc', 'force'],
edge_atoms=[[0, 3], [0, 11]],
mol_atoms=range(4, 8))
atoms.set_calculator(t)
t.calculate_iv(0.5, 2)
|
robwarm/gpaw-symm
|
gpaw/test/transport.py
|
Python
|
gpl-3.0
| 1,185
|
[
"ASE",
"GPAW"
] |
7f09d57078f8f62b0acb5352bb2597048383ded26ccea4e2b422e78efba82ea6
|
__author__ = 'cdan'
import pandas as pd
import sqlalchemy
import pymssql
server = ''  # connection credentials are blanked in the source; fill in before running
database = ''
user = ''
password = ''
year = 2014
ase_sql = 'SELECT 14 as datasource_id, [estimates_year],1300000 + [mgra] as mgra_id,[ethnicity],[popm_0to4],[popm_5to9],[popm_10to14],[popm_15to17],[popm_18to19],[popm_20to24],[popm_25to29],[popm_30to34],[popm_35to39],[popm_40to44],[popm_45to49],[popm_50to54],[popm_55to59],[popm_60to61],[popm_62to64],[popm_65to69],[popm_70to74],[popm_75to79],[popm_80to84],[popm_85plus],[popf_0to4],[popf_5to9],[popf_10to14],[popf_15to17],[popf_18to19],[popf_20to24],[popf_25to29],[popf_30to34],[popf_35to39],[popf_40to44],[popf_45to49],[popf_50to54],[popf_55to59],[popf_60to61],[popf_62to64],[popf_65to69],[popf_70to74],[popf_75to79],[popf_80to84],[popf_85plus] FROM [detailed_pop_tab_mgra] WHERE estimates_year = %d and ethnicity > 0' % year
inc_sql = 'SELECT 14 as datasource_id, [estimates_year],1300000 + [mgra] as mgra_id,[i1],[i2],[i3],[i4],[i5],[i6],[i7],[i8],[i9],[i10] FROM [income_estimates_mgra] where estimates_year = %d' % year
housing_sql = '''
SELECT 14 as datasource_id,[estimates_year],1300000 + [mgra] as mgra_id,1 as structure_type_id,[hs_sf] as units,[hh_sf] as occupied,CASE WHEN hs_sf > 0 THEN 1 - (cast(hh_sf as float) / cast(hs_sf as float)) ELSE null END as vacancy FROM[popest_mgra] WHERE estimates_year = %d
UNION ALL
SELECT 14 as datasource_id,[estimates_year],1300000 + [mgra] as mgra_id,2 as structure_type_id,[hs_sfmu] as units,[hh_sfmu] as occupied,CASE WHEN hs_sfmu > 0 THEN 1 - (cast(hh_sfmu as float) / cast(hs_sfmu as float)) ELSE null END as vacancy FROM [popest_mgra] WHERE estimates_year = %d
UNION ALL
SELECT 14 as datasource_id,[estimates_year],1300000 + [mgra] as mgra_id,3 as structure_type_id,[hs_mf] as units,[hh_mf] as occupied,CASE WHEN hs_mf > 0 THEN 1 - (cast(hh_mf as float) / cast(hs_mf as float)) ELSE null END as vacancy FROM [popest_mgra] WHERE estimates_year = %d
UNION ALL
SELECT 14 as datasource_id,[estimates_year],1300000 + [mgra] as mgra_id,4 as structure_type_id,[hs_mh] as units,[hh_mh] as occupied ,CASE WHEN hs_mh > 0 THEN 1 - (cast(hh_mh as float) / cast(hs_mh as float)) ELSE null END as vacancy FROM [popest_mgra] WHERE estimates_year = %d
''' % (year, year, year, year)
population_sql = '''
SELECT 14 as datasource_id,[estimates_year],1300000 + [mgra] as mgra_id,1 as housing_type_id,[hhp] FROM [popest_mgra] where estimates_year = %d
UNION ALL
SELECT 14 as datasource_id,[estimates_year],1300000 + [mgra] as mgra_id,2 as housing_type_id,[gq_mil] FROM [popest_mgra] where estimates_year = %d
UNION ALL
SELECT 14 as datasource_id,[estimates_year],1300000 + [mgra] as mgra_id,3 as housing_type_id,[gq_civ_college] FROM [popest_mgra] where estimates_year = %d
UNION ALL
SELECT 14 as datasource_id,[estimates_year],1300000 + [mgra] as mgra_id,4 as housing_type_id,[gq_civ_other] FROM [popest_mgra] where estimates_year = %d
''' % (year, year, year, year)
age_range = {'0to4':1,'5to9':2,'10to14':3,'15to17':4,'18to19':5,'20to24':6,'25to29':7,'30to34':8,'35to39':9,'40to44':10,
'45to49':11,'50to54':12,'55to59':13,'60to61':14,'62to64':15,'65to69':16,'70to74':17,'75to79':18,'80to84':19,'85plus':20}
age_range_df = pd.DataFrame(age_range.items(), columns=['variable','age_group_id'])
with pymssql.connect(server, user, password, database) as conn:
'''
ase_df = pd.read_sql_query(ase_sql, conn)
#Remove the total population records (ethnicity = 0) and unpivot
ase_df = pd.melt(ase_df, id_vars=['datasource_id','estimates_year','mgra_id', 'ethnicity'])
#Set the sex, Female = 1, Male = 2
ase_df['sex_id'] = ase_df['variable'].str.contains('popm') + 1
ase_df['variable'] = ase_df['variable'].str[5:]
ase_df = pd.merge(ase_df, age_range_df, on='variable').drop('variable', 1)
ase_df.columns = ['datasource_id','year', 'mgra_id', 'ethnicity_id', 'population', 'sex_id', 'age_group_id']
ase_df = ase_df[['datasource_id','year','mgra_id','age_group_id','sex_id','ethnicity_id', 'population']]
print "Writing ASE Table..."
ase_df.to_csv('age_sex_ethnicity.csv', index=False)
print 'Writing ASE Table Complete'
################################################################################################################
income_df = pd.read_sql(inc_sql, conn)
income_df = pd.melt(income_df, id_vars=['datasource_id', 'estimates_year', 'mgra_id'], var_name='income_group_id', value_name='households')
income_df['income_group_id'] = income_df['income_group_id'].str[1:].astype(int) + 10
print "Writing Income Table..."
income_df.to_csv('income.csv', index=False)
print 'Writing Income Table Complete'
'''
housing_df = pd.read_sql(housing_sql, conn)
print "Writing Housing Table..."
housing_df.to_csv('housing.csv', index=False)
print 'Writing Housing Table Complete'
population_df = pd.read_sql(population_sql, conn)
print "Writing Population Table..."
population_df.to_csv('population.csv', index=False)
print 'Writing Population Table Complete'
|
SANDAG/DataSurfer
|
api/utilities/Estimates_ETL.py
|
Python
|
mit
| 5,121
|
[
"ASE"
] |
a47791f41be5c2313a6b489678b0b3e62c1984275ae76ce6997f58b58e6b7d21
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
def disable_GUI(code):
# integrate without visualizer
breakpoint = "visualizer.run(1)"
assert breakpoint in code
code = code.replace(breakpoint, "steps=1\nsystem.integrator.run(steps)", 1)
return code
sample, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
"@SAMPLES_DIR@/visualization_charged.py",
substitutions=disable_GUI, steps=100)
@skipIfMissingFeatures
class Sample(ut.TestCase):
system = sample.system
if __name__ == "__main__":
ut.main()
|
espressomd/espresso
|
testsuite/scripts/samples/test_visualization_charged.py
|
Python
|
gpl-3.0
| 1,260
|
[
"ESPResSo"
] |
021886d640cee8728b9471179b112eccff3f45f1849856e06fdaf2081a64afba
|
import HTSeq
import numpy
from matplotlib import pyplot
bamfile = HTSeq.BAM_Reader( "SRR001432_head.bam" )
gtffile = HTSeq.GFF_Reader( "Homo_sapiens.GRCh37.56_chrom1.gtf" )
halfwinwidth = 3000
fragmentsize = 200
coverage = HTSeq.GenomicArray( "auto", stranded=False, typecode="i" )
for almnt in bamfile:
if almnt.aligned:
almnt.iv.length = fragmentsize
coverage[ almnt.iv ] += 1
tsspos = set()
for feature in gtffile:
if feature.type == "exon" and feature.attr["exon_number"] == "1":
tsspos.add( feature.iv.start_d_as_pos )
profile = numpy.zeros( 2*halfwinwidth, dtype='i' )
for p in tsspos:
window = HTSeq.GenomicInterval( p.chrom, p.pos - halfwinwidth, p.pos + halfwinwidth, "." )
wincvg = numpy.fromiter( coverage[window], dtype='i', count=2*halfwinwidth )
if p.strand == "+":
profile += wincvg
else:
profile += wincvg[::-1]
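# A minimal plotting sketch (an illustrative assumption, not part of the
# original script; it uses the pyplot import above):
#
#   pyplot.plot(numpy.arange(-halfwinwidth, halfwinwidth), profile)
#   pyplot.show()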
|
simon-anders/htseq
|
python3/doc/tss1.py
|
Python
|
gpl-3.0
| 887
|
[
"HTSeq"
] |
c20207e0eb0d6d1db004a395d6eb7c49be7363dbf54eda8f4aaeefed7d0c51ce
|
from .bernoulli import Bernoulli
from .exponential import Exponential
from .gaussian import Gaussian, HeteroscedasticGaussian
from .gamma import Gamma
from .poisson import Poisson
from .student_t import StudentT
from .likelihood import Likelihood
from .mixed_noise import MixedNoise
from .binomial import Binomial
|
beckdaniel/GPy
|
GPy/likelihoods/__init__.py
|
Python
|
bsd-3-clause
| 315
|
[
"Gaussian"
] |
914324e05a836440ff7efe4aedca2c84598463a8d8a405a50d135ef765b18e48
|
##
# Copyright 2021 Vrije Universiteit Brussel
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for ORCA, implemented as an easyblock
@author: Alex Domingo (Vrije Universiteit Brussel)
"""
import glob
import os
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.makecp import MakeCp
from easybuild.easyblocks.generic.packedbinary import PackedBinary
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import write_file
from easybuild.tools.py2vs3 import string_type
from easybuild.tools.systemtools import X86_64, get_cpu_architecture
class EB_ORCA(PackedBinary, MakeCp):
"""
ORCA installation files are extracted and placed in standard locations using 'files_to_copy' from MakeCp.
Sanity checks on files are automatically generated based on the contents of 'files_to_copy' by gathering
the target files in the build directory and checking their presence in the installation directory.
Sanity checks also include a quick test calculating the HF energy of a water molecule.
"""
@staticmethod
def extra_options(extra_vars=None):
"""Extra easyconfig parameters for ORCA."""
extra_vars = MakeCp.extra_options()
extra_vars.update(PackedBinary.extra_options())
# files_to_copy is not mandatory here, since we set it by default in install_step
extra_vars['files_to_copy'][2] = CUSTOM
return extra_vars
def __init__(self, *args, **kwargs):
"""Init and validate easyconfig parameters and system architecture"""
super(EB_ORCA, self).__init__(*args, **kwargs)
# If user overwrites 'files_to_copy', custom 'sanity_check_paths' must be present
if self.cfg['files_to_copy'] and not self.cfg['sanity_check_paths']:
raise EasyBuildError("Found 'files_to_copy' option in easyconfig without 'sanity_check_paths'")
# Add orcaarch template for supported architectures
myarch = get_cpu_architecture()
if myarch == X86_64:
orcaarch = 'x86-64'
else:
raise EasyBuildError("Architecture %s is not supported by ORCA on EasyBuild", myarch)
self.cfg.template_values['orcaarch'] = orcaarch
self.cfg.generate_template_values()
def install_step(self):
"""Install ORCA with MakeCp easyblock"""
if not self.cfg['files_to_copy']:
# Put installation files in standard locations
files_to_copy = [
(['auto*', 'orca*', 'otool*'], 'bin'),
(['*.pdf'], 'share'),
]
# Version 5 extra files
if LooseVersion(self.version) >= LooseVersion('5.0.0'):
compoundmethods = (['ORCACompoundMethods'], 'bin')
files_to_copy.append(compoundmethods)
# Shared builds have additional libraries
libs_to_copy = (['liborca*'], 'lib')
if all([glob.glob(p) for p in libs_to_copy[0]]):
files_to_copy.append(libs_to_copy)
self.cfg['files_to_copy'] = files_to_copy
MakeCp.install_step(self)
def sanity_check_step(self):
"""Custom sanity check for ORCA"""
custom_paths = None
if not self.cfg['sanity_check_paths']:
custom_paths = {'files': [], 'dirs': []}
if self.cfg['files_to_copy']:
# Convert 'files_to_copy' to list of files in build directory
for spec in self.cfg['files_to_copy']:
if isinstance(spec, tuple):
file_pattern = spec[0]
dest_dir = spec[1]
elif isinstance(spec, string_type):
file_pattern = spec
dest_dir = ''
else:
raise EasyBuildError(
"Found neither string nor tuple as file to copy: '%s' (type %s)", spec, type(spec)
)
if isinstance(file_pattern, string_type):
file_pattern = [file_pattern]
source_files = []
for pattern in file_pattern:
source_files.extend(glob.glob(pattern))
# Add files to custom sanity checks
for source in source_files:
if os.path.isfile(source):
custom_paths['files'].append(os.path.join(dest_dir, source))
else:
custom_paths['dirs'].append(os.path.join(dest_dir, source))
else:
# Minimal check of files (needed by --module-only)
custom_paths['files'] = ['bin/orca']
# Simple test: HF energy of water molecule
test_input_content = """
!HF DEF2-SVP
%%PAL NPROCS %(nprocs)s END
* xyz 0 1
O 0.0000 0.0000 0.0626
H -0.7920 0.0000 -0.4973
H 0.7920 0.0000 -0.4973
*
"""
nprocs = self.cfg.get('parallel', 1)
test_input_content = test_input_content % {'nprocs': nprocs}
test_input_path = os.path.join(self.builddir, 'eb_test_hf_water.inp')
write_file(test_input_path, test_input_content)
# Reference total energy
test_output_energy = '-75.95934031'
test_output_regex = 'FINAL SINGLE POINT ENERGY[ \t]*%s' % test_output_energy
# ORCA has to be executed using its full path to run in parallel
if os.path.isdir(os.path.join(self.installdir, 'bin')):
orca_bin = '$EBROOTORCA/bin/orca'
else:
orca_bin = '$(which orca)'
test_orca_cmd = "%s %s" % (orca_bin, test_input_path)
custom_commands = [
# Execute test in ORCA
test_orca_cmd,
# Repeat test and check total energy
"%s | grep -c '%s'" % (test_orca_cmd, test_output_regex),
]
super(EB_ORCA, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)
|
akesandgren/easybuild-easyblocks
|
easybuild/easyblocks/o/orca.py
|
Python
|
gpl-2.0
| 7,049
|
[
"ORCA"
] |
62b4c89cbc2c68c1020eeff6e5bc1ec2a233624b2992b0310e1eac093b4478dd
|
visit_schedule_fields = ('visit_schedule_name', 'schedule_name', 'visit_code')
visit_schedule_fieldset_tuple = (
'Visit Schedule', {
'classes': ('collapse',),
'fields': visit_schedule_fields})
visit_schedule_only_fields = ('visit_schedule_name', 'schedule_name')
visit_schedule_only_fieldset_tuple = (
'Visit Schedule', {
'classes': ('collapse',),
'fields': visit_schedule_only_fields})
|
botswana-harvard/edc-visit-schedule
|
edc_visit_schedule/fieldsets.py
|
Python
|
gpl-2.0
| 430
|
[
"VisIt"
] |
9bb2b9edc6c5673910ea4cad2108f1291693662429548ebcb87ebe6e8c04679b
|
"""
Do the initial configuration of a DIRAC component
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
Script.setUsageMessage(
'\n'.join(
[
__doc__.split('\n')[1],
'Usage:',
' %s [options] ... ComponentType System Component|System/Component' %
Script.scriptName,
'Arguments:',
' ComponentType: Name of the ComponentType (ie: agent)',
' System: Name of the DIRAC system (ie: WorkloadManagement)',
' component: Name of the DIRAC component (ie: JobCleaningAgent)']))
Script.parseCommandLine()
args = Script.getPositionalArgs()
componentType = args[0]
if len(args) == 2:
system, component = args[1].split('/')
else:
system = args[1]
component = args[2]
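# Example invocation (per the usage message above):
#   dirac-cfg-add-option.py agent WorkloadManagement JobCleaningAgent
# or, equivalently:
#   dirac-cfg-add-option.py agent WorkloadManagement/JobCleaningAgent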
# imports
from DIRAC import gConfig
from DIRAC import exit as DIRACexit
from DIRAC.Core.Utilities.Extensions import extensionsByPriority
from DIRAC.FrameworkSystem.Client.ComponentInstaller import gComponentInstaller
#
gComponentInstaller.exitOnError = True
result = gComponentInstaller.addDefaultOptionsToCS(gConfig, componentType, system, component,
extensionsByPriority(),
specialOptions={},
overwrite=False)
if not result['OK']:
print("ERROR:", result['Message'])
else:
DIRACexit()
|
yujikato/DIRAC
|
tests/Jenkins/dirac-cfg-add-option.py
|
Python
|
gpl-3.0
| 1,540
|
[
"DIRAC"
] |
02cb2d2f7bf525be0121e3de16257f0df74d520f6cbc8791dea431aa1bcb0757
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to read data in the graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import summary
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as var_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import input as input_ops
from tensorflow.python.training import queue_runner
# Default name for key in the feature dict.
KEY_FEATURE_NAME = '__key__'
def _check_enqueue_params(num_queue_runners, num_enqueue_threads):
"""Check enqueue paramerters for deprecation of `num_queue_runners`."""
if num_queue_runners is not None:
# TODO(yifanchen): Remove on Nov 21 2016.
logging.warning('`num_queue_runners` is deprecated, it will be removed on '
'Nov 21 2016')
if num_enqueue_threads is not None:
raise ValueError('`num_queue_runners` and `num_enqueue_threads` can not '
'both be set.')
elif num_enqueue_threads is None:
logging.warning('Default behavior will change and `num_queue_runners` '
'will be replaced by `num_enqueue_threads`.')
num_queue_runners = 2
return num_queue_runners, num_enqueue_threads
def read_batch_examples(file_pattern, batch_size, reader,
randomize_input=True, num_epochs=None,
queue_capacity=10000, num_threads=1,
read_batch_size=1, parse_fn=None,
name=None):
"""Adds operations to read, queue, batch `Example` protos.
Given file pattern (or list of files), will setup a queue for file names,
read `Example` proto using provided `reader`, use batch queue to create
batches of examples of size `batch_size`.
All queue runners are added to the queue runners collection, and may be
started via `start_queue_runners`.
All ops are added to the default graph.
Use `parse_fn` if you need to do parsing / processing on single examples.
Args:
file_pattern: List of files or pattern of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int or scalar `Tensor` specifying the batch size to use.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If `None`, cycles through the dataset forever.
NOTE - If specified, creates a variable that must be initialized, so call
`tf.global_variables_initializer()` as shown in the tests.
queue_capacity: Capacity for input queue.
num_threads: The number of threads enqueuing examples.
read_batch_size: An int or scalar `Tensor` specifying the number of
records to read at once
parse_fn: Parsing function, takes `Example` Tensor returns parsed
representation. If `None`, no parsing is done.
name: Name of resulting op.
Returns:
String `Tensor` of batched `Example` proto.
Raises:
ValueError: for invalid inputs.
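  Example:
    A minimal sketch (the file pattern and reader choice here are
    illustrative assumptions):
      examples = read_batch_examples(
          file_pattern='/data/train-*.tfrecord',
          batch_size=32,
          reader=io_ops.TFRecordReader)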
"""
_, examples = read_keyed_batch_examples(
file_pattern=file_pattern, batch_size=batch_size, reader=reader,
randomize_input=randomize_input, num_epochs=num_epochs,
queue_capacity=queue_capacity, num_threads=num_threads,
read_batch_size=read_batch_size, parse_fn=parse_fn, name=name)
return examples
def read_keyed_batch_examples(
file_pattern, batch_size, reader,
randomize_input=True, num_epochs=None,
queue_capacity=10000, num_threads=1,
read_batch_size=1, parse_fn=None,
name=None):
"""Adds operations to read, queue, batch `Example` protos.
Given file pattern (or list of files), will setup a queue for file names,
read `Example` proto using provided `reader`, use batch queue to create
batches of examples of size `batch_size`.
All queue runners are added to the queue runners collection, and may be
started via `start_queue_runners`.
All ops are added to the default graph.
Use `parse_fn` if you need to do parsing / processing on single examples.
Args:
file_pattern: List of files or pattern of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int or scalar `Tensor` specifying the batch size to use.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If `None`, cycles through the dataset forever.
NOTE - If specified, creates a variable that must be initialized, so call
`tf.global_variables_initializer()` as shown in the tests.
queue_capacity: Capacity for input queue.
num_threads: The number of threads enqueuing examples.
read_batch_size: An int or scalar `Tensor` specifying the number of
records to read at once
parse_fn: Parsing function, takes `Example` Tensor returns parsed
representation. If `None`, no parsing is done.
name: Name of resulting op.
Returns:
Returns tuple of:
- `Tensor` of string keys.
- String `Tensor` of batched `Example` proto.
Raises:
ValueError: for invalid inputs.
"""
return _read_keyed_batch_examples_helper(
file_pattern,
batch_size,
reader,
randomize_input,
num_epochs,
queue_capacity,
num_threads,
read_batch_size,
parse_fn,
setup_shared_queue=False,
name=name)
def _read_keyed_batch_examples_shared_queue(file_pattern,
batch_size,
reader,
randomize_input=True,
num_epochs=None,
queue_capacity=10000,
num_threads=1,
read_batch_size=1,
parse_fn=None,
name=None):
"""Adds operations to read, queue, batch `Example` protos.
Given file pattern (or list of files), will setup a shared queue for file
names, setup a worker queue that pulls from the shared queue, read `Example`
protos using provided `reader`, use batch queue to create batches of examples
  of size `batch_size`. This provides at-most-once visit guarantees. Note that
this only works if the parameter servers are not pre-empted or restarted or
the session is not restored from a checkpoint since the state of a queue
is not checkpointed and we will end up restarting from the entire list of
files.
All queue runners are added to the queue runners collection, and may be
started via `start_queue_runners`.
All ops are added to the default graph.
Use `parse_fn` if you need to do parsing / processing on single examples.
Args:
file_pattern: List of files or pattern of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int or scalar `Tensor` specifying the batch size to use.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If `None`, cycles through the dataset forever.
NOTE - If specified, creates a variable that must be initialized, so call
`tf.global_variables_initializer()` as shown in the tests.
queue_capacity: Capacity for input queue.
num_threads: The number of threads enqueuing examples.
read_batch_size: An int or scalar `Tensor` specifying the number of
records to read at once
parse_fn: Parsing function, takes `Example` Tensor returns parsed
representation. If `None`, no parsing is done.
name: Name of resulting op.
Returns:
Returns tuple of:
- `Tensor` of string keys.
- String `Tensor` of batched `Example` proto.
Raises:
ValueError: for invalid inputs.
"""
return _read_keyed_batch_examples_helper(
file_pattern,
batch_size,
reader,
randomize_input,
num_epochs,
queue_capacity,
num_threads,
read_batch_size,
parse_fn,
setup_shared_queue=True,
name=name)
def _get_shared_file_name_queue(file_names, shuffle, num_epochs, name):
# Creating a dummy variable so we can put the shared queue in ps if there is
# a PS and in the worker otherwise. TODO(rohanj): Figure out how to place an
# op on PS without this hack
dummy_var = var_ops.Variable(initial_value=0, name='queue_placement_var')
with ops.device(dummy_var.device):
shared_file_name_queue = input_ops.string_input_producer(
constant_op.constant(
file_names, name='input'),
shuffle=shuffle,
num_epochs=num_epochs,
name=name,
shared_name=name)
return shared_file_name_queue
def _get_file_names(file_pattern, randomize_input):
"""Parse list of file names from pattern, optionally shuffled.
Args:
file_pattern: File glob pattern, or list of strings.
randomize_input: Whether to shuffle the order of file names.
Returns:
List of file names matching `file_pattern`.
Raises:
ValueError: If `file_pattern` is empty, or pattern matches no files.
"""
if isinstance(file_pattern, list):
file_names = file_pattern
if not file_names:
raise ValueError('No files given to dequeue_examples.')
else:
file_names = list(gfile.Glob(file_pattern))
if not file_names:
raise ValueError('No files match %s.' % file_pattern)
# Sort files so it will be deterministic for unit tests. They'll be shuffled
# in `string_input_producer` if `randomize_input` is enabled.
if not randomize_input:
file_names = sorted(file_names)
return file_names
def _get_examples(file_name_queue, reader, num_threads, read_batch_size,
parse_fn):
with ops.name_scope('read'):
example_list = []
for _ in range(num_threads):
if read_batch_size > 1:
keys, examples_proto = reader().read_up_to(file_name_queue,
read_batch_size)
else:
keys, examples_proto = reader().read(file_name_queue)
if parse_fn:
parsed_examples = parse_fn(examples_proto)
# Map keys into example map because batch_join doesn't support
# tuple of Tensor + dict.
if isinstance(parsed_examples, dict):
parsed_examples[KEY_FEATURE_NAME] = keys
example_list.append(parsed_examples)
else:
example_list.append((keys, parsed_examples))
else:
example_list.append((keys, examples_proto))
return example_list
def _read_keyed_batch_examples_helper(file_pattern,
batch_size,
reader,
randomize_input=True,
num_epochs=None,
queue_capacity=10000,
num_threads=1,
read_batch_size=1,
parse_fn=None,
setup_shared_queue=False,
name=None):
"""Adds operations to read, queue, batch `Example` protos.
Args:
file_pattern: List of files or pattern of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int or scalar `Tensor` specifying the batch size to use.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If `None`, cycles through the dataset forever.
NOTE - If specified, creates a variable that must be initialized, so call
`tf.global_variables_initializer()` as shown in the tests.
queue_capacity: Capacity for input queue.
num_threads: The number of threads enqueuing examples.
read_batch_size: An int or scalar `Tensor` specifying the number of
records to read at once
parse_fn: Parsing function, takes `Example` Tensor returns parsed
representation. If `None`, no parsing is done.
setup_shared_queue: Whether to set up a shared queue for file names.
name: Name of resulting op.
Returns:
Returns tuple of:
- `Tensor` of string keys.
- String `Tensor` of batched `Example` proto.
Raises:
ValueError: for invalid inputs.
"""
# Retrieve files to read.
file_names = _get_file_names(file_pattern, randomize_input)
# Check input parameters are given and reasonable.
if (not queue_capacity) or (queue_capacity <= 0):
raise ValueError('Invalid queue_capacity %s.' % queue_capacity)
if (batch_size is None) or (
(not isinstance(batch_size, ops.Tensor)) and
(batch_size <= 0 or batch_size > queue_capacity)):
raise ValueError(
'Invalid batch_size %s, with queue_capacity %s.' %
(batch_size, queue_capacity))
if (read_batch_size is None) or (
(not isinstance(read_batch_size, ops.Tensor)) and
(read_batch_size <= 0)):
raise ValueError('Invalid read_batch_size %s.' % read_batch_size)
if (not num_threads) or (num_threads <= 0):
raise ValueError('Invalid num_threads %s.' % num_threads)
if (num_epochs is not None) and (num_epochs <= 0):
raise ValueError('Invalid num_epochs %s.' % num_epochs)
with ops.name_scope(name, 'read_batch_examples', [file_pattern]) as scope:
with ops.name_scope('file_name_queue') as file_name_queue_scope:
if setup_shared_queue:
shared_file_name_queue = _get_shared_file_name_queue(
file_names, randomize_input, num_epochs, file_name_queue_scope)
file_name_queue = data_flow_ops.FIFOQueue(
capacity=1, dtypes=[dtypes.string], shapes=[[]])
enqueue_op = file_name_queue.enqueue(shared_file_name_queue.dequeue())
queue_runner.add_queue_runner(
queue_runner.QueueRunner(file_name_queue, [enqueue_op]))
else:
file_name_queue = input_ops.string_input_producer(
constant_op.constant(
file_names, name='input'),
shuffle=randomize_input,
num_epochs=num_epochs,
name=file_name_queue_scope)
example_list = _get_examples(file_name_queue, reader, num_threads,
read_batch_size, parse_fn)
enqueue_many = read_batch_size > 1
if num_epochs is None:
allow_smaller_final_batch = False
else:
allow_smaller_final_batch = True
# Setup batching queue given list of read example tensors.
if randomize_input:
if isinstance(batch_size, ops.Tensor):
min_after_dequeue = int(queue_capacity * 0.4)
else:
min_after_dequeue = max(queue_capacity - (3 * batch_size), batch_size)
queued_examples_with_keys = input_ops.shuffle_batch_join(
example_list, batch_size, capacity=queue_capacity,
min_after_dequeue=min_after_dequeue,
enqueue_many=enqueue_many, name=scope,
allow_smaller_final_batch=allow_smaller_final_batch)
else:
queued_examples_with_keys = input_ops.batch_join(
example_list, batch_size, capacity=queue_capacity,
enqueue_many=enqueue_many, name=scope,
allow_smaller_final_batch=allow_smaller_final_batch)
if parse_fn and isinstance(queued_examples_with_keys, dict):
queued_keys = queued_examples_with_keys.pop(KEY_FEATURE_NAME)
return queued_keys, queued_examples_with_keys
return queued_examples_with_keys
def read_keyed_batch_features(file_pattern,
batch_size,
features,
reader,
randomize_input=True,
num_epochs=None,
queue_capacity=10000,
reader_num_threads=1,
feature_queue_capacity=100,
num_queue_runners=None,
num_enqueue_threads=None,
parse_fn=None,
name=None):
"""Adds operations to read, queue, batch and parse `Example` protos.
Given file pattern (or list of files), will setup a queue for file names,
read `Example` proto using provided `reader`, use batch queue to create
batches of examples of size `batch_size` and parse example given `features`
specification.
All queue runners are added to the queue runners collection, and may be
started via `start_queue_runners`.
All ops are added to the default graph.
Args:
file_pattern: List of files or pattern of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int or scalar `Tensor` specifying the batch size to use.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If None, cycles through the dataset forever. NOTE - If specified,
creates a variable that must be initialized, so call
tf.local_variables_initializer() as shown in the tests.
queue_capacity: Capacity for input queue.
reader_num_threads: The number of threads to read examples.
feature_queue_capacity: Capacity of the parsed features queue.
num_queue_runners: Deprecated. Defaults to 2 if this and
`num_enqueue_threads` are both `None`. This is the number of queue
runners to start for the feature queue. Adding multiple queue runners for
the parsed example queue helps maintain a full queue when the subsequent
computations overall are cheaper than parsing. This argument will be
deprecated and replaced with `num_enqueue_threads`.
num_enqueue_threads: Number of threads to enqueue the parsed example queue.
Using multiple threads to enqueue the parsed example queue helps maintain
a full queue when the subsequent computations overall are cheaper than
parsing. This argument will replace `num_queue_runners`. This and
`num_queue_runners` can not both be set.
parse_fn: Parsing function, takes `Example` Tensor returns parsed
representation. If `None`, no parsing is done.
name: Name of resulting op.
Returns:
Returns tuple of:
- `Tensor` of string keys.
- A dict of `Tensor` or `SparseTensor` objects for each in `features`.
Raises:
ValueError: for invalid inputs.
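  Example:
    A minimal sketch (the file pattern and feature spec here are
    illustrative assumptions):
      keys, feature_map = read_keyed_batch_features(
          file_pattern='/data/train-*.tfrecord',
          batch_size=32,
          features={'age': parsing_ops.FixedLenFeature([1], dtypes.int64)},
          reader=io_ops.TFRecordReader)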
"""
num_queue_runners, num_enqueue_threads = _check_enqueue_params(
num_queue_runners, num_enqueue_threads)
with ops.name_scope(name, 'read_batch_features', [file_pattern]) as scope:
keys, examples = read_keyed_batch_examples(
file_pattern, batch_size, reader, randomize_input=randomize_input,
num_epochs=num_epochs, queue_capacity=queue_capacity,
num_threads=reader_num_threads, read_batch_size=batch_size,
parse_fn=parse_fn, name=scope)
# Parse the example.
feature_map = parsing_ops.parse_example(examples, features)
return queue_parsed_features(
feature_map,
keys=keys,
feature_queue_capacity=feature_queue_capacity,
num_queue_runners=num_queue_runners,
num_enqueue_threads=num_enqueue_threads,
name=scope)
def _read_keyed_batch_features_shared_queue(file_pattern,
batch_size,
features,
reader,
randomize_input=True,
num_epochs=None,
queue_capacity=10000,
reader_num_threads=1,
feature_queue_capacity=100,
num_queue_runners=2,
parse_fn=None,
name=None):
"""Adds operations to read, queue, batch and parse `Example` protos.
Given file pattern (or list of files), will setup a shared queue for file
names, setup a worker queue that gets filenames from the shared queue,
read `Example` proto using provided `reader`, use batch queue to create
batches of examples of size `batch_size` and parse example given `features`
specification.
All queue runners are added to the queue runners collection, and may be
started via `start_queue_runners`.
All ops are added to the default graph.
Args:
file_pattern: List of files or pattern of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int or scalar `Tensor` specifying the batch size to use.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If None, cycles through the dataset forever. NOTE - If specified,
creates a variable that must be initialized, so call
tf.local_variables_initializer() as shown in the tests.
queue_capacity: Capacity for input queue.
reader_num_threads: The number of threads to read examples.
feature_queue_capacity: Capacity of the parsed features queue.
num_queue_runners: Number of queue runners to start for the feature queue.
Adding multiple queue runners for the parsed example queue helps maintain
a full queue when the subsequent computations overall are cheaper than
parsing.
parse_fn: Parsing function, takes `Example` Tensor and returns parsed
representation. If `None`, no parsing is done.
name: Name of resulting op.
Returns:
Returns tuple of:
- `Tensor` of string keys.
- A dict of `Tensor` or `SparseTensor` objects for each in `features`.
Raises:
ValueError: for invalid inputs.
"""
with ops.name_scope(name, 'read_batch_features', [file_pattern]) as scope:
keys, examples = _read_keyed_batch_examples_shared_queue(
file_pattern,
batch_size,
reader,
randomize_input=randomize_input,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
num_threads=reader_num_threads,
read_batch_size=batch_size,
parse_fn=parse_fn,
name=scope)
# Parse the example.
feature_map = parsing_ops.parse_example(examples, features)
return queue_parsed_features(
feature_map,
keys=keys,
feature_queue_capacity=feature_queue_capacity,
num_queue_runners=num_queue_runners,
name=scope)
def queue_parsed_features(parsed_features,
keys=None,
feature_queue_capacity=100,
num_queue_runners=None,
num_enqueue_threads=None,
name=None):
"""Speeds up parsing by using queues to do it asynchronously.
This function adds the tensors in `parsed_features` to a queue, which allows
the parsing (or any other expensive op before this) to be asynchronous wrt the
rest of the training graph. This greatly improves read latency and speeds up
training since the data will already be parsed and ready when each step of
training needs it.
All queue runners are added to the queue runners collection, and may be
started via `start_queue_runners`.
All ops are added to the default graph.
Args:
parsed_features: A dict of string key to `Tensor` or `SparseTensor` objects.
keys: `Tensor` of string keys.
feature_queue_capacity: Capacity of the parsed features queue.
num_queue_runners: Deprecated. Defaults to 2 if this and
`num_enqueue_threads` are both `None`. This is the number of queue
runners to start for the feature queue. Adding multiple queue runners for
the parsed example queue helps maintain a full queue when the subsequent
computations overall are cheaper than parsing. This argument will be
deprecated and replaced with `num_enqueue_threads`.
num_enqueue_threads: Number of threads to enqueue the parsed example queue.
Using multiple threads to enqueue the parsed example queue helps maintain
a full queue when the subsequent computations overall are cheaper than
parsing. This argument will replace `num_queue_runners`. This and
`num_queue_runners` can not both be set.
name: Name of resulting op.
Returns:
Returns tuple of:
- `Tensor` corresponding to `keys` if provided, otherwise `None`.
- A dict of string key to `Tensor` or `SparseTensor` objects corresponding
to `parsed_features`.
Raises:
ValueError: for invalid inputs.
"""
num_queue_runners, num_enqueue_threads = _check_enqueue_params(
num_queue_runners, num_enqueue_threads)
args = list(parsed_features.values())
if keys is not None:
args += [keys]
with ops.name_scope(name, 'queue_parsed_features', args):
# Add the preprocessed tensors to the queue, recording the queue dtypes
# for each item of the queue.
tensors_to_enqueue = []
# Each entry contains the key, and a boolean which indicates whether the
# tensor was a sparse tensor.
tensors_mapping = []
# TODO(sibyl-Aix6ihai): Most of the functionality here is about pushing sparse
# tensors into a queue. This could be handled somewhere else so others
# can reuse it. Also, QueueBase may be extended to handle sparse tensors
# directly.
for key in sorted(parsed_features.keys()):
tensor = parsed_features[key]
if isinstance(tensor, sparse_tensor.SparseTensor):
tensors_mapping.append((key, True))
tensors_to_enqueue.extend([tensor.indices, tensor.values, tensor.shape])
else:
tensors_mapping.append((key, False))
tensors_to_enqueue.append(tensor)
if keys is not None:
tensors_to_enqueue.append(keys)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(feature_queue_capacity, queue_dtypes)
# Add a summary op to debug if our feature queue is full or not.
summary.scalar('queue/parsed_features/%s/fraction_of_%d_full' %
(input_queue.name, feature_queue_capacity),
math_ops.cast(input_queue.size(), dtypes.float32) *
(1. / feature_queue_capacity))
# Add multiple queue runners so that the queue is always full. Adding more
# than two queue-runners may hog the cpu on the worker to fill up the queue.
#
# Note: this can result in a large last batch being lost, as the multiple
# queue runner threads do not coordinate with each other. Please use
# `num_enqueue_threads` instead.
if num_queue_runners is not None:
for _ in range(num_queue_runners):
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
input_queue, [input_queue.enqueue(tensors_to_enqueue)],
queue_closed_exception_types=(errors.OutOfRangeError,
errors.CancelledError)))
# Use a single QueueRunner with multiple threads to enqueue so the queue is
# always full. The threads are coordinated so the last batch will not be
# lost.
elif num_enqueue_threads is not None:
enqueue_ops = [input_queue.enqueue(tensors_to_enqueue)
for _ in range(num_enqueue_threads)]
queue_runner.add_queue_runner(queue_runner.QueueRunner(
input_queue, enqueue_ops,
queue_closed_exception_types=(errors.OutOfRangeError,
errors.CancelledError)))
else:
raise AssertionError(
'Either `num_queue_runners` or `num_enqueue_threads` should have '
'been set.')
dequeued_tensors = input_queue.dequeue()
# Reset shapes on dequeued tensors.
for i in range(len(tensors_to_enqueue)):
dequeued_tensors[i].set_shape(tensors_to_enqueue[i].get_shape())
# Recreate feature mapping according to the original dictionary.
dequeued_parsed_features = {}
index = 0
for key, is_sparse_tensor in tensors_mapping:
if is_sparse_tensor:
# Three tensors are (indices, values, shape).
dequeued_parsed_features[key] = sparse_tensor.SparseTensor(
dequeued_tensors[index], dequeued_tensors[index + 1],
dequeued_tensors[index + 2])
index += 3
else:
dequeued_parsed_features[key] = dequeued_tensors[index]
index += 1
dequeued_keys = None
if keys is not None:
dequeued_keys = dequeued_tensors[-1]
return dequeued_keys, dequeued_parsed_features
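# Illustrative usage sketch (not part of the original module; the feature
# spec and tensor names below are hypothetical). `queue_parsed_features`
# decouples parsing from the rest of the training graph:
#
#   feature_map = parsing_ops.parse_example(
#       serialized_examples,
#       {'x': parsing_ops.FixedLenFeature([1], dtypes.float32)})
#   _, features = queue_parsed_features(feature_map, num_enqueue_threads=2)
#   # features['x'] now dequeues already-parsed tensors.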
def read_batch_features(file_pattern,
batch_size,
features,
reader,
randomize_input=True,
num_epochs=None,
queue_capacity=10000,
feature_queue_capacity=100,
reader_num_threads=1,
parse_fn=None,
name=None):
"""Adds operations to read, queue, batch and parse `Example` protos.
Given file pattern (or list of files), will setup a queue for file names,
read `Example` proto using provided `reader`, use batch queue to create
batches of examples of size `batch_size` and parse example given `features`
specification.
All queue runners are added to the queue runners collection, and may be
started via `start_queue_runners`.
All ops are added to the default graph.
Args:
file_pattern: List of files or pattern of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int or scalar `Tensor` specifying the batch size to use.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If None, cycles through the dataset forever. NOTE - If specified,
creates a variable that must be initialized, so call
tf.local_variables_initializer() as shown in the tests.
queue_capacity: Capacity for input queue.
feature_queue_capacity: Capacity of the parsed features queue. Set this
value to a small number, for example 5 if the parsed features are large.
reader_num_threads: The number of threads to read examples.
parse_fn: Parsing function, takes `Example` Tensor and returns parsed
representation. If `None`, no parsing is done.
name: Name of resulting op.
Returns:
A dict of `Tensor` or `SparseTensor` objects for each in `features`.
Raises:
ValueError: for invalid inputs.
"""
_, features = read_keyed_batch_features(
file_pattern, batch_size, features, reader,
randomize_input=randomize_input, num_epochs=num_epochs,
queue_capacity=queue_capacity,
feature_queue_capacity=feature_queue_capacity,
reader_num_threads=reader_num_threads,
parse_fn=parse_fn, name=name)
return features
def read_batch_record_features(file_pattern, batch_size, features,
randomize_input=True, num_epochs=None,
queue_capacity=10000, reader_num_threads=1,
name='dequeue_record_examples'):
"""Reads TFRecord, queues, batches and parses `Example` proto.
See more detailed description in `read_examples`.
Args:
file_pattern: List of files or pattern of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int or scalar `Tensor` specifying the batch size to use.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values.
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If None, cycles through the dataset forever. NOTE - If specified,
creates a variable that must be initialized, so call
tf.local_variables_initializer() as shown in the tests.
queue_capacity: Capacity for input queue.
reader_num_threads: The number of threads to read examples.
name: Name of resulting op.
Returns:
A dict of `Tensor` or `SparseTensor` objects for each in `features`.
Raises:
ValueError: for invalid inputs.
"""
return read_batch_features(
file_pattern=file_pattern,
batch_size=batch_size,
features=features,
reader=io_ops.TFRecordReader,
randomize_input=randomize_input,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
reader_num_threads=reader_num_threads,
name=name)
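# Illustrative usage sketch (not part of the original module; the file
# pattern and feature spec are hypothetical). Because `num_epochs` is set,
# the local variable it creates must be initialized before the queue
# runners are started, as the docstrings above note:
#
#   import tensorflow as tf
#   features = read_batch_record_features(
#       file_pattern='/tmp/data-*.tfrecord',
#       batch_size=32,
#       features={'label': tf.FixedLenFeature([1], tf.int64)},
#       num_epochs=5)
#   with tf.Session() as sess:
#       sess.run(tf.local_variables_initializer())
#       threads = tf.train.start_queue_runners(sess=sess)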
|
nanditav/15712-TensorFlow
|
tensorflow/contrib/learn/python/learn/learn_io/graph_io.py
|
Python
|
apache-2.0
| 35,129
|
[
"VisIt"
] |
544c6af81361c645957177022cd89a5ebb390b42d5579ddc1e2af976954171d8
|
# -*- coding: utf-8 -*-
'''
ATTENTION:
If this is a new installation of pygenewiki, edit this file (settings.example.py)
with your username and password for wikipedia and any other custom settings
and save it as settings.py in this directory. Modules rely on settings.py
being present to operate.
'''
'''
Template Settings:
The page_prefix is the "namespace" of the infoboxes. All the respective pages are
in <base_site>/wiki/<page_prefix><entrez id>.
The template_name is the name of the template that the parser attempts to find
when parsing raw wikitext. It immediately follows the opening brackets.
'''
base_site = "en.wikipedia.org"
page_prefix = "Template:PBB/"
template_name = "GNF_Protein_box"
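'''
Illustrative example (not part of the original file): with the settings
above, the infobox for a hypothetical entrez id 1017 would live at
https://en.wikipedia.org/wiki/Template:PBB/1017
'''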
'''
Wikipedia User Settings:
This user should have bot and editing privileges.
'''
wiki_user = "{wpuser}"
wiki_pass = "{wppass}"
'''
Wikimedia Commons Settings:
These should be filled in if uploading images of proteins.
To use the same name and password as Wikipedia, leave these
blank.
'''
commons_user = "{cuser}"
commons_pass = "{cpass}"
'''
Pymol Configuration:
Directs to the installation path of pymol (www.pymol.org) molecular rendering
system. Value should be an absolute path to the pymol binary.
'''
pymol = "{pymol}"
'''
MyGene.Info Configuration
These are fairly static and should not need to be changed.
'''
mygene_base = "http://mygene.info/gene/"
mygene_meta = "http://mygene.info/metadata"
uniprot_url = "http://www.uniprot.org/uniprot/"
'''
ProteinBoxBot Configuration:
pbbhome is the root for all of pbb's working files, including a log and a
store of failed infoboxes, as well as any rendered PDB images.
'''
pbbhome = "{pbbhome}"
|
SuLab/genewiki
|
old-assets/genewiki/.settings.unconf.py
|
Python
|
mit
| 1,675
|
[
"PyMOL"
] |
797c3dee7a1f20b7d796f885a68a1f57d2f8dc2f155c977057bdf0af6bb2cc7f
|
"""Module that allows for imposing a kinetically connected network
structure of weighted ensemble simulation data.
"""
from collections import defaultdict
from copy import deepcopy
import gc
import numpy as np
import networkx as nx
from wepy.analysis.transitions import (
transition_counts,
counts_d_to_matrix,
)
try:
import pandas as pd
except ModuleNotFoundError:
print("Pandas is not installe, that functionality won't work")
class MacroStateNetworkError(Exception):
"""Errors specific to MacroStateNetwork requirements."""
pass
class BaseMacroStateNetwork():
"""A base class for the MacroStateNetwork which doesn't contain a
WepyHDF5 object. Useful for serialization of the object and can
then be reattached later to a WepyHDF5. For this functionality see
the 'MacroStateNetwork' class.
BaseMacroStateNetwork can also be thought of as just a way of
mapping macrostate properties to the underlying microstate data.
The network itself is a networkx directed graph.
Upon construction the nodes will be a value called the 'node_id'
which is the label/assignment for the node. This either comes from
an explicit labelling (the 'assignments' argument) or from the
labels/assignments from the contig tree (from the 'assg_field_key'
argument).
Nodes have the following attributes after construction:
- node_id :: Same as the actual node value
- node_idx :: An extra index that is used for 'internal' ordering
of the nodes in a consistent manner. Used for
example in any method which constructs matrices from
edges and ensures they are all the same.
- assignments :: An index trace over the contig_tree dataset used
to construct the network. This is how the
individual microstates are indexed for each node.
- num_samples :: A total of the number of microstates that a node
has. Is the length of the 'assignments' attribute.
Additionally, there are auxiliary node attributes that may be
added by various methods. All of these are prefixed with a single
underscore '_', and user-set keys should avoid this prefix.
These auxiliary attributes also make use of namespacing, where
namespaces are similar to file paths and are separated by '/'
characters.
Additionally the auxiliary groups are typically managed such that
they remain consistent across all of the nodes and have metadata
queryable from the BaseMacroStateNetwork object. In contrast user
defined node attributes are not restricted to this structure.
The auxiliary groups are:
- '_groups' :: used to mark nodes as belonging to a higher level group.
- '_observables' :: used for scalar values that are calculated
from the underlying microstate structures. As
opposed to more operational values describing
the network itself. By virtue of being scalar
these are also compatible with output to
tabular formats.
Edge values are simply 2-tuples of node_ids where the first value
is the source and the second value is the target. Edges have the
following attributes following initialization:
- 'weighted_counts' :: The weighted sum of all the transitions
for an edge. This is a floating point
number.
- 'unweighted_counts' :: The unweighted sum of all the
transitions for an edge, this is a
normal count and is a whole integer.
- 'all_transitions' :: This is an array of floats of the weight
for every individual transition for an
edge. This is useful for doing more
advanced statistics for a given edge.
A network object can be used as a stateful container for
calculated values over the nodes and edges and has methods to
support this. However, there is no standard way to serialize this
data beyond the generic python techniques like pickle.
"""
ASSIGNMENTS = 'assignments'
"""Key for the microstates that are assigned to a macrostate."""
def __init__(self,
contig_tree,
assg_field_key=None,
assignments=None,
transition_lag_time=2):
"""Create a network of macrostates from the simulation microstates
using a field in the trajectory data or precomputed assignments.
Either 'assg_field_key' or 'assignments' must be given, but not
both.
The 'transition_lag_time' defaults to 2, which is the natural connection
between consecutive microstates. The lag time can be increased to vary
the kinetic accuracy of transition probabilities generated through
Markov State Modelling.
The 'transition_lag_time' must be given as an integer greater
than 1.
Arguments
---------
contig_tree : ContigTree object
assg_field_key : str, conditionally optional on 'assignments'
The field in the WepyHDF5 dataset you want to generate macrostates for.
assignments : list of list of array_like of dim (n_traj_frames, observable_shape[0], ...),
conditionally optional on 'assg_field_key'
List of assignments for all frames in each run, where each
element of the outer list is for a run, the elements of
these lists are lists for each trajectory which are
arraylikes of shape (n_traj, observable_shape[0], ...).
"""
self._graph = nx.DiGraph()
assert not (assg_field_key is None and assignments is None), \
"either assg_field_key or assignments must be given"
assert assg_field_key is None or assignments is None, \
"only one of assg_field_key or assignments may be given"
self._base_contig_tree = contig_tree.base_contigtree
self._assg_field_key = assg_field_key
# initialize the groups dictionary
self._node_groups = {}
# initialize the list of the observables
self._observables = []
# initialize the list of available layouts
self._layouts = []
# initialize the lookup of the node_idxs from node_ids
self._node_idxs = {}
# initialize the reverse node lookups which is memoized if
# needed
self._node_idx_to_id_dict = None
# validate lag time input
if (
(transition_lag_time is not None) and
(transition_lag_time < 2)
):
raise MacroStateNetworkError(
"transition_lag_time must be an integer value >= 2"
)
self._transition_lag_time = transition_lag_time
## Temporary variables for initialization only
# the temporary assignments dictionary
self._node_assignments = None
# and temporary raw assignments
self._assignments = None
## Code for creating nodes and edges
## Nodes
with contig_tree:
# map the keys to their lists of assignments, depending on
# whether or not we are using a field from the HDF5 traj or
# assignments provided separately
if assg_field_key is not None:
assert type(assg_field_key) == str, "assignment key must be a string"
self._key_init(contig_tree)
else:
self._assignments_init(assignments)
# once we have made the dictionary add the nodes to the network
# and reassign the assignments to the nodes
for node_idx, assg_item in enumerate(self._node_assignments.items()):
assg_key, assigs = assg_item
# count the number of samples (assigs) and use this as a field as well
num_samples = len(assigs)
# save the nodes with attributes; we also save the node_id
# as the assg_key because certain formats only type the
# attributes, and we want to avoid data loss through those
# formats (though they should be avoided as durable stores)
self._graph.add_node(assg_key,
node_id=assg_key,
node_idx=node_idx,
assignments=assigs,
num_samples=num_samples)
self._node_idxs[assg_key] = node_idx
## Edges
all_transitions_d, \
weighted_counts_d, \
unweighted_counts_d = self._init_transition_counts(
contig_tree,
transition_lag_time,
)
# after calculating the transition counts set these as edge
# values make the edges with these attributes
for edge, all_trans in all_transitions_d.items():
weighted_counts = weighted_counts_d[edge]
unweighted_counts = unweighted_counts_d[edge]
# add the edge with all of the values
self._graph.add_edge(
*edge,
weighted_counts=weighted_counts,
unweighted_counts=unweighted_counts,
all_transitions=all_trans,
)
## Cleanup
# then get rid of the assignments dictionary, this information
# can be accessed from the network
del self._node_assignments
del self._assignments
def _key_init(self, contig_tree):
"""Initialize the assignments structures given the field key to use.
Parameters
----------
contig_tree : ContigTree object
"""
wepy_h5 = contig_tree.wepy_h5
# blank assignments
assignments = [[[] for traj_idx in range(wepy_h5.num_run_trajs(run_idx))]
for run_idx in wepy_h5.run_idxs]
test_field = wepy_h5.get_traj_field(
wepy_h5.run_idxs[0],
wepy_h5.run_traj_idxs(0)[0],
self.assg_field_key,
)
# WARN: assg_field shapes can come wrapped with an extra
# dimension. We handle both cases. Test the first traj and see
# how it is
unwrap = False
if len(test_field.shape) == 2 and test_field.shape[1] == 1:
# then we raise flag to unwrap them
unwrap = True
elif len(test_field.shape) == 1:
# then it is unwrapped and we don't need to do anything,
# just assert the flag to not unwrap
unwrap = False
else:
raise ValueError(f"Wrong shape for an assignment type observable: {test_field.shape}")
# the raw assignments
curr_run_idx = -1
for idx_tup, fields_d in wepy_h5.iter_trajs_fields(
[self.assg_field_key], idxs=True):
run_idx = idx_tup[0]
traj_idx = idx_tup[1]
assg_field = fields_d[self.assg_field_key]
# unwrap the assignment scalar values if they need it
if unwrap:
assg_field = np.ravel(assg_field)
assignments[run_idx][traj_idx].extend(assg_field)
# then just call the assignments constructor to do it the same
# way
self._assignments_init(assignments)
def _assignments_init(self, assignments):
"""Given the assignments structure sets up the other necessary
structures.
Parameters
----------
assignments : list of list of array_like of dim (n_traj_frames, observable_shape[0], ...),
conditionally optional on 'assg_field_key'
List of assignments for all frames in each run, where each
element of the outer list is for a run, the elements of
these lists are lists for each trajectory which are
arraylikes of shape (n_traj, observable_shape[0], ...).
"""
# set the type for the assignment field
self._assg_field_type = type(assignments[0])
# set the raw assignments to the temporary attribute
self._assignments = assignments
# this is the dictionary mapping node_id -> the (run_idx, traj_idx, cycle_idx) frames
self._node_assignments = defaultdict(list)
for run_idx, run in enumerate(assignments):
for traj_idx, traj in enumerate(run):
for frame_idx, assignment in enumerate(traj):
self._node_assignments[assignment].append( (run_idx, traj_idx, frame_idx) )
def _init_transition_counts(self,
contig_tree,
transition_lag_time,
):
"""Given the lag time get the transitions between microstates for the
network using the sliding windows algorithm.
This will create a directed edge between nodes that had at
least one transition, no matter the weight.
See the main class docstring for a description of the fields.
contig_tree should be unopened.
"""
# now count the transitions between the states and set those
# as the edges between nodes
# first get the sliding window transitions from the contig
# tree, once we set edges for a tree we don't really want to
# have multiple sets of transitions on the same network so we
# don't provide the method to add different assignments
# get the weights for the walkers so we can compute
# the weighted transition counts
with contig_tree:
weights = [[] for run_idx in contig_tree.wepy_h5.run_idxs]
for idx_tup, traj_fields_d in contig_tree.wepy_h5.iter_trajs_fields(
['weights'],
idxs=True):
run_idx, traj_idx = idx_tup
weights[run_idx].append(np.ravel(traj_fields_d['weights']))
# get the transitions as trace idxs
trace_transitions = []
for window in contig_tree.sliding_windows(transition_lag_time):
trace_transition = [window[0], window[-1]]
# convert the window trace on the contig to a trace
# over the runs
trace_transitions.append(trace_transition)
# ALERT: I'm not sure this is going to work out since this is
# potentially a lot of data and might make the object too
# large, lets just be aware and maybe we'll have to not do
# this if things are out of control.
## transition distributions
# get an array of all of the transition weights so we can do
# stats on them later.
all_transitions_d = defaultdict(list)
for trace_transition in trace_transitions:
# get the node ids of the edge using the assignments
start = trace_transition[0]
end = trace_transition[-1]
# get the assignments for the transition
start_assignment = self._assignments[start[0]][start[1]][start[2]]
end_assignment = self._assignments[end[0]][end[1]][end[2]]
edge_id = (start_assignment, end_assignment)
# get the weight of the walker that transitioned, this
# uses the trace idxs for the individual walkers
weight = weights[start[0]][start[1]][start[2]]
# append this transition weight to the list for it, but
# according to the node_ids, in edge_id
all_transitions_d[edge_id].append(weight)
# convert the lists in the transition dictionary to numpy arrays
all_transitions_d = {
edge : np.array(transitions_l)
for edge, transitions_l in all_transitions_d.items()
}
gc.collect()
## sum of weighted counts
# then get the weighted counts for those edges
weighted_counts_d = transition_counts(
self._assignments,
trace_transitions,
weights=weights,
)
## Sum of unweighted counts
# also get unweighted counts
unweighted_counts_d = transition_counts(
self._assignments,
trace_transitions,
weights=None,
)
return all_transitions_d, \
weighted_counts_d, \
unweighted_counts_d
# DEBUG: remove this, but account for the 'Weight' field when
# doing gexf stuff elsewhere
# # then we also want to get the transition probabilities so
# # we get the counts matrix and compute the probabilities
# # we first have to replace the keys of the counts of the
# # node_ids with the node_idxs
# node_id_to_idx_dict = self.node_id_to_idx_dict()
# self._countsmat = counts_d_to_matrix(
# {(node_id_to_idx_dict[edge[0]],
# node_id_to_idx_dict[edge[1]]) : counts
# for edge, counts in counts_d.items()})
# self._probmat = normalize_counts(self._countsmat)
# # then we add these attributes to the edges in the network
# node_idx_to_id_dict = self.node_id_to_idx_dict()
# for i_id, j_id in self._graph.edges:
# # i and j are the node idxs so we need to get the
# # actual node_ids of them
# i_idx = node_idx_to_id_dict[i_id]
# j_idx = node_idx_to_id_dict[j_id]
# # convert to a normal float and set it as an explicitly named attribute
# self._graph.edges[i_id, j_id]['transition_probability'] = \
# float(self._probmat[i_idx, j_idx])
# # we also set the general purpose default weight of
# # the edge to be this.
# self._graph.edges[i_id, j_id]['Weight'] = \
# float(self._probmat[i_idx, j_idx])
def node_id_to_idx(self, assg_key):
"""Convert a node_id (which is the assignment value) to a canonical index.
Parameters
----------
assg_key : node_id
Returns
-------
node_idx : int
"""
return self.node_id_to_idx_dict()[assg_key]
def node_idx_to_id(self, node_idx):
"""Convert a node index to its node id.
Parameters
----------
node_idx : int
Returns
-------
node_id : node_id
"""
return self.node_idx_to_id_dict()[node_idx]
def node_id_to_idx_dict(self):
"""Generate a full mapping of node_ids to node_idxs."""
return self._node_idxs
def node_idx_to_id_dict(self):
"""Generate a full mapping of node_idxs to node_ids."""
if self._node_idx_to_id_dict is None:
rev = {node_idx : node_id for node_id, node_idx in self._node_idxs.items()}
self._node_idx_to_id_dict = rev
else:
rev = self._node_idx_to_id_dict
# just reverse the dictionary and return
return rev
@property
def graph(self):
"""The networkx.DiGraph of the macrostate network."""
return self._graph
@property
def num_states(self):
"""The number of states in the network."""
return len(self.graph)
@property
def node_ids(self):
"""A list of the node_ids."""
return list(self.graph.nodes)
@property
def contig_tree(self):
"""The underlying ContigTree"""
return self._base_contig_tree
@property
def assg_field_key(self):
"""The string key of the field used to make macro states from the WepyHDF5 dataset.
Raises
------
MacroStateNetworkError
If this wasn't used to construct the MacroStateNetwork.
"""
if self._assg_field_key is None:
raise MacroStateNetworkError("Assignments were manually defined, no key.")
else:
return self._assg_field_key
### Node attributes & methods
def get_node_attributes(self, node_id):
"""Returns the node attributes of the macrostate.
Parameters
----------
node_id : node_id
Returns
-------
macrostate_attrs : dict
"""
return self.graph.nodes[node_id]
def get_node_attribute(self, node_id, attribute_key):
"""Return the value for a specific node and attribute.
Parameters
----------
node_id : node_id
attribute_key : str
Returns
-------
node_attribute
"""
return self.get_node_attributes(node_id)[attribute_key]
def get_nodes_attribute(self, attribute_key):
"""Get a dictionary mapping nodes to a specific attribute. """
nodes_attr = {}
for node_id in self.graph.nodes:
nodes_attr[node_id] = self.graph.nodes[node_id][attribute_key]
return nodes_attr
def node_assignments(self, node_id):
"""Return the microstates assigned to this macrostate as a run trace.
Parameters
----------
node_id : node_id
Returns
-------
node_assignments : list of tuples of ints (run_idx, traj_idx, cycle_idx)
Run trace of the nodes assigned to this macrostate.
"""
return self.get_node_attribute(node_id, self.ASSIGNMENTS)
def set_nodes_attribute(self, key, values_dict):
"""Set node attributes for the key and values for each node.
Parameters
----------
key : str
values_dict : dict of node_id: values
"""
for node_id, value in values_dict.items():
self.graph.nodes[node_id][key] = value
@property
def node_groups(self):
return self._node_groups
def set_node_group(self, group_name, node_ids):
# push these values to the nodes themselves, overwriting if
# necessary
self._set_group_nodes_attribute(group_name, node_ids)
# then update the group mapping with this
self._node_groups[group_name] = node_ids
def _set_group_nodes_attribute(self, group_name, group_node_ids):
# the key for the attribute of the group goes in a little
# namespace prefixed with _group
group_key = '_groups/{}'.format(group_name)
# make the mapping
values_map = {node_id : node_id in group_node_ids
for node_id in self.graph.nodes}
# then set them
self.set_nodes_attribute(group_key, values_map)
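# Illustrative sketch (not part of the original source; the group name and
# node ids are hypothetical). Grouping nodes sets a boolean attribute in
# the '_groups' namespace on every node:
#
#   msn.set_node_group('bound', [0, 3])
#   msn.get_node_attribute(0, '_groups/bound')   # -> True
#   msn.get_node_attribute(1, '_groups/bound')   # -> False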
@property
def observables(self):
"""The list of available observables."""
return self._observables
def node_observables(self, node_id):
"""Dictionary of observables for each node_id."""
node_obs = {}
for obs_name in self.observables:
obs_key = '_observables/{}'.format(obs_name)
node_obs[obs_name] = self.get_node_attribute(node_id, obs_key)
return node_obs
def set_nodes_observable(self, observable_name, node_values):
# the key for the attribute of the observable goes in a little
# namespace prefixed with _observable
observable_key = '_observables/{}'.format(observable_name)
self.set_nodes_attribute(observable_key, node_values)
# then add to the list of available observables
self._observables.append(observable_name)
### Edge methods
def get_edge_attributes(self, edge_id):
"""Returns the edge attributes of the macrostate.
Parameters
----------
edge_id : edge_id
Returns
-------
edge_attrs : dict
"""
return self.graph.edges[edge_id]
def get_edge_attribute(self, edge_id, attribute_key):
"""Return the value for a specific edge and attribute.
Parameters
----------
edge_id : edge_id
attribute_key : str
Returns
-------
edge_attribute
"""
return self.get_edge_attributes(edge_id)[attribute_key]
def get_edges_attribute(self, attribute_key):
"""Get a dictionary mapping edges to a specific attribute. """
edges_attr = {}
for edge_id in self.graph.edges:
edges_attr[edge_id] = self.graph.edges[edge_id][attribute_key]
return edges_attr
### Layout stuff
@property
def layouts(self):
return self._layouts
def node_layouts(self, node_id):
"""Dictionary of layouts for each node_id."""
node_layouts = {}
for layout_name in self.layouts:
layout_key = '_layouts/{}'.format(layout_name)
node_layouts[layout_name] = self.get_node_attribute(node_id, layout_key)
return node_layouts
def set_nodes_layout(self, layout_name, node_values):
# the key for the attribute of the layout goes in a little
# namespace prefixed with _layouts
layout_key = '_layouts/{}'.format(layout_name)
self.set_nodes_attribute(layout_key, node_values)
# then add to the list of available layouts
if layout_name not in self._layouts:
self._layouts.append(layout_name)
def write_gexf(self,
filepath,
exclude_node_fields=None,
exclude_edge_fields=None,
layout=None,
):
"""Writes a graph file in the gexf format of the network.
Parameters
----------
filepath : str
"""
layout_key = None
if layout is not None:
layout_key = '_layouts/{}'.format(layout)
if layout not in self.layouts:
raise ValueError("Layout not found, use None for no layout")
### filter the node and edge attributes
# to do this we need to get rid of the assignments in the
# nodes though since this is not really supported or good to
# store in a gexf file which is more for visualization as an
# XML format, so we copy and modify then write the copy
gexf_graph = deepcopy(self._graph)
## Nodes
if exclude_node_fields is None:
exclude_node_fields = [self.ASSIGNMENTS]
else:
exclude_node_fields.append(self.ASSIGNMENTS)
exclude_node_fields = list(set(exclude_node_fields))
# exclude the layouts, we will set the viz manually for the layout
exclude_node_fields.extend(['_layouts/{}'.format(layout_name)
for layout_name in self.layouts])
for node in gexf_graph:
# remove requested fields
for field in exclude_node_fields:
del gexf_graph.nodes[node][field]
# also remove the fields which are not valid gexf types
fields = list(gexf_graph.nodes[node].keys())
for field in fields:
if (type(gexf_graph.nodes[node][field]) not in
nx.readwrite.gexf.GEXF.xml_type):
del gexf_graph.nodes[node][field]
if layout_key is not None:
# set the layout as viz attributes to this
gexf_graph.nodes[node]['viz'] = self._graph.nodes[node][layout_key]
## Edges
if exclude_edge_fields is None:
exclude_edge_fields = ['all_transitions']
else:
exclude_edge_fields.append('all_transitions')
exclude_edge_fields = list(set(exclude_edge_fields))
# TODO: viz and layouts not supported for edges currently
#
# exclude the layouts, we will set the viz manually for the layout
# exclude_edge_fields.extend(['_layouts/{}'.format(layout_name)
# for layout_name in self.layouts])
for edge in gexf_graph.edges:
# remove requested fields
for field in exclude_edge_fields:
del gexf_graph.edges[edge][field]
# also remove the fields which are not valid gexf types
fields = list(gexf_graph.edges[edge].keys())
for field in fields:
if (type(gexf_graph.edges[edge][field]) not in
nx.readwrite.gexf.GEXF.xml_type):
del gexf_graph.edges[edge][field]
# TODO,SNIPPET: we don't support layouts for the edges,
# but maybe we could
# if layout_key is not None:
# # set the layout as viz attributes to this
# gexf_graph.nodes[node]['viz'] = self._graph.nodes[node][layout_key]
# then write this filtered gexf to file
nx.write_gexf(gexf_graph, filepath)
def nodes_to_records(self,
extra_attributes=('_observables/total_weight',),
):
if extra_attributes is None:
extra_attributes = []
# keys which always go into the records
keys = [
'num_samples',
'node_idx',
]
# add all the groups to the keys
keys.extend(['_groups/{}'.format(key) for key in self.node_groups.keys()])
# add the observables
keys.extend(['_observables/{}'.format(obs) for obs in self.observables])
recs = []
for node_id in self.graph.nodes:
rec = {'node_id' : node_id}
# the keys which are always there
for key in keys:
rec[key] = self.get_node_attribute(node_id, key)
# the user defined ones
for extra_key in extra_attributes:
rec[extra_key] = self.get_node_attribute(node_id, extra_key)
recs.append(rec)
return recs
def nodes_to_dataframe(self,
extra_attributes=('_observables/total_weight',),
):
"""Make a dataframe of the nodes and their attributes.
Not all attributes will be added as they are not relevant to a
table style representation anyhow.
The columns will be:
- node_id
- node_idx
- num_samples
- groups (as booleans) which is anything in the '_groups' namespace
- observables : anything in the '_observables' namespace and
will assume to be scalars
And anything in the 'extra_attributes' argument.
"""
# TODO: set the column order
# col_order = []
return pd.DataFrame(self.nodes_to_records(
extra_attributes=extra_attributes
))
def edges_to_records(self,
extra_attributes=None,
):
"""Make a dataframe of the nodes and their attributes.
Not all attributes will be added as they are not relevant to a
table style representation anyhow.
The columns will be:
- edge_id
- source
- target
- weighted_counts
- unweighted_counts
"""
if extra_attributes is None:
extra_attributes = []
keys = [
'weighted_counts',
'unweighted_counts',
]
recs = []
for edge_id in self.graph.edges:
rec = {
'edge_id' : edge_id,
'source' : edge_id[0],
'target' : edge_id[1],
}
for key in keys:
rec[key] = self.graph.edges[edge_id][key]
# the user defined ones
for extra_key in extra_attributes:
rec[extra_key] = self.get_edge_attribute(edge_id, extra_key)
recs.append(rec)
return recs
def edges_to_dataframe(self,
extra_attributes=None,
):
"""Make a dataframe of the nodes and their attributes.
Not all attributes will be added as they are not relevant to a
table style representation anyhow.
The columns will be:
- edge_id
- source
- target
- weighted_counts
- unweighted_counts
"""
return pd.DataFrame(self.edges_to_records(
extra_attributes=extra_attributes
))
def node_map(self, func, map_func=map):
"""Map a function over the nodes.
The function should take a single argument: a dictionary of the node
attributes, which also contains a 'node_id' key. This will not give
access to the underlying trajectory data in the HDF5; to do this use
the 'node_fields_map' function.
Extra args not supported use 'functools.partial' to make
functions with arguments for all data.
Parameters
----------
func : callable
The function to map over the nodes.
map_func : callable
The mapping function, implementing the `map` interface
Returns
-------
node_values : dict of node_id : values
The mapping of node_ids to the values computed by the mapped func.
"""
# wrap the function so that we can pass through the node_id
def func_wrapper(args):
node_id, node_attrs = args
return node_id, func(node_attrs)
# zip the node_ids with the node attributes as an iterator
node_attr_it = ((node_id,
{**self.get_node_attributes(node_id), 'node_id' : node_id})
for node_id in self.graph.nodes
)
return {node_id : value for node_id, value
in map_func(func_wrapper, node_attr_it)}
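# Illustrative sketch (not part of the original source; 'msn' is a
# hypothetical network instance). `node_map` passes each node's attribute
# dictionary to the function:
#
#   n_samples = msn.node_map(lambda attrs: attrs['num_samples'])
#   msn.set_nodes_observable('n_samples', n_samples)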
def edge_attribute_to_matrix(self,
attribute_key,
fill_value=np.nan,
):
"""Convert scalar edge attributes to an assymetric matrix.
This will always return matrices of size (num_nodes,
num_nodes).
Additionally, matrices for the same network will always have
the same indexing, which is according to the 'node_idx'
attribute of each node.
For example if you have a matrix like:
>>> msn = MacroStateNetwork(...)
>>> mat = msn.edge_attribute_to_matrix('unweighted_counts')
Then, for example, the node with node_id of '10' having a
'node_idx' of 0 will always be the first element for each
dimension. Using this example the self edge '10'->'10' can be
accessed from the matrix like:
>>> mat[0,0]
For another node ('node_id' '25') having 'node_idx' 4, we can
get the edge from '10'->'25' like:
>>> mat[0,4]
This is because 'node_id' does not necessarily have to be an
integer, and even if they are integers they don't necessarily
have to be a contiguous range from 0 to N.
To get the 'node_id' for a 'node_idx' use the method
'node_idx_to_id'.
>>> msn.node_idx_to_id(0)
10
Parameters
----------
attribute_key : str
The key of the edge attribute the matrix should be made of.
fill_value : Any
The value to put in the array for non-existent edges. Must
be a numpy dtype compatible with the dtype of the
attribute value.
Returns
-------
edge_matrix : numpy.ndarray
Asymmetric matrix of dim (n_macrostates,
n_macrostates). The 0-th axis corresponds to the 'source'
node and the 1-st axis corresponds to the 'target' nodes,
i.e. the dimensions mean: (source, target).
"""
# get the datatype of the attribute and validate it will fit in an array
test_edge_id = list(self.graph.edges.keys())[0]
test_attr_value = self.get_edge_attribute(
test_edge_id,
attribute_key,
)
# "duck type" check: construct a numpy dtype from the attribute value so
# we can make the matrix; if the construction fails it was no good
dt = np.dtype(type(test_attr_value))
# allocate the matrix and initialize to zero for each element
mat = np.full(
(self.num_states,
self.num_states),
fill_value,
dtype=dt,
)
# get a dictionary of (node_id, node_id) -> value
edges_attr_d = self.get_edges_attribute(attribute_key)
# make a dictionary of the edge (source, target) mapped to the
# scalar values
# the mapping id->idx
node_id_to_idx_dict = self.node_id_to_idx_dict()
# convert node_ids to node_idxs
edges_idx_attr_d = {}
for edge, value in edges_attr_d.items():
idx_edge = (node_id_to_idx_dict[edge[0]],
node_id_to_idx_dict[edge[1]])
edges_idx_attr_d[idx_edge] = value
# assign to the array
for trans, value in edges_idx_attr_d.items():
source = trans[0]
target = trans[1]
mat[source, target] = value
return mat
class MacroStateNetwork():
"""Provides an abstraction over weighted ensemble data in the form of
a kinetically connected network.
The MacroStateNetwork refers to any grouping of the so called
"micro" states that were observed during simulation,
i.e. trajectory frames, and not necessarily in the usual sense
used in statistical mechanics. Although it is the perfect vehicle
for working with such macrostates.
Because of the way walker trajectories are generated in weighted
ensemble, there is a natural way to generate the edges between the
macrostate nodes in
the network. These edges are determined automatically and a lag
time can also be specified, which is useful in the creation of
Markov State Models.
This class provides transparent access to an underlying 'WepyHDF5'
dataset. If you wish to have a simple serializable network that
does not reference it, see the 'BaseMacroStateNetwork' class, which
you can construct standalone or access the instance attached as
the 'base_network' attribute of an object of this class.
For a description of all of the default node and edge attributes
which are set after construction see the docstring for the
'BaseMacroStateNetwork' class docstring.
Warnings
--------
This class is not serializable as it references a 'WepyHDF5'
object. Either construct a 'BaseMacroStateNetwork' or use the
attached instance in the 'base_network' attribute.
"""
def __init__(self,
contig_tree,
base_network=None,
assg_field_key=None,
assignments=None,
transition_lag_time=2,
):
"""For documentation of the following arguments see the constructor
docstring of the 'BaseMacroStateNetwork' class:
- contig_tree
- assg_field_key
- assignments
- transition_lag_time
The other arguments are documented here, primarily the
optional 'base_network' argument. This is a
'BaseMacroStateNetwork' instance, which allows you to
associate it with a 'WepyHDF5' dataset for access to the
microstate data etc.
Parameters
----------
base_network : BaseMacroStateNetwork object
An already constructed network, which will avoid
recomputing all in-memory network values again for this
object.
"""
self.closed = True
self._contig_tree = contig_tree
self._wepy_h5 = self._contig_tree.wepy_h5
# if we pass a base network use that one instead of building
# one manually
if base_network is not None:
assert isinstance(base_network, BaseMacroStateNetwork)
self._set_base_network_to_self(base_network)
else:
new_network = BaseMacroStateNetwork(contig_tree,
assg_field_key=assg_field_key,
assignments=assignments,
transition_lag_time=transition_lag_time)
self._set_base_network_to_self(new_network)
def _set_base_network_to_self(self, base_network):
self._base_network = base_network
# then make references to this for the attributes we need
# attributes
self._graph = self._base_network._graph
self._assg_field_key = self._base_network._assg_field_key
self._node_idxs = self._base_network._node_idxs
self._node_idx_to_id_dict = self._base_network._node_idx_to_id_dict
self._transition_lag_time = self._base_network._transition_lag_time
# DEBUG: remove once tested
# self._probmat = self._base_network._probmat
# self._countsmat = self._base_network._countsmat
# functions
self.node_id_to_idx = self._base_network.node_id_to_idx
self.node_idx_to_id = self._base_network.node_idx_to_id
self.node_id_to_idx_dict = self._base_network.node_id_to_idx_dict
self.node_idx_to_id_dict = self._base_network.node_idx_to_id_dict
self.get_node_attributes = self._base_network.get_node_attributes
self.get_node_attribute = self._base_network.get_node_attribute
self.get_nodes_attribute = self._base_network.get_nodes_attribute
self.node_assignments = self._base_network.node_assignments
self.set_nodes_attribute = self._base_network.set_nodes_attribute
self.get_edge_attributes = self._base_network.get_edge_attributes
self.get_edge_attribute = self._base_network.get_edge_attribute
self.get_edges_attribute = self._base_network.get_edges_attribute
self.node_groups = self._base_network.node_groups
self.set_node_group = self._base_network.set_node_group
self._set_group_nodes_attribute = self._base_network._set_group_nodes_attribute
self.observables = self._base_network.observables
self.node_observables = self._base_network.node_observables
self.set_nodes_observable = self._base_network.set_nodes_observable
self.nodes_to_records = self._base_network.nodes_to_records
self.nodes_to_dataframe = self._base_network.nodes_to_dataframe
self.edges_to_records = self._base_network.edges_to_records
self.edges_to_dataframe = self._base_network.edges_to_dataframe
self.node_map = self._base_network.node_map
self.edge_attribute_to_matrix = self._base_network.edge_attribute_to_matrix
self.write_gexf = self._base_network.write_gexf
def open(self, mode=None):
if self.closed:
self.wepy_h5.open(mode=mode)
self.closed = False
else:
raise IOError("This file is already open")
def close(self):
self.wepy_h5.close()
self.closed = True
def __enter__(self):
self.wepy_h5.__enter__()
self.closed = False
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.wepy_h5.__exit__(exc_type, exc_value, exc_tb)
self.close()
# from the Base class
@property
def graph(self):
"""The networkx.DiGraph of the macrostate network."""
return self._graph
@property
def num_states(self):
"""The number of states in the network."""
return len(self.graph)
@property
def node_ids(self):
"""A list of the node_ids."""
return list(self.graph.nodes)
@property
def assg_field_key(self):
"""The string key of the field used to make macro states from the WepyHDF5 dataset.
Raises
------
MacroStateNetworkError
If this wasn't used to construct the MacroStateNetwork.
"""
if self._assg_field_key is None:
raise MacroStateNetworkError("Assignments were manually defined, no key.")
else:
return self._assg_field_key
# @property
# def countsmat(self):
# """Return the transition counts matrix of the network.
# Raises
# ------
# MacroStateNetworkError
# If no lag time was given.
# """
# if self._countsmat is None:
# raise MacroStateNetworkError("transition counts matrix not calculated")
# else:
# return self._countsmat
# @property
# def probmat(self):
# """Return the transition probability matrix of the network.
# Raises
# ------
# MacroStateNetworkError
# If no lag time was given.
# """
# if self._probmat is None:
# raise MacroStateNetworkError("transition probability matrix not set")
# else:
# return self._probmat
# unique to the HDF5 holding one
@property
def base_network(self):
return self._base_network
@property
def wepy_h5(self):
"""The WepyHDF5 source object for which the contig tree is being constructed. """
return self._wepy_h5
def state_to_mdtraj(self, node_id, alt_rep=None):
"""Generate an mdtraj.Trajectory object from a macrostate.
By default uses the "main_rep" in the WepyHDF5
object. Alternative representations of the topology can be
specified.
Parameters
----------
node_id : node_id
alt_rep : str
(Default value = None)
Returns
-------
traj : mdtraj.Trajectory
"""
return self.wepy_h5.trace_to_mdtraj(self.base_network.node_assignments(node_id),
alt_rep=alt_rep)
def state_to_traj_fields(self, node_id, alt_rep=None):
return self.states_to_traj_fields([node_id], alt_rep=alt_rep)
def states_to_traj_fields(self, node_ids, alt_rep=None):
node_assignments = []
for node_id in node_ids:
node_assignments.extend(self.base_network.node_assignments(node_id))
# get the right fields
rep_path = self.wepy_h5._choose_rep_path(alt_rep)
fields = [rep_path, 'box_vectors']
return self.wepy_h5.get_trace_fields(node_assignments,
fields)
def get_node_fields(self, node_id, fields):
"""Return the trajectory fields for all the microstates in the
specified macrostate.
Parameters
----------
node_id : node_id
fields : list of str
Field name to retrieve.
Returns
-------
fields : dict of str: array_like
A dictionary mapping the names of the fields to an array of the field.
Like fields of a trace.
"""
node_trace = self.base_network.node_assignments(node_id)
# use the node_trace to get the weights from the HDF5
fields_d = self.wepy_h5.get_trace_fields(node_trace, fields)
return fields_d
def iter_nodes_fields(self, fields):
"""Iterate over all nodes and return the field values for all the
microstates for each.
Parameters
----------
fields : list of str
Returns
-------
nodes_fields : dict of node_id: (dict of field: array_like)
A dictionary with an entry for each node.
Each node has its own dictionary of node fields for each microstate.
"""
nodes_d = {}
for node_id in self.graph.nodes:
fields_d = self.get_node_fields(node_id, fields)
nodes_d[node_id] = fields_d
return nodes_d
def microstate_weights(self):
"""Returns the weights of each microstate on the basis of macrostates.
Returns
-------
microstate_weights : dict of node_id: ndarray
"""
node_weights = {}
for node_id in self.graph.nodes:
# get the trace of the frames in the node
node_trace = self.base_network.node_assignments(node_id)
# use the node_trace to get the weights from the HDF5
trace_weights = self.wepy_h5.get_trace_fields(node_trace, ['weights'])['weights']
node_weights[node_id] = trace_weights
return node_weights
def macrostate_weights(self):
"""Compute the total weight of each macrostate.
Returns
-------
macrostate_weights : dict of node_id: float
"""
macrostate_weights = {}
microstate_weights = self.microstate_weights()
for node_id, weights in microstate_weights.items():
macrostate_weights[node_id] = float(sum(weights)[0])
return macrostate_weights
def set_macrostate_weights(self):
"""Compute the macrostate weights and set them as node attributes
'total_weight'."""
self.base_network.set_nodes_observable(
'total_weight',
self.macrostate_weights(),
)
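# Illustrative sketch (not part of the original source; 'msn' is a
# hypothetical MacroStateNetwork). After computing the macrostate weights
# they live under '_observables/total_weight', which is the default extra
# attribute of 'nodes_to_dataframe':
#
#   with msn:
#       msn.set_macrostate_weights()
#   df = msn.nodes_to_dataframe()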
def node_fields_map(self, func, fields, map_func=map):
"""Map a function over the nodes and microstate fields.
The function should take as its arguments:
1. node_id
2. dictionary of all the node attributes
3. fields dictionary mapping traj field names. (The output of
`MacroStateNetwork.get_node_fields`)
This *will* give access to the underlying trajectory data in
the HDF5 which can be requested with the `fields`
argument. The behaviour is very similar to the
`WepyHDF5.compute_observable` function with the added input
data to the mapped function being all of the macrostate node
attributes.
Extra args not supported use 'functools.partial' to make
functions with arguments for all data.
Parameters
----------
func : callable
The function to map over the nodes.
fields : iterable of str
The microstate (trajectory) fields to provide to the mapped function.
map_func : callable
The mapping function, implementing the `map` interface
Returns
-------
node_values : dict of node_id : values
The mapping of node_ids to the values computed by the mapped func.
"""
# wrap the function so that we can pass through the node_id
def func_wrapper(args):
node_id, node_attrs, node_fields = args
# evaluate the wrapped function
result = func(
node_id,
node_attrs,
node_fields,
)
return node_id, result
# zip the node_ids with the node attributes as an iterator
node_attr_fields_it = (
(node_id,
{**self.get_node_attributes(node_id), 'node_id' : node_id},
self.get_node_fields(node_id, fields),
)
for node_id in self.graph.nodes)
# map the inputs to the wrapped function and return as a
# dictionary for the nodes
return {
node_id : value
for node_id, value
in map_func(func_wrapper, node_attr_fields_it)
}
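# Illustrative sketch (not part of the original source; 'msn' and the
# averaging function are hypothetical). `node_fields_map` hands each node's
# id, attributes, and requested trajectory fields to the function:
#
#   def mean_weight(node_id, node_attrs, node_fields):
#       return node_fields['weights'].mean()
#
#   with msn:
#       node_means = msn.node_fields_map(mean_weight, ['weights'])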
|
ADicksonLab/wepy
|
src/wepy/analysis/network.py
|
Python
|
mit
| 52,176
|
[
"MDTraj"
] |
a586331eb87194678a2714d35642c2d9a91474774983f7c97e0d86822491829e
|
"""
Module to create the mitochondrial network in graph representation, with nodes, edges and
their respective attributes
"""
import os
import os.path as op
import cPickle as pickle
import numpy as np
import networkx as nx
import vtk
from tvtk.api import tvtk
import wrappers as wr
# pylint: disable=C0103
def find_edges(X, Y, M):
"""
Find connections between the line-segment endpoints in lists X and Y by
matching their coordinates against the node coordinates in M
"""
n1 = X[:, np.newaxis]-M
n2 = Y[:, np.newaxis]-M
test1 = np.ravel([np.nonzero(np.all(n == 0, axis=1)) for n in n1])
test2 = np.ravel([np.nonzero(np.all(n == 0, axis=1)) for n in n2])
e_list = [tuple(sorted(i)) for i in zip(test1, test2)]
return e_list
def make_ebunch(e_list, vtkdat, pnts, X, Y):
"""
returns the edge attributes (edge length, etc.) from the edgelist e_list
returned by find_edges
"""
ebunch = {}
for i, el in enumerate(e_list):
p = vtkdat.GetCell(i).GetPointIds()
pids = [p.GetId(k) for k in range(p.GetNumberOfIds())]
# calc length of line by shifting line by one pixel and taking diff.
# of every pixel of the pair of lines
length = np.sum(np.linalg.norm(pnts[np.r_[0, pids[:-1]]][1:] -
pnts[pids][1:],
axis=1))
ebunch[i] = (el[0], el[1],
{'weight': length,
'cellID': i,
'midpoint': tuple((X[i]+Y[i])/2)})
return ebunch
def check_exist(bounds, l1, l2, ex_dic=None):
"""
Check for the existence of a connection between nodes.
`l1` and `l2` hold the first and last endpoint coordinates of every line,
and `bounds` is the index of the line being tested. `a`, `b`, `c` and `d`
are the four possible connections between the endpoints of line `bounds`
and those of the preceding lines; a connection exists where the
coordinates coincide.
Returns:
--------
Dictionary with an entry per type (e.g. 'a') that is `True` if a
connection exists for that type
"""
if ex_dic is None:
ex_dic = {}
a = l1[bounds] - l2[:bounds]
b = l1[bounds] - l1[:bounds]
c = l2[bounds] - l1[:bounds]
d = l2[bounds] - l2[:bounds]
dics = {'a': a, 'b': b, 'c': c, 'd': d}
for key in sorted(dics):
ex_dic[key] = np.any(np.all(dics[key] == 0, axis=1))
return ex_dic
def makegraph(vtkdata, graphname, scalartype='DY_raw'):
"""
Return networkX graph object from vtk skel
Parameters
----------
vtkdata: vtkPolyData
must use VTK, not tvtk (conversion is handled in function)
graphname : str
name for graph
scalartype : str
point data type to plot on skeleton, can be of type:
DY_minmax,
WidthEq,
DY_raw,
rRFP,
rGFP,
bkstRFP,
bkstGFP,
Returns
-------
nds, edgs : list
list of nodes and edges data
G : networkX
`NetworkX` graph object
"""
nnodes = 0
first = {}
last = {}
scalars = tvtk.to_tvtk(
vtkdata.GetPointData().GetScalars(scalartype)).to_array()
points = tvtk.to_tvtk(vtkdata.GetPoints()).to_array()
G = nx.MultiGraph(cell=graphname)
for i in range(vtkdata.GetNumberOfCells()):
temp = vtkdata.GetCell(i).GetPointIds()
mx = temp.GetNumberOfIds()
first[i] = temp.GetId(0) # first point ids dictionary
last[i] = temp.GetId(mx-1) # last point ids dictionary
fp = points[first.values()] # first point coordinates
lp = points[last.values()] # last point coordinates
# Create node list of graph
for i in range(vtkdata.GetNumberOfLines()-1, -1, -1):
exist = check_exist(i, fp, lp)
# test for a single pixel
exist['single_pt'] = np.all((fp[i] - lp[i]) == 0)
inten1 = scalars[first[i]]
inten2 = scalars[last[i]]
# only add a node if there is no connection to another endpoint
if not (exist['a'] or exist['b']):
G.add_node(nnodes, coord=tuple(fp[i]), inten=inten1)
nnodes += 1
if not (exist['c'] or exist['d'] or exist['single_pt']):
G.add_node(nnodes, coord=tuple(lp[i]), inten=inten2)
nnodes += 1
# Create edgelist of graph
Ncoords = np.array([nat['coord'] for n, nat in G.nodes(data=True)])
edges = find_edges(fp, lp, Ncoords)
ebunch = make_ebunch(edges, vtkdata, points, fp, lp)
G.add_edges_from(ebunch.values())
# Degree connectivity of nodes
for n in G.nodes():
G.node[n]['degree'] = G.degree(n)
return G.nodes(data=True), G.edges(data=True), G
# ===========================================================================
if __name__ == '__main__':
    # writes out a pickle file containing the graph list of every file
    # for each mediatype
datadir = op.join(os.getcwd(), 'mutants')
rawdir = op.join(os.getcwd(), 'mutants')
vtkF = wr.ddwalk(op.join(rawdir, 'normalizedVTK'),
'*skeleton.vtk', start=5, stop=-13)
for mediatype in sorted(vtkF.keys())[:]:
nlist = []
elist = []
glist = []
        print 'creating edge node lists for %s' % mediatype
        print 'number of files = %-3d\n' % len(vtkF[mediatype])
for files in sorted(vtkF[mediatype].keys())[:]:
reader = vtk.vtkPolyDataReader()
reader.SetFileName(vtkF[mediatype][files])
reader.Update()
data = reader.GetOutput()
node_data, edge_data, nxgrph = makegraph(data, files)
nlist.append(node_data)
elist.append(edge_data)
glist.append(nxgrph)
filename = op.join(datadir, '%s_grph.pkl' % mediatype)
with open(filename, 'wb') as output:
pickle.dump((nlist, elist, glist), output)
|
moosekaka/sweepython
|
pipeline/make_networkx.py
|
Python
|
mit
| 5,684
|
[
"VTK"
] |
1d57bec60915a40871b2c61bde9326fd417f2faecaa8a5e20400b1309090a926
|
# $Id$
#
# Copyright (C) 2007 Greg Landrum
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from __future__ import print_function
import unittest
import doctest
from rdkit.DataStructs import BitUtils, VectCollection, LazySignature, FingerprintSimilarity
from rdkit import DataStructs, Chem
from rdkit.Chem.Fingerprints.FingerprintMols import FingerprinterDetails
def load_tests(loader, tests, ignore):
""" Add the Doctests from the module """
tests.addTests(doctest.DocTestSuite(BitUtils, optionflags=doctest.ELLIPSIS))
tests.addTests(doctest.DocTestSuite(LazySignature, optionflags=doctest.ELLIPSIS))
tests.addTests(doctest.DocTestSuite(VectCollection, optionflags=doctest.ELLIPSIS))
return tests
class TestCaseAdditional(unittest.TestCase):
def test_VectCollection(self):
# We mainly test the use of Reset
bv1 = DataStructs.ExplicitBitVect(10)
bv1.SetBitsFromList((1, 3, 5))
bv2 = DataStructs.ExplicitBitVect(10)
bv2.SetBitsFromList((6, 8))
vc = VectCollection.VectCollection()
self.assertEqual(vc.GetOrVect(), None)
vc.AddVect(1, bv1)
vc.AddVect(2, bv2)
onBits = set([1, 3, 5, 6, 8])
for i, onOff in enumerate(vc.GetOrVect()):
self.assertEqual(i in onBits, onOff == 1)
vc.Reset()
self.assertEqual(onBits, set(vc.GetOnBits()))
vc = VectCollection.VectCollection()
self.assertEqual(vc.GetOrVect(), None)
vc.AddVect(1, bv1)
vc.AddVect(2, bv2)
for i in onBits:
self.assertEqual(vc[i], 1)
def test_LazySig(self):
self.assertRaises(ValueError, LazySignature.LazySig, lambda x: 1, 0)
# Check cache works
obj = LazySignature.LazySig(lambda x: x, 10)
self.assertNotIn(8, obj._cache)
self.assertEqual(obj[8], 8)
self.assertIn(8, obj._cache)
obj._cache[8] = 'cached'
self.assertEqual(obj[8], 'cached')
def test__init__(self):
from rdkit.Chem.Fingerprints import FingerprintMols
ms = [Chem.MolFromSmiles('CCOC'), Chem.MolFromSmiles('CCO'), Chem.MolFromSmiles('COC')]
fps = [FingerprintMols.FingerprintMol(x) for x in ms]
self.assertAlmostEqual(FingerprintSimilarity(fps[0], fps[1]), 0.6, places=2)
details = FingerprinterDetails()
fpArgs = details.__dict__
fps = []
for i, x in enumerate(ms, 1):
fpArgs['fpSize'] = 16 * i
fps.append(FingerprintMols.FingerprintMol(x, **fpArgs))
self.assertAlmostEqual(FingerprintSimilarity(fps[0], fps[1]), 0.555, places=2)
self.assertAlmostEqual(FingerprintSimilarity(fps[1], fps[0]), 0.555, places=2)
fpArgs['fpSize'] = 1024
fpArgs['tgtDensity'] = 0.8
fp = FingerprintMols.FingerprintMol(ms[0], **fpArgs)
self.assertEqual(len(fp), 64)
fp = DataStructs.FoldToTargetDensity(fp, density=0.1, minLength=2)
self.assertEqual(len(fp), 4)
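# Hedged aside (not part of the original test suite): FingerprintSimilarity
# defaults to the Tanimoto metric; an alternative metric can be passed
# explicitly. A minimal sketch reusing two of the molecules from test__init__.
def _example_explicit_metric():
    from rdkit.Chem.Fingerprints import FingerprintMols
    fps = [FingerprintMols.FingerprintMol(Chem.MolFromSmiles(smi))
           for smi in ('CCOC', 'CCO')]
    return FingerprintSimilarity(fps[0], fps[1], metric=DataStructs.DiceSimilarity)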
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
rvianello/rdkit
|
rdkit/DataStructs/UnitTestDocTests.py
|
Python
|
bsd-3-clause
| 3,019
|
[
"RDKit"
] |
6c9d3e6b2666bfbbea6e5207ebd465bf565615ac8198d34dd9ca5f1a135095ea
|
##
# Copyright 2009-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing the libsmm library, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
import shutil
from distutils.version import LooseVersion
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd
class EB_libsmm(EasyBlock):
"""
Support for the CP2K small matrix library
    Notes: - the build can take a very long time, and no real rebuilding is needed for each version
- CP2K can be built without this
"""
@staticmethod
def extra_options():
# default dimensions
        dd = [1, 4, 5, 6, 9, 13, 16, 17, 22]
extra_vars = {
'transpose_flavour': [1, "Transpose flavour of routines", CUSTOM],
'max_tiny_dim': [12, "Maximum tiny dimension", CUSTOM],
'dims': [dd, "Generate routines for these matrix dims", CUSTOM],
}
return EasyBlock.extra_options(extra_vars)
def configure_step(self):
"""Configure build: change to tools/build_libsmm dir"""
try:
dst = 'tools/build_libsmm'
os.chdir(dst)
self.log.debug('Change to directory %s' % dst)
except OSError, err:
self.log.exception('Failed to change to directory %s: %s' % (dst, err))
def build_step(self):
"""Build libsmm
Possible iterations over precision (single/double) and type (real/complex)
- also type of transpose matrix
- all set in the config file
Make the config.in file (is source afterwards in the build)
"""
fn = 'config.in'
cfg_tpl = """# This config file was generated by EasyBuild
# the build script can generate optimized routines packed in a library for
# 1) 'nn' => C=C+MATMUL(A,B)
# 2) 'tn' => C=C+MATMUL(TRANSPOSE(A),B)
# 3) 'nt' => C=C+MATMUL(A,TRANSPOSE(B))
# 4) 'tt' => C=C+MATMUL(TRANSPOSE(A),TRANSPOSE(B))
#
# select a transpose_flavor from the list 1 2 3 4
#
transpose_flavor=%(transposeflavour)s
# 1) d => double precision real
# 2) s => single precision real
# 3) z => double precision complex
# 4) c => single precision complex
#
# select a data_type from the list 1 2 3 4
#
data_type=%(datatype)s
# target compiler... these are the options used for building the library.
# They should be aggressive enough to e.g. perform vectorization for the specific CPU (e.g. -ftree-vectorize -march=native),
# and allow some flexibility in reordering floating point expressions (-ffast-math).
# Higher level optimisation (in particular loop nest optimization) should not be used.
#
target_compile="%(targetcompile)s"
# target dgemm link options... these are the options needed to link blas (e.g. -lblas)
# blas is used as a fall back option for sizes not included in the library or in those cases where it is faster
# the same blas library should thus also be used when libsmm is linked.
#
OMP_NUM_THREADS=1
blas_linking="%(LIBBLAS)s"
# matrix dimensions for which optimized routines will be generated.
# since all combinations of M,N,K are being generated the size of the library becomes very large
# if too many sizes are being optimized for. Numbers have to be ascending.
#
dims_small="%(dims)s"
# tiny dimensions are used as primitives and generated in an 'exhaustive' search.
# They should be a sequence from 1 to N,
# where N is a number that is large enough to have good cache performance
# (e.g. for modern SSE cpus 8 to 12)
# Too large (>12?) is not beneficial, but increases the time needed to build the library
# Too small (<8) will lead to a slow library, but the build might proceed quickly
# The minimum number for a successful build is 4
#
dims_tiny="%(tiny_dims)s"
# host compiler... this is used only to compile a few tools needed to build the library.
# The library itself is not compiled this way.
# This compiler needs to be able to deal with some Fortran2003 constructs.
#
host_compile="%(hostcompile)s "
# number of processes to use in parallel for compiling / building and benchmarking the library.
# Should *not* be more than the physical (available) number of cores of the machine
#
tasks=%(tasks)s
"""
# only GCC is supported for now
if self.toolchain.comp_family() == toolchain.GCC: #@UndefinedVariable
hostcompile = os.getenv('F90')
# optimizations
opts = "-O2 -funroll-loops -ffast-math -ftree-vectorize -march=native -fno-inline-functions"
            # Depending on the GCC version, we need extra options
extra = ''
gccVersion = LooseVersion(get_software_version('GCC'))
if gccVersion >= LooseVersion('4.6'):
extra = "-flto"
targetcompile = "%s %s %s" % (hostcompile, opts, extra)
else:
self.log.error('No supported compiler found (tried GCC)')
if not os.getenv('LIBBLAS'):
self.log.error('No BLAS library specifications found (LIBBLAS not set)!')
cfgdict = {
'datatype': None,
'transposeflavour': self.cfg['transpose_flavour'],
'targetcompile': targetcompile,
'hostcompile': hostcompile,
'dims': ' '.join([str(d) for d in self.cfg['dims']]),
'tiny_dims': ' '.join([str(d) for d in range(1, self.cfg['max_tiny_dim']+1)]),
'tasks': self.cfg['parallel'],
'LIBBLAS': "%s %s" % (os.getenv('LDFLAGS'), os.getenv('LIBBLAS'))
}
# configure for various iterations
datatypes = [(1, 'double precision real'), (3, 'double precision complex')]
for (dt, descr) in datatypes:
cfgdict['datatype'] = dt
try:
txt = cfg_tpl % cfgdict
f = open(fn, 'w')
f.write(txt)
f.close()
self.log.debug("config file %s for datatype %s ('%s'): %s" % (fn, dt, descr, txt))
except IOError, err:
self.log.error("Failed to write %s: %s" % (fn, err))
self.log.info("Building for datatype %s ('%s')..." % (dt, descr))
run_cmd("./do_clean")
run_cmd("./do_all")
def install_step(self):
"""Install CP2K: clean, and copy lib directory to install dir"""
run_cmd("./do_clean")
try:
shutil.copytree('lib', os.path.join(self.installdir, 'lib'))
except Exception, err:
self.log.error("Something went wrong during dir lib copying to installdir: %s" % err)
def sanity_check_step(self):
"""Custom sanity check for libsmm"""
custom_paths = {
'files': ["lib/libsmm_%s.a" % x for x in ["dnn", "znn"]],
'dirs': []
}
super(EB_libsmm, self).sanity_check_step(custom_paths=custom_paths)
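# Hedged illustration (not part of the easyblock): the custom options defined
# in extra_options() above would normally come from an easyconfig file; the
# name/version/toolchain values below are hypothetical.
_EXAMPLE_EASYCONFIG_PARAMETERS = {
    'name': 'libsmm',
    'version': '20130201',  # hypothetical
    'toolchain': {'name': 'goolf', 'version': '1.4.10'},  # hypothetical
    'transpose_flavour': 1,  # custom options from extra_options() above
    'max_tiny_dim': 12,
    'dims': [1, 4, 5, 6, 9, 13, 16, 17, 22],
}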
|
omula/easybuild-easyblocks
|
easybuild/easyblocks/l/libsmm.py
|
Python
|
gpl-2.0
| 8,289
|
[
"CP2K"
] |
70abf99bd79d91673c5ad5fa1f29e04f66a98cbcdc70ef237ba21c4097af7cc6
|
"""The suite of window functions."""
import numpy as np
from scipy import special, linalg
from scipy.fftpack import fft
def boxcar(M, sym=True):
"""The M-point boxcar window.
"""
return np.ones(M, float)
def triang(M, sym=True):
"""The M-point triangular window.
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(1,int((M+1)/2)+1)
if M % 2 == 0:
w = (2*n-1.0)/M
w = np.r_[w, w[::-1]]
else:
w = 2*n/(M+1.0)
w = np.r_[w, w[-2::-1]]
if not sym and not odd:
w = w[:-1]
return w
def parzen(M, sym=True):
"""The M-point Parzen window.
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
n = np.arange(-(M-1)/2.0,(M-1)/2.0+0.5,1.0)
na = np.extract(n < -(M-1)/4.0, n)
nb = np.extract(abs(n) <= (M-1)/4.0, n)
wa = 2*(1-np.abs(na)/(M/2.0))**3.0
wb = 1-6*(np.abs(nb)/(M/2.0))**2.0 + 6*(np.abs(nb)/(M/2.0))**3.0
w = np.r_[wa,wb,wa[::-1]]
if not sym and not odd:
w = w[:-1]
return w
def bohman(M, sym=True):
"""The M-point Bohman window.
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
fac = np.abs(np.linspace(-1,1,M)[1:-1])
w = (1 - fac) * np.cos(np.pi*fac) + 1.0/np.pi*np.sin(np.pi*fac)
w = np.r_[0,w,0]
if not sym and not odd:
w = w[:-1]
return w
def blackman(M, sym=True):
"""The M-point Blackman window.
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
n = np.arange(0,M)
w = 0.42-0.5*np.cos(2.0*np.pi*n/(M-1)) + 0.08*np.cos(4.0*np.pi*n/(M-1))
if not sym and not odd:
w = w[:-1]
return w
def nuttall(M, sym=True):
"""A minimum 4-term Blackman-Harris window according to Nuttall.
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
a = [0.3635819, 0.4891775, 0.1365995, 0.0106411]
n = np.arange(0,M)
fac = n*2*np.pi/(M-1.0)
w = a[0] - a[1]*np.cos(fac) + a[2]*np.cos(2*fac) - a[3]*np.cos(3*fac)
if not sym and not odd:
w = w[:-1]
return w
def blackmanharris(M, sym=True):
"""The M-point minimum 4-term Blackman-Harris window.
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
    a = [0.35875, 0.48829, 0.14128, 0.01168]
n = np.arange(0,M)
fac = n*2*np.pi/(M-1.0)
w = a[0] - a[1]*np.cos(fac) + a[2]*np.cos(2*fac) - a[3]*np.cos(3*fac)
if not sym and not odd:
w = w[:-1]
return w
def flattop(M, sym=True):
"""The M-point Flat top window.
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
a = [0.2156, 0.4160, 0.2781, 0.0836, 0.0069]
n = np.arange(0,M)
fac = n*2*np.pi/(M-1.0)
w = a[0] - a[1]*np.cos(fac) + a[2]*np.cos(2*fac) - a[3]*np.cos(3*fac) + \
a[4]*np.cos(4*fac)
if not sym and not odd:
w = w[:-1]
return w
def bartlett(M, sym=True):
"""The M-point Bartlett window.
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
n = np.arange(0,M)
w = np.where(np.less_equal(n,(M-1)/2.0),2.0*n/(M-1),2.0-2.0*n/(M-1))
if not sym and not odd:
w = w[:-1]
return w
def hanning(M, sym=True):
"""The M-point Hanning window.
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
n = np.arange(0,M)
w = 0.5-0.5*np.cos(2.0*np.pi*n/(M-1))
if not sym and not odd:
w = w[:-1]
return w
hann = hanning
def barthann(M, sym=True):
"""Return the M-point modified Bartlett-Hann window.
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
n = np.arange(0,M)
fac = np.abs(n/(M-1.0)-0.5)
w = 0.62 - 0.48*fac + 0.38*np.cos(2*np.pi*fac)
if not sym and not odd:
w = w[:-1]
return w
def hamming(M, sym=True):
"""The M-point Hamming window.
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
n = np.arange(0,M)
w = 0.54-0.46*np.cos(2.0*np.pi*n/(M-1))
if not sym and not odd:
w = w[:-1]
return w
def kaiser(M, beta, sym=True):
"""Return a Kaiser window of length M with shape parameter beta.
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0,M)
alpha = (M-1)/2.0
w = special.i0(beta * np.sqrt(1-((n-alpha)/alpha)**2.0))/special.i0(beta)
if not sym and not odd:
w = w[:-1]
return w
def gaussian(M, std, sym=True):
"""Return a Gaussian window of length M with standard-deviation std.
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0,M) - (M-1.0)/2.0
sig2 = 2*std*std
w = np.exp(-n**2 / sig2)
if not sym and not odd:
w = w[:-1]
return w
def general_gaussian(M, p, sig, sym=True):
"""Return a window with a generalized Gaussian shape.
    The Gaussian shape is defined as ``exp(-0.5*(x/sig)**(2*p))``; the
    half-power point is at ``(2*log(2))**(1/(2*p)) * sig``.
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0,M) - (M-1.0)/2.0
w = np.exp(-0.5*(n/sig)**(2*p))
if not sym and not odd:
w = w[:-1]
return w
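# Illustrative check (not part of the module): with shape parameter p=1 the
# generalized Gaussian reduces to the plain Gaussian window above.
def _example_general_gaussian_p1():
    assert np.allclose(general_gaussian(16, 1, 3.0), gaussian(16, 3.0))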
# `chebwin` contributed by Kumar Appaiah.
def chebwin(M, at, sym=True):
"""Dolph-Chebyshev window.
Parameters
----------
M : int
Window size.
at : float
Attenuation (in dB).
sym : bool
Generates symmetric window if True.
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M + 1
# compute the parameter beta
order = M - 1.0
beta = np.cosh(1.0/order * np.arccosh(10**(np.abs(at)/20.)))
k = np.r_[0:M]*1.0
x = beta * np.cos(np.pi*k/M)
    # Find the window's DFT coefficients
# Use analytic definition of Chebyshev polynomial instead of expansion
# from scipy.special. Using the expansion in scipy.special leads to errors.
p = np.zeros(x.shape)
p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
p[x < -1] = (1 - 2*(order%2)) * np.cosh(order * np.arccosh(-x[x < -1]))
p[np.abs(x) <=1 ] = np.cos(order * np.arccos(x[np.abs(x) <= 1]))
# Appropriate IDFT and filling up
# depending on even/odd M
if M % 2:
w = np.real(fft(p))
n = (M + 1) / 2
w = w[:n] / w[0]
w = np.concatenate((w[n - 1:0:-1], w))
else:
p = p * np.exp(1.j*np.pi / M * np.r_[0:M])
w = np.real(fft(p))
n = M / 2 + 1
w = w / w[1]
w = np.concatenate((w[n - 1:0:-1], w[1:n]))
if not sym and not odd:
w = w[:-1]
return w
def slepian(M, width, sym=True):
"""Return the M-point slepian window.
"""
    if M * width > 27.38:
raise ValueError("Cannot reliably obtain slepian sequences for"
" M*width > 27.38.")
if M < 1:
return np.array([])
if M == 1:
return np.ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
twoF = width/2.0
alpha = (M-1)/2.0
m = np.arange(0,M) - alpha
n = m[:,np.newaxis]
k = m[np.newaxis,:]
AF = twoF*special.sinc(twoF*(n-k))
[lam,vec] = linalg.eig(AF)
ind = np.argmax(abs(lam),axis=-1)
w = np.abs(vec[:,ind])
w = w / max(w)
if not sym and not odd:
w = w[:-1]
return w
def get_window(window, Nx, fftbins=True):
"""
Return a window of length `Nx` and type `window`.
Parameters
----------
window : string, float, or tuple
The type of window to create. See below for more details.
Nx : int
The number of samples in the window.
fftbins : bool, optional
        If True, create a "periodic" window ready to use with ifftshift
        and to be multiplied by the result of an FFT (see also fftfreq).
Notes
-----
Window types:
boxcar, triang, blackman, hamming, hanning, bartlett,
parzen, bohman, blackmanharris, nuttall, barthann,
kaiser (needs beta), gaussian (needs std),
general_gaussian (needs power, width),
slepian (needs width), chebwin (needs attenuation)
If the window requires no parameters, then `window` can be a string.
If the window requires parameters, then `window` must be a tuple
with the first argument the string name of the window, and the next
arguments the needed parameters.
If `window` is a floating point number, it is interpreted as the beta
parameter of the kaiser window.
Each of the window types listed above is also the name of
a function that can be called directly to create a window of
that type.
Examples
--------
>>> get_window('triang', 7)
array([ 0.25, 0.5 , 0.75, 1. , 0.75, 0.5 , 0.25])
>>> get_window(('kaiser', 4.0), 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
>>> get_window(4.0, 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
"""
sym = not fftbins
try:
beta = float(window)
except (TypeError, ValueError, AttributeError):
args = ()
if isinstance(window, tuple):
winstr = window[0]
if len(window) > 1:
args = window[1:]
elif isinstance(window, str):
if window in ['kaiser', 'ksr', 'gaussian', 'gauss', 'gss',
'general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs',
'slepian', 'optimal', 'slep', 'dss',
'chebwin', 'cheb']:
raise ValueError("The '" + window + "' window needs one or "
"more parameters -- pass a tuple.")
else:
winstr = window
if winstr in ['blackman', 'black', 'blk']:
winfunc = blackman
elif winstr in ['triangle', 'triang', 'tri']:
winfunc = triang
elif winstr in ['hamming', 'hamm', 'ham']:
winfunc = hamming
elif winstr in ['bartlett', 'bart', 'brt']:
winfunc = bartlett
elif winstr in ['hanning', 'hann', 'han']:
winfunc = hanning
elif winstr in ['blackmanharris', 'blackharr','bkh']:
winfunc = blackmanharris
elif winstr in ['parzen', 'parz', 'par']:
winfunc = parzen
elif winstr in ['bohman', 'bman', 'bmn']:
winfunc = bohman
elif winstr in ['nuttall', 'nutl', 'nut']:
winfunc = nuttall
elif winstr in ['barthann', 'brthan', 'bth']:
winfunc = barthann
elif winstr in ['flattop', 'flat', 'flt']:
winfunc = flattop
elif winstr in ['kaiser', 'ksr']:
winfunc = kaiser
elif winstr in ['gaussian', 'gauss', 'gss']:
winfunc = gaussian
elif winstr in ['general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs']:
winfunc = general_gaussian
elif winstr in ['boxcar', 'box', 'ones']:
winfunc = boxcar
elif winstr in ['slepian', 'slep', 'optimal', 'dss']:
winfunc = slepian
elif winstr in ['chebwin', 'cheb']:
winfunc = chebwin
else:
raise ValueError("Unknown window type.")
params = (Nx,) + args + (sym,)
else:
winfunc = kaiser
params = (Nx, beta, sym)
return winfunc(*params)
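# Hedged demo (not part of the module): the `fftbins`/`sym` distinction in a
# nutshell -- the periodic ("fftbins") window of length M equals the first M
# points of the symmetric window of length M+1.
def _example_periodic_vs_symmetric():
    periodic = get_window('hann', 8)  # fftbins=True is the default
    assert np.allclose(periodic, hanning(9)[:8])
    return periodic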
|
jasonmccampbell/scipy-refactor
|
scipy/signal/windows.py
|
Python
|
bsd-3-clause
| 12,780
|
[
"Gaussian"
] |
966a5d142ed434085ed74d5794057af80130394299f3f6a31757bc4e64dca263
|
'''
PathwayGenie (c) GeneGenie Bioinformatics Ltd. 2018
PathwayGenie is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
import unittest
from parts_genie.rbs_calculator import RbsCalculator
import parts_genie.vienna_utils as utils
class TestRbsCalculator(unittest.TestCase):
'''Test class for RbsCalculator.'''
def test_calc_kinetic_score(self):
'''Tests calc_kinetic_score method.'''
r_rna = 'acctcctta'
calc = RbsCalculator(r_rna, utils)
m_rna = 'TTCTAGAGGGGGGATCTCCCCCCAAAAAATAAGAGGTACACATGACTAAAACTTTCA' + \
'AAGGCTCAGTATTCCCACTGAG'
start_pos = 41
self.assertAlmostEqual(calc.calc_kinetic_score(m_rna, start_pos),
0.528571428571428)
def test_get_calc_dgs(self):
'''Tests calc_dgs method.'''
r_rna = 'acctcctta'
calc = RbsCalculator(r_rna, utils)
m_rna = 'TTCTAGAGGGGGGATCTCCCCCCAAAAAATAAGAGGTACACATGACTAAAACTTTCA' + \
'AAGGCTCAGTATTCCCACTGAG'
dgs = calc.calc_dgs(m_rna)
self.assertEqual(list(dgs.keys()), [41, 74])
self.assertAlmostEqual(dgs[41][0], -6.088674036389431)
self.assertAlmostEqual(dgs[74][0], 5.793940143051147)
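        # Calling calc_dgs a second time should reproduce the same values;
        # this presumably exercises any caching inside the calculator.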
dgs = calc.calc_dgs(m_rna)
self.assertEqual(list(dgs.keys()), [41, 74])
self.assertAlmostEqual(dgs[41][0], -6.088674036389431)
self.assertAlmostEqual(dgs[74][0], 5.793940143051147)
def test_mfe_fail(self):
'''Tests mfe method.'''
m_rna = 'GCGGGAATTACACATGGCATGGACGAACTTTATAAATGA'
energies, bp_xs, bp_ys = utils.run('mfe', [m_rna], temp=37.0,
dangles='none')
self.assertEqual(energies, [0.0])
self.assertEqual(bp_xs, [[]])
self.assertEqual(bp_ys, [[]])
def test_subopt(self):
'''Tests subopt method.'''
r_rna = 'ACCTCCTTA'
m_rna = 'AACCTAATTGATAGCGGCCTAGGACCCCCATCAAC'
_, _, bp_ys = utils.run('subopt', [m_rna, r_rna], temp=37.0,
dangles='all', energy_gap=3.0)
nt_in_r_rna = False
for bp_y in bp_ys:
for nt_y in bp_y:
if nt_y > len(m_rna):
nt_in_r_rna = True
self.assertTrue(nt_in_r_rna)
def test_subopt_fail(self):
'''Tests subopt method.'''
r_rna = 'CCC'
m_rna = 'CCC'
energies, bp_xs, bp_ys = utils.run('subopt', [m_rna, r_rna], temp=37.0,
dangles='all', energy_gap=3.0)
self.assertEqual(energies, [])
self.assertEqual(bp_xs, [])
self.assertEqual(bp_ys, [])
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
neilswainston/PathwayGenie
|
parts_genie/test/test_rbs_calculator_vienna.py
|
Python
|
mit
| 2,848
|
[
"VisIt"
] |
7ee4a44c60f92a48e859ac199105b5fba466847f16449d5eff076155c4bd7aef
|
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2009-2011 by the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module provides classes and methods for working with molecules and
molecular configurations. A molecule is represented internally using a graph
data type, where atoms correspond to vertices and bonds correspond to edges.
Both :class:`Atom` and :class:`Bond` objects store semantic information that
describe the corresponding atom or bond.
"""
import cython
import logging
import os
import re
import numpy
import urllib
import element as elements
try:
import openbabel
except ImportError:
pass
from rdkit import Chem
from .graph import Vertex, Edge, Graph, getVertexConnectivityValue
from .group import GroupAtom, GroupBond, Group, ActionError
from .atomtype import AtomType, atomTypes, getAtomType
import rmgpy.constants as constants
import rmgpy.molecule.parser as parser
import rmgpy.molecule.generator as generator
import rmgpy.molecule.resonance as resonance
################################################################################
class Atom(Vertex):
"""
An atom. The attributes are:
=================== =================== ====================================
Attribute Type Description
=================== =================== ====================================
`atomType` :class:`AtomType` The :ref:`atom type <atom-types>`
`element` :class:`Element` The chemical element the atom represents
`radicalElectrons` ``short`` The number of radical electrons
`charge` ``short`` The formal charge of the atom
`label` ``str`` A string label that can be used to tag individual atoms
`coords` ``numpy array`` The (x,y,z) coordinates in Angstrom
`lonePairs` ``short`` The number of lone electron pairs
=================== =================== ====================================
Additionally, the ``mass``, ``number``, and ``symbol`` attributes of the
atom's element can be read (but not written) directly from the atom object,
e.g. ``atom.symbol`` instead of ``atom.element.symbol``.
"""
def __init__(self, element=None, radicalElectrons=0, charge=0, label='', lonePairs=-100, coords=numpy.array([])):
Vertex.__init__(self)
if isinstance(element, str):
self.element = elements.__dict__[element]
else:
self.element = element
self.radicalElectrons = radicalElectrons
self.charge = charge
self.label = label
self.atomType = None
self.lonePairs = lonePairs
self.coords = coords
def __str__(self):
"""
Return a human-readable string representation of the object.
"""
return '{0}{1}{2}'.format(
str(self.element),
'.' * self.radicalElectrons,
'+' * self.charge if self.charge > 0 else '-' * -self.charge,
)
def __repr__(self):
"""
Return a representation that can be used to reconstruct the object.
"""
return "<Atom '{0}'>".format(str(self))
def __reduce__(self):
"""
A helper function used when pickling an object.
"""
d = {
'edges': self.edges,
'connectivity1': self.connectivity1,
'connectivity2': self.connectivity2,
'connectivity3': self.connectivity3,
'sortingLabel': self.sortingLabel,
'atomType': self.atomType.label if self.atomType else None,
'lonePairs': self.lonePairs,
}
return (Atom, (self.element.symbol, self.radicalElectrons, self.charge, self.label), d)
def __setstate__(self, d):
"""
A helper function used when unpickling an object.
"""
self.edges = d['edges']
self.connectivity1 = d['connectivity1']
self.connectivity2 = d['connectivity2']
self.connectivity3 = d['connectivity3']
self.sortingLabel = d['sortingLabel']
self.atomType = atomTypes[d['atomType']] if d['atomType'] else None
self.lonePairs = d['lonePairs']
@property
def mass(self): return self.element.mass
@property
def number(self): return self.element.number
@property
def symbol(self): return self.element.symbol
@property
def bonds(self): return self.edges
def equivalent(self, other):
"""
Return ``True`` if `other` is indistinguishable from this atom, or
``False`` otherwise. If `other` is an :class:`Atom` object, then all
attributes except `label` must match exactly. If `other` is an
:class:`GroupAtom` object, then the atom must match any of the
combinations in the atom pattern.
"""
cython.declare(atom=Atom, ap=GroupAtom)
if isinstance(other, Atom):
atom = other
return (
self.element is atom.element and
self.radicalElectrons == atom.radicalElectrons and
self.lonePairs == atom.lonePairs and
self.charge == atom.charge
)
elif isinstance(other, GroupAtom):
cython.declare(a=AtomType, radical=cython.short, lp=cython.short, charge=cython.short)
ap = other
for a in ap.atomType:
if self.atomType.equivalent(a): break
else:
return False
if ap.radicalElectrons:
for radical in ap.radicalElectrons:
if self.radicalElectrons == radical: break
else:
return False
if ap.lonePairs:
for lp in ap.lonePairs:
if self.lonePairs == lp: break
else:
return False
if ap.charge:
for charge in ap.charge:
if self.charge == charge: break
else:
return False
return True
def getDescriptor(self):
return (self.getAtomConnectivityValue(), self.number)
    def getAtomConnectivityValue(self):
        # Negated graph connectivity value, so that sorting in ascending order
        # puts the most highly connected atoms first
        return -1 * getVertexConnectivityValue(self)
def isSpecificCaseOf(self, other):
"""
Return ``True`` if `self` is a specific case of `other`, or ``False``
otherwise. If `other` is an :class:`Atom` object, then this is the same
as the :meth:`equivalent()` method. If `other` is an
:class:`GroupAtom` object, then the atom must match or be more
specific than any of the combinations in the atom pattern.
"""
if isinstance(other, Atom):
return self.equivalent(other)
elif isinstance(other, GroupAtom):
cython.declare(atom=GroupAtom, a=AtomType, radical=cython.short, lp = cython.short, charge=cython.short)
atom = other
if self.atomType is None:
return False
for a in atom.atomType:
if self.atomType.isSpecificCaseOf(a): break
else:
return False
if atom.radicalElectrons:
for radical in atom.radicalElectrons:
if self.radicalElectrons == radical: break
else:
return False
if atom.lonePairs:
for lp in atom.lonePairs:
if self.lonePairs == lp: break
else:
return False
if atom.charge:
for charge in atom.charge:
if self.charge == charge: break
else:
return False
return True
def copy(self):
"""
Generate a deep copy of the current atom. Modifying the
attributes of the copy will not affect the original.
"""
cython.declare(a=Atom)
#a = Atom(self.element, self.radicalElectrons, self.spinMultiplicity, self.charge, self.label)
a = Atom.__new__(Atom)
a.edges = {}
a.resetConnectivityValues()
a.element = self.element
a.radicalElectrons = self.radicalElectrons
a.charge = self.charge
a.label = self.label
a.atomType = self.atomType
a.lonePairs = self.lonePairs
a.coords = self.coords[:]
return a
def isHydrogen(self):
"""
Return ``True`` if the atom represents a hydrogen atom or ``False`` if
not.
"""
return self.element.number == 1
def isNonHydrogen(self):
"""
Return ``True`` if the atom does not represent a hydrogen atom or
``False`` if not.
"""
return self.element.number > 1
def isCarbon(self):
"""
Return ``True`` if the atom represents a carbon atom or ``False`` if
not.
"""
return self.element.number == 6
def isNitrogen(self):
"""
Return ``True`` if the atom represents a nitrogen atom or ``False`` if
not.
"""
return self.element.number == 7
def isOxygen(self):
"""
Return ``True`` if the atom represents an oxygen atom or ``False`` if
not.
"""
return self.element.number == 8
def incrementRadical(self):
"""
Update the atom pattern as a result of applying a GAIN_RADICAL action,
where `radical` specifies the number of radical electrons to add.
"""
# Set the new radical electron count
self.radicalElectrons += 1
if self.radicalElectrons <= 0:
raise ActionError('Unable to update Atom due to GAIN_RADICAL action: Invalid radical electron set "{0}".'.format(self.radicalElectrons))
def decrementRadical(self):
"""
Update the atom pattern as a result of applying a LOSE_RADICAL action,
where `radical` specifies the number of radical electrons to remove.
"""
cython.declare(radicalElectrons=cython.short)
# Set the new radical electron count
radicalElectrons = self.radicalElectrons = self.radicalElectrons - 1
if radicalElectrons < 0:
raise ActionError('Unable to update Atom due to LOSE_RADICAL action: Invalid radical electron set "{0}".'.format(self.radicalElectrons))
def setLonePairs(self,lonePairs):
"""
Set the number of lone electron pairs.
"""
# Set the number of electron pairs
self.lonePairs = lonePairs
if self.lonePairs < 0:
            raise ActionError('Unable to update Atom due to setLonePairs: Invalid lone electron pairs set "{0}".'.format(self.lonePairs))
self.updateCharge()
def incrementLonePairs(self):
"""
Update the lone electron pairs pattern as a result of applying a GAIN_PAIR action.
"""
# Set the new lone electron pairs count
self.lonePairs += 1
if self.lonePairs <= 0:
raise ActionError('Unable to update Atom due to GAIN_PAIR action: Invalid lone electron pairs set "{0}".'.format(self.lonePairs))
self.updateCharge()
def decrementLonePairs(self):
"""
Update the lone electron pairs pattern as a result of applying a LOSE_PAIR action.
"""
# Set the new lone electron pairs count
self.lonePairs -= 1
if self.lonePairs < 0:
raise ActionError('Unable to update Atom due to LOSE_PAIR action: Invalid lone electron pairs set "{0}".'.format(self.lonePairs))
self.updateCharge()
def updateCharge(self):
"""
Update self.charge, according to the valence, and the
number and types of bonds, radicals, and lone pairs.
"""
valences = {'H': 1, 'C': 4, 'O': 2, 'N': 3, 'S': 2, 'Si': 4, 'He': 0, 'Ne': 0, 'Ar': 0, 'Cl': 1}
orders = {'S': 1, 'D': 2, 'T': 3, 'B': 1.5}
valence = valences[self.symbol]
order = 0
for atom2, bond in self.bonds.items():
order += orders[bond.order]
if self.symbol == 'H' or self.symbol == 'He':
self.charge = 2 - valence - order - self.radicalElectrons - 2*self.lonePairs
else:
self.charge = 8 - valence - order - self.radicalElectrons - 2*self.lonePairs
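        # Worked example (illustrative): an oxygen atom with one single bond,
        # no radicals and three lone pairs gives charge = 8 - 2 - 1 - 0 - 2*3 = -1,
        # i.e. a hydroxide-like oxygen.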
def applyAction(self, action):
"""
Update the atom pattern as a result of applying `action`, a tuple
containing the name of the reaction recipe action along with any
required parameters. The available actions can be found
:ref:`here <reaction-recipe-actions>`.
"""
# Invalidate current atom type
self.atomType = None
act = action[0].upper()
# Modify attributes if necessary
if act in ['CHANGE_BOND', 'FORM_BOND', 'BREAK_BOND']:
# Nothing else to do here
pass
elif act == 'GAIN_RADICAL':
for i in range(action[2]): self.incrementRadical()
elif act == 'LOSE_RADICAL':
for i in range(abs(action[2])): self.decrementRadical()
        elif act == 'GAIN_PAIR':
            for i in range(action[2]): self.incrementLonePairs()
        elif act == 'LOSE_PAIR':
            for i in range(abs(action[2])): self.decrementLonePairs()
else:
raise ActionError('Unable to update Atom: Invalid action {0}".'.format(action))
def setSpinMultiplicity(self,spinMultiplicity):
"""
Set the spin multiplicity.
"""
raise NotImplementedError("I thought multiplicity was now a molecule attribute not atom?")
# Set the spin multiplicity
self.spinMultiplicity = spinMultiplicity
if self.spinMultiplicity < 0:
raise ActionError('Unable to update Atom due to spin multiplicity : Invalid spin multiplicity set "{0}".'.format(self.spinMultiplicity))
self.updateCharge()
################################################################################
class Bond(Edge):
"""
A chemical bond. The attributes are:
=================== =================== ====================================
Attribute Type Description
=================== =================== ====================================
`order` ``str`` The :ref:`bond type <bond-types>`
=================== =================== ====================================
"""
def __init__(self, atom1, atom2, order='S'):
Edge.__init__(self, atom1, atom2)
self.order = order
def __str__(self):
"""
Return a human-readable string representation of the object.
"""
return self.order
def __repr__(self):
"""
Return a representation that can be used to reconstruct the object.
"""
return '<Bond "{0}">'.format(self.order)
def __reduce__(self):
"""
A helper function used when pickling an object.
"""
return (Bond, (self.vertex1, self.vertex2, self.order))
@property
def atom1(self):
return self.vertex1
@property
def atom2(self):
return self.vertex2
def equivalent(self, other):
"""
Return ``True`` if `other` is indistinguishable from this bond, or
``False`` otherwise. `other` can be either a :class:`Bond` or a
:class:`GroupBond` object.
"""
cython.declare(bond=Bond, bp=GroupBond)
if isinstance(other, Bond):
bond = other
return (self.order == bond.order)
elif isinstance(other, GroupBond):
bp = other
return (self.order in bp.order)
def isSpecificCaseOf(self, other):
"""
Return ``True`` if `self` is a specific case of `other`, or ``False``
otherwise. `other` can be either a :class:`Bond` or a
:class:`GroupBond` object.
"""
# There are no generic bond types, so isSpecificCaseOf is the same as equivalent
return self.equivalent(other)
def copy(self):
"""
Generate a deep copy of the current bond. Modifying the
attributes of the copy will not affect the original.
"""
#return Bond(self.vertex1, self.vertex2, self.order)
cython.declare(b=Bond)
b = Bond.__new__(Bond)
b.vertex1 = self.vertex1
b.vertex2 = self.vertex2
b.order = self.order
return b
def isSingle(self):
"""
Return ``True`` if the bond represents a single bond or ``False`` if
not.
"""
return self.order == 'S'
def isDouble(self):
"""
Return ``True`` if the bond represents a double bond or ``False`` if
not.
"""
return self.order == 'D'
def isTriple(self):
"""
Return ``True`` if the bond represents a triple bond or ``False`` if
not.
"""
return self.order == 'T'
def isBenzene(self):
"""
Return ``True`` if the bond represents a benzene bond or ``False`` if
not.
"""
return self.order == 'B'
def incrementOrder(self):
"""
Update the bond as a result of applying a CHANGE_BOND action to
increase the order by one.
"""
if self.order == 'S': self.order = 'D'
elif self.order == 'D': self.order = 'T'
else:
raise ActionError('Unable to update Bond due to CHANGE_BOND action: Invalid bond order "{0}".'.format(self.order))
def decrementOrder(self):
"""
Update the bond as a result of applying a CHANGE_BOND action to
decrease the order by one.
"""
if self.order == 'D': self.order = 'S'
elif self.order == 'T': self.order = 'D'
else:
raise ActionError('Unable to update Bond due to CHANGE_BOND action: Invalid bond order "{0}".'.format(self.order))
def __changeBond(self, order):
"""
Update the bond as a result of applying a CHANGE_BOND action,
where `order` specifies whether the bond is incremented or decremented
in bond order, and should be 1 or -1.
"""
if order == 1:
if self.order == 'S': self.order = 'D'
elif self.order == 'D': self.order = 'T'
else:
raise ActionError('Unable to update Bond due to CHANGE_BOND action: Invalid bond order "{0}".'.format(self.order))
elif order == -1:
if self.order == 'D': self.order = 'S'
elif self.order == 'T': self.order = 'D'
else:
raise ActionError('Unable to update Bond due to CHANGE_BOND action: Invalid bond order "{0}".'.format(self.order))
else:
raise ActionError('Unable to update Bond due to CHANGE_BOND action: Invalid order "{0}".'.format(order))
def applyAction(self, action):
"""
Update the bond as a result of applying `action`, a tuple
containing the name of the reaction recipe action along with any
required parameters. The available actions can be found
:ref:`here <reaction-recipe-actions>`.
"""
if action[0].upper() == 'CHANGE_BOND':
if action[2] == 1:
self.incrementOrder()
elif action[2] == -1:
self.decrementOrder()
elif action[2] == 'B':
self.order = 'B'
else:
raise ActionError('Unable to update Bond due to CHANGE_BOND action: Invalid order "{0}".'.format(action[2]))
else:
raise ActionError('Unable to update GroupBond: Invalid action {0}.'.format(action))
#################################################################################
class Molecule(Graph):
"""
A representation of a molecular structure using a graph data type, extending
the :class:`Graph` class. The `atoms` and `bonds` attributes are aliases
for the `vertices` and `edges` attributes. Other attributes are:
======================= =========== ========================================
Attribute Type Description
======================= =========== ========================================
`symmetryNumber` ``int`` The (estimated) external + internal symmetry number of the molecule
`multiplicity` ``int`` The multiplicity of this species, multiplicity = 2*total_spin+1
======================= =========== ========================================
A new molecule object can be easily instantiated by passing the `SMILES` or
`InChI` string representing the molecular structure.
"""
def __init__(self, atoms=None, symmetry=-1, multiplicity=-187, props=None, SMILES=''):
Graph.__init__(self, atoms)
self.symmetryNumber = symmetry
self.multiplicity = multiplicity
self._fingerprint = None
self.InChI = ''
if SMILES != '': self.fromSMILES(SMILES)
self.props = props or {}
if multiplicity != -187: # it was set explicitly, so re-set it (fromSMILES etc may have changed it)
self.multiplicity = multiplicity
def __hash__(self):
return hash((self.getFingerprint()))
def __richcmp__(x, y, op):
if op == 2:#Py_EQ
return x.is_equal(y)
if op == 3:#Py_NE
return not x.is_equal(y)
else:
raise NotImplementedError("Can only check equality of molecules, not > or <")
def is_equal(self,other):
"""Method to test equality of two Molecule objects."""
if not isinstance(other, Molecule): return False #different type
elif self is other: return True #same reference in memory
elif self.getFingerprint() != other.getFingerprint(): return False
else:
return self.isIsomorphic(other)
def __str__(self):
"""
Return a human-readable string representation of the object.
"""
return '<Molecule "{0}">'.format(self.toSMILES())
def __repr__(self):
"""
Return a representation that can be used to reconstruct the object.
"""
cython.declare(multiplicity=cython.int)
multiplicity = self.multiplicity
if multiplicity != self.getRadicalCount() + 1:
return 'Molecule(SMILES="{0}", multiplicity={1:d})'.format(self.toSMILES(), multiplicity)
return 'Molecule(SMILES="{0}")'.format(self.toSMILES())
def __reduce__(self):
"""
A helper function used when pickling an object.
"""
return (Molecule, (self.vertices, self.symmetryNumber, self.multiplicity, self.props))
def __getAtoms(self): return self.vertices
def __setAtoms(self, atoms): self.vertices = atoms
atoms = property(__getAtoms, __setAtoms)
def addAtom(self, atom):
"""
Add an `atom` to the graph. The atom is initialized with no bonds.
"""
self._fingerprint = None
return self.addVertex(atom)
def addBond(self, bond):
"""
Add a `bond` to the graph as an edge connecting the two atoms `atom1`
and `atom2`.
"""
self._fingerprint = None
return self.addEdge(bond)
def getBonds(self, atom):
"""
Return a list of the bonds involving the specified `atom`.
"""
return self.getEdges(atom)
def getBond(self, atom1, atom2):
"""
Returns the bond connecting atoms `atom1` and `atom2`.
"""
return self.getEdge(atom1, atom2)
def hasAtom(self, atom):
"""
Returns ``True`` if `atom` is an atom in the graph, or ``False`` if
not.
"""
return self.hasVertex(atom)
def hasBond(self, atom1, atom2):
"""
Returns ``True`` if atoms `atom1` and `atom2` are connected
by an bond, or ``False`` if not.
"""
return self.hasEdge(atom1, atom2)
def removeAtom(self, atom):
"""
Remove `atom` and all bonds associated with it from the graph. Does
not remove atoms that no longer have any bonds as a result of this
removal.
"""
self._fingerprint = None
return self.removeVertex(atom)
def removeBond(self, bond):
"""
        Remove the `bond` connecting two atoms from the graph.
Does not remove atoms that no longer have any bonds as a result of
this removal.
"""
self._fingerprint = None
return self.removeEdge(bond)
def sortAtoms(self):
"""
Sort the atoms in the graph. This can make certain operations, e.g.
the isomorphism functions, much more efficient.
"""
return self.sortVertices()
def update(self):
"""
Update connectivity values, atom types of atoms.
Update multiplicity, and sort atoms using the new
connectivity values.
"""
self.updateAtomTypes()
self.updateMultiplicity()
self.sortVertices()
def getFormula(self):
"""
Return the molecular formula for the molecule.
"""
cython.declare(atom=Atom, symbol=str, elements=dict, keys=list, formula=str)
cython.declare(hasCarbon=cython.bint, hasHydrogen=cython.bint)
# Count the number of each element in the molecule
        elements = {}
        for atom in self.vertices:
            symbol = atom.element.symbol
            elements[symbol] = elements.get(symbol, 0) + 1
        hasCarbon = 'C' in elements
        hasHydrogen = 'H' in elements
        # Use the Hill system to generate the formula
        formula = ''
        # Carbon and hydrogen always come first if carbon is present
        if hasCarbon:
            count = elements['C']
            formula += 'C{0:d}'.format(count) if count > 1 else 'C'
            del elements['C']
            if hasHydrogen:
                count = elements['H']
                formula += 'H{0:d}'.format(count) if count > 1 else 'H'
                del elements['H']
# Other atoms are in alphabetical order
# (This includes hydrogen if carbon is not present)
keys = elements.keys()
keys.sort()
for key in keys:
count = elements[key]
formula += '{0}{1:d}'.format(key, count) if count > 1 else key
return formula
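    # Example (illustrative): ethanol yields 'C2H6O' -- carbon first, hydrogen
    # second, remaining elements in alphabetical order per the Hill system.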
def getMolecularWeight(self):
"""
Return the molecular weight of the molecule in kg/mol.
"""
cython.declare(atom=Atom, mass=cython.double)
mass = 0
for atom in self.vertices:
mass += atom.element.mass
return mass
def getRadicalCount(self):
"""
Return the number of unpaired electrons.
"""
cython.declare(atom=Atom, radicals=cython.short)
radicals = 0
for atom in self.vertices:
radicals += atom.radicalElectrons
return radicals
def getNumAtoms(self, element = None):
"""
Return the number of atoms in molecule. If element is given, ie. "H" or "C",
the number of atoms of that element is returned.
"""
cython.declare(numAtoms=cython.int, atom=Atom)
if element == None:
return len(self.vertices)
else:
numAtoms = 0
for atom in self.vertices:
if atom.element.symbol == element:
numAtoms += 1
return numAtoms
def getNumberOfRadicalElectrons(self):
"""
Return the total number of radical electrons on all atoms in the
molecule. In this function, monoradical atoms count as one, biradicals
count as two, etc.
"""
cython.declare(numRadicals=cython.int, atom=Atom)
numRadicals = 0
for atom in self.vertices:
numRadicals += atom.radicalElectrons
return numRadicals
def copy(self, deep=False):
"""
Create a copy of the current graph. If `deep` is ``True``, a deep copy
is made: copies of the vertices and edges are used in the new graph.
If `deep` is ``False`` or not specified, a shallow copy is made: the
original vertices and edges are used in the new graph.
"""
other = cython.declare(Molecule)
g = Graph.copy(self, deep)
other = Molecule(g.vertices)
other.multiplicity = self.multiplicity
return other
def merge(self, other):
"""
Merge two molecules so as to store them in a single :class:`Molecule`
object. The merged :class:`Molecule` object is returned.
"""
g = Graph.merge(self, other)
molecule = Molecule(atoms=g.vertices)
return molecule
def split(self):
"""
Convert a single :class:`Molecule` object containing two or more
unconnected molecules into separate class:`Molecule` objects.
"""
graphs = Graph.split(self)
molecules = []
for g in graphs:
molecule = Molecule(atoms=g.vertices)
molecules.append(molecule)
return molecules
def deleteHydrogens(self):
"""
Irreversibly delete all non-labeled hydrogens without updating
connectivity values. If there's nothing but hydrogens, it does nothing.
It destroys information; be careful with it.
"""
cython.declare(atom=Atom, hydrogens=list)
# Check that the structure contains at least one heavy atom
for atom in self.vertices:
if not atom.isHydrogen():
break
else:
# No heavy atoms, so leave explicit
return
hydrogens = []
for atom in self.vertices:
if atom.isHydrogen() and atom.label == '':
hydrogens.append(atom)
# Remove the hydrogen atoms from the structure
for atom in hydrogens:
self.removeAtom(atom)
def connectTheDots(self):
"""
Delete all bonds, and set them again based on the Atoms' coords.
Does not detect bond type.
"""
cython.declare(criticalDistance=float, i=int, atom1=Atom, atom2=Atom,
bond=Bond, atoms=list, zBoundary=float)
# groupBond=GroupBond,
self._fingerprint = None
atoms = self.vertices
# Ensure there are coordinates to work with
for atom in atoms:
assert len(atom.coords) != 0
# If there are any bonds, remove them
for atom1 in atoms:
for bond in self.getBonds(atom1):
self.removeEdge(bond)
# Sort atoms by distance on the z-axis
sortedAtoms = sorted(atoms, key=lambda x: x.coords[2])
for i, atom1 in enumerate(sortedAtoms):
for atom2 in sortedAtoms[i+1:]:
# Set upper limit for bond distance
criticalDistance = (atom1.element.covRadius + atom2.element.covRadius + 0.45)**2
                # Once an atom is more than 4.0 Angstroms away along the z-axis, break the loop
# Atoms are sorted along the z-axis, so all following atoms should be even further
zBoundary = (atom1.coords[2] - atom2.coords[2])**2
if zBoundary > 16.0:
break
distanceSquared = sum((atom1.coords - atom2.coords)**2)
if distanceSquared > criticalDistance or distanceSquared < 0.40:
continue
else:
# groupBond = GroupBond(atom1, atom2, ['S','D','T','B'])
bond = Bond(atom1, atom2, 'S')
self.addBond(bond)
self.updateAtomTypes()
def updateAtomTypes(self):
"""
Iterate through the atoms in the structure, checking their atom types
to ensure they are correct (i.e. accurately describe their local bond
environment) and complete (i.e. are as detailed as possible).
"""
for atom in self.vertices:
atom.atomType = getAtomType(atom, atom.edges)
def updateMultiplicity(self):
"""
Update the multiplicity of a newly formed molecule.
"""
# Assume this is always true
# There are cases where 2 radicalElectrons is a singlet, but
# the triplet is often more stable,
self.multiplicity = self.getRadicalCount() + 1
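        # Example (illustrative): one unpaired electron gives multiplicity 2
        # (doublet); a biradical is assigned the triplet value of 3.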
def clearLabeledAtoms(self):
"""
Remove the labels from all atoms in the molecule.
"""
for atom in self.vertices:
atom.label = ''
def containsLabeledAtom(self, label):
"""
Return :data:`True` if the molecule contains an atom with the label
`label` and :data:`False` otherwise.
"""
for atom in self.vertices:
if atom.label == label: return True
return False
def getLabeledAtom(self, label):
"""
Return the atoms in the molecule that are labeled.
"""
for atom in self.vertices:
if atom.label == label: return atom
raise ValueError('No atom in the molecule has the label "{0}".'.format(label))
def getLabeledAtoms(self):
"""
Return the labeled atoms as a ``dict`` with the keys being the labels
and the values the atoms themselves. If two or more atoms have the
same label, the value is converted to a list of these atoms.
"""
labeled = {}
for atom in self.vertices:
if atom.label != '':
if atom.label in labeled:
if isinstance(labeled[atom.label],list):
labeled[atom.label].append(atom)
else:
labeled[atom.label] = [labeled[atom.label]]
labeled[atom.label].append(atom)
else:
labeled[atom.label] = atom
return labeled
def getFingerprint(self):
"""
Return a string containing the "fingerprint" used to accelerate graph
isomorphism comparisons with other molecules. The fingerprint is a
short string containing a summary of selected information about the
molecule. Two fingerprint strings matching is a necessary (but not
sufficient) condition for the associated molecules to be isomorphic.
"""
if self._fingerprint is None:
self._fingerprint = self.getFormula()
return self._fingerprint
def isIsomorphic(self, other, initialMap=None):
"""
Returns :data:`True` if two graphs are isomorphic and :data:`False`
otherwise. The `initialMap` attribute can be used to specify a required
mapping from `self` to `other` (i.e. the atoms of `self` are the keys,
while the atoms of `other` are the values). The `other` parameter must
be a :class:`Molecule` object, or a :class:`TypeError` is raised.
        Also ensures that the multiplicities are equal.
"""
# It only makes sense to compare a Molecule to a Molecule for full
# isomorphism, so raise an exception if this is not what was requested
if not isinstance(other, Molecule):
raise TypeError('Got a {0} object for parameter "other", when a Molecule object is required.'.format(other.__class__))
# Do the quick isomorphism comparison using the fingerprint
# Two fingerprint strings matching is a necessary (but not
# sufficient!) condition for the associated molecules to be isomorphic
if self.getFingerprint() != other.getFingerprint():
return False
# check multiplicity
if self.multiplicity != other.multiplicity:
return False
# Do the full isomorphism comparison
result = Graph.isIsomorphic(self, other, initialMap)
return result
def findIsomorphism(self, other, initialMap=None):
"""
Returns :data:`True` if `other` is isomorphic and :data:`False`
otherwise, and the matching mapping. The `initialMap` attribute can be
used to specify a required mapping from `self` to `other` (i.e. the
atoms of `self` are the keys, while the atoms of `other` are the
values). The returned mapping also uses the atoms of `self` for the keys
and the atoms of `other` for the values. The `other` parameter must
be a :class:`Molecule` object, or a :class:`TypeError` is raised.
"""
# It only makes sense to compare a Molecule to a Molecule for full
# isomorphism, so raise an exception if this is not what was requested
if not isinstance(other, Molecule):
raise TypeError('Got a {0} object for parameter "other", when a Molecule object is required.'.format(other.__class__))
# Do the quick isomorphism comparison using the fingerprint
# Two fingerprint strings matching is a necessary (but not
# sufficient!) condition for the associated molecules to be isomorphic
if self.getFingerprint() != other.getFingerprint():
return []
# check multiplicity
if self.multiplicity != other.multiplicity:
return []
# Do the isomorphism comparison
result = Graph.findIsomorphism(self, other, initialMap)
return result
def isSubgraphIsomorphic(self, other, initialMap=None):
"""
Returns :data:`True` if `other` is subgraph isomorphic and :data:`False`
otherwise. The `initialMap` attribute can be used to specify a required
mapping from `self` to `other` (i.e. the atoms of `self` are the keys,
while the atoms of `other` are the values). The `other` parameter must
be a :class:`Group` object, or a :class:`TypeError` is raised.
"""
cython.declare(group=Group, atom=Atom)
cython.declare(carbonCount=cython.short, nitrogenCount=cython.short, oxygenCount=cython.short, sulfurCount=cython.short, radicalCount=cython.short)
# It only makes sense to compare a Molecule to a Group for subgraph
# isomorphism, so raise an exception if this is not what was requested
if not isinstance(other, Group):
            raise TypeError('Got a {0} object for parameter "other", when a Group object is required.'.format(other.__class__))
group = other
# Count the number of carbons, oxygens, and radicals in the molecule
carbonCount = 0; nitrogenCount = 0; oxygenCount = 0; sulfurCount = 0; radicalCount = 0
for atom in self.vertices:
if atom.element.symbol == 'C':
carbonCount += 1
elif atom.element.symbol == 'N':
nitrogenCount += 1
elif atom.element.symbol == 'O':
oxygenCount += 1
elif atom.element.symbol == 'S':
sulfurCount += 1
radicalCount += atom.radicalElectrons
if group.multiplicity:
if self.multiplicity not in group.multiplicity: return False
# If the molecule has fewer of any of these things than the functional
# group does, then we know the subgraph isomorphism fails without
# needing to perform the full isomorphism check
if (radicalCount < group.radicalCount or
carbonCount < group.carbonCount or
nitrogenCount < group.nitrogenCount or
oxygenCount < group.oxygenCount or
sulfurCount < group.sulfurCount):
return False
# Do the isomorphism comparison
result = Graph.isSubgraphIsomorphic(self, other, initialMap)
return result
def findSubgraphIsomorphisms(self, other, initialMap=None):
"""
Returns a list of all valid subgraph isomorphism mappings if `other`
is subgraph isomorphic, or an empty list otherwise. The
`initialMap` attribute can be used to specify a required mapping from
`self` to `other` (i.e. the atoms of `self` are the keys, while the
atoms of `other` are the values). The returned mappings also use the
atoms of `self` for the keys and the atoms of `other` for the values.
The `other` parameter must be a :class:`Group` object, or a
:class:`TypeError` is raised.
"""
cython.declare(group=Group, atom=Atom)
cython.declare(carbonCount=cython.short, nitrogenCount=cython.short, oxygenCount=cython.short, sulfurCount=cython.short, radicalCount=cython.short)
# It only makes sense to compare a Molecule to a Group for subgraph
# isomorphism, so raise an exception if this is not what was requested
if not isinstance(other, Group):
raise TypeError('Got a {0} object for parameter "other", when a Group object is required.'.format(other.__class__))
group = other
# Count the number of carbon, nitrogen, oxygen, and sulfur atoms, and the radicals, in the molecule
carbonCount = 0; nitrogenCount = 0; oxygenCount = 0; sulfurCount = 0; radicalCount = 0
for atom in self.vertices:
if atom.element.symbol == 'C':
carbonCount += 1
elif atom.element.symbol == 'N':
nitrogenCount += 1
elif atom.element.symbol == 'O':
oxygenCount += 1
elif atom.element.symbol == 'S':
sulfurCount += 1
radicalCount += atom.radicalElectrons
if group.multiplicity:
if self.multiplicity not in group.multiplicity: return []
# If the molecule has fewer of any of these things than the functional
# group does, then we know the subgraph isomorphism fails without
# needing to perform the full isomorphism check
if (radicalCount < group.radicalCount or
carbonCount < group.carbonCount or
nitrogenCount < group.nitrogenCount or
oxygenCount < group.oxygenCount or
sulfurCount < group.sulfurCount):
return []
# Do the isomorphism comparison
result = Graph.findSubgraphIsomorphisms(self, other, initialMap)
return result
def isAtomInCycle(self, atom):
"""
Return :data:`True` if `atom` is in one or more cycles in the structure,
and :data:`False` if not.
"""
return self.isVertexInCycle(atom)
def isBondInCycle(self, bond):
"""
Return :data:`True` if the bond `bond` is in one or more cycles in the
graph, or :data:`False` if not.
"""
return self.isEdgeInCycle(bond)
def draw(self, path):
"""
Generate a pictorial representation of the chemical graph using the
:mod:`draw` module. Use `path` to specify the file to save
the generated image to; the image type is automatically determined by
extension. Valid extensions are ``.png``, ``.svg``, ``.pdf``, and
``.ps``; of these, the first is a raster format and the remainder are
vector formats.
"""
from .draw import MoleculeDrawer
format = os.path.splitext(path)[-1][1:].lower()
MoleculeDrawer().draw(self, format, path=path)
def _repr_png_(self):
"""
Return a png picture of the molecule, useful for ipython-qtconsole.
"""
from .draw import MoleculeDrawer
tempFileName = 'temp_molecule.png'
MoleculeDrawer().draw(self, 'png', tempFileName)
png = open(tempFileName,'rb').read()
os.unlink(tempFileName)
return png
def fromInChI(self, inchistr, backend='try-all'):
"""
Convert an InChI string `inchistr` to a molecular structure.
"""
parser.fromInChI(self, inchistr, backend)
return self
def fromAugmentedInChI(self, aug_inchi):
"""
Convert an Augmented InChI string `aug_inchi` to a molecular structure.
"""
parser.fromAugmentedInChI(self, aug_inchi)
return self
def fromSMILES(self, smilesstr, backend='try-all'):
"""
Convert a SMILES string `smilesstr` to a molecular structure.
"""
parser.fromSMILES(self, smilesstr, backend)
return self
def fromSMARTS(self, smartsstr):
"""
Convert a SMARTS string `smartsstr` to a molecular structure. Uses
`RDKit <http://rdkit.org/>`_ to perform the conversion.
This Kekulizes everything, removing all aromatic atom types.
"""
parser.fromSMARTS(self, smartsstr)
return self
def fromAdjacencyList(self, adjlist, saturateH=False):
"""
Convert a string adjacency list `adjlist` to a molecular structure.
The `saturateH` flag is passed through to the adjacency-list parser to
saturate unspecified valences with hydrogen atoms.
"""
from .adjlist import fromAdjacencyList
self.vertices, self.multiplicity = fromAdjacencyList(adjlist, group=False, saturateH=saturateH)
self.updateAtomTypes()
# Check if multiplicity is possible
n_rad = self.getRadicalCount()
multiplicity = self.multiplicity
if not (n_rad + 1 == multiplicity or n_rad - 1 == multiplicity or n_rad - 3 == multiplicity or n_rad - 5 == multiplicity):
raise ValueError('Impossible multiplicity for molecule\n{0}\n multiplicity = {1} and number of unpaired electrons = {2}'.format(self.toAdjacencyList(),multiplicity,n_rad))
if self.getNetCharge() != 0:
raise ValueError('Non-neutral molecule encountered. Currently, RMG does not support ion chemistry.\n {0}'.format(adjlist))
return self
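# Worked example of the multiplicity check above (illustrative): a methyl
# radical has n_rad = 1 unpaired electron, so the test passes for
# multiplicity = 2 (n_rad + 1, a doublet), while declaring multiplicity = 3
# for the same structure would raise the ValueError.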
def fromXYZ(self, atomicNums, coordinates):
"""
Create an RMG molecule from a list of coordinates and a corresponding
list of atomic numbers. These are typically obtained from cclib, and the
molecule is passed to `connectTheDots`, so it will only contain single bonds.
"""
_rdkit_periodic_table = elements.GetPeriodicTable()
for i, atNum in enumerate(atomicNums):
atom = Atom(_rdkit_periodic_table.GetElementSymbol(int(atNum)))
atom.coords = coordinates[i]
self.addAtom(atom)
return self.connectTheDots()
def toSingleBonds(self):
"""
Returns a copy of the current molecule, consisting of only single bonds.
This is useful for isomorphism comparison against something that was made
via fromXYZ, which does not attempt to perceive bond orders.
"""
cython.declare(atom1=Atom, atom2=Atom, bond=Bond, newMol=Molecule, atoms=list, mapping=dict)
newMol = Molecule()
atoms = self.atoms
mapping = {}
for atom1 in atoms:
atom2 = newMol.addAtom(Atom(atom1.element))
mapping[atom1] = atom2
for atom1 in atoms:
for atom2 in atom1.bonds:
bond = Bond(mapping[atom1], mapping[atom2], 'S')
newMol.addBond(bond)
newMol.updateAtomTypes()
return newMol
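# Illustrative workflow sketch (assumed, not from the original source):
#
#   candidate = Molecule().fromXYZ(atomicNums, coords)  # single bonds only
#   target = known_molecule.toSingleBonds()             # drop bond orders
#   candidate.isIsomorphic(target)                      # compare connectivity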
def toInChI(self):
"""
Convert a molecular structure to an InChI string. Uses either
`RDKit <http://rdkit.org/>`_ (perceiving aromaticity) or
`OpenBabel <http://openbabel.org/>`_ to perform the conversion.
"""
return generator.toInChI(self)
def toAugmentedInChI(self):
"""
Adds an extra layer to the InChI denoting the multiplicity
of the molecule. The layer is separated by a forward slash character.
"""
return generator.toAugmentedInChI(self)
def toInChIKey(self):
"""
Convert a molecular structure to an InChI Key string. Uses either
`OpenBabel <http://openbabel.org/>`_ or
`RDKit <http://rdkit.org/>`_ to perform the conversion.
Removes the check-sum dash (-) and character so that only
the 14 + 9 characters remain.
"""
return generator.toInChIKey(self)
def toAugmentedInChIKey(self):
"""
Adds an extra layer to the InChIKey denoting the multiplicity
of the molecule.
The multiplicity string is simply appended, without a
separator character such as a forward slash.
"""
return generator.toAugmentedInChIKey(self)
def toSMARTS(self):
"""
Convert a molecular structure to a SMARTS string. Uses
`RDKit <http://rdkit.org/>`_ to perform the conversion.
Perceives aromaticity and removes hydrogen atoms.
"""
return generator.toSMARTS(self)
def toSMILES(self):
"""
Convert a molecular structure to a SMILES string.
If a nitrogen atom is present it uses
`OpenBabel <http://openbabel.org/>`_ to perform the conversion,
and the SMILES may or may not be canonical.
Otherwise, it uses `RDKit <http://rdkit.org/>`_ to perform the
conversion, so the result will be canonical SMILES.
While converting to an RDMolecule it will perceive aromaticity
and remove hydrogen atoms.
"""
return generator.toSMILES(self)
def toRDKitMol(self, *args, **kwargs):
"""
Convert a molecular structure to a RDKit rdmol object.
"""
return generator.toRDKitMol(self, *args, **kwargs)
def toAdjacencyList(self, label='', removeH=False, removeLonePairs=False, oldStyle=False):
"""
Convert the molecular structure to a string adjacency list.
"""
from .adjlist import toAdjacencyList
result = toAdjacencyList(self.vertices, self.multiplicity, label=label, group=False, removeH=removeH, removeLonePairs=removeLonePairs, oldStyle=oldStyle)
return result
def isLinear(self):
"""
Return :data:`True` if the structure is linear and :data:`False`
otherwise.
"""
atomCount = len(self.vertices)
# Monatomic molecules are definitely nonlinear
if atomCount == 1:
return False
# Diatomic molecules are definitely linear
elif atomCount == 2:
return True
# Cyclic molecules are definitely nonlinear
elif self.isCyclic():
return False
# True if all bonds are double bonds (e.g. O=C=O)
allDoubleBonds = True
for atom1 in self.vertices:
for bond in atom1.edges.values():
if not bond.isDouble(): allDoubleBonds = False
if allDoubleBonds: return True
# True if alternating single-triple bonds (e.g. H-C#C-H)
# This test requires explicit hydrogen atoms
for atom in self.vertices:
bonds = atom.edges.values()
if len(bonds)==1:
continue # ok, next atom
if len(bonds)>2:
break # fail!
if bonds[0].isSingle() and bonds[1].isTriple():
continue # ok, next atom
if bonds[1].isSingle() and bonds[0].isTriple():
continue # ok, next atom
break # fail if we haven't continued
else:
# didn't fail
return True
# not returned yet? must be nonlinear
return False
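# Worked examples of the rules above (illustrative):
#   CO2 (O=C=O): acyclic, all bonds double                -> linear
#   acetylene (H-C#C-H): alternating single/triple bonds  -> linear
#   water (H-O-H): oxygen has two single bonds            -> loop breaks, nonlinear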
def isAromatic(self):
"""
Returns ``True`` if the molecule is aromatic, or ``False`` if not.
Iterates over the SSSRs and searches for rings that consist solely of Cb
atoms. Assumes that aromatic rings always consist of 6 atoms.
In cases such as naphthalene, where a 6 + 4 aromatic system exists,
there will be at least one 6-membered aromatic ring, so this algorithm
will not fail for fused aromatic rings.
"""
cython.declare(SSSR=list, vertices=list, polycyclicVertices=list)
SSSR = self.getSmallestSetOfSmallestRings()
if SSSR:
for cycle in SSSR:
if len(cycle) == 6:
for atom in cycle:
#print atom.atomType.label
if atom.atomType.label == 'Cb' or atom.atomType.label == 'Cbf':
continue
# Go on to the next cycle if a non-Cb atom type was discovered in this cycle
break
else:
# Molecule is aromatic when all 6 atoms are type 'Cb'
return True
return False
def countInternalRotors(self):
"""
Determine the number of internal rotors in the structure. Any single
bond that is not in a cycle and is between two atoms that also have other
bonds is considered to be an internal rotor.
"""
count = 0
for atom1 in self.vertices:
for atom2, bond in atom1.edges.items():
if self.vertices.index(atom1) < self.vertices.index(atom2) and bond.isSingle() and not self.isBondInCycle(bond):
if len(atom1.edges) > 1 and len(atom2.edges) > 1:
count += 1
return count
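# Illustrative example: in ethane (CH3-CH3) only the central C-C single bond
# counts as a rotor, since every C-H bond has a terminal hydrogen with a
# single edge; countInternalRotors() therefore returns 1.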
def calculateCp0(self):
"""
Return the value of the heat capacity at zero temperature in J/mol*K.
"""
if len(self.atoms) == 1:
return 2.5 * constants.R
else:
return (3.5 if self.isLinear() else 4.0) * constants.R
def calculateCpInf(self):
"""
Return the value of the heat capacity at infinite temperature in J/mol*K.
"""
cython.declare(Natoms=cython.int, Nvib=cython.int, Nrotors=cython.int)
if len(self.vertices) == 1:
return self.calculateCp0()
else:
Natoms = len(self.vertices)
Nvib = 3 * Natoms - (5 if self.isLinear() else 6)
Nrotors = self.countInternalRotors()
Nvib -= Nrotors
return self.calculateCp0() + (Nvib + 0.5 * Nrotors) * constants.R
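# Worked example (illustrative): for ethane, Natoms = 8 and the molecule is
# nonlinear, so Nvib = 3*8 - 6 = 18; with Nrotors = 1 this becomes Nvib = 17,
# giving CpInf = 4.0*R + (17 + 0.5)*R = 21.5*R.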
def getSymmetryNumber(self):
"""
Returns the symmetry number of Molecule.
First checks whether the value is stored as an attribute of Molecule.
If not, it calls the calculateSymmetryNumber method.
"""
if self.symmetryNumber == -1:
self.calculateSymmetryNumber()
return self.symmetryNumber
def calculateSymmetryNumber(self):
"""
Return the symmetry number for the structure. The symmetry number
includes both external and internal modes.
"""
from rmgpy.molecule.symmetry import calculateSymmetryNumber
self.symmetryNumber = calculateSymmetryNumber(self)
return self.symmetryNumber
def isRadical(self):
"""
Return ``True`` if the molecule contains at least one radical electron,
or ``False`` otherwise.
"""
cython.declare(atom=Atom)
for atom in self.vertices:
if atom.radicalElectrons > 0:
return True
return False
def generateResonanceIsomers(self):
return resonance.generateResonanceIsomers(self)
def getURL(self):
"""
Get a URL to the molecule's info page on the RMG website.
"""
# eg. http://dev.rmg.mit.edu/database/kinetics/reaction/reactant1=1%20C%200%20%7B2,S%7D;2%20O%200%20%7B1,S%7D;__reactant2=1%20C%202T;__product1=1%20C%201;__product2=1%20C%200%20%7B2,S%7D;2%20O%201%20%7B1,S%7D;
base_url = "http://rmg.mit.edu/database/molecule/"
adjlist = self.toAdjacencyList(removeH=False)
url = base_url + urllib.quote(adjlist)
return url.strip('_')
def getRadicalAtoms(self):
"""
Return the atoms in the molecule that have unpaired electrons.
"""
radicalAtomsList = []
for atom in self.vertices:
if atom.radicalElectrons > 0:
radicalAtomsList.append(atom)
return radicalAtomsList
def updateLonePairs(self):
"""
Iterate through the atoms in the structure and calculate the
number of lone electron pairs, assuming a neutral molecule.
"""
cython.declare(atom1=Atom, atom2=Atom, bond12=Bond, order=float)
for atom1 in self.vertices:
order = 0
if not atom1.isHydrogen():
for atom2, bond12 in atom1.edges.items():
if bond12.isSingle():
order = order + 1
if bond12.isDouble():
order = order + 2
if bond12.isTriple():
order = order + 3
if bond12.isBenzene():
order = order + 1.5
atom1.lonePairs = 4 - atom1.radicalElectrons - int(order)
else:
atom1.lonePairs = 0
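# Worked example (illustrative): for the oxygen atom in water, order = 2
# (two single bonds) and radicalElectrons = 0, so lonePairs = 4 - 0 - 2 = 2;
# each hydrogen atom is assigned lonePairs = 0.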
def getNetCharge(self):
"""
Iterate through the atoms in the structure and calculate the net charge
on the overall molecule.
"""
charge = 0
for atom in self.vertices:
charge += atom.charge
return charge
def saturate(self):
"""
Saturate the molecule by replacing all radicals with bonds to hydrogen atoms. Modifies the molecule object in place.
"""
cython.declare(added=dict, atom=Atom, i=int, H=Atom, bond=Bond)
added = {}
for atom in self.atoms:
for i in range(atom.radicalElectrons):
H = Atom('H', radicalElectrons=0, lonePairs=0, charge=0)
bond = Bond(atom, H, 'S')
self.addAtom(H)
self.addBond(bond)
if atom not in added:
added[atom] = []
added[atom].append([H, bond])
atom.decrementRadical()
# Update the atom types of the saturated structure (not sure why
# this is necessary, because saturating with H shouldn't be
# changing atom types, but it doesn't hurt anything and is not
# very expensive, so will do it anyway)
self.sortVertices()
self.updateAtomTypes()
self.updateLonePairs()
self.multiplicity = 1
return added
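# Illustrative example (not from the original source): saturating a methyl
# radical (CH3, one radical electron) adds one H atom and one C-H single
# bond, yielding methane; the returned dict maps the former radical carbon
# to [[H, bond]], presumably so the change can be undone later.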
|
chatelak/RMG-Py
|
rmgpy/molecule/molecule.py
|
Python
|
mit
| 60,426
|
[
"RDKit",
"cclib"
] |
66ac3ec11c0a4da3a7cba1f6e79c56054acb4bd56644a768c377f9e9d89fc16e
|
#!/usr/bin/env python3
#
# Tests the twisted gaussian logpdf toy distribution.
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import pints
import pints.toy
import unittest
import numpy as np
class TestTwistedGaussianLogPDF(unittest.TestCase):
"""
Tests the twisted gaussian logpdf toy distribution.
"""
def test_twisted_gaussian_logpdf(self):
# Test TwistedGaussianLogPDF basics.
# Test basics
f = pints.toy.TwistedGaussianLogPDF()
self.assertEqual(f.n_parameters(), 10)
self.assertTrue(np.isscalar(f(np.zeros(10))))
# Test errors
self.assertRaises(ValueError, pints.toy.TwistedGaussianLogPDF, 1)
self.assertRaises(ValueError, pints.toy.TwistedGaussianLogPDF, b=-1)
def test_sampling_and_kl_divergence(self):
# Test TwistedGaussianLogPDF.kl_divergence() and .sample().
# Ensure consistent output
np.random.seed(1)
# Create banana LogPDFs
d = 6
log_pdf1 = pints.toy.TwistedGaussianLogPDF(d, 0.01, 90)
log_pdf2 = pints.toy.TwistedGaussianLogPDF(d, 0.02, 80)
log_pdf3 = pints.toy.TwistedGaussianLogPDF(d, 0.04, 100)
# Sample from each
n = 10000
samples1 = log_pdf1.sample(n)
samples2 = log_pdf2.sample(n)
samples3 = log_pdf3.sample(n)
# Compare calculated divergences
# This also tests the "untwist" method.
s11 = log_pdf1.kl_divergence(samples1)
s12 = log_pdf1.kl_divergence(samples2)
s13 = log_pdf1.kl_divergence(samples3)
self.assertLess(s11, s12)
self.assertLess(s11, s13)
self.assertAlmostEqual(s11, 0.0012248323505286152)
s21 = log_pdf2.kl_divergence(samples1)
s22 = log_pdf2.kl_divergence(samples2)
s23 = log_pdf2.kl_divergence(samples3)
self.assertLess(s22, s21)
self.assertLess(s22, s23)
s31 = log_pdf3.kl_divergence(samples1)
s32 = log_pdf3.kl_divergence(samples2)
s33 = log_pdf3.kl_divergence(samples3)
self.assertLess(s33, s32)
self.assertLess(s33, s31)
self.assertEqual(log_pdf3.kl_divergence(samples1),
log_pdf3.distance(samples1))
# Test sample() errors
self.assertRaises(ValueError, log_pdf1.sample, -1)
# Test kl_divergence() errors
self.assertEqual(samples1.shape, (n, d))
x = np.ones((n, d + 1))
self.assertRaises(ValueError, log_pdf1.kl_divergence, x)
x = np.ones((n, d, 2))
self.assertRaises(ValueError, log_pdf1.kl_divergence, x)
# Test suggested bounds
f = pints.toy.TwistedGaussianLogPDF()
bounds = f.suggested_bounds()
bounds1 = [[-50, 50], [-100, 100]]
bounds1 = np.transpose(bounds1).tolist()
self.assertTrue(np.array_equal(bounds, bounds1))
def test_values_sensitivity(self):
# Tests values of log pdf and sensitivities
log_pdf = pints.toy.TwistedGaussianLogPDF(dimension=2)
self.assertEqual(log_pdf([-20, -30]), -4.1604621594033908)
x = [-1, 2]
l, dl = log_pdf.evaluateS1(x)
self.assertEqual(l, log_pdf(x))
self.assertAlmostEqual(l, -35.3455121594)
self.assertEqual(dl[0], -1.5799)
self.assertEqual(dl[1], 7.9)
# higher dimensions
log_pdf = pints.toy.TwistedGaussianLogPDF(dimension=4, b=0.3, V=200)
x = [-1, 2, -3, 12]
l, dl = log_pdf.evaluateS1(x)
self.assertAlmostEqual(l, -1747.4699253160925)
self.assertEqual(l, log_pdf(x))
self.assertAlmostEqual(dl[0], -34.619949999999996)
self.assertAlmostEqual(dl[1], 57.699999999999996)
self.assertEqual(dl[2], 3)
self.assertEqual(dl[3], -12)
if __name__ == '__main__':
unittest.main()
|
martinjrobins/hobo
|
pints/tests/test_toy_twisted_gaussian_logpdf.py
|
Python
|
bsd-3-clause
| 3,965
|
[
"Gaussian"
] |
10b7563040489b7f63073b2c718d7e6d490d6db4098aa013b13e3e2d1a81360b
|
import ast
import importlib
import importlib.machinery
import decimal
import sys
def _call_with_frames_removed(f, *args, **kwargs):
return f(*args, **kwargs)
class FloatLiteral(float):
def __new__(cls, *args):
obj = super().__new__(cls, *args)
if args and len(args) == 1 and isinstance(args[0], str):
obj._str = args[0]
return obj
# optionally use _str in repr/str
class Decimal(decimal.Decimal):
def __new__(cls, value="0", *args, **kwargs):
try:
value = value._str
except AttributeError:
pass
return super().__new__(cls, value, *args, **kwargs)
decimal.Decimal = Decimal
class FloatNodeWrapper(ast.NodeTransformer):
def visit_Num(self, node):
if isinstance(node.n, float):
return ast.Call(func=ast.Name(id='FloatLiteral', ctx=ast.Load()),
args=[ast.Str(s=str(node.n))], keywords=[])
return node
class FloatLiteralLoader(importlib.machinery.SourceFileLoader):
def source_to_code(self, data, path, *, _optimize=-1):
source = importlib._bootstrap.decode_source(data)
tree = _call_with_frames_removed(compile, source, path, 'exec',
dont_inherit=True,
optimize=_optimize,
flags=ast.PyCF_ONLY_AST)
tree = FloatNodeWrapper().visit(tree)
ast.fix_missing_locations(tree)
return _call_with_frames_removed(compile, tree, path, 'exec',
dont_inherit=True,
optimize=_optimize)
_real_pathfinder = sys.meta_path[-1]
class FloatLiteralFinder(type(_real_pathfinder)):
@classmethod
def find_module(cls, fullname, path=None):
spec = _real_pathfinder.find_spec(fullname, path)
if not spec: return spec
loader = spec.loader
loader.__class__ = FloatLiteralLoader
return loader
sys.meta_path[-1] = FloatLiteralFinder
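# Illustrative usage sketch (assumed; 'mymodule' is hypothetical): importing
# this module installs the finder, so float literals in subsequently imported
# modules are wrapped as FloatLiteral and the patched Decimal can recover the
# exact source text instead of the binary float value:
#
#   import floatliteral_ast   # installs FloatLiteralFinder
#   import mymodule           # hypothetical module containing:
#                             #   from decimal import Decimal
#                             #   x = Decimal(1.1)
#   # mymodule.x == Decimal('1.1'), not the long expansion of float 1.1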
|
abarnert/floatliteralhack
|
floatliteral_ast.py
|
Python
|
mit
| 2,069
|
[
"VisIt"
] |
0cddd2dccfe56299c3fe0d372c01ca8347276a2ab17834493f86626aceaa7cf3
|
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('get_invocation_report')
@click.argument("invocation_id", type=str)
@pass_context
@custom_exception
@json_output
def cli(ctx, invocation_id):
"""Get a Markdown report for an invocation.
Output:
The invocation report.
For example::
{'markdown': '\n# Workflow Execution Summary of Example workflow\n\n
## Workflow Inputs\n\n\n## Workflow Outputs\n\n\n
## Workflow\n```galaxy\n
workflow_display(workflow_id=f2db41e1fa331b3e)\n```\n',
'render_format': 'markdown',
'workflows': {'f2db41e1fa331b3e': {'name': 'Example workflow'}}}
"""
return ctx.gi.invocations.get_invocation_report(invocation_id)
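# Illustrative CLI usage (assumed invocation syntax, following parsec's
# group/command layout):
#   parsec invocations get_invocation_report <invocation_id>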
|
galaxy-iuc/parsec
|
parsec/commands/invocations/get_invocation_report.py
|
Python
|
apache-2.0
| 836
|
[
"Galaxy"
] |
d3f422db29c83eb6a11a910cc3fc7d930d23aa0d1a3fdbf150478450640822b3
|
import sys, os, math, time
import arcpy
from arcpy import env
from arcpy.sa import *
arcpy.CheckOutExtension("spatial")
#Metadata exists in one of two standard formats (finds the correct name for each field)
def acquireMetadata(metadata, band):
band = str(band)
metadatalist = []
if ("RADIANCE_MAXIMUM_BAND_" + band) in metadata.keys():
BANDFILE = "FILE_NAME_BAND_" + band
LMAX = "RADIANCE_MAXIMUM_BAND_" + band
LMIN = "RADIANCE_MINIMUM_BAND_" + band
QCALMAX = "QUANTIZE_CAL_MAX_BAND_" + band
QCALMIN = "QUANTIZE_CAL_MIN_BAND_" + band
DATE = "DATE_ACQUIRED"
metadatalist = [BANDFILE, LMAX, LMIN, QCALMAX, QCALMIN, DATE]
elif ("LMAX_BAND" + band) in metadata.keys():
BANDFILE = "BAND" + band + "_FILE_NAME"
LMAX = "LMAX_BAND" + band
LMIN = "LMIN_BAND" + band
QCALMAX = "QCALMAX_BAND" + band
QCALMIN = "QCALMIN_BAND" + band
DATE ="ACQUISITION_DATE"
metadatalist = [BANDFILE, LMAX, LMIN, QCALMAX, QCALMIN, DATE]
else:
arcpy.AddError('There was a problem reading the metadata for this file. Please make sure the _MTL.txt is in Level 1 data format')
return metadatalist
#Calculate the radiance from metadata on band.
def calcRadiance (LMAX, LMIN, QCALMAX, QCALMIN, QCAL, band):
LMAX = float(LMAX)
LMIN = float(LMIN)
QCALMAX = float(QCALMAX)
QCALMIN = float(QCALMIN)
gain = (LMAX - LMIN)/(QCALMAX-QCALMIN)
inraster = Raster(QCAL)
outname = 'RadianceB'+str(band)+'.tif'
arcpy.AddMessage('Band'+str(band))
arcpy.AddMessage('LMAX ='+str(LMAX))
arcpy.AddMessage('LMIN ='+str(LMIN))
arcpy.AddMessage('QCALMAX ='+str(QCALMAX))
arcpy.AddMessage('QCALMIN ='+str(QCALMIN))
arcpy.AddMessage('gain ='+str(gain))
outraster = (gain * (inraster-QCALMIN)) + LMIN
#outraster.save(outname)
return outraster
def calcReflectance(solarDist, ESUN, solarElevation, radiance, scaleFactor):
#Value for solar zenith is 90 degrees minus solar elevation (angle from horizon to the center of the sun)
# See Landsat7_Handbook 11.3.2 Radiance to Reflectance
solarZenith = ((90.0 - (float(solarElevation)))*math.pi)/180 #Converted from degrees to radians
solarDist = float(solarDist)
ESUN = float(ESUN)
outname = 'ReflectanceB'+str(band)+'.tif'
arcpy.AddMessage('Band'+str(band))
arcpy.AddMessage('solarDist ='+str(solarDist))
arcpy.AddMessage('solarDistSquared ='+str(math.pow(solarDist, 2)))
arcpy.AddMessage('ESUN ='+str(ESUN))
arcpy.AddMessage('solarZenith ='+str(solarZenith))
outraster = (math.pi * radiance * math.pow(solarDist, 2)) / (ESUN * math.cos(solarZenith)) * scaleFactor
return outraster
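# Worked example of the formula above (illustrative numbers): with radiance
# L = 50 W/(m^2 sr um), solarDist d = 1.0 AU, ESUN = 1554 and a solar
# elevation of 60 degrees (zenith 30 degrees, cos = 0.866):
#   reflectance = (pi * 50 * 1.0**2) / (1554 * 0.866) * 1.0 ~= 0.117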
#Calculate the solar distance based on julian day
def calcSolarDist (jday):
#Values taken from d.csv file which is a formatted version of the d.xls file
#associated with the Landsat7 handbook, representing the distance of the sun
#for each julian day (1-366).
#this line keeps the path relative to where this script is executing
filepath = os.path.join(os.path.dirname(sys.argv[0]), 'd.csv')
f = open(filepath, "r")
lines = f.readlines()[2:]
distances = []
for x in range(len(lines)):
distances.append(float(lines[x].strip().split(',')[1]))
f.close()
jday = int(jday)
dist = distances[jday - 1]
return dist
def calcJDay (date):
#Separate date aspects into a list (check for consistency in formatting of all
#Landsat7 metadata) YYYY-MM-DD
dt = date.rsplit("-")
#Cast each part of the date as an integer in the 9-int tuple passed to mktime
t = time.mktime((int(dt[0]), int(dt[1]), int(dt[2]), 0, 0, 0, 0, 0, 0))
#As part of the time package, index 7 of the time tuple (tm_yday) is
#calculated as the Julian Day from the other essential parts of the tuple
jday = time.gmtime(t)[7]
return jday
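# Example (illustrative): calcJDay('2011-11-14') returns 318, matching the
# day-of-year "318" embedded in the scene ID LT51320322011318IKR01 used below.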
def getESUN(bandNum, SIType):
SIType = SIType
ESUN = {}
#from NASA's Landsat7_Handbook Table 11.3
#ETM+ Solar Spectral Irradiances (generated using the combined Chance-Kurucz Solar Spectrum within MODTRAN 5)
if SIType == 'ETM+ ChKur':
ESUN = {'b1':1970,'b2':1842,'b3':1547,'b4':1044,'b5':225.7,'b7':82.06,'b8':1369}
#from NASA's Landsat7_Handbook Table 9.1
#from the LPS ACCA algorith to correct for cloud cover
if SIType == 'LPS ACAA Algorithm':
ESUN = {'b1':1969,'b2':1840,'b3':1551,'b4':1044,'b5':225.7,'b7':82.06,'b8':1368}
#from Revised Landsat-5 TM Radiometric Calibration Procedures and Postcalibration, Table-2
#Gyanesh Chander and Brian Markham. Nov 2003.
#Landsat 5 ChKur
if SIType == 'Landsat 5 ChKur':
ESUN = {'b1':1957,'b2':1826,'b3':1554,'b4':1036,'b5':215,'b7':80.67}
#from Revised Landsat-5 TM Radiometric Calibration Procedures and Postcalibration, Table-2
#Gyanesh Chander and Brian Markham. Nov 2003.
#Landsat 4 ChKur
if SIType == 'Landsat 4 ChKur':
ESUN = {'b1':1957,'b2':1825,'b3':1557,'b4':1033,'b5':214.9,'b7':80.72}
bandNum = str(bandNum)
return ESUN[bandNum]
def readMetadata(metadataFile):
f = metadataFile
#Create an empty dictionary with which to populate all the metadata fields.
metadata = {}
#Each item in the txt document is separated by a space and each key is
#equated with '='. This loop strips and separates, then fills the dictionary.
for line in f:
if not line.strip() == "END":
val = line.strip().split('=')
metadata [val[0].strip()] = val[1].strip().strip('"')
else:
break
return metadata
#Takes the unicode parameter input from Arc and turns it into a nice python list
def cleanList(bandList):
bandList = list(bandList)
for x in range(len(bandList)):
bandList[x] = str(bandList[x])
while ';' in bandList:
bandList.remove(';')
return bandList
#////////////////////////////////////MAIN LOOP///////////////////////////////////////
# TM5
work_dic = 'F:\\Data\\HRB\\RS\\Landsat\\Landsat5\\TM\\132_32\\LT51320322011318IKR01\\'
metadataPath = work_dic + 'LT51320322011318IKR01_MTL.txt'
out_dic = 'F:\\Data\\HRB\\RS\\Landsat\\Landsat5\\TM\\132_32\\LT51320322011318IKR01\\'
SIType = 'Landsat 5 ChKur'
keepRad = 'false'
keepRef = 'true'
scaleFactor = 1.0
min_ndvi = 0.15
env.workspace = work_dic
arcpy.env.overwriteOutput = True
ref_file_exit = 'false'
arcpy.AddMessage(scaleFactor)
if SIType =='Landsat 4 ChKur' :
bandList = cleanList(['5','7'])
else:
bandList = cleanList(['3','4'])
metadataFile = open(metadataPath)
metadata = readMetadata(metadataFile)
metadataFile.close()
successful = []
failed = []
if SIType =='Landsat 4 ChKur' :
# from http://landsat.gsfc.nasa.gov/the-multispectral-scanner-system/
# band 5 and 7 of MSS are equivalent to 3 and 4 of TM
ref_file_exit = os.path.exists(work_dic + "ReflectanceB5.tif") and os.path.exists(work_dic + "ReflectanceB7.tif")  # both reflectance files must exist
else:
ref_file_exit = os.path.exists(work_dic + "ReflectanceB3.tif") and os.path.exists(work_dic + "ReflectanceB4.tif")  # both reflectance files must exist
if ref_file_exit:
metlist = acquireMetadata(metadata, '5')
print 'Reflectance files existed'
else:
print 'Calculating reflectances'
for band in bandList:
bandstr = str(band)
print bandstr
metlist = acquireMetadata(metadata, band)
BANDFILE = metlist[0]
LMAX = metlist[1]
LMIN = metlist[2]
QCALMAX = metlist[3]
QCALMIN = metlist[4]
DATE = metlist[5]
ESUNVAL = "b" + band
#try:
radianceRaster = calcRadiance(metadata[LMAX], metadata[LMIN], metadata[QCALMAX], metadata[QCALMIN], metadata[BANDFILE], band)
reflectanceRaster = calcReflectance(calcSolarDist(calcJDay(metadata[DATE])), getESUN(ESUNVAL, SIType), metadata['SUN_ELEVATION'], radianceRaster, scaleFactor)
outname = 'ReflectanceB'+ bandstr
reflectanceRaster.save(outname)
successful.append(BANDFILE)
DATE = metlist[5]
day = metadata[DATE]
if SIType =='Landsat 4 ChKur' :
nir = Raster('ReflectanceB7.tif')
red = Raster('ReflectanceB5.tif')
else:
nir = Raster('ReflectanceB4.tif')
red = Raster('ReflectanceB3.tif')
ndvi_out_ras = out_dic + "ndvi_" + day + ".tif"
print 'Calculating NDVI'
raw_ndvi = (nir-red)/(nir+red)
ndvi = Con((raw_ndvi < min_ndvi) | (raw_ndvi > 1.0), 0, raw_ndvi)
arcpy.gp.SetNull_sa(ndvi, ndvi, ndvi_out_ras, "value = 0")
print 'NDVI file saved'
if keepRef != 'true':
arcpy.Delete_management(nir)
arcpy.Delete_management(red)
print 'Reflectance files deleted'
|
ColorTyWorld/GISRS
|
src/python/landsat/ndvi.py
|
Python
|
gpl-3.0
| 8,766
|
[
"Brian"
] |
5614222ed2d06e6a229aa0d8f8b097afd20286f3f9a594fcb0b1607791f51949
|
"""Test the DataRecoveryAgent"""
import unittest
from collections import defaultdict
from mock import MagicMock as Mock, patch, ANY
from parameterized import parameterized, param
import DIRAC
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.TransformationSystem.Agent.DataRecoveryAgent import DataRecoveryAgent
from DIRAC.TransformationSystem.Utilities.JobInfo import TaskInfoException
from DIRAC.tests.Utilities.utils import MatchStringWith
MODULE_NAME = "DIRAC.TransformationSystem.Agent.DataRecoveryAgent"
class TestDRA(unittest.TestCase):
"""Test the DataRecoveryAgent"""
dra = None
@patch("DIRAC.Core.Base.AgentModule.PathFinder", new=Mock())
@patch("DIRAC.ConfigurationSystem.Client.PathFinder.getSystemInstance", new=Mock())
@patch("%s.ReqClient" % MODULE_NAME, new=Mock())
def setUp(self):
self.dra = DataRecoveryAgent(agentName="ILCTransformationSystem/DataRecoveryAgent", loadName="TestDRA")
self.dra.transNoInput = ["MCGeneration"]
self.dra.transWithInput = ["MCSimulation", "MCReconstruction"]
self.dra.transformationTypes = ["MCGeneration", "MCSimulation", "MCReconstruction"]
self.dra.reqClient = Mock(name="reqMock", spec=DIRAC.RequestManagementSystem.Client.ReqClient.ReqClient)
self.dra.tClient = Mock(
name="transMock", spec=DIRAC.TransformationSystem.Client.TransformationClient.TransformationClient
)
self.dra.fcClient = Mock(name="fcMock", spec=DIRAC.Resources.Catalog.FileCatalogClient.FileCatalogClient)
self.dra.jobMon = Mock(
name="jobMonMock", spec=DIRAC.WorkloadManagementSystem.Client.JobMonitoringClient.JobMonitoringClient
)
self.dra.printEveryNJobs = 10
self.dra.log = Mock(name="LogMock")
self.dra.addressTo = "myself"
self.dra.addressFrom = "me"
def tearDown(self):
pass
def getTestMock(self, nameID=0, jobID=1234567):
"""create a JobInfo object with mocks"""
from DIRAC.TransformationSystem.Utilities.JobInfo import JobInfo
testJob = Mock(name="jobInfoMock_%s" % nameID, spec=JobInfo)
testJob.jobID = jobID
testJob.tType = "testType"
testJob.otherTasks = []
testJob.errorCounts = []
testJob.status = "Done"
testJob.transFileStatus = ["Assigned", "Assigned"]
testJob.inputFileStatus = ["Exists", "Exists"]
testJob.outputFiles = ["/my/stupid/file.lfn", "/my/stupid/file2.lfn"]
testJob.outputFileStatus = ["Exists", "Exists"]
testJob.inputFiles = ["inputfile.lfn", "inputfile2.lfn"]
testJob.pendingRequest = False
testJob.getTaskInfo = Mock()
return testJob
@patch("DIRAC.Core.Base.AgentModule.PathFinder", new=Mock())
@patch("DIRAC.ConfigurationSystem.Client.PathFinder.getSystemInstance", new=Mock())
@patch("%s.ReqClient" % MODULE_NAME, new=Mock())
def test_init(self):
"""test for DataRecoveryAgent initialisation...................................................."""
res = DataRecoveryAgent(agentName="ILCTransformationSystem/DataRecoveryAgent", loadName="TestDRA")
self.assertIsInstance(res, DataRecoveryAgent)
def test_beginExecution(self):
"""test for DataRecoveryAgent beginExecution...................................................."""
theOps = Mock(name="OpsInstance")
theOps.getValue.side_effect = [["MCGeneration"], ["MCReconstruction", "Merge"]]
with patch("DIRAC.TransformationSystem.Agent.DataRecoveryAgent.Operations", return_value=theOps):
res = self.dra.beginExecution()
assert isinstance(self.dra.transformationTypes, list)
assert set(["MCGeneration", "MCReconstruction", "Merge"]) == set(self.dra.transformationTypes)
assert set(["MCGeneration"]) == set(self.dra.transNoInput)
assert set(["MCReconstruction", "Merge"]) == set(self.dra.transWithInput)
self.assertFalse(self.dra.enabled)
self.assertTrue(res["OK"])
def test_getEligibleTransformations_success(self):
"""test for DataRecoveryAgent getEligibleTransformations success................................"""
transInfoDict = dict(
TransformationID=1234,
TransformationName="TestProd12",
Type="TestProd",
AuthorDN="/some/cert/owner",
AuthorGroup="Test_Prod",
)
self.dra.tClient.getTransformations = Mock(return_value=S_OK([transInfoDict]))
res = self.dra.getEligibleTransformations(status="Active", typeList=["TestProds"])
self.assertTrue(res["OK"])
self.assertIsInstance(res["Value"], dict)
vals = res["Value"]
self.assertIn("1234", vals)
self.assertIsInstance(vals["1234"], dict)
self.assertEqual(transInfoDict, vals["1234"])
def test_getEligibleTransformations_failed(self):
"""test for DataRecoveryAgent getEligibleTransformations failure................................"""
self.dra.tClient.getTransformations = Mock(return_value=S_ERROR("No can Do"))
res = self.dra.getEligibleTransformations(status="Active", typeList=["TestProds"])
self.assertFalse(res["OK"])
self.assertEqual("No can Do", res["Message"])
def test_treatTransformation1(self):
"""test for DataRecoveryAgent treatTransformation success1.........................................."""
getJobMock = Mock(name="getJobMOck")
getJobMock.getJobs.return_value = (Mock(name="jobsMOck"), 50, 50)
tinfoMock = Mock(name="infoMock", return_value=getJobMock)
self.dra.checkAllJobs = Mock()
# catch the printout to check path taken
transInfoDict = dict(
TransformationID=1234,
TransformationName="TestProd12",
Type="TestProd",
AuthorDN="/some/cert/owner",
AuthorGroup="Test_Prod",
)
with patch("%s.TransformationInfo" % MODULE_NAME, new=tinfoMock):
self.dra.treatTransformation(1234, transInfoDict) # returns None
# check we start with the summary right away
for _name, args, _kwargs in self.dra.log.notice.mock_calls:
self.assertNotIn("Getting Tasks:", str(args))
def test_treatTransformation2(self):
"""test for DataRecoveryAgent treatTransformation success2.........................................."""
getJobMock = Mock(name="getJobMOck")
getJobMock.getJobs.return_value = (Mock(name="jobsMock"), 50, 50)
tinfoMock = Mock(name="infoMock", return_value=getJobMock)
self.dra.checkAllJobs = Mock()
# catch the printout to check path taken
transInfoDict = dict(
TransformationID=1234,
TransformationName="TestProd12",
Type="MCSimulation",
AuthorDN="/some/cert/owner",
AuthorGroup="Test_Prod",
)
with patch("%s.TransformationInfo" % MODULE_NAME, new=tinfoMock):
self.dra.treatTransformation(1234, transInfoDict) # returns None
self.dra.log.notice.assert_any_call(MatchStringWith("Getting tasks..."))
def test_treatTransformation3(self):
"""test for DataRecoveryAgent treatTransformation skip.............................................."""
getJobMock = Mock(name="getJobMOck")
getJobMock.getJobs.return_value = (Mock(name="jobsMock"), 50, 50)
self.dra.checkAllJobs = Mock()
self.dra.jobCache[1234] = (50, 50)
# catch the printout to check path taken
transInfoDict = dict(
TransformationID=1234,
TransformationName="TestProd12",
Type="TestProd",
AuthorDN="/some/cert/owner",
AuthorGroup="Test_Prod",
)
with patch("%s.TransformationInfo" % MODULE_NAME, autospec=True, return_value=getJobMock):
self.dra.treatTransformation(transID=1234, transInfoDict=transInfoDict) # returns None
self.dra.log.notice.assert_called_with(MatchStringWith("Skipping transformation 1234"))
def test_checkJob(self):
"""test for DataRecoveryAgent checkJob No inputFiles............................................."""
from DIRAC.TransformationSystem.Utilities.TransformationInfo import TransformationInfo
tInfoMock = Mock(name="tInfoMock", spec=TransformationInfo)
from DIRAC.TransformationSystem.Utilities.JobInfo import JobInfo
# Test First option for MCGeneration
tInfoMock.reset_mock()
testJob = JobInfo(jobID=1234567, status="Failed", tID=123, tType="MCGeneration")
testJob.outputFiles = ["/my/stupid/file.lfn"]
testJob.outputFileStatus = ["Exists"]
self.dra.checkJob(testJob, tInfoMock)
self.assertIn("setJobDone", tInfoMock.method_calls[0])
self.assertEqual(self.dra.todo["NoInputFiles"][0]["Counter"], 1)
self.assertEqual(self.dra.todo["NoInputFiles"][1]["Counter"], 0)
# Test Second option for MCGeneration
tInfoMock.reset_mock()
testJob.status = "Done"
testJob.outputFileStatus = ["Missing"]
self.dra.checkJob(testJob, tInfoMock)
self.assertIn("setJobFailed", tInfoMock.method_calls[0])
self.assertEqual(self.dra.todo["NoInputFiles"][0]["Counter"], 1)
self.assertEqual(self.dra.todo["NoInputFiles"][1]["Counter"], 1)
# Test third option for MCGeneration
tInfoMock.reset_mock()
testJob.status = "Done"
testJob.outputFileStatus = ["Exists"]
self.dra.checkJob(testJob, tInfoMock)
self.assertEqual(tInfoMock.method_calls, [])
self.assertEqual(self.dra.todo["NoInputFiles"][0]["Counter"], 1)
self.assertEqual(self.dra.todo["NoInputFiles"][1]["Counter"], 1)
@parameterized.expand(
[
param(
0,
["setJobDone", "setInputProcessed"],
jStat="Failed",
ifStat=["Exists"],
ofStat=["Exists"],
tFiStat=["Assigned"],
others=True,
),
param(
1,
["setJobFailed"],
ifStat=["Exists"],
ofStat=["Missing"],
others=True,
ifProcessed=["/my/inputfile.lfn"],
),
param(
2, ["setJobFailed", "cleanOutputs"], ifStat=["Exists"], others=True, ifProcessed=["/my/inputfile.lfn"]
),
param(3, ["cleanOutputs", "setJobFailed", "setInputDeleted"], ifStat=["Missing"]),
param(4, ["cleanOutputs", "setJobFailed"], tFiStat=["Deleted"], ifStat=["Missing"]),
param(5, ["setJobDone", "setInputProcessed"], jStat="Failed", ifStat=["Exists"], tFiStat=["Assigned"]),
param(6, ["setJobDone"], jStat="Failed", ifStat=["Exists"], tFiStat=["Processed"]),
param(7, ["setInputProcessed"], jStat="Done", ifStat=["Exists"], tFiStat=["Assigned"]),
param(
8,
["setInputMaxReset"],
jStat="Failed",
ifStat=["Exists"],
ofStat=["Missing"],
tFiStat=["Assigned"],
errorCount=[14],
),
param(
9,
["setInputUnused"],
jStat="Failed",
ifStat=["Exists"],
ofStat=["Missing"],
tFiStat=["Assigned"],
errorCount=[2],
),
param(
10,
["setInputUnused", "setJobFailed"],
jStat="Done",
ifStat=["Exists"],
ofStat=["Missing"],
tFiStat=["Assigned"],
),
param(
11,
["cleanOutputs", "setInputUnused"],
jStat="Failed",
ifStat=["Exists"],
ofStat=["Missing", "Exists"],
tFiStat=["Assigned"],
),
param(
12,
["cleanOutputs", "setInputUnused", "setJobFailed"],
jStat="Done",
ifStat=["Exists"],
ofStat=["Missing", "Exists"],
tFiStat=["Assigned"],
),
param(
13, ["setJobFailed"], jStat="Done", ifStat=["Exists"], ofStat=["Missing", "Missing"], tFiStat=["Unused"]
),
param(14, [], jStat="Strange", ifStat=["Exists"], ofStat=["Exists"], tFiStat=["Processed"]),
param(
-1,
[],
jStat="Failed",
ifStat=["Exists"],
ofStat=["Missing", "Missing"],
outFiles=["/my/stupid/file.lfn", "/my/stupid/file2.lfn"],
tFiStat=["Processed"],
others=True,
),
]
)
def test_checkJob_others_(
self,
counter,
infoCalls,
jID=1234567,
jStat="Done",
others=False,
inFiles=["/my/inputfile.lfn"],
outFiles=["/my/stupid/file.lfn"],
ifStat=[],
ofStat=["Exists"],
ifProcessed=[],
tFiStat=["Processed"],
errorCount=[],
):
from DIRAC.TransformationSystem.Utilities.TransformationInfo import TransformationInfo
from DIRAC.TransformationSystem.Utilities.JobInfo import JobInfo
tInfoMock = Mock(name="tInfoMock", spec=TransformationInfo)
testJob = JobInfo(jobID=jID, status=jStat, tID=123, tType="MCSimulation")
testJob.outputFiles = outFiles
testJob.outputFileStatus = ofStat
testJob.otherTasks = others
testJob.inputFiles = inFiles
testJob.inputFileStatus = ifStat
testJob.transFileStatus = tFiStat
testJob.errorCounts = errorCount
self.dra.inputFilesProcessed = set(ifProcessed)
self.dra.checkJob(testJob, tInfoMock)
gLogger.notice("Testing counter", counter)
gLogger.notice("Expecting calls", infoCalls)
gLogger.notice("Called", tInfoMock.method_calls)
assert len(infoCalls) == len(tInfoMock.method_calls)
for index, infoCall in enumerate(infoCalls):
self.assertIn(infoCall, tInfoMock.method_calls[index])
for count in range(15):
gLogger.notice("Checking Counter:", count)
if count == counter:
self.assertEqual(self.dra.todo["InputFiles"][count]["Counter"], 1)
else:
self.assertEqual(self.dra.todo["InputFiles"][count]["Counter"], 0)
if 0 <= counter <= 2:
assert set(testJob.inputFiles).issubset(self.dra.inputFilesProcessed)
@parameterized.expand(
[
param(["cleanOutputs", "setJobFailed"]),
param([], jID=667, jStat="Failed", ofStat=["Missing"]),
param([], jID=668, jStat="Failed", ofStat=["Missing"], inFiles=["some"]),
]
)
def test_failHard(self, infoCalls, jID=666, jStat="Done", inFiles=None, ofStat=["Exists"]):
"""Test the job.failHard function."""
from DIRAC.TransformationSystem.Utilities.TransformationInfo import TransformationInfo
from DIRAC.TransformationSystem.Utilities.JobInfo import JobInfo
tInfoMock = Mock(name="tInfoMock", spec=TransformationInfo)
tInfoMock.reset_mock()
testJob = JobInfo(jobID=666, status=jStat, tID=123, tType="MCSimulation")
testJob.outputFiles = ["/my/stupid/file.lfn"]
testJob.outputFileStatus = ofStat
testJob.otherTasks = True
testJob.inputFiles = inFiles
testJob.inputFileExists = True
testJob.fileStatus = "Processed"
self.dra.inputFilesProcessed = set()
self.dra._DataRecoveryAgent__failJobHard(testJob, tInfoMock) # pylint: disable=protected-access, no-member
gLogger.notice("Expecting calls", infoCalls)
gLogger.notice("Called", tInfoMock.method_calls)
assert len(infoCalls) == len(tInfoMock.method_calls)
for index, infoCall in enumerate(infoCalls):
self.assertIn(infoCall, tInfoMock.method_calls[index])
if jStat == "Done":
self.assertIn("Failing job %s" % jID, self.dra.notesToSend)
else:
self.assertNotIn("Failing job %s" % jID, self.dra.notesToSend)
def test_notOnlyKeepers(self):
"""test for __notOnlyKeepers function"""
funcToTest = self.dra._DataRecoveryAgent__notOnlyKeepers # pylint: disable=protected-access, no-member
self.assertTrue(funcToTest("MCGeneration"))
self.dra.todo["InputFiles"][0]["Counter"] = 3 # keepers
self.dra.todo["InputFiles"][3]["Counter"] = 0
self.assertFalse(funcToTest("MCSimulation"))
self.dra.todo["InputFiles"][0]["Counter"] = 3 # keepers
self.dra.todo["InputFiles"][3]["Counter"] = 3
self.assertTrue(funcToTest("MCSimulation"))
def test_checkAllJob(self):
"""test for DataRecoveryAgent checkAllJobs ....................................................."""
from DIRAC.TransformationSystem.Utilities.JobInfo import JobInfo
# test with additional task dicts
from DIRAC.TransformationSystem.Utilities.TransformationInfo import TransformationInfo
tInfoMock = Mock(name="tInfoMock", spec=TransformationInfo)
mockJobs = dict([(i, self.getTestMock()) for i in range(11)])
mockJobs[2].pendingRequest = True
mockJobs[3].getJobInformation = Mock(side_effect=(RuntimeError("ARGJob1"), None))
mockJobs[4].getTaskInfo = Mock(side_effect=(TaskInfoException("ARG1"), None))
taskDict = True
lfnTaskDict = True
self.dra.checkAllJobs(mockJobs, tInfoMock, taskDict, lfnTaskDict)
self.dra.log.error.assert_any_call(MatchStringWith("+++++ Exception"), "ARGJob1")
self.dra.log.error.assert_any_call(MatchStringWith("Skip Task, due to TaskInfoException: ARG1"))
self.dra.log.reset_mock()
# test inputFile None
mockJobs = dict([(i, self.getTestMock(nameID=i)) for i in range(5)])
mockJobs[1].inputFiles = []
mockJobs[1].getTaskInfo = Mock(side_effect=(TaskInfoException("NoInputFile"), None))
mockJobs[1].tType = "MCSimulation"
tInfoMock.reset_mock()
self.dra.checkAllJobs(mockJobs, tInfoMock, taskDict, lfnTaskDict=True)
self.dra.log.notice.assert_any_call(MatchStringWith("Failing job hard"))
def test_checkAllJob_2(self):
"""Test where failJobHard fails (via cleanOutputs)."""
from DIRAC.TransformationSystem.Utilities.TransformationInfo import TransformationInfo
tInfoMock = Mock(name="tInfoMock", spec=TransformationInfo)
mockJobs = dict([(i, self.getTestMock()) for i in range(5)])
mockJobs[2].pendingRequest = True
mockJobs[3].getTaskInfo = Mock(side_effect=(TaskInfoException("ARGJob3"), None))
mockJobs[3].inputFiles = []
mockJobs[3].tType = "MCReconstruction"
self.dra._DataRecoveryAgent__failJobHard = Mock(side_effect=(RuntimeError("ARGJob4"), None), name="FJH")
self.dra.checkAllJobs(mockJobs, tInfoMock, tasksDict=True, lfnTaskDict=True)
mockJobs[3].getTaskInfo.assert_called()
self.dra._DataRecoveryAgent__failJobHard.assert_called()
self.dra.log.error.assert_any_call(MatchStringWith("+++++ Exception"), "ARGJob4")
self.dra.log.reset_mock()
def test_execute(self):
"""test for DataRecoveryAgent execute .........................................................."""
self.dra.treatTransformation = Mock()
self.dra.transformationsToIgnore = [123, 456, 789]
self.dra.jobCache = defaultdict(lambda: (0, 0))
self.dra.jobCache[123] = (10, 10)
self.dra.jobCache[124] = (10, 10)
self.dra.jobCache[125] = (10, 10)
# Eligible fails
self.dra.log.reset_mock()
self.dra.getEligibleTransformations = Mock(return_value=S_ERROR("outcast"))
res = self.dra.execute()
self.assertFalse(res["OK"])
self.dra.log.error.assert_any_call(ANY, MatchStringWith("outcast"))
self.assertEqual("Failure to get transformations", res["Message"])
d123 = dict(
TransformationID=123,
TransformationName="TestProd123",
Type="MCGeneration",
AuthorDN="/some/cert/owner",
AuthorGroup="Test_Prod",
)
d124 = dict(
TransformationID=124,
TransformationName="TestProd124",
Type="MCGeneration",
AuthorDN="/some/cert/owner",
AuthorGroup="Test_Prod",
)
d125 = dict(
TransformationID=125,
TransformationName="TestProd125",
Type="MCGeneration",
AuthorDN="/some/cert/owner",
AuthorGroup="Test_Prod",
)
# Eligible succeeds
self.dra.log.reset_mock()
self.dra.getEligibleTransformations = Mock(return_value=S_OK({123: d123, 124: d124, 125: d125}))
res = self.dra.execute()
self.assertTrue(res["OK"])
self.dra.log.notice.assert_any_call(
MatchStringWith("Will ignore the following transformations: [123, 456, 789]")
)
self.dra.log.notice.assert_any_call(MatchStringWith("Ignoring Transformation: 123"))
self.dra.log.notice.assert_any_call(MatchStringWith("Running over Transformation: 124"))
# Notes To Send
self.dra.log.reset_mock()
self.dra.getEligibleTransformations = Mock(return_value=S_OK({123: d123, 124: d124, 125: d125}))
self.dra.notesToSend = "Da hast du deine Karte"
sendmailMock = Mock()
sendmailMock.sendMail.return_value = S_OK("Nice Card")
notificationMock = Mock(return_value=sendmailMock)
with patch("%s.NotificationClient" % MODULE_NAME, new=notificationMock):
res = self.dra.execute()
self.assertTrue(res["OK"])
self.dra.log.notice.assert_any_call(
MatchStringWith("Will ignore the following transformations: [123, 456, 789]")
)
self.dra.log.notice.assert_any_call(MatchStringWith("Ignoring Transformation: 123"))
self.dra.log.notice.assert_any_call(MatchStringWith("Running over Transformation: 124"))
self.assertNotIn(124, self.dra.jobCache) # was popped
self.assertIn(125, self.dra.jobCache) # was not popped
gLogger.notice("JobCache: %s" % self.dra.jobCache)
# sending notes fails
self.dra.log.reset_mock()
self.dra.notesToSend = "Da hast du deine Karte"
sendmailMock = Mock()
sendmailMock.sendMail.return_value = S_ERROR("No stamp")
notificationMock = Mock(return_value=sendmailMock)
with patch("%s.NotificationClient" % MODULE_NAME, new=notificationMock):
res = self.dra.execute()
self.assertTrue(res["OK"])
self.assertNotIn(124, self.dra.jobCache) # was popped
self.assertIn(125, self.dra.jobCache) # was not popped
self.dra.log.error.assert_any_call(MatchStringWith("Cannot send notification mail"), ANY)
self.assertEqual("", self.dra.notesToSend)
def test_printSummary(self):
"""test DataRecoveryAgent printSummary.........................................................."""
self.dra.notesToSend = ""
self.dra.printSummary()
self.assertNotIn(" Other Tasks --> Keep : 0", self.dra.notesToSend)
self.dra.notesToSend = "Note This"
self.dra.printSummary()
def test_setPendingRequests_1(self):
"""Check the setPendingRequests function."""
mockJobs = dict((i, self.getTestMock(jobID=i)) for i in range(11))
reqMock = Mock()
reqMock.Status = "Done"
reqClient = Mock(name="reqMock", spec=DIRAC.RequestManagementSystem.Client.ReqClient.ReqClient)
reqClient.readRequestsForJobs.return_value = S_OK({"Successful": {}})
self.dra.reqClient = reqClient
self.dra.setPendingRequests(mockJobs)
for _index, mj in mockJobs.items():
self.assertFalse(mj.pendingRequest)
def test_setPendingRequests_2(self):
"""Check the setPendingRequests function."""
mockJobs = dict((i, self.getTestMock(jobID=i)) for i in range(11))
reqMock = Mock()
reqMock.RequestID = 666
reqClient = Mock(name="reqMock", spec=DIRAC.RequestManagementSystem.Client.ReqClient.ReqClient)
reqClient.readRequestsForJobs.return_value = S_OK({"Successful": {6: reqMock}})
reqClient.getRequestStatus.return_value = {"Value": "Done"}
self.dra.reqClient = reqClient
self.dra.setPendingRequests(mockJobs)
for _index, mj in mockJobs.items():
self.assertFalse(mj.pendingRequest)
reqClient.getRequestStatus.assert_called_once_with(666)
def test_setPendingRequests_3(self):
"""Check the setPendingRequests function."""
mockJobs = dict((i, self.getTestMock(jobID=i)) for i in range(11))
reqMock = Mock()
reqMock.RequestID = 555
reqClient = Mock(name="reqMock", spec=DIRAC.RequestManagementSystem.Client.ReqClient.ReqClient)
reqClient.readRequestsForJobs.return_value = S_OK({"Successful": {5: reqMock}})
reqClient.getRequestStatus.return_value = {"Value": "Pending"}
self.dra.reqClient = reqClient
self.dra.setPendingRequests(mockJobs)
for index, mj in mockJobs.items():
if index == 5:
self.assertTrue(mj.pendingRequest)
else:
self.assertFalse(mj.pendingRequest)
reqClient.getRequestStatus.assert_called_once_with(555)
def test_setPendingRequests_Fail(self):
"""Check the setPendingRequests function."""
mockJobs = dict((i, self.getTestMock(jobID=i)) for i in range(11))
reqMock = Mock()
reqMock.Status = "Done"
reqClient = Mock(name="reqMock", spec=DIRAC.RequestManagementSystem.Client.ReqClient.ReqClient)
reqClient.readRequestsForJobs.side_effect = (S_ERROR("Failure"), S_OK({"Successful": {}}))
self.dra.reqClient = reqClient
self.dra.setPendingRequests(mockJobs)
for _index, mj in mockJobs.items():
self.assertFalse(mj.pendingRequest)
def test_getLFNStatus(self):
"""Check the getLFNStatus function."""
mockJobs = dict((i, self.getTestMock(jobID=i)) for i in range(11))
self.dra.fcClient.exists.return_value = S_OK(
{"Successful": {"/my/stupid/file.lfn": True, "/my/stupid/file2.lfn": True}}
)
lfnExistence = self.dra.getLFNStatus(mockJobs)
self.assertEqual(lfnExistence, {"/my/stupid/file.lfn": True, "/my/stupid/file2.lfn": True})
self.dra.fcClient.exists.side_effect = (
S_ERROR("args"),
S_OK({"Successful": {"/my/stupid/file.lfn": True, "/my/stupid/file2.lfn": True}}),
)
lfnExistence = self.dra.getLFNStatus(mockJobs)
self.assertEqual(lfnExistence, {"/my/stupid/file.lfn": True, "/my/stupid/file2.lfn": True})
|
DIRACGrid/DIRAC
|
src/DIRAC/TransformationSystem/test/Test_DRA.py
|
Python
|
gpl-3.0
| 27,210
|
[
"DIRAC"
] |
ca599a275403cec2c0fb5d0b3ac14c631766b16e7471ddcb132e5dc31ab521d0
|
########################################################################
# $Id$
########################################################################
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.List import intListToString
from DIRAC.Core.Utilities.Pfn import pfnparse, pfnunparse
import os, stat
from types import ListType, StringTypes, DictType
class FileManagerBase( object ):
_base_tables = {}
_base_tables['FC_FileAncestors'] = { "Fields":
{
"FileID": "INT NOT NULL DEFAULT 0",
"AncestorID": "INT NOT NULL DEFAULT 0",
"AncestorDepth": "INT NOT NULL DEFAULT 0"
},
"Indexes": {"FileID": ["FileID"],
"AncestorID": ["AncestorID"],
"AncestorDepth": ["AncestorDepth"]},
"UniqueIndexes": { "File_Ancestor": ["FileID","AncestorID"]}
}
def __init__( self, database = None ):
self.db = None
if database is not None:
self.setDatabase( database )
def _getConnection( self, connection ):
if connection:
return connection
res = self.db._getConnection()
if res['OK']:
return res['Value']
gLogger.warn( "Failed to get MySQL connection", res['Message'] )
return connection
def setDatabase( self, database ):
self.db = database
result = self.db._createTables( self._base_tables )
if not result['OK']:
gLogger.error( "Failed to create tables", str( self._base_tables.keys() ) )
return result
if result['Value']:
gLogger.info( "Tables created: %s" % ','.join( result['Value'] ) )
result = self.db._createTables( self._tables )
if not result['OK']:
gLogger.error( "Failed to create tables", str( self._tables.keys() ) )
elif result['Value']:
gLogger.info( "Tables created: %s" % ','.join( result['Value'] ) )
return result
def getFileCounters( self, connection = False ):
connection = self._getConnection( connection )
resultDict = {}
req = "SELECT COUNT(*) FROM FC_Files;"
res = self.db._query( req, connection )
if not res['OK']:
return res
resultDict['Files'] = res['Value'][0][0]
req = "SELECT COUNT(FileID) FROM FC_Files WHERE FileID NOT IN ( SELECT FileID FROM FC_Replicas )"
res = self.db._query( req, connection )
if not res['OK']:
return res
resultDict['Files w/o Replicas'] = res['Value'][0][0]
req = "SELECT COUNT(RepID) FROM FC_Replicas WHERE FileID NOT IN ( SELECT FileID FROM FC_Files )"
res = self.db._query( req, connection )
if not res['OK']:
return res
resultDict['Replicas w/o Files'] = res['Value'][0][0]
treeTable = self.db.dtree.getTreeTable()
req = "SELECT COUNT(FileID) FROM FC_Files WHERE DirID NOT IN ( SELECT DirID FROM %s)" % treeTable
res = self.db._query( req, connection )
if not res['OK']:
return res
resultDict['Orphan Files'] = res['Value'][0][0]
req = "SELECT COUNT(FileID) FROM FC_Files WHERE FileID NOT IN ( SELECT FileID FROM FC_FileInfo)"
res = self.db._query( req, connection )
if not res['OK']:
resultDict['Files w/o FileInfo'] = 0
else:
resultDict['Files w/o FileInfo'] = res['Value'][0][0]
req = "SELECT COUNT(FileID) FROM FC_FileInfo WHERE FileID NOT IN ( SELECT FileID FROM FC_Files)"
res = self.db._query( req, connection )
if not res['OK']:
resultDict['FileInfo w/o Files'] = 0
else:
resultDict['FileInfo w/o Files'] = res['Value'][0][0]
return S_OK( resultDict )
def getReplicaCounters( self, connection = False ):
connection = self._getConnection( connection )
req = "SELECT COUNT(*) FROM FC_Replicas;"
res = self.db._query( req, connection )
if not res['OK']:
return res
return S_OK( {'Replicas':res['Value'][0][0]} )
######################################################
#
# File write methods
#
def _insertFiles( self, lfns, uid, gid, connection = False ):
"""To be implemented on derived class
"""
return S_ERROR( "To be implemented on derived class" )
def _deleteFiles( self, toPurge, connection = False ):
"""To be implemented on derived class
"""
return S_ERROR( "To be implemented on derived class" )
def _insertReplicas( self, lfns, master = False, connection = False ):
"""To be implemented on derived class
"""
return S_ERROR( "To be implemented on derived class" )
def _findFiles( self, lfns, metadata = ["FileID"], allStatus = False, connection = False ):
"""To be implemented on derived class
"""
return S_ERROR( "To be implemented on derived class" )
def _getFileReplicas( self, fileIDs, fields_input = ['PFN'], allStatus = False, connection = False ):
"""To be implemented on derived class
"""
return S_ERROR( "To be implemented on derived class" )
def _getFileIDFromGUID( self, guid, connection = False ):
"""To be implemented on derived class
"""
return S_ERROR( "To be implemented on derived class" )
def getLFNForGUID( self, guids, connection = False ):
"""Returns the LFN matching a given GUID
"""
return S_ERROR( "To be implemented on derived class" )
def _setFileParameter( self, fileID, paramName, paramValue, connection = False ):
"""To be implemented on derived class
"""
return S_ERROR( "To be implemented on derived class" )
def _deleteReplicas( self, lfns, connection = False ):
"""To be implemented on derived class
"""
return S_ERROR( "To be implemented on derived class" )
def _setReplicaStatus( self, fileID, se, status, connection = False ):
"""To be implemented on derived class
"""
return S_ERROR( "To be implemented on derived class" )
def _setReplicaHost( self, fileID, se, newSE, connection = False ):
"""To be implemented on derived class
"""
return S_ERROR( "To be implemented on derived class" )
def _getDirectoryFiles( self, dirID, fileNames, metadata, allStatus = False, connection = False ):
"""To be implemented on derived class
"""
return S_ERROR( "To be implemented on derived class" )
def _findFileIDs( self, lfns, connection=False ):
""" To be implemented on derived class
Should return following the successful/failed convention
Successful is a dictionary with keys the lfn, and values the FileID"""
return S_ERROR( "To be implemented on derived class" )
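# Illustrative sketch of the successful/failed convention described above
# (the concrete LFNs and IDs are assumptions, not part of the API):
#   result = self._findFileIDs( ['/vo/user/a.txt', '/vo/user/missing.txt'] )
#   # result['Value'] == { 'Successful': { '/vo/user/a.txt': 1042 },
#   #                      'Failed': { '/vo/user/missing.txt': 'No such file or directory' } }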
def _getDirectoryReplicas( self, dirID, allStatus = False, connection = False ):
""" To be implemented on derived class
Should return with only one value, being a list of all the replicas (FileName,FileID,SEID,PFN)
"""
return S_ERROR( "To be implemented on derived class" )
def countFilesInDir( self, dirId ):
""" Count how many files there are in a given directory
:param dirId : directory id
:returns S_OK(value) or S_ERROR
"""
return S_ERROR( "To be implemented on derived class" )
def _getFileLFNs(self,fileIDs):
""" Get the file LFNs for a given list of file IDs
"""
stringIDs = intListToString(fileIDs)
treeTable = self.db.dtree.getTreeTable()
req = "SELECT F.FileID, CONCAT(D.DirName,'/',F.FileName) from FC_Files as F, %s as D WHERE F.FileID IN ( %s ) AND F.DirID=D.DirID" % (treeTable,stringIDs)
result = self.db._query(req)
if not result['OK']:
return result
fileNameDict = {}
for row in result['Value']:
fileNameDict[row[0]] = row[1]
failed = {}
successful = fileNameDict
if len(fileNameDict) != len(fileIDs):
for id_ in fileIDs:
if not id_ in fileNameDict:
failed[id_] = "File ID not found"
return S_OK({'Successful':successful,'Failed':failed})
def _getFileLFNs_old(self,fileIDs):
""" Get the file LFNs for a given list of file IDs
"""
stringIDs = intListToString(fileIDs)
req = "SELECT DirID, FileID, FileName from FC_Files WHERE FileID IN ( %s )" % stringIDs
result = self.db._query(req)
if not result['OK']:
return result
dirPathDict = {}
fileNameDict = {}
for row in result['Value']:
if not row[0] in dirPathDict:
dirPathDict[row[0]] = self.db.dtree.getDirectoryPath(row[0])['Value']
fileNameDict[row[1]] = '%s/%s' % (dirPathDict[row[0]],row[2])
failed = {}
successful = fileNameDict
for id_ in fileIDs:
if not id_ in fileNameDict:
failed[id_] = "File ID not found"
return S_OK({'Successful':successful,'Failed':failed})
def addFile( self, lfns, credDict, connection = False ):
""" Add files to the catalog
:param lfns : dict { lfn : info }. 'info' is a dict containing PFN, SE, Size and Checksum;
the SE parameter can be a list if we have several replicas to register
"""
connection = self._getConnection( connection )
successful = {}
failed = {}
for lfn, info in lfns.items():
res = self._checkInfo( info, ['PFN', 'SE', 'Size', 'Checksum'] )
if not res['OK']:
failed[lfn] = res['Message']
lfns.pop( lfn )
res = self._addFiles( lfns, credDict, connection = connection )
if not res['OK']:
for lfn in lfns.keys():
failed[lfn] = res['Message']
else:
failed.update( res['Value']['Failed'] )
successful.update( res['Value']['Successful'] )
return S_OK( {'Successful':successful, 'Failed':failed} )
def _addFiles( self, lfns, credDict, connection = False ):
""" Main file adding method
"""
connection = self._getConnection( connection )
successful = {}
result = self.db.ugManager.getUserAndGroupID( credDict )
if not result['OK']:
return result
uid, gid = result['Value']
# prepare lfns with master replicas - the first in the list or a unique replica
masterLfns = {}
extraLfns = {}
for lfn in lfns:
masterLfns[lfn] = dict( lfns[lfn] )
if 'SE' in lfns[lfn] and type( lfns[lfn]['SE'] ) == ListType:
masterLfns[lfn]['SE'] = lfns[lfn]['SE'][0]
if len( lfns[lfn]['SE'] ) > 1:
extraLfns[lfn] = dict( lfns[lfn] )
extraLfns[lfn]['SE'] = lfns[lfn]['SE'][1:]
# Check whether the supplied files have been registered already
existingMetadata, failed = self._getExistingMetadata( masterLfns.keys(), connection = connection )
if existingMetadata:
success, fail = self._checkExistingMetadata( existingMetadata, masterLfns )
successful.update( success )
failed.update( fail )
for lfn in ( success.keys() + fail.keys() ):
masterLfns.pop( lfn )
# If GUIDs are supposed to be unique check their pre-existance
if self.db.uniqueGUID:
fail = self._checkUniqueGUID( masterLfns, connection = connection )
failed.update( fail )
for lfn in fail:
masterLfns.pop( lfn )
# If we have files left to register
if masterLfns:
# Create the directories for the supplied files and store their IDs
directories = self._getFileDirectories( masterLfns.keys() )
for directory, fileNames in directories.items():
res = self.db.dtree.makeDirectories( directory, credDict )
if not res['OK']:
for fileName in fileNames:
lfn = os.path.join( directory, fileName )
failed[lfn] = res['Message']
masterLfns.pop( lfn )
continue
for fileName in fileNames:
if not fileName:
failed[directory] = "Is not a valid file"
masterLfns.pop( directory )
continue
lfn = "%s/%s" % ( directory, fileName )
lfn = lfn.replace( '//', '/' )
# This condition should never be true, we would not be here otherwise...
if not res['OK']:
failed[lfn] = "Failed to create directory for file"
masterLfns.pop( lfn )
else:
masterLfns[lfn]['DirID'] = res['Value']
# If we still have files left to register
if masterLfns:
res = self._insertFiles( masterLfns, uid, gid, connection = connection )
if not res['OK']:
for lfn in masterLfns.keys():
failed[lfn] = res['Message']
masterLfns.pop( lfn )
else:
for lfn, error in res['Value']['Failed'].items():
failed[lfn] = error
masterLfns.pop( lfn )
masterLfns = res['Value']['Successful']
# Add the ancestors
if masterLfns:
res = self._populateFileAncestors( masterLfns, connection = connection )
toPurge = []
if not res['OK']:
for lfn in masterLfns.keys():
failed[lfn] = "Failed while registering ancestors"
toPurge.append( masterLfns[lfn]['FileID'] )
else:
failed.update( res['Value']['Failed'] )
for lfn, error in res['Value']['Failed'].items():
toPurge.append( masterLfns[lfn]['FileID'] )
if toPurge:
self._removeFileAncestors( toPurge, connection = connection )
self._deleteFiles( toPurge, connection = connection )
# Register the replicas
newlyRegistered = {}
if masterLfns:
res = self._insertReplicas( masterLfns, master = True, connection = connection )
toPurge = []
if not res['OK']:
for lfn in masterLfns.keys():
failed[lfn] = "Failed while registering replica"
toPurge.append( masterLfns[lfn]['FileID'] )
else:
newlyRegistered = res['Value']['Successful']
successful.update( newlyRegistered )
failed.update( res['Value']['Failed'] )
for lfn, error in res['Value']['Failed'].items():
toPurge.append( masterLfns[lfn]['FileID'] )
if toPurge:
self._removeFileAncestors( toPurge, connection = connection )
self._deleteFiles( toPurge, connection = connection )
# Add extra replicas for successfully registered LFNs
for lfn in extraLfns.keys():
if not lfn in successful:
extraLfns.pop( lfn )
if extraLfns:
res = self._findFiles( extraLfns.keys(), ['FileID','DirID'], connection=connection )
if not res['OK']:
for lfn in extraLfns.keys():
failed[lfn] = 'Failed while registering extra replicas'
successful.pop( lfn )
extraLfns.pop( lfn )
else:
failed.update(res['Value']['Failed'])
for lfn in res['Value']['Failed'].keys():
successful.pop(lfn)
extraLfns.pop( lfn )
for lfn,fileDict in res['Value']['Successful'].items():
extraLfns[lfn]['FileID'] = fileDict['FileID']
extraLfns[lfn]['DirID'] = fileDict['DirID']
if extraLfns:
res = self._insertReplicas( extraLfns, master = False, connection = connection )
if not res['OK']:
for lfn in extraLfns.keys():
failed[lfn] = "Failed while registering extra replicas"
successful.pop( lfn )
else:
newlyRegistered = res['Value']['Successful']
successful.update( newlyRegistered )
failed.update( res['Value']['Failed'] )
return S_OK( {'Successful':successful, 'Failed':failed} )
def _updateDirectoryUsage( self, directorySEDict, change, connection = False ):
connection = self._getConnection( connection )
for directoryID in directorySEDict.keys():
result = self.db.dtree.getPathIDsByID( directoryID )
if not result['OK']:
return result
parentIDs = result['Value']
dirDict = directorySEDict[directoryID]
for seID in dirDict.keys() :
seDict = dirDict[seID]
files = seDict['Files']
size = seDict['Size']
insertTuples = []
for dirID in parentIDs:
insertTuples.append( '(%d,%d,%d,%d,UTC_TIMESTAMP())' % ( dirID, seID, size, files ) )
req = "INSERT INTO FC_DirectoryUsage (DirID,SEID,SESize,SEFiles,LastUpdate) "
req += "VALUES %s" % ','.join( insertTuples )
req += " ON DUPLICATE KEY UPDATE SESize=SESize%s%d, SEFiles=SEFiles%s%d, LastUpdate=UTC_TIMESTAMP() " \
% ( change, size, change, files )
res = self.db._update( req )
if not res['OK']:
gLogger.warn( "Failed to update FC_DirectoryUsage", res['Message'] )
return S_OK()
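# For change = '+', size = 100 and files = 1 the generated upsert looks roughly
# like the following (table and column names come from the code above; the
# literal DirID/SEID values are an assumed example):
#   INSERT INTO FC_DirectoryUsage (DirID,SEID,SESize,SEFiles,LastUpdate)
#   VALUES (7,3,100,1,UTC_TIMESTAMP())
#   ON DUPLICATE KEY UPDATE SESize=SESize+100, SEFiles=SEFiles+1, LastUpdate=UTC_TIMESTAMP()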
def _populateFileAncestors( self, lfns, connection = False ):
connection = self._getConnection( connection )
successful = {}
failed = {}
for lfn, lfnDict in lfns.items():
originalFileID = lfnDict['FileID']
originalDepth = lfnDict.get( 'AncestorDepth', 1 )
ancestors = lfnDict.get( 'Ancestors', [] )
if type( ancestors ) == type( ' ' ):
ancestors = [ancestors]
if lfn in ancestors:
ancestors.remove( lfn )
if not ancestors:
successful[lfn] = True
continue
res = self._findFiles( ancestors, connection = connection )
if not res['OK']:
failed[lfn] = "Failed to resolve ancestor files"
continue
if res['Value']['Failed']:
failed[lfn] = "Failed to resolve ancestor files"
continue
ancestorIDs = res['Value']['Successful']
fileIDLFNs = {}
toInsert = {}
for ancestor in ancestorIDs.keys():
fileIDLFNs[ancestorIDs[ancestor]['FileID']] = ancestor
toInsert[ancestorIDs[ancestor]['FileID']] = originalDepth
res = self._getFileAncestors( fileIDLFNs.keys() )
if not res['OK']:
failed[lfn] = "Failed to obtain all ancestors"
continue
fileIDAncestorDict = res['Value']
for fileIDDict in fileIDAncestorDict.values():
for ancestorID, relativeDepth in fileIDDict.items():
toInsert[ancestorID] = relativeDepth + originalDepth
res = self._insertFileAncestors( originalFileID, toInsert, connection = connection )
if not res['OK']:
if "Duplicate" in res['Message']:
failed[lfn] = "Failed to insert ancestor files: duplicate entry"
else:
failed[lfn] = "Failed to insert ancestor files"
else:
successful[lfn] = True
return S_OK( {'Successful':successful, 'Failed':failed} )
def _insertFileAncestors( self, fileID, ancestorDict, connection = False ):
connection = self._getConnection( connection )
ancestorTuples = []
for ancestorID, depth in ancestorDict.items():
ancestorTuples.append( "(%d,%d,%d)" % ( fileID, ancestorID, depth ) )
if not ancestorTuples:
return S_OK()
req = "INSERT INTO FC_FileAncestors (FileID, AncestorID, AncestorDepth) VALUES %s" \
% intListToString( ancestorTuples )
return self.db._update( req, connection )
def _getFileAncestors( self, fileIDs, depths = [], connection = False ):
connection = self._getConnection( connection )
req = "SELECT FileID, AncestorID, AncestorDepth FROM FC_FileAncestors WHERE FileID IN (%s)" \
% intListToString( fileIDs )
if depths:
req = "%s AND AncestorDepth IN (%s);" % ( req, intListToString( depths ) )
res = self.db._query( req, connection )
if not res['OK']:
return res
fileIDAncestors = {}
for fileID, ancestorID, depth in res['Value']:
if fileID not in fileIDAncestors:
fileIDAncestors[fileID] = {}
fileIDAncestors[fileID][ancestorID] = depth
return S_OK( fileIDAncestors )
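# Sketch of the returned structure (the IDs and depths are assumed values):
#   { 1042: { 815: 1,    # file 1042 has ancestor 815 one generation up
#             633: 2 } } # and ancestor 633 two generations up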
def _getFileDescendents( self, fileIDs, depths, connection = False ):
connection = self._getConnection( connection )
req = "SELECT AncestorID, FileID, AncestorDepth FROM FC_FileAncestors WHERE AncestorID IN (%s)" \
% intListToString( fileIDs )
if depths:
req = "%s AND AncestorDepth IN (%s);" % ( req, intListToString( depths ) )
res = self.db._query( req, connection )
if not res['OK']:
return res
fileIDAncestors = {}
for ancestorID, fileID, depth in res['Value']:
if ancestorID not in fileIDAncestors:
fileIDAncestors[ancestorID] = {}
fileIDAncestors[ancestorID][fileID] = depth
return S_OK( fileIDAncestors )
def addFileAncestors(self,lfns, connection = False ):
""" Add file ancestors to the catalog """
connection = self._getConnection( connection )
failed = {}
successful = {}
result = self._findFiles( lfns.keys(), connection = connection )
if not result['OK']:
return result
if result['Value']['Failed']:
failed.update(result['Value']['Failed'])
for lfn in result['Value']['Failed']:
lfns.pop(lfn)
if not lfns:
return S_OK({'Successful':successful,'Failed':failed})
for lfn in result['Value']['Successful']:
lfns[lfn]['FileID'] = result['Value']['Successful'][lfn]['FileID']
result = self._populateFileAncestors(lfns, connection)
if not result['OK']:
return result
failed.update(result['Value']['Failed'])
successful = result['Value']['Successful']
return S_OK({'Successful':successful,'Failed':failed})
def _removeFileAncestors(self, fileIDs, connection = False ):
""" Remove from the FC_FileAncestors the entries corresponding to the input files"""
connection = self._getConnection( connection )
successful = {}
failed = {}
for FileID in fileIDs:
res = self.db.deleteEntries( "FC_FileAncestors" , { 'AncestorID' : FileID } )
if not res[ 'OK' ]:
failed[FileID] = res['Message']
continue
res = self.db.deleteEntries( "FC_FileAncestors" , { 'FileID' : FileID } )
if not res[ 'OK' ]:
failed[FileID] = res['Message']
continue
successful[FileID] = 'OK'
# One could/should perhaps fix the depth of related files here.
return S_OK( {'Successful' : successful, 'Failed' : failed} )
def _getFileRelatives( self, lfns, depths, relation, connection = False ):
connection = self._getConnection( connection )
failed = {}
successful = {}
result = self._findFiles( lfns.keys(), connection = connection )
if not result['OK']:
return result
if result['Value']['Failed']:
failed.update(result['Value']['Failed'])
for lfn in result['Value']['Failed']:
lfns.pop(lfn)
if not lfns:
return S_OK({'Successful':successful,'Failed':failed})
inputIDDict = {}
for lfn in result['Value']['Successful']:
inputIDDict[ result['Value']['Successful'][lfn]['FileID'] ] = lfn
inputIDs = inputIDDict.keys()
if relation == 'ancestor':
result = self._getFileAncestors(inputIDs,depths, connection)
else:
result = self._getFileDescendents(inputIDs,depths, connection)
if not result['OK']:
return result
relDict = result['Value']
for id_ in inputIDs:
if id_ in relDict:
aList = relDict[id_].keys()
result = self._getFileLFNs(aList)
if not result['OK']:
failed[inputIDDict[id_]] = "Failed to find %s" % relation
else:
if result['Value']['Successful']:
resDict = {}
for aID in result['Value']['Successful']:
resDict[ result['Value']['Successful'][aID] ] = relDict[id_][aID]
successful[inputIDDict[id_]] = resDict
for aID in result['Value']['Failed']:
failed[inputIDDict[id_]] = "Failed to get the %s LFN" % relation
else:
successful[inputIDDict[id_]] = {}
return S_OK({'Successful':successful,'Failed':failed})
def getFileAncestors( self, lfns, depths, connection = False ):
return self._getFileRelatives(lfns, depths, 'ancestor', connection)
def getFileDescendents( self, lfns, depths, connection = False ):
return self._getFileRelatives(lfns, depths, 'descendent', connection)
def _getExistingMetadata( self, lfns, connection = False ):
connection = self._getConnection( connection )
# Check whether the files already exist before adding
res = self._findFiles( lfns, ['FileID', 'Size', 'Checksum', 'GUID'], connection = connection )
if not res['OK']:
return {}, dict.fromkeys( lfns, res['Message'] )
successful = res['Value']['Successful']
failed = res['Value']['Failed']
for lfn, error in res['Value']['Failed'].items():
if error == 'No such file or directory':
failed.pop( lfn )
return successful, failed
def _checkExistingMetadata( self, existingLfns, lfns ):
failed = {}
successful = {}
fileIDLFNs = {}
for lfn, fileDict in existingLfns.items():
fileIDLFNs[fileDict['FileID']] = lfn
# For those that exist get the replicas to determine whether they are already registered
res = self._getFileReplicas( fileIDLFNs.keys() )
if not res['OK']:
for lfn in fileIDLFNs.values():
failed[lfn] = 'Failed checking pre-existing replicas'
else:
replicaDict = res['Value']
for fileID, lfn in fileIDLFNs.items():
fileMetadata = existingLfns[lfn]
existingGuid = fileMetadata['GUID']
existingSize = fileMetadata['Size']
existingChecksum = fileMetadata['Checksum']
newGuid = lfns[lfn]['GUID']
newSize = lfns[lfn]['Size']
newChecksum = lfns[lfn]['Checksum']
# Ensure that the key file metadata is the same
if ( existingGuid != newGuid ) or \
( existingSize != newSize ) or \
( existingChecksum != newChecksum ):
failed[lfn] = "File already registered with alternative metadata"
# If the DB does not have replicas for this file return an error
elif not fileID in replicaDict or not replicaDict[fileID]:
failed[lfn] = "File already registered with no replicas"
# If the supplied SE is not in the existing replicas return an error
elif not lfns[lfn]['SE'] in replicaDict[fileID].keys():
failed[lfn] = "File already registered with alternative replicas"
# If we get here the file being registered already exists exactly in the DB
else:
successful[lfn] = True
return successful, failed
def _checkUniqueGUID( self, lfns, connection = False ):
connection = self._getConnection( connection )
guidLFNs = {}
failed = {}
for lfn, fileDict in lfns.items():
guidLFNs[fileDict['GUID']] = lfn
res = self._getFileIDFromGUID( guidLFNs.keys(), connection = connection )
if not res['OK']:
return dict.fromkeys( lfns, res['Message'] )
for guid, fileID in res['Value'].items():
failed[guidLFNs[guid]] = "GUID already registered for another file %s" % fileID # resolve this to LFN
return failed
def removeFile( self, lfns, connection = False ):
""" Remove file from the catalog """
connection = self._getConnection( connection )
successful = {}
failed = {}
res = self._findFiles( lfns, ['DirID', 'FileID', 'Size'], connection = connection )
if not res['OK']:
return res
for lfn, error in res['Value']['Failed'].items():
if error == 'No such file or directory':
successful[lfn] = True
else:
failed[lfn] = error
fileIDLfns = {}
lfns = res['Value']['Successful']
for lfn, lfnDict in lfns.items():
fileIDLfns[lfnDict['FileID']] = lfn
res = self._computeStorageUsageOnRemoveFile( lfns, connection = connection )
if not res['OK']:
return res
directorySESizeDict = res['Value']
#Remove files from Ancestor tables
res = self._removeFileAncestors(fileIDLfns.keys(), connection = connection )
if res['OK'] and res['Value']:
for fid in res['Value']['Successful'].keys():
successful[fileIDLfns[fid]] = True
for fid, reason in res['Value']['Failed'].items():
failed[fileIDLfns[fid]] = reason
# Now do removal
res = self._deleteFiles( fileIDLfns.keys(), connection = connection )
if not res['OK']:
for lfn in fileIDLfns.values():
failed[lfn] = res['Message']
else:
# Update the directory usage
self._updateDirectoryUsage( directorySESizeDict, '-', connection = connection )
for lfn in fileIDLfns.values():
successful[lfn] = True
return S_OK( {"Successful":successful, "Failed":failed} )
def _computeStorageUsageOnRemoveFile( self, lfns, connection = False ):
# Resolve the replicas to calculate reduction in storage usage
fileIDLfns = {}
for lfn, lfnDict in lfns.items():
fileIDLfns[lfnDict['FileID']] = lfn
res = self._getFileReplicas( fileIDLfns.keys(), connection = connection )
if not res['OK']:
return res
directorySESizeDict = {}
for fileID, seDict in res['Value'].items():
dirID = lfns[fileIDLfns[fileID]]['DirID']
size = lfns[fileIDLfns[fileID]]['Size']
directorySESizeDict.setdefault( dirID, {} )
directorySESizeDict[dirID].setdefault( 0, {'Files':0,'Size':0} )
directorySESizeDict[dirID][0]['Size'] += size
directorySESizeDict[dirID][0]['Files'] += 1
for seName in seDict.keys():
res = self.db.seManager.findSE( seName )
if not res['OK']:
return res
seID = res['Value']
size = lfns[fileIDLfns[fileID]]['Size']
directorySESizeDict[dirID].setdefault( seID, {'Files':0,'Size':0} )
directorySESizeDict[dirID][seID]['Size'] += size
directorySESizeDict[dirID][seID]['Files'] += 1
return S_OK( directorySESizeDict )
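# Sketch of the returned structure (IDs and sizes are assumed values); the
# special SE key 0 accumulates the logical totals over all storage elements:
#   { 17: { 0: {'Files': 2, 'Size': 3072},
#           3: {'Files': 2, 'Size': 3072} } }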
def _setFileOwner( self, fileID, owner, connection = False ):
""" Set the file owner """
connection = self._getConnection( connection )
if type( owner ) in StringTypes:
result = self.db.ugManager.findUser( owner )
if not result['OK']:
return result
owner = result['Value']
return self._setFileParameter( fileID, 'UID', owner, connection = connection )
def _setFileGroup( self, fileID, group, connection = False ):
""" Set the file group """
connection = self._getConnection( connection )
if type( group ) in StringTypes:
result = self.db.ugManager.findGroup( group )
if not result['OK']:
return result
group = result['Value']
return self._setFileParameter( fileID, 'GID', group, connection = connection )
def _setFileMode( self, fileID, mode, connection = False ):
""" Set the file mode """
connection = self._getConnection( connection )
return self._setFileParameter( fileID, 'Mode', mode, connection = connection )
def setFileStatus( self, lfns, connection = False ):
""" Get set the group for the supplied files """
connection = self._getConnection( connection )
res = self._findFiles( lfns, ['FileID', 'UID'], connection = connection )
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
for lfn in res['Value']['Successful'].keys():
status = lfns[lfn]
fileID = res['Value']['Successful'][lfn]['FileID']
res = self._setFileStatus( fileID, status, connection = connection )
if not res['OK']:
failed[lfn] = res['Message']
else:
successful[lfn] = True
return S_OK( {'Successful':successful, 'Failed':failed} )
def _setFileStatus( self, fileID, status, connection = False ):
""" Set the file owner """
connection = self._getConnection( connection )
if type( status ) in StringTypes:
if not status in self.db.validFileStatus:
return S_ERROR( 'Invalid file status %s' % status )
result = self._getStatusInt( status, connection = connection )
if not result['OK']:
return result
status = result['Value']
return self._setFileParameter( fileID, 'Status', status, connection = connection )
######################################################
#
# Replica write methods
#
def addReplica( self, lfns, connection = False ):
""" Add replica to the catalog """
connection = self._getConnection( connection )
successful = {}
failed = {}
for lfn, info in lfns.items():
res = self._checkInfo( info, ['PFN', 'SE'] )
if not res['OK']:
failed[lfn] = res['Message']
lfns.pop( lfn )
res = self._addReplicas( lfns, connection = connection )
if not res['OK']:
for lfn in lfns.keys():
failed[lfn] = res['Message']
else:
failed.update( res['Value']['Failed'] )
successful.update( res['Value']['Successful'] )
return S_OK( {'Successful':successful, 'Failed':failed} )
def _addReplicas( self, lfns, connection = False ):
connection = self._getConnection( connection )
successful = {}
res = self._findFiles( lfns.keys(), ['DirID', 'FileID', 'Size'], connection = connection )
if not res['OK']:
return res
failed = res['Value']['Failed']
for lfn in failed.keys():
lfns.pop( lfn )
lfnFileIDDict = res['Value']['Successful']
for lfn, fileDict in lfnFileIDDict.items():
lfns[lfn].update( fileDict )
res = self._insertReplicas( lfns, connection = connection )
if not res['OK']:
for lfn in lfns.keys():
failed[lfn] = res['Message']
else:
successful = res['Value']['Successful']
failed.update( res['Value']['Failed'] )
return S_OK( {'Successful':successful, 'Failed':failed} )
def removeReplica( self, lfns, connection = False ):
""" Remove replica from catalog """
connection = self._getConnection( connection )
successful = {}
failed = {}
for lfn, info in lfns.items():
res = self._checkInfo( info, ['SE'] )
if not res['OK']:
failed[lfn] = res['Message']
lfns.pop( lfn )
res = self._deleteReplicas( lfns, connection = connection )
if not res['OK']:
for lfn in lfns.keys():
failed[lfn] = res['Message']
else:
failed.update( res['Value']['Failed'] )
successful.update( res['Value']['Successful'] )
return S_OK( {'Successful':successful, 'Failed':failed} )
def setReplicaStatus( self, lfns, connection = False ):
""" Set replica status in the catalog """
connection = self._getConnection( connection )
successful = {}
failed = {}
for lfn, info in lfns.items():
res = self._checkInfo( info, ['SE', 'Status'] )
if not res['OK']:
failed[lfn] = res['Message']
continue
status = info['Status']
se = info['SE']
res = self._findFiles( [lfn], ['FileID'], connection = connection )
if not res['OK']:
failed[lfn] = res['Message']
continue
if lfn not in res['Value']['Successful']:
failed[lfn] = res['Value']['Failed'][lfn]
continue
fileID = res['Value']['Successful'][lfn]['FileID']
res = self._setReplicaStatus( fileID, se, status, connection = connection )
if res['OK']:
successful[lfn] = res['Value']
else:
failed[lfn] = res['Message']
return S_OK( {'Successful':successful, 'Failed':failed} )
def setReplicaHost( self, lfns, connection = False ):
""" Set replica host in the catalog """
connection = self._getConnection( connection )
successful = {}
failed = {}
for lfn, info in lfns.items():
res = self._checkInfo( info, ['SE', 'NewSE'] )
if not res['OK']:
failed[lfn] = res['Message']
continue
newSE = info['NewSE']
se = info['SE']
res = self._findFiles( [lfn], ['FileID'], connection = connection )
if not res['OK']:
failed[lfn] = res['Message']
continue
if lfn not in res['Value']['Successful']:
failed[lfn] = res['Value']['Failed'][lfn]
continue
fileID = res['Value']['Successful'][lfn]['FileID']
res = self._setReplicaHost( fileID, se, newSE, connection = connection )
if res['OK']:
successful[lfn] = res['Value']
else:
failed[lfn] = res['Message']
return S_OK( {'Successful':successful, 'Failed':failed} )
######################################################
#
# File read methods
#
def exists( self, lfns, connection = False ):
""" Determine whether a file exists in the catalog """
connection = self._getConnection( connection )
res = self._findFiles( lfns, allStatus = True, connection = connection )
if not res['OK']:
return res
successful = res['Value']['Successful']
origFailed = res['Value']['Failed']
for lfn in successful:
successful[lfn] = lfn
failed = {}
if self.db.uniqueGUID:
guidList = []
val = None
#Try to identify if the GUID is given
# We consider only 2 options :
# either {lfn : guid}
# or { lfn : {PFN : .., GUID : ..} }
if type( lfns ) == DictType:
val = lfns.values()
# We have values, take the first to identify the type
if val:
val = val[0]
if type( val ) == DictType and 'GUID' in val:
# We are in the case {lfn : {PFN:.., GUID:..}}
guidList = [lfns[lfn]['GUID'] for lfn in lfns]
elif type( val ) in StringTypes:
# We hope that it is the GUID which is given
guidList = lfns.values()
if guidList:
# A dict { guid: lfn to which it is supposed to be associated }
guidToGivenLfn = dict( zip( guidList, lfns ) )
res = self.getLFNForGUID( guidList, connection )
if not res['OK']:
return res
guidLfns = res['Value']['Successful']
for guid, realLfn in guidLfns.items():
successful[guidToGivenLfn[guid]] = realLfn
for lfn, error in origFailed.items():
# It could be in successful because the guid exists with another lfn
if lfn in successful:
continue
if error == 'No such file or directory':
successful[lfn] = False
else:
failed[lfn] = error
return S_OK( {"Successful":successful, "Failed":failed} )
def isFile( self, lfns, connection = False ):
""" Determine whether a path is a file in the catalog """
connection = self._getConnection( connection )
# TODO: should check whether it is a directory if it fails
return self.exists( lfns, connection = connection )
def getFileSize( self, lfns, connection = False ):
""" Get file size from the catalog """
connection = self._getConnection( connection )
# TODO: should check whether it is a directory if it fails
res = self._findFiles( lfns, ['Size'], connection = connection )
if not res['OK']:
return res
totalSize = 0
for lfn in res['Value']['Successful'].keys():
size = res['Value']['Successful'][lfn]['Size']
res['Value']['Successful'][lfn] = size
totalSize += size
res['TotalSize'] = totalSize
return res
def getFileMetadata( self, lfns, connection = False ):
""" Get file metadata from the catalog """
connection = self._getConnection( connection )
# TODO: should check whether it is a directory if it fails
return self._findFiles( lfns, ['Size', 'Checksum',
'ChecksumType', 'UID',
'GID', 'GUID',
'CreationDate', 'ModificationDate',
'Mode', 'Status'], connection = connection )
def getPathPermissions( self, paths, credDict, connection = False ):
""" Get the permissions for the supplied paths """
connection = self._getConnection( connection )
res = self.db.ugManager.getUserAndGroupID( credDict )
if not res['OK']:
return res
uid, gid = res['Value']
res = self._findFiles( paths, metadata = ['Mode', 'UID', 'GID'], connection = connection )
if not res['OK']:
return res
successful = {}
for dirName, dirDict in res['Value']['Successful'].items():
mode = dirDict['Mode']
p_uid = dirDict['UID']
p_gid = dirDict['GID']
successful[dirName] = {}
if p_uid == uid:
successful[dirName]['Read'] = mode & stat.S_IRUSR
successful[dirName]['Write'] = mode & stat.S_IWUSR
successful[dirName]['Execute'] = mode & stat.S_IXUSR
elif p_gid == gid:
successful[dirName]['Read'] = mode & stat.S_IRGRP
successful[dirName]['Write'] = mode & stat.S_IWGRP
successful[dirName]['Execute'] = mode & stat.S_IXGRP
else:
successful[dirName]['Read'] = mode & stat.S_IROTH
successful[dirName]['Write'] = mode & stat.S_IWOTH
successful[dirName]['Execute'] = mode & stat.S_IXOTH
return S_OK( {'Successful':successful, 'Failed':res['Value']['Failed']} )
######################################################
#
# Replica read methods
#
def __getReplicasForIDs( self, fileIDLfnDict, allStatus, connection = False ):
""" Get replicas for files with already resolved IDs
"""
replicas = {}
if fileIDLfnDict:
fields = []
if not self.db.lfnPfnConvention or self.db.lfnPfnConvention == "Weak":
fields = ['PFN']
res = self._getFileReplicas( fileIDLfnDict.keys(), fields_input=fields,
allStatus = allStatus, connection = connection )
if not res['OK']:
return res
for fileID, seDict in res['Value'].items():
lfn = fileIDLfnDict[fileID]
replicas[lfn] = {}
for se, repDict in seDict.items():
pfn = repDict.get('PFN','')
#if not pfn or self.db.lfnPfnConvention:
# res = self._resolvePFN( lfn, se )
# if res['OK']:
# pfn = res['Value']
replicas[lfn][se] = pfn
result = S_OK( replicas )
return result
def getReplicas( self, lfns, allStatus, connection = False ):
""" Get file replicas from the catalog """
connection = self._getConnection( connection )
# Get FileID <-> LFN correspondence first
res = self._findFileIDs( lfns, connection = connection )
if not res['OK']:
return res
failed = res['Value']['Failed']
fileIDLFNs = {}
for lfn, fileID in res['Value']['Successful'].items():
fileIDLFNs[fileID] = lfn
result = self.__getReplicasForIDs( fileIDLFNs, allStatus, connection)
if not result['OK']:
return result
replicas = result['Value']
result = S_OK( { "Successful": replicas, 'Failed': failed } )
if self.db.lfnPfnConvention:
sePrefixDict = {}
resSE = self.db.seManager.getSEPrefixes()
if resSE['OK']:
sePrefixDict = resSE['Value']
result['Value']['SEPrefixes'] = sePrefixDict
return result
def getReplicasByMetadata( self, metaDict, path, allStatus, credDict, connection = False ):
""" Get file replicas for files corresponding to the given metadata """
connection = self._getConnection( connection )
# Get FileID <-> LFN correspondence first
failed = {}
result = self.db.fmeta.findFilesByMetadata( metaDict, path, credDict, extra = True)
if not result['OK']:
return result
fileIDLFNs = result['Value']
result = self.__getReplicasForIDs( fileIDLFNs, allStatus, connection)
if not result['OK']:
return result
replicas = result['Value']
result = S_OK( { "Successful": replicas, 'Failed': failed } )
if self.db.lfnPfnConvention:
sePrefixDict = {}
resSE = self.db.seManager.getSEPrefixes()
if resSE['OK']:
sePrefixDict = resSE['Value']
result['Value']['SEPrefixes'] = sePrefixDict
return result
def _resolvePFN(self,lfn,se):
resSE = self.db.seManager.getSEDefinition(se)
if not resSE['OK']:
return resSE
pfnDict = dict(resSE['Value']['SEDict'])
if "PFNPrefix" in pfnDict:
return S_OK(pfnDict['PFNPrefix']+lfn)
else:
pfnDict['FileName'] = lfn
return pfnunparse(pfnDict)
def getReplicaStatus( self, lfns, connection = False ):
""" Get replica status from the catalog """
connection = self._getConnection( connection )
res = self._findFiles( lfns, connection = connection )
if not res['OK']:
return res
failed = res['Value']['Failed']
fileIDLFNs = {}
for lfn, fileDict in res['Value']['Successful'].items():
fileID = fileDict['FileID']
fileIDLFNs[fileID] = lfn
successful = {}
if fileIDLFNs:
res = self._getFileReplicas( fileIDLFNs.keys(), allStatus = True, connection = connection )
if not res['OK']:
return res
for fileID, seDict in res['Value'].items():
lfn = fileIDLFNs[fileID]
requestedSE = lfns[lfn]
if not requestedSE:
failed[lfn] = "Replica info not supplied"
elif requestedSE not in seDict.keys():
failed[lfn] = "No replica at supplied site"
else:
successful[lfn] = seDict[requestedSE]['Status']
return S_OK( {'Successful':successful, 'Failed':failed} )
######################################################
#
# General usage methods
#
def _getStatusInt( self, status, connection = False ):
connection = self._getConnection( connection )
req = "SELECT StatusID FROM FC_Statuses WHERE Status = '%s';" % status
res = self.db._query( req, connection )
if not res['OK']:
return res
if res['Value']:
return S_OK( res['Value'][0][0] )
req = "INSERT INTO FC_Statuses (Status) VALUES ('%s');" % status
res = self.db._update( req, connection )
if not res['OK']:
return res
return S_OK( res['lastRowId'] )
def _getIntStatus(self,statusID,connection=False):
if statusID in self.statusDict:
return S_OK(self.statusDict[statusID])
connection = self._getConnection(connection)
req = "SELECT StatusID,Status FROM FC_Statuses"
res = self.db._query(req,connection)
if not res['OK']:
return res
if res['Value']:
for row in res['Value']:
self.statusDict[int(row[0])] = row[1]
if statusID in self.statusDict:
return S_OK(self.statusDict[statusID])
return S_OK('Unknown')
def getFilesInDirectory( self, dirID, verbose = False, connection = False ):
connection = self._getConnection( connection )
files = {}
res = self._getDirectoryFiles( dirID, [], ['FileID', 'Size', 'GUID',
'Checksum', 'ChecksumType',
'Type', 'UID',
'GID', 'CreationDate',
'ModificationDate', 'Mode',
'Status'], connection = connection )
if not res['OK']:
return res
if not res['Value']:
return S_OK( files )
fileIDNames = {}
for fileName, fileDict in res['Value'].items():
files[fileName] = {}
files[fileName]['MetaData'] = fileDict
fileIDNames[fileDict['FileID']] = fileName
if verbose:
result = self._getFileReplicas( fileIDNames.keys(), connection = connection )
if not result['OK']:
return result
for fileID, seDict in result['Value'].items():
fileName = fileIDNames[fileID]
files[fileName]['Replicas'] = seDict
return S_OK( files )
def getDirectoryReplicas( self, dirID, path, allStatus = False, connection = False ):
""" Get the replicas for all the Files in the given Directory
:param DirID : ID of the directory
:param path : useless
:param allStatus : whether all replicas and file status are considered
If False, take the visibleFileStatus and visibleReplicaStatus values from the configuration
"""
connection = self._getConnection( connection )
result = self._getDirectoryReplicas( dirID, allStatus, connection)
if not result['OK']:
return result
resultDict = {}
seDict = {}
for fileName, fileID, seID, pfn in result['Value']:
resultDict.setdefault( fileName, {} )
if not seID in seDict:
res = self.db.seManager.getSEName(seID)
if not res['OK']:
seDict[seID] = 'Unknown'
else:
seDict[seID] = res['Value']
se = seDict[seID]
resultDict[fileName][se] = pfn
return S_OK( resultDict )
def _getFileDirectories( self, lfns ):
""" For a list of lfn, returns a dictionary with key the directory, and value
the files in that directory. It does not make any query, just splits the names
:param lfns list of lfns
"""
dirDict = {}
for lfn in lfns:
lfnDir = os.path.dirname( lfn )
lfnFile = os.path.basename( lfn )
dirDict.setdefault( lfnDir, [] )
dirDict[lfnDir].append( lfnFile )
return dirDict
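# Example (purely illustrative input):
#   self._getFileDirectories( ['/vo/user/a.txt', '/vo/user/b.txt', '/vo/prod/c.txt'] )
#   # -> { '/vo/user': ['a.txt', 'b.txt'], '/vo/prod': ['c.txt'] }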
def _checkInfo( self, info, requiredKeys ):
if not info:
return S_ERROR( "Missing parameters" )
for key in requiredKeys:
if not key in info:
return S_ERROR( "Missing '%s' parameter" % key )
return S_OK()
# def _checkLFNPFNConvention( self, lfn, pfn, se ):
# """ Check that the PFN corresponds to the LFN-PFN convention """
# if pfn == lfn:
# return S_OK()
# if ( len( pfn ) < len( lfn ) ) or ( pfn[-len( lfn ):] != lfn ) :
# return S_ERROR( 'PFN does not correspond to the LFN convention' )
# return S_OK()
def _checkLFNPFNConvention( self, lfn, pfn, se ):
""" Check that the PFN corresponds to the LFN-PFN convention
"""
# Check if the PFN corresponds to the LFN convention
if pfn == lfn:
return S_OK()
lfn_pfn = True # flag that the lfn is contained in the pfn
if ( len( pfn ) < len( lfn ) ) or not pfn.endswith( lfn ):
return S_ERROR( 'PFN does not correspond to the LFN convention' )
# Check if the pfn corresponds to the SE definition
result = self._getStorageElement( se )
if not result['OK']:
return result
selement = result['Value']
res = pfnparse( pfn )
if not res['OK']:
return res
pfnDict = res['Value']
protocol = pfnDict['Protocol']
pfnpath = pfnDict['Path']
result = selement.getStorageParameters( protocol )
if not result['OK']:
return result
seDict = result['Value']
sePath = seDict['Path']
ind = pfnpath.find( sePath )
if ind == -1:
return S_ERROR( 'The given PFN %s does not correspond to the %s SE definition' % ( pfn, se ) )
# Check the full LFN-PFN-SE convention
if lfn_pfn:
seAccessDict = dict( seDict )
seAccessDict['Path'] = sePath + '/' + lfn
check_pfn = pfnunparse( seAccessDict )
if check_pfn != pfn:
return S_ERROR( 'PFN does not correspond to the LFN convention' )
return S_OK()
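# Sketch of the convention being enforced (protocol, host and SE path are
# assumed values): with lfn = '/vo/user/a.txt' and an SE whose Path is
# '/storage', the only PFN passing the full check is
# 'proto://se.host/storage/vo/user/a.txt', i.e. the SE path followed by the LFN.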
def _getStorageElement( self, seName ):
from DIRAC.Resources.Storage.StorageElement import StorageElement
storageElement = StorageElement( seName )
if not storageElement.valid:
return S_ERROR( storageElement.errorReason )
return S_OK( storageElement )
def setFileGroup( self, lfns, uid=0, gid=0, connection = False ):
""" Get set the group for the supplied files
:param lfns : dictionary < lfn : group >
:param uid : useless
:param gid : useless
"""
connection = self._getConnection( connection )
res = self._findFiles( lfns, ['FileID', 'GID'], connection = connection )
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
for lfn in res['Value']['Successful'].keys():
group = lfns[lfn]
if type( group ) in StringTypes:
groupRes = self.db.ugManager.findGroup( group )
if not groupRes['OK']:
return groupRes
group = groupRes['Value']
currentGroup = res['Value']['Successful'][lfn]['GID']
if int( group ) == int( currentGroup ):
successful[lfn] = True
else:
fileID = res['Value']['Successful'][lfn]['FileID']
res = self._setFileGroup( fileID, group, connection = connection )
if not res['OK']:
failed[lfn] = res['Message']
else:
successful[lfn] = True
return S_OK( {'Successful':successful, 'Failed':failed} )
def setFileOwner( self, lfns, uid=0, gid=0, connection = False ):
""" Get set the group for the supplied files
:param lfns : dictionary < lfn : group >
:param uid : useless
:param gid : useless
"""
connection = self._getConnection( connection )
res = self._findFiles( lfns, ['FileID', 'UID'], connection = connection )
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
for lfn in res['Value']['Successful'].keys():
owner = lfns[lfn]
if type( owner ) in StringTypes:
userRes = self.db.ugManager.findUser( owner )
if not userRes['OK']:
return userRes
owner = userRes['Value']
currentOwner = res['Value']['Successful'][lfn]['UID']
if int( owner ) == int( currentOwner ):
successful[lfn] = True
else:
fileID = res['Value']['Successful'][lfn]['FileID']
res = self._setFileOwner( fileID, owner, connection = connection )
if not res['OK']:
failed[lfn] = res['Message']
else:
successful[lfn] = True
return S_OK( {'Successful':successful, 'Failed':failed} )
def setFileMode( self, lfns, uid=0, gid=0, connection = False ):
""" Get set the mode for the supplied files """
connection = self._getConnection( connection )
res = self._findFiles( lfns, ['FileID', 'Mode'], connection = connection )
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
for lfn in res['Value']['Successful'].keys():
mode = lfns[lfn]
currentMode = res['Value']['Successful'][lfn]['Mode']
if int( currentMode ) == int( mode ):
successful[lfn] = True
else:
fileID = res['Value']['Successful'][lfn]['FileID']
res = self._setFileMode( fileID, mode, connection = connection )
if not res['OK']:
failed[lfn] = res['Message']
else:
successful[lfn] = True
return S_OK( {'Successful':successful, 'Failed':failed} )
def changePathOwner( self, paths, credDict, recursive = False ):
""" Bulk method to change Owner for the given paths """
return self._changePathFunction( paths, credDict, self.db.dtree.changeDirectoryOwner,
self.setFileOwner, recursive )
def changePathGroup( self, paths, credDict, recursive = False ):
""" Bulk method to change Owner for the given paths """
return self._changePathFunction( paths, credDict, self.db.dtree.changeDirectoryGroup,
self.setFileGroup, recursive )
def changePathMode( self, paths, credDict, recursive = False ):
""" Bulk method to change Owner for the given paths """
return self._changePathFunction( paths, credDict, self.db.dtree.changeDirectoryMode,
self.setFileMode, recursive )
def _changePathFunction( self, paths, credDict, change_function_directory, change_function_file, recursive = False ):
""" A generic function to change Owner, Group or Mode for the given paths """
result = self.db.ugManager.getUserAndGroupID( credDict )
if not result['OK']:
return result
uid, gid = result['Value']
dirList = []
result = self.db.isDirectory( paths, credDict )
if not result['OK']:
return result
for p in result['Value']['Successful']:
if result['Value']['Successful'][p]:
dirList.append( p )
fileList = []
if len( dirList ) < len( paths ):
result = self.isFile( paths )
if not result['OK']:
return result
fileList = result['Value']['Successful'].keys()
successful = {}
failed = {}
dirArgs = {}
fileArgs = {}
for path in paths:
if ( not path in dirList ) and ( not path in fileList ):
failed[path] = 'Path not found'
if path in dirList:
dirArgs[path] = paths[path]
elif path in fileList:
fileArgs[path] = paths[path]
if dirArgs:
result = change_function_directory( dirArgs, uid, gid )
if not result['OK']:
return result
successful.update( result['Value']['Successful'] )
failed.update( result['Value']['Failed'] )
if fileArgs:
result = change_function_file( fileArgs, uid, gid )
if not result['OK']:
return result
successful.update( result['Value']['Successful'] )
failed.update( result['Value']['Failed'] )
return S_OK( {'Successful':successful, 'Failed':failed} )
| miloszz/DIRAC | DataManagementSystem/DB/FileCatalogComponents/FileManagerBase.py | Python | gpl-3.0 | 56,703 | ["DIRAC"] | 9b5d961f23a9686cf49626c5faacd8b720552acbacec9f955bf8fc38f4228adc |
"""
End-to-end tests for Student's Profile Page.
"""
from datetime import datetime
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.common.logout import LogoutPage
from common.test.acceptance.pages.lms.learner_profile import LearnerProfilePage
from common.test.acceptance.tests.helpers import AcceptanceTest, EventsTestMixin
class LearnerProfileTestMixin(EventsTestMixin):
"""
Mixin with helper methods for testing learner profile pages.
"""
PRIVACY_PUBLIC = 'all_users'
PRIVACY_PRIVATE = 'private'
PUBLIC_PROFILE_FIELDS = ['username', 'country', 'language_proficiencies', 'bio']
PRIVATE_PROFILE_FIELDS = ['username']
PUBLIC_PROFILE_EDITABLE_FIELDS = ['country', 'language_proficiencies', 'bio']
USER_SETTINGS_CHANGED_EVENT_NAME = "edx.user.settings.changed"
def log_in_as_unique_user(self):
"""
Create a unique user and return the account's username and id.
"""
username = f"test_{self.unique_id[0:6]}"
auto_auth_page = AutoAuthPage(self.browser, username=username).visit()
user_id = auto_auth_page.get_user_id()
return username, user_id
def set_public_profile_fields_data(self, profile_page):
"""
Fill in the public profile fields of a user.
"""
# These value_for_dropdown_field method calls used to include
# focus_out = True, but a change in selenium is focusing out of the
# drop down after selection without any more action needed.
profile_page.value_for_dropdown_field('language_proficiencies', 'English')
profile_page.value_for_dropdown_field('country', 'United Arab Emirates')
profile_page.set_value_for_textarea_field('bio', 'Nothing Special')
# Waits here for text to appear/save on bio field
profile_page.wait_for_ajax()
def visit_profile_page(self, username, privacy=None):
"""
Visit a user's profile page and if a privacy is specified and
is different from the displayed value, then set the privacy to that value.
"""
profile_page = LearnerProfilePage(self.browser, username)
# Change the privacy if requested by loading the page and
# changing the drop down
if privacy is not None:
profile_page.visit()
# Change the privacy setting if it is not the desired one already
profile_page.privacy = privacy
# Verify the current setting is as expected
if privacy == self.PRIVACY_PUBLIC:
assert profile_page.privacy == 'all_users'
else:
assert profile_page.privacy == 'private'
if privacy == self.PRIVACY_PUBLIC:
self.set_public_profile_fields_data(profile_page)
# Reset event tracking so that the tests only see events from
# loading the profile page.
self.start_time = datetime.now()
# Load the page
profile_page.visit()
return profile_page
def initialize_different_user(self, privacy=None, birth_year=None):
"""
Initialize the profile page for a different test user
"""
username, user_id = self.log_in_as_unique_user()
# Set the privacy for the new user
if privacy is None:
privacy = self.PRIVACY_PUBLIC
self.visit_profile_page(username, privacy=privacy)
# Set the user's year of birth
if birth_year:
self.set_birth_year(birth_year)
# Log the user out
LogoutPage(self.browser).visit()
return username, user_id
class LearnerProfileA11yTest(LearnerProfileTestMixin, AcceptanceTest):
"""
Class to test learner profile accessibility.
"""
a11y = True
def test_editable_learner_profile_a11y(self):
"""
Test the accessibility of the editable version of the profile page
(user viewing her own public profile).
"""
username, _ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username)
profile_page.a11y_audit.config.set_rules({
"ignore": [
'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865
'region', # TODO: AC-932
]
})
profile_page.a11y_audit.check_for_accessibility_errors()
profile_page.make_field_editable('language_proficiencies')
profile_page.a11y_audit.check_for_accessibility_errors()
profile_page.make_field_editable('bio')
profile_page.a11y_audit.check_for_accessibility_errors()
def test_read_only_learner_profile_a11y(self):
"""
Test the accessibility of the read-only version of a public profile page
(user viewing someone else's profile page).
"""
# initialize_different_user should cause country, language, and bio to be filled out (since
# privacy is public). It doesn't appear that this is happening, although the method
# works in regular bokchoy tests. Perhaps a problem with phantomjs? So this test is currently
# only looking at a read-only profile page with a username.
different_username, _ = self.initialize_different_user(privacy=self.PRIVACY_PUBLIC)
self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
profile_page.a11y_audit.config.set_rules({
"ignore": [
'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865
'region', # TODO: AC-932
]
})
profile_page.a11y_audit.check_for_accessibility_errors()
def test_badges_accessibility(self):
"""
Test the accessibility of the badge listings and sharing modal.
"""
username = 'testcert'
AutoAuthPage(self.browser, username=username).visit()
profile_page = self.visit_profile_page(username)
profile_page.a11y_audit.config.set_rules({
"ignore": [
'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865
'region', # TODO: AC-932
'color-contrast' # AC-938
]
})
profile_page.display_accomplishments()
profile_page.a11y_audit.check_for_accessibility_errors()
profile_page.badges[0].display_modal()
profile_page.a11y_audit.check_for_accessibility_errors()
| edx/edx-platform | common/test/acceptance/tests/lms/test_learner_profile.py | Python | agpl-3.0 | 6,461 | ["VisIt"] | 7c5b33d38db0eeb48aef067ea351176edb3b92a3b9d5f15050b300a8e66fc454 |
import numpy as np
import scipy.sparse
import pytest
from sklearn.datasets import load_digits, load_iris
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_raises
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import assert_no_warnings
from sklearn.utils._testing import ignore_warnings
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn.naive_bayes import MultinomialNB, ComplementNB
from sklearn.naive_bayes import CategoricalNB
DISCRETE_NAIVE_BAYES_CLASSES = [
BernoulliNB, CategoricalNB, ComplementNB, MultinomialNB]
ALL_NAIVE_BAYES_CLASSES = DISCRETE_NAIVE_BAYES_CLASSES + [GaussianNB]
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
with pytest.raises(
ValueError,
match="The target label.* in y do not exist in the initial classes"
):
GaussianNB().partial_fit(X, y, classes=[0, 1])
# TODO remove in 1.2 once sigma_ attribute is removed (GH #18842)
def test_gnb_var():
clf = GaussianNB()
clf.fit(X, y)
with pytest.warns(FutureWarning, match="Attribute sigma_ was deprecated"):
assert_array_equal(clf.sigma_, clf.var_)
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf = GaussianNB().fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.var_, clf_sw.var_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.var_, clf2.var_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.var_, clf_sw.var_)
def test_gnb_neg_priors():
"""Test whether an error is raised in case of negative priors"""
clf = GaussianNB(priors=np.array([-1., 2.]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_priors():
"""Test whether the class prior override is properly used"""
clf = GaussianNB(priors=np.array([0.3, 0.7])).fit(X, y)
assert_array_almost_equal(clf.predict_proba([[-0.1, -0.1]]),
np.array([[0.825303662161683,
0.174696337838317]]), 8)
assert_array_almost_equal(clf.class_prior_, np.array([0.3, 0.7]))
def test_gnb_priors_sum_isclose():
# Test whether the class prior sum is properly validated
X = np.array([[-1, -1], [-2, -1], [-3, -2], [-4, -5], [-5, -4],
[1, 1], [2, 1], [3, 2], [4, 4], [5, 5]])
priors = np.array([0.08, 0.14, 0.03, 0.16, 0.11, 0.16, 0.07, 0.14,
0.11, 0.0])
Y = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
clf = GaussianNB(priors=priors)
# smoke test for issue #9633
clf.fit(X, Y)
def test_gnb_wrong_nb_priors():
""" Test whether an error is raised if the number of prior is different
from the number of class"""
clf = GaussianNB(priors=np.array([.25, .25, .25, .25]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_prior_greater_one():
"""Test if an error is raised if the sum of prior greater than one"""
clf = GaussianNB(priors=np.array([2., 1.]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_prior_large_bias():
"""Test if good prediction when class prior favor largely one class"""
clf = GaussianNB(priors=np.array([0.01, 0.99]))
clf.fit(X, y)
assert clf.predict([[-0.1, -0.1]]) == np.array([2])
def test_gnb_check_update_with_no_data():
""" Test when the partial fit is called without any data"""
# Create an empty array
prev_points = 100
mean = 0.
var = 1.
x_empty = np.empty((0, X.shape[1]))
tmean, tvar = GaussianNB._update_mean_variance(prev_points, mean,
var, x_empty)
assert tmean == mean
assert tvar == var
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.var_, clf_pf.var_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.var_, clf_pf2.var_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_gnb_naive_bayes_scale_invariance():
# Scaling the data should not change the prediction results
iris = load_iris()
X, y = iris.data, iris.target
labels = [GaussianNB().fit(f * X, y).predict(f * X)
for f in [1E-10, 1, 1E10]]
assert_array_equal(labels[0], labels[1])
assert_array_equal(labels[1], labels[2])
# TODO: Remove in version 1.1
@pytest.mark.parametrize('DiscreteNaiveBayes', DISCRETE_NAIVE_BAYES_CLASSES)
def test_discretenb_deprecated_coef_intercept(DiscreteNaiveBayes):
est = DiscreteNaiveBayes().fit(X2, y2)
for att in ["coef_", "intercept_"]:
with pytest.warns(FutureWarning):
hasattr(est, att)
@pytest.mark.parametrize('DiscreteNaiveBayes', DISCRETE_NAIVE_BAYES_CLASSES)
def test_discretenb_prior(DiscreteNaiveBayes):
# Test whether class priors are properly set.
clf = DiscreteNaiveBayes().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
@pytest.mark.parametrize('DiscreteNaiveBayes', DISCRETE_NAIVE_BAYES_CLASSES)
def test_discretenb_partial_fit(DiscreteNaiveBayes):
clf1 = DiscreteNaiveBayes()
clf1.fit([[0, 1], [1, 0], [1, 1]], [0, 1, 1])
clf2 = DiscreteNaiveBayes()
clf2.partial_fit([[0, 1], [1, 0], [1, 1]], [0, 1, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
if DiscreteNaiveBayes is CategoricalNB:
for i in range(len(clf1.category_count_)):
assert_array_equal(clf1.category_count_[i],
clf2.category_count_[i])
else:
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = DiscreteNaiveBayes()
# all categories have to appear in the first partial fit
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
clf3.partial_fit([[1, 1]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
if DiscreteNaiveBayes is CategoricalNB:
# the categories for each feature of CategoricalNB are mapped to an
# index chronologically with each call of partial fit and therefore
# the category_count matrices cannot be compared for equality
for i in range(len(clf1.category_count_)):
assert_array_equal(clf1.category_count_[i].shape,
clf3.category_count_[i].shape)
assert_array_equal(np.sum(clf1.category_count_[i], axis=1),
np.sum(clf3.category_count_[i], axis=1))
# assert category 0 occurs 1x in the first class and 0x in the 2nd
# class
assert_array_equal(clf1.category_count_[0][0], np.array([1, 0]))
# assert category 1 occurs 0x in the first class and 2x in the 2nd
# class
assert_array_equal(clf1.category_count_[0][1], np.array([0, 2]))
# assert category 0 occurs 0x in the first class and 1x in the 2nd
# class
assert_array_equal(clf1.category_count_[1][0], np.array([0, 1]))
# assert category 1 occurs 1x in the first class and 1x in the 2nd
# class
assert_array_equal(clf1.category_count_[1][1], np.array([1, 1]))
else:
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
@pytest.mark.parametrize('NaiveBayes', ALL_NAIVE_BAYES_CLASSES)
def test_NB_partial_fit_no_first_classes(NaiveBayes):
# classes is required for first call to partial fit
with pytest.raises(
ValueError,
match="classes must be passed on the first call to partial_fit."
):
NaiveBayes().partial_fit(X2, y2)
# check consistency of consecutive classes values
clf = NaiveBayes()
clf.partial_fit(X2, y2, classes=np.unique(y2))
with pytest.raises(
ValueError,
match="is not the same as on last call to partial_fit"
):
clf.partial_fit(X2, y2, classes=np.arange(42))
# TODO: Remove in version 1.1
@ignore_warnings(category=FutureWarning)
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for DiscreteNaiveBayes, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = DiscreteNaiveBayes().fit(X, y)
assert clf.predict(X[-1:]) == 2
assert clf.predict_proba([X[0]]).shape == (1, 2)
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for DiscreteNaiveBayes, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = DiscreteNaiveBayes().fit(X, y)
assert clf.predict_proba(X[0:1]).shape == (1, 3)
assert clf.predict_proba(X[:2]).shape == (2, 3)
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
@pytest.mark.parametrize('DiscreteNaiveBayes', DISCRETE_NAIVE_BAYES_CLASSES)
def test_discretenb_uniform_prior(DiscreteNaiveBayes):
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
clf = DiscreteNaiveBayes()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_almost_equal(prior, np.array([.5, .5]))
@pytest.mark.parametrize('DiscreteNaiveBayes', DISCRETE_NAIVE_BAYES_CLASSES)
def test_discretenb_provide_prior(DiscreteNaiveBayes):
# Test whether discrete NB classes use provided prior
clf = DiscreteNaiveBayes(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_almost_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
@pytest.mark.parametrize('DiscreteNaiveBayes', DISCRETE_NAIVE_BAYES_CLASSES)
def test_discretenb_provide_prior_with_partial_fit(DiscreteNaiveBayes):
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = DiscreteNaiveBayes(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = DiscreteNaiveBayes(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
@pytest.mark.parametrize('DiscreteNaiveBayes', DISCRETE_NAIVE_BAYES_CLASSES)
def test_discretenb_sample_weight_multiclass(DiscreteNaiveBayes):
# check shape consistency for number of samples at fit time
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float64)
sample_weight /= sample_weight.sum()
clf = DiscreteNaiveBayes().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = DiscreteNaiveBayes()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# TODO: Remove in version 1.1
@ignore_warnings(category=FutureWarning)
@pytest.mark.parametrize('DiscreteNaiveBayes', [BernoulliNB, ComplementNB,
MultinomialNB])
def test_discretenb_coef_intercept_shape(DiscreteNaiveBayes):
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
clf = DiscreteNaiveBayes()
clf.fit(X, y)
assert clf.coef_.shape == (1, 3)
assert clf.intercept_.shape == (1,)
@pytest.mark.parametrize('DiscreteNaiveBayes', DISCRETE_NAIVE_BAYES_CLASSES)
@pytest.mark.parametrize('use_partial_fit', [False, True])
@pytest.mark.parametrize('train_on_single_class_y', [False, True])
def test_discretenb_degenerate_one_class_case(
DiscreteNaiveBayes,
use_partial_fit,
train_on_single_class_y,
):
# Most array attributes of a discrete naive Bayes classifier should have a
# first-axis length equal to the number of classes. Exceptions include:
# ComplementNB.feature_all_, CategoricalNB.n_categories_.
# Confirm that this is the case for binary problems and the degenerate
# case of a single class in the training set, when fitting with `fit` or
# `partial_fit`.
# Non-regression test for handling degenerate one-class case:
# https://github.com/scikit-learn/scikit-learn/issues/18974
X = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
y = [1, 1, 2]
if train_on_single_class_y:
X = X[:-1]
y = y[:-1]
classes = sorted(list(set(y)))
num_classes = len(classes)
clf = DiscreteNaiveBayes()
if use_partial_fit:
clf.partial_fit(X, y, classes=classes)
else:
clf.fit(X, y)
assert clf.predict(X[:1]) == y[0]
# Check that attributes have expected first-axis lengths
attribute_names = [
'classes_',
'class_count_',
'class_log_prior_',
'feature_count_',
'feature_log_prob_',
]
for attribute_name in attribute_names:
attribute = getattr(clf, attribute_name, None)
if attribute is None:
# CategoricalNB has no feature_count_ attribute
continue
if isinstance(attribute, np.ndarray):
assert attribute.shape[0] == num_classes
else:
# CategoricalNB.feature_log_prob_ is a list of arrays
for element in attribute:
assert element.shape[0] == num_classes
@pytest.mark.parametrize('kind', ('dense', 'sparse'))
def test_mnnb(kind):
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
if kind == 'dense':
X = X2
elif kind == 'sparse':
X = scipy.sparse.csr_matrix(X2)
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def test_mnb_prior_unobserved_targets():
# test smoothing of prior for yet unobserved targets
# Create toy training data
X = np.array([[0, 1], [1, 0]])
y = np.array([0, 1])
clf = MultinomialNB()
assert_no_warnings(
clf.partial_fit, X, y, classes=[0, 1, 2]
)
assert clf.predict([[0, 1]]) == 0
assert clf.predict([[1, 0]]) == 1
assert clf.predict([[1, 1]]) == 0
# add a training example with previously unobserved class
assert_no_warnings(
clf.partial_fit, [[1, 1]], [2]
)
assert clf.predict([[0, 1]]) == 0
assert clf.predict([[1, 0]]) == 1
assert clf.predict([[1, 1]]) == 2
# TODO: Remove in version 1.1
@ignore_warnings(category=FutureWarning)
def test_mnb_sample_weight():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
    # Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1 / 3.0, 2 / 3.0, 2 / 3.0, 1 / 3.0, 1 / 3.0,
2 / 3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
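    # Worked check (added for clarity): each Bernoulli class score is
    # prior * prod_f p_f^{x_f} * (1 - p_f)^{1 - x_f}, so here
    #   China: 3/4 * 0.6 * 0.8 * 0.2 * 0.6 * 0.6 * 0.2 = 0.005184
    #   Japan: 1/4 * (2/3)**6 = 16/729 = 0.0219478738...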
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
def test_bnb_feature_log_prob():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_almost_equal(clf.feature_log_prob_, (num - denom))
def test_cnb():
# Tests ComplementNB when alpha=1.0 for the toy example in Manning,
# Raghavan, and Schuetze's "Introduction to Information Retrieval" book:
# https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo.
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1).
Y = np.array([0, 0, 0, 1])
# Check that weights are correct. See steps 4-6 in Table 4 of
# Rennie et al. (2003).
theta = np.array([
[
(0 + 1) / (3 + 6),
(1 + 1) / (3 + 6),
(1 + 1) / (3 + 6),
(0 + 1) / (3 + 6),
(0 + 1) / (3 + 6),
(1 + 1) / (3 + 6)
],
[
(1 + 1) / (6 + 6),
(3 + 1) / (6 + 6),
(0 + 1) / (6 + 6),
(1 + 1) / (6 + 6),
(1 + 1) / (6 + 6),
(0 + 1) / (6 + 6)
]])
weights = np.zeros(theta.shape)
normed_weights = np.zeros(theta.shape)
for i in range(2):
weights[i] = -np.log(theta[i])
normed_weights[i] = weights[i] / weights[i].sum()
# Verify inputs are nonnegative.
clf = ComplementNB(alpha=1.0)
assert_raises(ValueError, clf.fit, -X, Y)
clf.fit(X, Y)
# Check that counts/weights are correct.
feature_count = np.array([[1, 3, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1]])
assert_array_equal(clf.feature_count_, feature_count)
class_count = np.array([3, 1])
assert_array_equal(clf.class_count_, class_count)
feature_all = np.array([1, 4, 1, 1, 1, 1])
assert_array_equal(clf.feature_all_, feature_all)
assert_array_almost_equal(clf.feature_log_prob_, weights)
clf = ComplementNB(alpha=1.0, norm=True)
clf.fit(X, Y)
assert_array_almost_equal(clf.feature_log_prob_, normed_weights)
def test_categoricalnb():
# Check the ability to predict the training set.
clf = CategoricalNB()
y_pred = clf.fit(X2, y2).predict(X2)
assert_array_equal(y_pred, y2)
X3 = np.array([[1, 4], [2, 5]])
y3 = np.array([1, 2])
clf = CategoricalNB(alpha=1, fit_prior=False)
clf.fit(X3, y3)
assert_array_equal(clf.n_categories_, np.array([3, 6]))
# Check error is raised for X with negative entries
X = np.array([[0, -1]])
y = np.array([1])
error_msg = "Negative values in data passed to CategoricalNB (input X)"
assert_raise_message(ValueError, error_msg, clf.predict, X)
assert_raise_message(ValueError, error_msg, clf.fit, X, y)
# Test alpha
X3_test = np.array([[2, 5]])
# alpha=1 increases the count of all categories by one so the final
# probability for each category is not 50/50 but 1/3 to 2/3
bayes_numerator = np.array([[1/3*1/3, 2/3*2/3]])
bayes_denominator = bayes_numerator.sum()
assert_array_almost_equal(clf.predict_proba(X3_test),
bayes_numerator / bayes_denominator)
# Assert category_count has counted all features
assert len(clf.category_count_) == X3.shape[1]
# Check sample_weight
X = np.array([[0, 0], [0, 1], [0, 0], [1, 1]])
y = np.array([1, 1, 2, 2])
clf = CategoricalNB(alpha=1, fit_prior=False)
clf.fit(X, y)
assert_array_equal(clf.predict(np.array([[0, 0]])), np.array([1]))
assert_array_equal(clf.n_categories_, np.array([2, 2]))
for factor in [1., 0.3, 5, 0.0001]:
X = np.array([[0, 0], [0, 1], [0, 0], [1, 1]])
y = np.array([1, 1, 2, 2])
sample_weight = np.array([1, 1, 10, 0.1]) * factor
clf = CategoricalNB(alpha=1, fit_prior=False)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(np.array([[0, 0]])), np.array([2]))
assert_array_equal(clf.n_categories_, np.array([2, 2]))
@pytest.mark.parametrize(
"min_categories, exp_X1_count, exp_X2_count, new_X, exp_n_categories_",
[
# check min_categories with int > observed categories
(3, np.array([[2, 0, 0], [1, 1, 0]]), np.array([[1, 1, 0], [1, 1, 0]]),
np.array([[0, 2]]), np.array([3, 3]),
),
# check with list input
([3, 4], np.array([[2, 0, 0], [1, 1, 0]]),
np.array([[1, 1, 0, 0], [1, 1, 0, 0]]), np.array([[0, 3]]),
np.array([3, 4]),
),
# check min_categories with min less than actual
        (1, np.array([[2, 0], [1, 1]]), np.array([[1, 1], [1, 1]]),
         np.array([[0, 1]]), np.array([2, 2]),
         ),
]
)
def test_categoricalnb_with_min_categories(min_categories, exp_X1_count,
exp_X2_count, new_X,
exp_n_categories_):
X_n_categories = np.array([[0, 0], [0, 1], [0, 0], [1, 1]])
y_n_categories = np.array([1, 1, 2, 2])
expected_prediction = np.array([1])
clf = CategoricalNB(alpha=1, fit_prior=False,
min_categories=min_categories)
clf.fit(X_n_categories, y_n_categories)
X1_count, X2_count = clf.category_count_
assert_array_equal(X1_count, exp_X1_count)
assert_array_equal(X2_count, exp_X2_count)
predictions = clf.predict(new_X)
assert_array_equal(predictions, expected_prediction)
assert_array_equal(clf.n_categories_, exp_n_categories_)
@pytest.mark.parametrize(
"min_categories, error_msg",
[
('bad_arg', "'min_categories' should have integral"),
([[3, 2], [2, 4]], "'min_categories' should have shape"),
(1., "'min_categories' should have integral"),
]
)
def test_categoricalnb_min_categories_errors(min_categories, error_msg):
X = np.array([[0, 0], [0, 1], [0, 0], [1, 1]])
y = np.array([1, 1, 2, 2])
clf = CategoricalNB(alpha=1, fit_prior=False,
min_categories=min_categories)
with pytest.raises(ValueError, match=error_msg):
clf.fit(X, y)
def test_alpha():
    # Setting alpha=0 should not produce NaN results when p(x_i|y_j) = 0 occurs
X = np.array([[1, 0], [1, 1]])
y = np.array([0, 1])
nb = BernoulliNB(alpha=0.)
assert_warns(UserWarning, nb.partial_fit, X, y, classes=[0, 1])
assert_warns(UserWarning, nb.fit, X, y)
prob = np.array([[1, 0], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
nb = MultinomialNB(alpha=0.)
assert_warns(UserWarning, nb.partial_fit, X, y, classes=[0, 1])
assert_warns(UserWarning, nb.fit, X, y)
prob = np.array([[2. / 3, 1. / 3], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
nb = CategoricalNB(alpha=0.)
assert_warns(UserWarning, nb.fit, X, y)
prob = np.array([[1., 0.], [0., 1.]])
assert_array_almost_equal(nb.predict_proba(X), prob)
# Test sparse X
X = scipy.sparse.csr_matrix(X)
nb = BernoulliNB(alpha=0.)
assert_warns(UserWarning, nb.fit, X, y)
prob = np.array([[1, 0], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
nb = MultinomialNB(alpha=0.)
assert_warns(UserWarning, nb.fit, X, y)
prob = np.array([[2. / 3, 1. / 3], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
# Test for alpha < 0
X = np.array([[1, 0], [1, 1]])
y = np.array([0, 1])
expected_msg = ('Smoothing parameter alpha = -1.0e-01. '
'alpha should be > 0.')
b_nb = BernoulliNB(alpha=-0.1)
m_nb = MultinomialNB(alpha=-0.1)
c_nb = CategoricalNB(alpha=-0.1)
assert_raise_message(ValueError, expected_msg, b_nb.fit, X, y)
assert_raise_message(ValueError, expected_msg, m_nb.fit, X, y)
assert_raise_message(ValueError, expected_msg, c_nb.fit, X, y)
b_nb = BernoulliNB(alpha=-0.1)
m_nb = MultinomialNB(alpha=-0.1)
assert_raise_message(ValueError, expected_msg, b_nb.partial_fit,
X, y, classes=[0, 1])
assert_raise_message(ValueError, expected_msg, m_nb.partial_fit,
X, y, classes=[0, 1])
def test_alpha_vector():
X = np.array([[1, 0], [1, 1]])
y = np.array([0, 1])
# Setting alpha=np.array with same length
# as number of features should be fine
alpha = np.array([1, 2])
nb = MultinomialNB(alpha=alpha)
nb.partial_fit(X, y, classes=[0, 1])
    # Test that feature probabilities use pseudo-counts (alpha)
feature_prob = np.array([[1 / 2, 1 / 2], [2 / 5, 3 / 5]])
assert_array_almost_equal(nb.feature_log_prob_, np.log(feature_prob))
# Test predictions
prob = np.array([[5 / 9, 4 / 9], [25 / 49, 24 / 49]])
assert_array_almost_equal(nb.predict_proba(X), prob)
# Test alpha non-negative
alpha = np.array([1., -0.1])
expected_msg = ('Smoothing parameter alpha = -1.0e-01. '
'alpha should be > 0.')
m_nb = MultinomialNB(alpha=alpha)
assert_raise_message(ValueError, expected_msg, m_nb.fit, X, y)
# Test that too small pseudo-counts are replaced
ALPHA_MIN = 1e-10
alpha = np.array([ALPHA_MIN / 2, 0.5])
m_nb = MultinomialNB(alpha=alpha)
m_nb.partial_fit(X, y, classes=[0, 1])
assert_array_almost_equal(m_nb._check_alpha(),
[ALPHA_MIN, 0.5],
decimal=12)
# Test correct dimensions
alpha = np.array([1., 2., 3.])
m_nb = MultinomialNB(alpha=alpha)
expected_msg = ('alpha should be a scalar or a numpy array '
'with shape [n_features]')
assert_raise_message(ValueError, expected_msg, m_nb.fit, X, y)
def test_check_accuracy_on_digits():
    # Non-regression test to make sure that further refactoring or
    # optimization of the NB models does not harm performance on a slightly
    # non-linearly separable dataset
X, y = load_digits(return_X_y=True)
binary_3v8 = np.logical_or(y == 3, y == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert scores.mean() > 0.86
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert scores.mean() > 0.94
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert scores.mean() > 0.83
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert scores.mean() > 0.92
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert scores.mean() > 0.77
scores = cross_val_score(GaussianNB(var_smoothing=0.1), X, y, cv=10)
assert scores.mean() > 0.89
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert scores.mean() > 0.86
# FIXME: remove in 1.2
@pytest.mark.parametrize("Estimator", DISCRETE_NAIVE_BAYES_CLASSES)
def test_n_features_deprecation(Estimator):
# Check that we raise the proper deprecation warning if accessing
# `n_features_`.
X = np.array([[1, 2], [3, 4]])
y = np.array([1, 0])
est = Estimator().fit(X, y)
with pytest.warns(FutureWarning, match="n_features_ was deprecated"):
est.n_features_
|
anntzer/scikit-learn
|
sklearn/tests/test_naive_bayes.py
|
Python
|
bsd-3-clause
| 34,620
|
[
"Gaussian"
] |
19e21e42b0ce9fc8201e03370b7434a43a1d9179096925a8524663ca825c3630
|
"""
Module containing many types of goodness-of-fit test methods.
"""
from __future__ import division
from builtins import zip
from builtins import str
from builtins import range
from past.utils import old_div
from builtins import object
from future.utils import with_metaclass
__author__ = 'wittawat'
from abc import ABCMeta, abstractmethod
import autograd
import autograd.numpy as np
import kgof.data as data
import kgof.util as util
import kgof.kernel as kernel
import logging
import matplotlib.pyplot as plt
import scipy
import scipy.stats as stats
class GofTest(with_metaclass(ABCMeta, object)):
"""
Abstract class for a goodness-of-fit test.
"""
def __init__(self, p, alpha):
"""
p: an UnnormalizedDensity
alpha: significance level of the test
"""
self.p = p
self.alpha = alpha
@abstractmethod
def perform_test(self, dat):
"""perform the goodness-of-fit test and return values computed in a dictionary:
{
alpha: 0.01,
pvalue: 0.0002,
test_stat: 2.3,
h0_rejected: True,
time_secs: ...
}
dat: an instance of Data
"""
raise NotImplementedError()
@abstractmethod
def compute_stat(self, dat):
"""Compute the test statistic"""
raise NotImplementedError()
# end of GofTest
#------------------------------------------------------
class H0Simulator(with_metaclass(ABCMeta, object)):
"""
An abstract class representing a simulator to draw samples from the
null distribution. For some tests, these are needed to conduct the test.
"""
def __init__(self, n_simulate, seed):
"""
n_simulate: The number of times to simulate from the null distribution.
Must be a positive integer.
seed: a random seed
"""
assert n_simulate > 0
self.n_simulate = n_simulate
self.seed = seed
@abstractmethod
def simulate(self, gof, dat):
"""
gof: a GofTest
dat: a Data (observed data)
Simulate from the null distribution and return a dictionary.
One of the item is
sim_stats: a numpy array of stats.
"""
raise NotImplementedError()
# end of H0Simulator
#-------------------
class FSSDH0SimCovObs(H0Simulator):
"""
An asymptotic null distribution simulator for FSSD. Simulate from the
asymptotic null distribution given by the weighted sum of chi-squares. The
    eigenvalues (weights) are computed from the covariance matrix w.r.t. the
    observed sample.
    This is not the exact null distribution, but it has the correct asymptotic
    type-I error at level alpha.
"""
def __init__(self, n_simulate=3000, seed=10):
super(FSSDH0SimCovObs, self).__init__(n_simulate, seed)
def simulate(self, gof, dat, fea_tensor=None):
"""
fea_tensor: n x d x J feature matrix
"""
assert isinstance(gof, FSSD)
n_simulate = self.n_simulate
seed = self.seed
if fea_tensor is None:
_, fea_tensor = gof.compute_stat(dat, return_feature_tensor=True)
J = fea_tensor.shape[2]
X = dat.data()
n = X.shape[0]
# n x d*J
Tau = fea_tensor.reshape(n, -1)
        # Make sure it is a matrix, i.e., np.cov returns a scalar when Tau
        # is 1d.
cov = np.cov(Tau.T) + np.zeros((1, 1))
#cov = Tau.T.dot(Tau/n)
arr_nfssd, eigs = FSSD.list_simulate_spectral(cov, J, n_simulate,
seed=self.seed)
return {'sim_stats': arr_nfssd}
# end of FSSDH0SimCovObs
#-----------------------
class FSSDH0SimCovDraw(H0Simulator):
"""
An asymptotic null distribution simulator for FSSD. Simulate from the
asymptotic null distribution given by the weighted sum of chi-squares. The
    eigenvalues (weights) are computed from the covariance matrix w.r.t. the
sample drawn from p (the density to test against).
- The UnnormalizedDensity p is required to implement get_datasource() method.
"""
def __init__(self, n_draw=2000, n_simulate=3000, seed=10):
"""
n_draw: number of samples to draw from the UnnormalizedDensity p
"""
super(FSSDH0SimCovDraw, self).__init__(n_simulate, seed)
self.n_draw = n_draw
def simulate(self, gof, dat, fea_tensor=None):
"""
fea_tensor: n x d x J feature matrix
This method does not use dat.
"""
dat = None
#assert isinstance(gof, FSSD)
# p = an UnnormalizedDensity
p = gof.p
ds = p.get_datasource()
if ds is None:
raise ValueError('DataSource associated with p must be available.')
Xdraw = ds.sample(n=self.n_draw, seed=self.seed)
_, fea_tensor = gof.compute_stat(Xdraw, return_feature_tensor=True)
X = Xdraw.data()
J = fea_tensor.shape[2]
n = self.n_draw
# n x d*J
Tau = fea_tensor.reshape(n, -1)
        # Make sure it is a matrix, i.e., np.cov returns a scalar when Tau
        # is 1d.
#cov = np.cov(Tau.T) + np.zeros((1, 1))
cov = old_div(Tau.T.dot(Tau),n) + np.zeros((1, 1))
n_simulate = self.n_simulate
arr_nfssd, eigs = FSSD.list_simulate_spectral(cov, J, n_simulate,
seed=self.seed)
return {'sim_stats': arr_nfssd}
# end of FSSDH0SimCovDraw
#-----------------------
class FSSD(GofTest):
"""
    Goodness-of-fit test using the Finite Set Stein Discrepancy (FSSD)
    statistic and a set of paired test locations. The statistic is n*FSSD^2,
    and it can be negative because an unbiased estimator is used.
H0: the sample follows p
H1: the sample does not follow p
p is specified to the constructor in the form of an UnnormalizedDensity.
"""
#NULLSIM_* are constants used to choose the way to simulate from the null
#distribution to do the test.
# Same as NULLSIM_COVQ; but assume that sample can be drawn from p.
# Use the drawn sample to compute the covariance.
NULLSIM_COVP = 1
def __init__(self, p, k, V, null_sim=FSSDH0SimCovObs(n_simulate=3000,
seed=101), alpha=0.01):
"""
p: an instance of UnnormalizedDensity
k: a DifferentiableKernel object
V: J x dx numpy array of J locations to test the difference
null_sim: an instance of H0Simulator for simulating from the null distribution.
alpha: significance level
"""
super(FSSD, self).__init__(p, alpha)
self.k = k
self.V = V
self.null_sim = null_sim
def perform_test(self, dat, return_simulated_stats=False):
"""
dat: an instance of Data
"""
with util.ContextTimer() as t:
alpha = self.alpha
null_sim = self.null_sim
n_simulate = null_sim.n_simulate
X = dat.data()
n = X.shape[0]
J = self.V.shape[0]
nfssd, fea_tensor = self.compute_stat(dat, return_feature_tensor=True)
sim_results = null_sim.simulate(self, dat, fea_tensor)
arr_nfssd = sim_results['sim_stats']
            # approximate the p-value with the simulated null statistics
pvalue = np.mean(arr_nfssd > nfssd)
results = {'alpha': self.alpha, 'pvalue': pvalue, 'test_stat': nfssd,
'h0_rejected': pvalue < alpha, 'n_simulate': n_simulate,
'time_secs': t.secs,
}
if return_simulated_stats:
results['sim_stats'] = arr_nfssd
return results
def compute_stat(self, dat, return_feature_tensor=False):
"""
The statistic is n*FSSD^2.
"""
X = dat.data()
n = X.shape[0]
# n x d x J
Xi = self.feature_tensor(X)
unscaled_mean = FSSD.ustat_h1_mean_variance(Xi, return_variance=False)
stat = n*unscaled_mean
#print 'Xi: {0}'.format(Xi)
#print 'Tau: {0}'.format(Tau)
#print 't1: {0}'.format(t1)
#print 't2: {0}'.format(t2)
#print 'stat: {0}'.format(stat)
if return_feature_tensor:
return stat, Xi
else:
return stat
def get_H1_mean_variance(self, dat):
"""
Return the mean and variance under H1 of the test statistic (divided by
n).
"""
X = dat.data()
Xi = self.feature_tensor(X)
mean, variance = FSSD.ustat_h1_mean_variance(Xi, return_variance=True)
return mean, variance
def feature_tensor(self, X):
"""
Compute the feature tensor which is n x d x J.
The feature tensor can be used to compute the statistic, and the
covariance matrix for simulating from the null distribution.
X: n x d data numpy array
return an n x d x J numpy array
"""
k = self.k
J = self.V.shape[0]
n, d = X.shape
# n x d matrix of gradients
grad_logp = self.p.grad_log(X)
#assert np.all(util.is_real_num(grad_logp))
# n x J matrix
#print 'V'
#print self.V
K = k.eval(X, self.V)
#assert np.all(util.is_real_num(K))
list_grads = np.array([np.reshape(k.gradX_y(X, v), (1, n, d)) for v in self.V])
stack0 = np.concatenate(list_grads, axis=0)
#a numpy array G of size n x d x J such that G[:, :, J]
# is the derivative of k(X, V_j) with respect to X.
dKdV = np.transpose(stack0, (1, 2, 0))
# n x d x J tensor
grad_logp_K = util.outer_rows(grad_logp, K)
#print 'grad_logp'
#print grad_logp.dtype
#print grad_logp
#print 'K'
#print K
Xi = old_div((grad_logp_K + dKdV),np.sqrt(d*J))
#Xi = (grad_logp_K + dKdV)
return Xi
@staticmethod
def power_criterion(p, dat, k, test_locs, reg=1e-2, use_unbiased=True,
use_2terms=False):
"""
Compute the mean and standard deviation of the statistic under H1.
Return mean/sd.
use_2terms: True if the objective should include the first term in the power
expression. This term carries the test threshold and is difficult to
compute (depends on the optimized test locations). If True, then
the objective will be -1/(n**0.5*sigma_H1) + n**0.5 FSSD^2/sigma_H1,
which ignores the test threshold in the first term.
"""
X = dat.data()
n = X.shape[0]
V = test_locs
fssd = FSSD(p, k, V, null_sim=None)
fea_tensor = fssd.feature_tensor(X)
u_mean, u_variance = FSSD.ustat_h1_mean_variance(fea_tensor,
return_variance=True, use_unbiased=use_unbiased)
# mean/sd criterion
sigma_h1 = np.sqrt(u_variance + reg)
ratio = old_div(u_mean,sigma_h1)
if use_2terms:
obj = old_div(-1.0,(np.sqrt(n)*sigma_h1)) + np.sqrt(n)*ratio
#print obj
else:
obj = ratio
return obj
@staticmethod
def ustat_h1_mean_variance(fea_tensor, return_variance=True,
use_unbiased=True):
"""
Compute the mean and variance of the asymptotic normal distribution
under H1 of the test statistic.
fea_tensor: feature tensor obtained from feature_tensor()
return_variance: If false, avoid computing and returning the variance.
use_unbiased: If True, use the unbiased version of the mean. Can be
negative.
Return the mean [and the variance]
"""
Xi = fea_tensor
n, d, J = Xi.shape
#print 'Xi'
#print Xi
#assert np.all(util.is_real_num(Xi))
assert n > 1, 'Need n > 1 to compute the mean of the statistic.'
# n x d*J
# Tau = Xi.reshape(n, d*J)
Tau = np.reshape(Xi, [n, d*J])
if use_unbiased:
t1 = np.sum(np.mean(Tau, 0)**2)*(old_div(n,float(n-1)))
t2 = old_div(np.sum(np.mean(Tau**2, 0)),float(n-1))
# stat is the mean
stat = t1 - t2
else:
stat = np.sum(np.mean(Tau, 0)**2)
if not return_variance:
return stat
# compute the variance
# mu: d*J vector
mu = np.mean(Tau, 0)
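        # Explanatory note (added): the statistic is approximately
        # ||E[tau]||^2, whose gradient at mu is 2*mu, so the delta method
        # gives var = 4*(E[(tau^T mu)^2] - (mu^T mu)^2), estimated below
        # with empirical means.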
variance = 4*np.mean(np.dot(Tau, mu)**2) - 4*np.sum(mu**2)**2
return stat, variance
@staticmethod
def list_simulate_spectral(cov, J, n_simulate=1000, seed=82):
"""
        Simulate the null distribution using the spectrum of the covariance
matrix. This is intended to be used to approximate the null
distribution.
Return (a numpy array of simulated n*FSSD values, eigenvalues of cov)
"""
# eigen decompose
eigs, _ = np.linalg.eig(cov)
eigs = np.real(eigs)
# sort in decreasing order
eigs = -np.sort(-eigs)
sim_fssds = FSSD.simulate_null_dist(eigs, J, n_simulate=n_simulate,
seed=seed)
return sim_fssds, eigs
@staticmethod
def simulate_null_dist(eigs, J, n_simulate=2000, seed=7):
"""
        Simulate the null distribution using the spectrum of the covariance
        matrix of the U-statistic. The simulated statistic is n*FSSD^2 where
        FSSD is an unbiased estimator.
        - eigs: a numpy array of estimated eigenvalues of the covariance
          matrix. eigs is of length d*J, where d is the input dimension.
        - J: the number of test locations.
Return a numpy array of simulated statistics.
"""
d = old_div(len(eigs),J)
assert d>0
# draw at most d x J x block_size values at a time
block_size = max(20, int(old_div(1000.0,(d*J))))
fssds = np.zeros(n_simulate)
from_ind = 0
with util.NumpySeedContext(seed=seed):
while from_ind < n_simulate:
to_draw = min(block_size, n_simulate-from_ind)
# draw chi^2 random variables.
chi2 = np.random.randn(d*J, to_draw)**2
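                # Under H0, n*FSSD^2 converges to sum_i eigs[i]*(Z_i^2 - 1)
                # with Z_i i.i.d. standard normal, so eigs.dot(chi2 - 1.0)
                # realizes one block of draws from that limiting law.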
# an array of length to_draw
sim_fssds = eigs.dot(chi2-1.0)
# store
end_ind = from_ind+to_draw
fssds[from_ind:end_ind] = sim_fssds
from_ind = end_ind
return fssds
@staticmethod
def fssd_grid_search_kernel(p, dat, test_locs, list_kernel):
"""
Linear search for the best kernel in the list that maximizes
the test power criterion, fixing the test locations to V.
- p: UnnormalizedDensity
- dat: a Data object
- list_kernel: list of kernel candidates
return: (best kernel index, array of test power criteria)
"""
V = test_locs
X = dat.data()
n_cand = len(list_kernel)
objs = np.zeros(n_cand)
for i in range(n_cand):
ki = list_kernel[i]
objs[i] = FSSD.power_criterion(p, dat, ki, test_locs)
logging.info('(%d), obj: %5.4g, k: %s' %(i, objs[i], str(ki)))
#Widths that come early in the list
# are preferred if test powers are equal.
#bestij = np.unravel_index(objs.argmax(), objs.shape)
besti = objs.argmax()
return besti, objs
# end of FSSD
# --------------------------------------
class GaussFSSD(FSSD):
"""
FSSD using an isotropic Gaussian kernel.
"""
def __init__(self, p, sigma2, V, alpha=0.01, n_simulate=3000, seed=10):
k = kernel.KGauss(sigma2)
null_sim = FSSDH0SimCovObs(n_simulate=n_simulate, seed=seed)
super(GaussFSSD, self).__init__(p, k, V, null_sim, alpha)
@staticmethod
def power_criterion(p, dat, gwidth, test_locs, reg=1e-2, use_2terms=False):
"""
use_2terms: True if the objective should include the first term in the power
expression. This term carries the test threshold and is difficult to
compute (depends on the optimized test locations). If True, then
the objective will be -1/(n**0.5*sigma_H1) + n**0.5 FSSD^2/sigma_H1,
which ignores the test threshold in the first term.
"""
k = kernel.KGauss(gwidth)
return FSSD.power_criterion(p, dat, k, test_locs, reg, use_2terms=use_2terms)
@staticmethod
def optimize_auto_init(p, dat, J, **ops):
"""
Optimize parameters by calling optimize_locs_widths(). Automatically
initialize the test locations and the Gaussian width.
Return optimized locations, Gaussian width, optimization info
"""
assert J>0
# Use grid search to initialize the gwidth
X = dat.data()
n_gwidth_cand = 5
gwidth_factors = 2.0**np.linspace(-3, 3, n_gwidth_cand)
med2 = util.meddistance(X, 1000)**2
k = kernel.KGauss(med2*2)
# fit a Gaussian to the data and draw to initialize V0
V0 = util.fit_gaussian_draw(X, J, seed=829, reg=1e-6)
list_gwidth = np.hstack( ( (med2)*gwidth_factors ) )
besti, objs = GaussFSSD.grid_search_gwidth(p, dat, V0, list_gwidth)
gwidth = list_gwidth[besti]
assert util.is_real_num(gwidth), 'gwidth not real. Was %s'%str(gwidth)
assert gwidth > 0, 'gwidth not positive. Was %.3g'%gwidth
logging.info('After grid search, gwidth=%.3g'%gwidth)
V_opt, gwidth_opt, info = GaussFSSD.optimize_locs_widths(p, dat,
gwidth, V0, **ops)
# set the width bounds
#fac_min = 5e-2
#fac_max = 5e3
#gwidth_lb = fac_min*med2
#gwidth_ub = fac_max*med2
#gwidth_opt = max(gwidth_lb, min(gwidth_opt, gwidth_ub))
return V_opt, gwidth_opt, info
@staticmethod
def grid_search_gwidth(p, dat, test_locs, list_gwidth):
"""
Linear search for the best Gaussian width in the list that maximizes
the test power criterion, fixing the test locations.
- V: a J x dx np-array for J test locations
return: (best width index, list of test power objectives)
"""
list_gauss_kernel = [kernel.KGauss(gw) for gw in list_gwidth]
besti, objs = FSSD.fssd_grid_search_kernel(p, dat, test_locs,
list_gauss_kernel)
return besti, objs
@staticmethod
def optimize_locs_widths(p, dat, gwidth0, test_locs0, reg=1e-2,
max_iter=100, tol_fun=1e-5, disp=False, locs_bounds_frac=100,
gwidth_lb=None, gwidth_ub=None, use_2terms=False,
):
"""
Optimize the test locations and the Gaussian kernel width by
maximizing a test power criterion. data should not be the same data as
used in the actual test (i.e., should be a held-out set).
This function is deterministic.
        - dat: a Data object
- test_locs0: Jxd numpy array. Initial V.
- reg: reg to add to the mean/sqrt(variance) criterion to become
mean/sqrt(variance + reg)
- gwidth0: initial value of the Gaussian width^2
- max_iter: #gradient descent iterations
- tol_fun: termination tolerance of the objective value
- disp: True to print convergence messages
- locs_bounds_frac: When making box bounds for the test_locs, extend
the box defined by coordinate-wise min-max by std of each coordinate
multiplied by this number.
- gwidth_lb: absolute lower bound on the Gaussian width^2
- gwidth_ub: absolute upper bound on the Gaussian width^2
- use_2terms: If True, then besides the signal-to-noise ratio
criterion, the objective function will also include the first term
that is dropped.
#- If the lb, ub bounds are None, use fraction of the median heuristics
# to automatically set the bounds.
Return (V test_locs, gaussian width, optimization info log)
"""
J = test_locs0.shape[0]
X = dat.data()
n, d = X.shape
# Parameterize the Gaussian width with its square root (then square later)
# to automatically enforce the positivity.
def obj(sqrt_gwidth, V):
return -GaussFSSD.power_criterion(
p, dat, sqrt_gwidth**2, V, reg=reg, use_2terms=use_2terms)
flatten = lambda gwidth, V: np.hstack((gwidth, V.reshape(-1)))
def unflatten(x):
sqrt_gwidth = x[0]
V = np.reshape(x[1:], (J, d))
return sqrt_gwidth, V
def flat_obj(x):
sqrt_gwidth, V = unflatten(x)
return obj(sqrt_gwidth, V)
# gradient
#grad_obj = autograd.elementwise_grad(flat_obj)
# Initial point
x0 = flatten(np.sqrt(gwidth0), test_locs0)
#make sure that the optimized gwidth is not too small or too large.
fac_min = 1e-2
fac_max = 1e2
med2 = util.meddistance(X, subsample=1000)**2
if gwidth_lb is None:
gwidth_lb = max(fac_min*med2, 1e-3)
if gwidth_ub is None:
gwidth_ub = min(fac_max*med2, 1e5)
# Make a box to bound test locations
X_std = np.std(X, axis=0)
# X_min: length-d array
X_min = np.min(X, axis=0)
X_max = np.max(X, axis=0)
# V_lb: J x d
V_lb = np.tile(X_min - locs_bounds_frac*X_std, (J, 1))
V_ub = np.tile(X_max + locs_bounds_frac*X_std, (J, 1))
# (J*d+1) x 2. Take square root because we parameterize with the square
# root
x0_lb = np.hstack((np.sqrt(gwidth_lb), np.reshape(V_lb, -1)))
x0_ub = np.hstack((np.sqrt(gwidth_ub), np.reshape(V_ub, -1)))
x0_bounds = list(zip(x0_lb, x0_ub))
# optimize. Time the optimization as well.
# https://docs.scipy.org/doc/scipy/reference/optimize.minimize-lbfgsb.html
grad_obj = autograd.elementwise_grad(flat_obj)
with util.ContextTimer() as timer:
opt_result = scipy.optimize.minimize(
flat_obj, x0, method='L-BFGS-B',
bounds=x0_bounds,
tol=tol_fun,
options={
'maxiter': max_iter, 'ftol': tol_fun, 'disp': disp,
'gtol': 1.0e-07,
},
jac=grad_obj,
)
opt_result = dict(opt_result)
opt_result['time_secs'] = timer.secs
x_opt = opt_result['x']
sq_gw_opt, V_opt = unflatten(x_opt)
gw_opt = sq_gw_opt**2
assert util.is_real_num(gw_opt), 'gw_opt is not real. Was %s' % str(gw_opt)
return V_opt, gw_opt, opt_result
# end of class GaussFSSD
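# Illustrative addition: a minimal end-to-end sketch (not part of the
# original module) of running GaussFSSD with a median-heuristic width and
# random test locations. It assumes kgof.density provides IsotropicNormal
# for the model p; adapt to the density of interest.
def _demo_gauss_fssd(seed=4):
    import kgof.density as density
    with util.NumpySeedContext(seed=seed):
        X = np.random.randn(300, 2) + 0.5          # sample from a shifted q
    p = density.IsotropicNormal(np.zeros(2), 1.0)  # model p = N(0, I)
    dat = data.Data(X)
    med2 = util.meddistance(X, subsample=1000)**2  # median-heuristic width^2
    V = util.fit_gaussian_draw(X, 5, seed=seed + 1)  # J=5 test locations
    fssd = GaussFSSD(p, med2, V, alpha=0.01)
    return fssd.perform_test(dat)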
def bootstrapper_rademacher(n):
"""
    Produce a sequence of i.i.d. {-1, 1} random variables.
    Suitable for bootstrapping on an i.i.d. sample.
"""
return 2.0*np.random.randint(0, 1+1, n)-1.0
def bootstrapper_multinomial(n):
"""
    Produce a sequence of i.i.d. Multinomial(n; 1/n, ..., 1/n) random variables.
This is described on page 5 of Liu et al., 2016 (ICML 2016).
"""
import warnings
warnings.warn('Somehow bootstrapper_multinomial() does not give the right null distribution.')
M = np.random.multinomial(n, old_div(np.ones(n),float(n)), size=1)
return M.reshape(-1) - old_div(1.0,n)
class IMQFSSD(FSSD):
"""
FSSD using the inverse multiquadric kernel (IMQ).
k(x,y) = (c^2 + ||x-y||^2)^b
where c > 0 and b < 0.
"""
def __init__(self, p, b, c, V, alpha=0.01, n_simulate=3000, seed=10):
"""
n_simulate: number of times to draw from the null distribution.
"""
k = kernel.KIMQ(b=b, c=c)
null_sim = FSSDH0SimCovObs(n_simulate=n_simulate, seed=seed)
super(IMQFSSD, self).__init__(p, k, V, null_sim, alpha)
@staticmethod
def power_criterion(p, dat, b, c, test_locs, reg=1e-2):
k = kernel.KIMQ(b=b, c=c)
return FSSD.power_criterion(p, dat, k, test_locs, reg)
#@staticmethod
#def optimize_auto_init(p, dat, J, **ops):
# """
# Optimize parameters by calling optimize_locs_widths(). Automatically
# initialize the test locations and the Gaussian width.
# Return optimized locations, Gaussian width, optimization info
# """
# assert J>0
# # Use grid search to initialize the gwidth
# X = dat.data()
# n_gwidth_cand = 5
# gwidth_factors = 2.0**np.linspace(-3, 3, n_gwidth_cand)
# med2 = util.meddistance(X, 1000)**2
# k = kernel.KGauss(med2*2)
# # fit a Gaussian to the data and draw to initialize V0
# V0 = util.fit_gaussian_draw(X, J, seed=829, reg=1e-6)
# list_gwidth = np.hstack( ( (med2)*gwidth_factors ) )
# besti, objs = GaussFSSD.grid_search_gwidth(p, dat, V0, list_gwidth)
# gwidth = list_gwidth[besti]
# assert util.is_real_num(gwidth), 'gwidth not real. Was %s'%str(gwidth)
# assert gwidth > 0, 'gwidth not positive. Was %.3g'%gwidth
# logging.info('After grid search, gwidth=%.3g'%gwidth)
# V_opt, gwidth_opt, info = GaussFSSD.optimize_locs_widths(p, dat,
# gwidth, V0, **ops)
# # set the width bounds
# #fac_min = 5e-2
# #fac_max = 5e3
# #gwidth_lb = fac_min*med2
# #gwidth_ub = fac_max*med2
# #gwidth_opt = max(gwidth_lb, min(gwidth_opt, gwidth_ub))
# return V_opt, gwidth_opt, info
@staticmethod
def optimize_locs(p, dat, b, c, test_locs0, reg=1e-5, max_iter=100,
tol_fun=1e-5, disp=False, locs_bounds_frac=100):
"""
Optimize just the test locations by maximizing a test power criterion,
keeping the kernel parameters b, c fixed to the specified values. data
should not be the same data as used in the actual test (i.e., should be
a held-out set). This function is deterministic.
- p: an UnnormalizedDensity specifying the problem
- dat: a Data object
- b, c: kernel parameters of the IMQ kernel. Not optimized.
- test_locs0: Jxd numpy array. Initial V.
- reg: reg to add to the mean/sqrt(variance) criterion to become
mean/sqrt(variance + reg)
- max_iter: #gradient descent iterations
- tol_fun: termination tolerance of the objective value
- disp: True to print convergence messages
- locs_bounds_frac: When making box bounds for the test_locs, extend
the box defined by coordinate-wise min-max by std of each coordinate
multiplied by this number.
Return (V test_locs, optimization info log)
"""
J = test_locs0.shape[0]
X = dat.data()
n, d = X.shape
def obj(V):
return -IMQFSSD.power_criterion(p, dat, b, c, V, reg=reg)
flatten = lambda V: np.reshape(V, -1)
def unflatten(x):
V = np.reshape(x, (J, d))
return V
def flat_obj(x):
V = unflatten(x)
return obj(V)
# Initial point
x0 = flatten(test_locs0)
# Make a box to bound test locations
X_std = np.std(X, axis=0)
# X_min: length-d array
X_min = np.min(X, axis=0)
X_max = np.max(X, axis=0)
# V_lb: J x d
V_lb = np.tile(X_min - locs_bounds_frac*X_std, (J, 1))
V_ub = np.tile(X_max + locs_bounds_frac*X_std, (J, 1))
# (J*d) x 2.
x0_bounds = list(zip(V_lb.reshape(-1)[:, np.newaxis], V_ub.reshape(-1)[:, np.newaxis]))
# optimize. Time the optimization as well.
# https://docs.scipy.org/doc/scipy/reference/optimize.minimize-lbfgsb.html
grad_obj = autograd.elementwise_grad(flat_obj)
with util.ContextTimer() as timer:
opt_result = scipy.optimize.minimize(
flat_obj, x0, method='L-BFGS-B',
bounds=x0_bounds,
tol=tol_fun,
options={
'maxiter': max_iter, 'ftol': tol_fun, 'disp': disp,
'gtol': 1.0e-06,
},
jac=grad_obj,
)
opt_result = dict(opt_result)
opt_result['time_secs'] = timer.secs
x_opt = opt_result['x']
V_opt = unflatten(x_opt)
return V_opt, opt_result
@staticmethod
def optimize_locs_params(p, dat, b0, c0, test_locs0, reg=1e-2,
max_iter=100, tol_fun=1e-5, disp=False, locs_bounds_frac=100,
b_lb= -20.0, b_ub= -1e-4, c_lb=1e-6, c_ub=1e3,
):
"""
        Optimize the test locations and the two parameters (b and c) of the
IMQ kernel by maximizing the test power criterion.
k(x,y) = (c^2 + ||x-y||^2)^b
where c > 0 and b < 0.
data should not be the same data as used in the actual test (i.e.,
should be a held-out set). This function is deterministic.
- p: UnnormalizedDensity specifying the problem.
- b0: initial parameter value for b (in the kernel)
- c0: initial parameter value for c (in the kernel)
- dat: a Data object (training set)
- test_locs0: Jxd numpy array. Initial V.
- reg: reg to add to the mean/sqrt(variance) criterion to become
mean/sqrt(variance + reg)
- max_iter: #gradient descent iterations
- tol_fun: termination tolerance of the objective value
- disp: True to print convergence messages
- locs_bounds_frac: When making box bounds for the test_locs, extend
the box defined by coordinate-wise min-max by std of each coordinate
multiplied by this number.
- b_lb: absolute lower bound on b. b is always < 0.
- b_ub: absolute upper bound on b
- c_lb: absolute lower bound on c. c is always > 0.
- c_ub: absolute upper bound on c
#- If the lb, ub bounds are None
Return (V test_locs, b, c, optimization info log)
"""
"""
In the optimization, we will parameterize b with its square root.
Square back and negate to form b. c is not parameterized in any special
way since it enters to the kernel with c^2. Absolute value of c will be
taken to make sure it is positive.
"""
J = test_locs0.shape[0]
X = dat.data()
n, d = X.shape
def obj(sqrt_neg_b, c, V):
b = -sqrt_neg_b**2
return -IMQFSSD.power_criterion(p, dat, b, c, V, reg=reg)
flatten = lambda sqrt_neg_b, c, V: np.hstack((sqrt_neg_b, c, V.reshape(-1)))
def unflatten(x):
sqrt_neg_b = x[0]
c = x[1]
V = np.reshape(x[2:], (J, d))
return sqrt_neg_b, c, V
def flat_obj(x):
sqrt_neg_b, c, V = unflatten(x)
return obj(sqrt_neg_b, c, V)
# gradient
#grad_obj = autograd.elementwise_grad(flat_obj)
# Initial point
b02 = np.sqrt(-b0)
x0 = flatten(b02, c0, test_locs0)
# Make a box to bound test locations
X_std = np.std(X, axis=0)
# X_min: length-d array
X_min = np.min(X, axis=0)
X_max = np.max(X, axis=0)
# V_lb: J x d
V_lb = np.tile(X_min - locs_bounds_frac*X_std, (J, 1))
V_ub = np.tile(X_max + locs_bounds_frac*X_std, (J, 1))
        # (J*d+2) x 2. Make sure to bound the reparameterized values (not the original)
"""
For b, b2 := sqrt(-b)
lb <= b <= ub < 0 means
sqrt(-ub) <= b2 <= sqrt(-lb)
Note the positions of ub, lb.
"""
x0_lb = np.hstack((np.sqrt(-b_ub), c_lb, np.reshape(V_lb, -1)))
x0_ub = np.hstack((np.sqrt(-b_lb), c_ub, np.reshape(V_ub, -1)))
x0_bounds = list(zip(x0_lb, x0_ub))
# optimize. Time the optimization as well.
# https://docs.scipy.org/doc/scipy/reference/optimize.minimize-lbfgsb.html
grad_obj = autograd.elementwise_grad(flat_obj)
with util.ContextTimer() as timer:
opt_result = scipy.optimize.minimize(
flat_obj, x0, method='L-BFGS-B',
bounds=x0_bounds,
tol=tol_fun,
options={
'maxiter': max_iter, 'ftol': tol_fun, 'disp': disp,
'gtol': 1.0e-06,
},
jac=grad_obj,
)
opt_result = dict(opt_result)
opt_result['time_secs'] = timer.secs
x_opt = opt_result['x']
sqrt_neg_b, c, V_opt = unflatten(x_opt)
b = -sqrt_neg_b**2
assert util.is_real_num(b), 'b is not real. Was {}'.format(b)
assert b < 0
assert util.is_real_num(c), 'c is not real. Was {}'.format(c)
assert c > 0
return V_opt, b, c, opt_result
# end of class IMQFSSD
class KernelSteinTest(GofTest):
"""
Goodness-of-fit test using kernelized Stein discrepancy test of
Chwialkowski et al., 2016 and Liu et al., 2016 in ICML 2016.
Mainly follow the details in Chwialkowski et al., 2016.
The test statistic is n*V_n where V_n is a V-statistic.
- This test runs in O(n^2 d^2) time.
H0: the sample follows p
H1: the sample does not follow p
p is specified to the constructor in the form of an UnnormalizedDensity.
"""
def __init__(self, p, k, bootstrapper=bootstrapper_rademacher, alpha=0.01,
n_simulate=500, seed=11):
"""
p: an instance of UnnormalizedDensity
k: a KSTKernel object
bootstrapper: a function: (n) |-> numpy array of n weights
to be multiplied in the double sum of the test statistic for generating
bootstrap samples from the null distribution.
alpha: significance level
n_simulate: The number of times to simulate from the null distribution
by bootstrapping. Must be a positive integer.
"""
super(KernelSteinTest, self).__init__(p, alpha)
self.k = k
self.bootstrapper = bootstrapper
self.n_simulate = n_simulate
self.seed = seed
def perform_test(self, dat, return_simulated_stats=False, return_ustat_gram=False):
"""
        dat: an instance of Data
"""
with util.ContextTimer() as t:
alpha = self.alpha
n_simulate = self.n_simulate
X = dat.data()
n = X.shape[0]
_, H = self.compute_stat(dat, return_ustat_gram=True)
test_stat = n*np.mean(H)
            # bootstrapping
sim_stats = np.zeros(n_simulate)
with util.NumpySeedContext(seed=self.seed):
for i in range(n_simulate):
W = self.bootstrapper(n)
# n * [ (1/n^2) * \sum_i \sum_j h(x_i, x_j) w_i w_j ]
boot_stat = W.dot(H.dot(old_div(W,float(n))))
# This is a bootstrap version of n*V_n
sim_stats[i] = boot_stat
            # approximate the p-value with the bootstrap statistics
pvalue = np.mean(sim_stats > test_stat)
results = {'alpha': self.alpha, 'pvalue': pvalue, 'test_stat': test_stat,
'h0_rejected': pvalue < alpha, 'n_simulate': n_simulate,
'time_secs': t.secs,
}
if return_simulated_stats:
results['sim_stats'] = sim_stats
if return_ustat_gram:
results['H'] = H
return results
def compute_stat(self, dat, return_ustat_gram=False):
"""
Compute the V statistic as in Section 2.2 of Chwialkowski et al., 2016.
return_ustat_gram: If True, then return the n x n matrix used to
compute the statistic (by taking the mean of all the elements)
"""
X = dat.data()
n, d = X.shape
k = self.k
# n x d matrix of gradients
grad_logp = self.p.grad_log(X)
# n x n
gram_glogp = grad_logp.dot(grad_logp.T)
# n x n
K = k.eval(X, X)
B = np.zeros((n, n))
C = np.zeros((n, n))
for i in range(d):
grad_logp_i = grad_logp[:, i]
B += k.gradX_Y(X, X, i)*grad_logp_i
C += (k.gradY_X(X, X, i).T * grad_logp_i).T
H = K*gram_glogp + B + C + k.gradXY_sum(X, X)
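        # Explanatory note (added): H[i, j] = h_p(x_i, x_j) with the Stein
        # kernel h_p(x, y) = s(x)^T s(y) k(x, y) + s(y)^T grad_x k(x, y)
        #                    + s(x)^T grad_y k(x, y) + sum_i d^2 k/(dx_i dy_i),
        # where s = grad log p (Chwialkowski et al., 2016, Section 2.2).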
# V-statistic
stat = n*np.mean(H)
if return_ustat_gram:
return stat, H
else:
return stat
#print 't1: {0}'.format(t1)
#print 't2: {0}'.format(t2)
#print 't3: {0}'.format(t3)
#print 't4: {0}'.format(t4)
# end KernelSteinTest
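# Illustrative addition: a hedged sketch (not original code) of running the
# quadratic-time kernel Stein test above. It assumes kgof.density provides
# IsotropicNormal for the model p.
def _demo_kernel_stein_test(seed=5):
    import kgof.density as density
    with util.NumpySeedContext(seed=seed):
        X = np.random.randn(200, 2)  # here the sample really comes from p
    p = density.IsotropicNormal(np.zeros(2), 1.0)
    dat = data.Data(X)
    sig2 = util.meddistance(X, subsample=1000)**2
    kst = KernelSteinTest(p, kernel.KGauss(sig2), alpha=0.01, n_simulate=300)
    return kst.perform_test(dat)  # H0 should rarely be rejected here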
class LinearKernelSteinTest(GofTest):
"""
Goodness-of-fit test using the linear-version of kernelized Stein
discrepancy test of Liu et al., 2016 in ICML 2016. Described in Liu et al.,
2016.
- This test runs in O(n d^2) time.
- test stat = sqrt(n_half)*linear-time Stein discrepancy
- Asymptotically normal under both H0 and H1.
H0: the sample follows p
H1: the sample does not follow p
p is specified to the constructor in the form of an UnnormalizedDensity.
"""
def __init__(self, p, k, alpha=0.01, seed=11):
"""
p: an instance of UnnormalizedDensity
k: a LinearKSTKernel object
alpha: significance level
        seed: a random seed
"""
super(LinearKernelSteinTest, self).__init__(p, alpha)
self.k = k
self.seed = seed
def perform_test(self, dat):
"""
        dat: an instance of Data
"""
with util.ContextTimer() as t:
alpha = self.alpha
X = dat.data()
n = X.shape[0]
# H: length-n vector
_, H = self.compute_stat(dat, return_pointwise_stats=True)
test_stat = np.sqrt(old_div(n,2))*np.mean(H)
stat_var = np.mean(H**2)
pvalue = stats.norm.sf(test_stat, loc=0, scale=np.sqrt(stat_var) )
results = {'alpha': self.alpha, 'pvalue': pvalue, 'test_stat': test_stat,
'h0_rejected': pvalue < alpha, 'time_secs': t.secs,
}
return results
def compute_stat(self, dat, return_pointwise_stats=False):
"""
Compute the linear-time statistic described in Eq. 17 of Liu et al., 2016
"""
X = dat.data()
n, d = X.shape
k = self.k
# Divide the sample into two halves of equal size.
n_half = old_div(n,2)
X1 = X[:n_half, :]
# May throw away last sample
X2 = X[n_half:(2*n_half), :]
assert X1.shape[0] == n_half
assert X2.shape[0] == n_half
# score vectors
S1 = self.p.grad_log(X1)
# n_half x d
S2 = self.p.grad_log(X2)
Kvec = k.pair_eval(X1, X2)
A = np.sum(S1*S2, 1)*Kvec
B = np.sum(S2*k.pair_gradX_Y(X1, X2), 1)
C = np.sum(S1*k.pair_gradY_X(X1, X2), 1)
D = k.pair_gradXY_sum(X1, X2)
H = A + B + C + D
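        # Explanatory note (added): H[i] is the same Stein kernel h_p
        # evaluated on the disjoint pairs (x_i, x'_i) from the two halves,
        # which yields the unbiased linear-time estimate of Eq. 17 in
        # Liu et al., 2016.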
assert len(H) == n_half
stat = np.mean(H)
if return_pointwise_stats:
return stat, H
else:
return stat
# end LinearKernelSteinTest
class SteinWitness(object):
"""
Construct a callable object representing the Stein witness function.
The witness function g is defined as in Eq. 1 of
A Linear-Time Kernel Goodness-of-Fit Test
Wittawat Jitkrittum, Wenkai Xu, Zoltan Szabo, Kenji Fukumizu,
Arthur Gretton
NIPS 2017
The witness function requires taking an expectation over the sample
generating distribution. This is approximated by an empirical
expectation using the sample in the input (dat). The witness function
is a d-variate (d = dimension of the data) function, which depends on
the kernel k and the model p.
The constructed object can be called as if it is a function: (J x d) numpy
array |-> (J x d) outputs
"""
def __init__(self, p, k, dat):
"""
:params p: an UnnormalizedDensity object
:params k: a DifferentiableKernel
:params dat: a kgof.data.Data
"""
self.p = p
self.k = k
self.dat = dat
def __call__(self, V):
"""
:params V: a numpy array of size J x d (data matrix)
:returns (J x d) numpy array representing witness evaluations at the J
points.
"""
J = V.shape[0]
X = self.dat.data()
n, d = X.shape
# construct the feature tensor (n x d x J)
fssd = FSSD(self.p, self.k, V, null_sim=None, alpha=None)
# When X, V contain many points, this can use a lot of memory.
# Process chunk by chunk.
block_rows = util.constrain(50000//(d*J), 10, 5000)
        sum_rows = []
for (f, t) in util.ChunkIterable(start=0, end=n, chunk_size=block_rows):
assert f<t
Xblock = X[f:t, :]
b = Xblock.shape[0]
F = fssd.feature_tensor(Xblock)
Tau = np.reshape(F, [b, d*J])
            # per-block sums of witness evaluations (only a subset of data)
            sum_rows.append(Tau.sum(axis=0))
        # an array of length d*J; dividing the grand sum by n keeps the
        # average exact even when the last block is smaller than the others
        witness_evals = np.sum(np.vstack(sum_rows), axis=0)/float(n)
assert len(witness_evals) == d*J
return np.reshape(witness_evals, [J, d])
|
wittawatj/kernel-gof
|
kgof/goftest.py
|
Python
|
mit
| 41,550
|
[
"Gaussian"
] |
ab85cf746b43449d0ab830b1a47b0c662edd12bfe6fba54d00dbc6af8bb7537f
|
""" @package antlr3.tree
@brief ANTLR3 runtime package, treewizard module
A utility module to create ASTs at runtime.
See <http://www.antlr.org/wiki/display/~admin/2007/07/02/Exploring+Concept+of+TreeWizard> for an overview. Note that the API of the Python implementation is slightly different.
"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from constants import INVALID_TOKEN_TYPE
from tokens import CommonToken
from tree import CommonTree, CommonTreeAdaptor
def computeTokenTypes(tokenNames):
"""
Compute a dict that is an inverted index of
tokenNames (which maps int token types to names).
"""
if tokenNames is None:
return {}
return dict((name, type) for type, name in enumerate(tokenNames))
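# Example (illustrative, added): computeTokenTypes(['<invalid>', '<EOR>', 'ID'])
# returns {'<invalid>': 0, '<EOR>': 1, 'ID': 2}, i.e. the name -> type inverse
# of the token type -> name list.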
## token types for pattern parser
EOF = -1
BEGIN = 1
END = 2
ID = 3
ARG = 4
PERCENT = 5
COLON = 6
DOT = 7
class TreePatternLexer(object):
def __init__(self, pattern):
## The tree pattern to lex like "(A B C)"
self.pattern = pattern
## Index into input string
self.p = -1
## Current char
self.c = None
        ## Length of the pattern in characters
self.n = len(pattern)
## Set when token type is ID or ARG
self.sval = None
self.error = False
self.consume()
__idStartChar = frozenset(
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_'
)
__idChar = __idStartChar | frozenset('0123456789')
def nextToken(self):
self.sval = ""
while self.c != EOF:
if self.c in (' ', '\n', '\r', '\t'):
self.consume()
continue
if self.c in self.__idStartChar:
self.sval += self.c
self.consume()
while self.c in self.__idChar:
self.sval += self.c
self.consume()
return ID
if self.c == '(':
self.consume()
return BEGIN
if self.c == ')':
self.consume()
return END
if self.c == '%':
self.consume()
return PERCENT
if self.c == ':':
self.consume()
return COLON
if self.c == '.':
self.consume()
return DOT
if self.c == '[': # grab [x] as a string, returning x
self.consume()
while self.c != ']':
if self.c == '\\':
self.consume()
if self.c != ']':
self.sval += '\\'
self.sval += self.c
else:
self.sval += self.c
self.consume()
self.consume()
return ARG
self.consume()
self.error = True
return EOF
return EOF
def consume(self):
self.p += 1
if self.p >= self.n:
self.c = EOF
else:
self.c = self.pattern[self.p]
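# Illustrative walkthrough (added): lexing the pattern "(A B[x])" yields the
# token stream BEGIN, ID('A'), ID('B'), ARG('x'), END, EOF, with self.sval
# holding the text of ID and ARG tokens:
#
#   lexer = TreePatternLexer('(A B[x])')
#   lexer.nextToken()   # BEGIN
#   lexer.nextToken()   # ID,  lexer.sval == 'A'
#   lexer.nextToken()   # ID,  lexer.sval == 'B'
#   lexer.nextToken()   # ARG, lexer.sval == 'x'
#   lexer.nextToken()   # END
#   lexer.nextToken()   # EOF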
class TreePatternParser(object):
def __init__(self, tokenizer, wizard, adaptor):
self.tokenizer = tokenizer
self.wizard = wizard
self.adaptor = adaptor
self.ttype = tokenizer.nextToken() # kickstart
def pattern(self):
if self.ttype == BEGIN:
return self.parseTree()
elif self.ttype == ID:
node = self.parseNode()
if self.ttype == EOF:
return node
return None # extra junk on end
return None
def parseTree(self):
if self.ttype != BEGIN:
return None
self.ttype = self.tokenizer.nextToken()
root = self.parseNode()
if root is None:
return None
while self.ttype in (BEGIN, ID, PERCENT, DOT):
if self.ttype == BEGIN:
subtree = self.parseTree()
self.adaptor.addChild(root, subtree)
else:
child = self.parseNode()
if child is None:
return None
self.adaptor.addChild(root, child)
if self.ttype != END:
return None
self.ttype = self.tokenizer.nextToken()
return root
def parseNode(self):
# "%label:" prefix
label = None
if self.ttype == PERCENT:
self.ttype = self.tokenizer.nextToken()
if self.ttype != ID:
return None
label = self.tokenizer.sval
self.ttype = self.tokenizer.nextToken()
if self.ttype != COLON:
return None
self.ttype = self.tokenizer.nextToken() # move to ID following colon
# Wildcard?
if self.ttype == DOT:
self.ttype = self.tokenizer.nextToken()
wildcardPayload = CommonToken(0, ".")
node = WildcardTreePattern(wildcardPayload)
if label is not None:
node.label = label
return node
# "ID" or "ID[arg]"
if self.ttype != ID:
return None
tokenName = self.tokenizer.sval
self.ttype = self.tokenizer.nextToken()
if tokenName == "nil":
return self.adaptor.nil()
text = tokenName
# check for arg
arg = None
if self.ttype == ARG:
arg = self.tokenizer.sval
text = arg
self.ttype = self.tokenizer.nextToken()
# create node
treeNodeType = self.wizard.getTokenType(tokenName)
if treeNodeType == INVALID_TOKEN_TYPE:
return None
node = self.adaptor.createFromType(treeNodeType, text)
if label is not None and isinstance(node, TreePattern):
node.label = label
if arg is not None and isinstance(node, TreePattern):
node.hasTextArg = True
return node
class TreePattern(CommonTree):
"""
When using %label:TOKENNAME in a tree for parse(), we must
track the label.
"""
def __init__(self, payload):
CommonTree.__init__(self, payload)
self.label = None
self.hasTextArg = None
def toString(self):
if self.label is not None:
return '%' + self.label + ':' + CommonTree.toString(self)
else:
return CommonTree.toString(self)
class WildcardTreePattern(TreePattern):
pass
class TreePatternTreeAdaptor(CommonTreeAdaptor):
"""This adaptor creates TreePattern objects for use during scan()"""
def createWithPayload(self, payload):
return TreePattern(payload)
class TreeWizard(object):
"""
Build and navigate trees with this object. Must know about the names
of tokens so you have to pass in a map or array of token names (from which
this class can build the map). I.e., Token DECL means nothing unless the
class can translate it to a token type.
In order to create nodes and navigate, this class needs a TreeAdaptor.
This class can build a token type -> node index for repeated use or for
iterating over the various nodes with a particular type.
This class works in conjunction with the TreeAdaptor rather than moving
all this functionality into the adaptor. An adaptor helps build and
navigate trees using methods. This class helps you do it with string
patterns like "(A B C)". You can create a tree from that pattern or
match subtrees against it.
"""
def __init__(self, adaptor=None, tokenNames=None, typeMap=None):
self.adaptor = adaptor
if typeMap is None:
self.tokenNameToTypeMap = computeTokenTypes(tokenNames)
else:
if tokenNames is not None:
raise ValueError("Can't have both tokenNames and typeMap")
self.tokenNameToTypeMap = typeMap
def getTokenType(self, tokenName):
"""Using the map of token names to token types, return the type."""
try:
return self.tokenNameToTypeMap[tokenName]
except KeyError:
return INVALID_TOKEN_TYPE
def create(self, pattern):
"""
Create a tree or node from the indicated tree pattern that closely
follows ANTLR tree grammar tree element syntax:
        (root child1 ... childN).
You can also just pass in a node: ID
Any node can have a text argument: ID[foo]
(notice there are no quotes around foo--it's clear it's a string).
nil is a special name meaning "give me a nil node". Useful for
making lists: (nil A B C) is a list of A B C.
"""
tokenizer = TreePatternLexer(pattern)
parser = TreePatternParser(tokenizer, self, self.adaptor)
return parser.pattern()
def index(self, tree):
"""Walk the entire tree and make a node name to nodes mapping.
For now, use recursion but later nonrecursive version may be
more efficient. Returns a dict int -> list where the list is
of your AST node type. The int is the token type of the node.
"""
m = {}
self._index(tree, m)
return m
def _index(self, t, m):
"""Do the work for index"""
if t is None:
return
ttype = self.adaptor.getType(t)
elements = m.get(ttype)
if elements is None:
m[ttype] = elements = []
elements.append(t)
for i in range(self.adaptor.getChildCount(t)):
child = self.adaptor.getChild(t, i)
self._index(child, m)
def find(self, tree, what):
"""Return a list of matching token.
what may either be an integer specifzing the token type to find or
a string with a pattern that must be matched.
"""
if isinstance(what, (int, long)):
return self._findTokenType(tree, what)
elif isinstance(what, basestring):
return self._findPattern(tree, what)
else:
raise TypeError("'what' must be string or integer")
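    # Illustrative example (added; the tree t and token types are hypothetical):
    #   wiz.find(t, 6)        # all nodes with token type 6
    #   wiz.find(t, '(A B)')  # all subtrees matching the pattern (A B)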
def _findTokenType(self, t, ttype):
"""Return a List of tree nodes with token type ttype"""
nodes = []
def visitor(tree, parent, childIndex, labels):
nodes.append(tree)
self.visit(t, ttype, visitor)
return nodes
def _findPattern(self, t, pattern):
"""Return a List of subtrees matching pattern."""
subtrees = []
# Create a TreePattern from the pattern
tokenizer = TreePatternLexer(pattern)
parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
tpattern = parser.pattern()
# don't allow invalid patterns
if (tpattern is None or tpattern.isNil()
or isinstance(tpattern, WildcardTreePattern)):
return None
rootTokenType = tpattern.getType()
def visitor(tree, parent, childIndex, label):
if self._parse(tree, tpattern, None):
subtrees.append(tree)
self.visit(t, rootTokenType, visitor)
return subtrees
def visit(self, tree, what, visitor):
"""Visit every node in tree matching what, invoking the visitor.
If what is a string, it is parsed as a pattern and only matching
subtrees will be visited.
The implementation uses the root node of the pattern in combination
with visit(t, ttype, visitor) so nil-rooted patterns are not allowed.
Patterns with wildcard roots are also not allowed.
If what is an integer, it is used as a token type and visit will match
all nodes of that type (this is faster than the pattern match).
The labels arg of the visitor action method is never set (it's None)
since using a token type rather than a pattern doesn't let us set a
label.
"""
if isinstance(what, (int, long)):
self._visitType(tree, None, 0, what, visitor)
elif isinstance(what, basestring):
self._visitPattern(tree, what, visitor)
else:
raise TypeError("'what' must be string or integer")
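    # Illustrative example (added): collect all nodes of a given type.
    #   found = []
    #   wiz.visit(t, wiz.getTokenType('B'),
    #             lambda node, parent, childIndex, labels: found.append(node))
    #   len(found)  # number of B nodes in t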
def _visitType(self, t, parent, childIndex, ttype, visitor):
"""Do the recursive work for visit"""
if t is None:
return
if self.adaptor.getType(t) == ttype:
visitor(t, parent, childIndex, None)
for i in range(self.adaptor.getChildCount(t)):
child = self.adaptor.getChild(t, i)
self._visitType(child, t, i, ttype, visitor)
def _visitPattern(self, tree, pattern, visitor):
"""
For all subtrees that match the pattern, execute the visit action.
"""
# Create a TreePattern from the pattern
tokenizer = TreePatternLexer(pattern)
parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
tpattern = parser.pattern()
# don't allow invalid patterns
if (tpattern is None or tpattern.isNil()
or isinstance(tpattern, WildcardTreePattern)):
return
rootTokenType = tpattern.getType()
def rootvisitor(tree, parent, childIndex, labels):
labels = {}
if self._parse(tree, tpattern, labels):
visitor(tree, parent, childIndex, labels)
self.visit(tree, rootTokenType, rootvisitor)
def parse(self, t, pattern, labels=None):
"""
Given a pattern like (ASSIGN %lhs:ID %rhs:.) with optional labels
on the various nodes and '.' (dot) as the node/subtree wildcard,
return true if the pattern matches and fill the labels Map with
the labels pointing at the appropriate nodes. Return false if
the pattern is malformed or the tree does not match.
If a node specifies a text arg in pattern, then that must match
for that node in t.
"""
tokenizer = TreePatternLexer(pattern)
parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
tpattern = parser.pattern()
return self._parse(t, tpattern, labels)
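    # Illustrative example (added; token names are hypothetical):
    #   wiz = TreeWizard(CommonTreeAdaptor(),
    #                    ['<invalid>', '<EOR>', '<DOWN>', '<UP>', 'ASSIGN', 'ID'])
    #   t = wiz.create('(ASSIGN ID[x] ID[y])')
    #   labels = {}
    #   wiz.parse(t, '(ASSIGN %lhs:ID %rhs:.)', labels)  # -> True
    #   # labels['lhs'] is the ID[x] node, labels['rhs'] the ID[y] node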
def _parse(self, t1, t2, labels):
"""
Do the work for parse. Check to see if the t2 pattern fits the
structure and token types in t1. Check text if the pattern has
text arguments on nodes. Fill labels map with pointers to nodes
in tree matched against nodes in pattern with labels.
"""
# make sure both are non-null
if t1 is None or t2 is None:
return False
# check roots (wildcard matches anything)
if not isinstance(t2, WildcardTreePattern):
if self.adaptor.getType(t1) != t2.getType():
return False
if t2.hasTextArg and self.adaptor.getText(t1) != t2.getText():
return False
if t2.label is not None and labels is not None:
# map label in pattern to node in t1
labels[t2.label] = t1
# check children
n1 = self.adaptor.getChildCount(t1)
n2 = t2.getChildCount()
if n1 != n2:
return False
for i in range(n1):
child1 = self.adaptor.getChild(t1, i)
child2 = t2.getChild(i)
if not self._parse(child1, child2, labels):
return False
return True
def equals(self, t1, t2, adaptor=None):
"""
Compare t1 and t2; return true if token types/text, structure match
exactly.
The trees are examined in their entirety so that (A B) does not match
(A B C) nor (A (B C)).
"""
if adaptor is None:
adaptor = self.adaptor
return self._equals(t1, t2, adaptor)
def _equals(self, t1, t2, adaptor):
# make sure both are non-null
if t1 is None or t2 is None:
return False
# check roots
if adaptor.getType(t1) != adaptor.getType(t2):
return False
if adaptor.getText(t1) != adaptor.getText(t2):
return False
# check children
n1 = adaptor.getChildCount(t1)
n2 = adaptor.getChildCount(t2)
if n1 != n2:
return False
for i in range(n1):
child1 = adaptor.getChild(t1, i)
child2 = adaptor.getChild(t2, i)
if not self._equals(child1, child2, adaptor):
return False
return True
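# --- Illustrative usage sketch (added; not part of the original runtime).
# The token names below are hypothetical; indices 0-3 mimic ANTLR's
# reserved token slots.
if __name__ == '__main__':
    tokenNames = ['<invalid>', '<EOR>', '<DOWN>', '<UP>', 'A', 'B', 'C']
    wiz = TreeWizard(CommonTreeAdaptor(), tokenNames)
    t = wiz.create('(A B C)')  # build a tree from a pattern string
    print(wiz.equals(t, wiz.create('(A B C)')))  # True: same types/texts/structure
    labels = {}
    print(wiz.parse(t, '(A %x:B %y:.)', labels))  # True; x -> B node, y -> C node
    print(wiz.index(t))  # dict: token type -> list of nodes of that type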
| leriomaggio/code-coherence-evaluation-tool | code_comments_coherence/source_code_analysis/code_analysis/antlr3/treewizard.py | Python | bsd-3-clause | 18,025 | ["VisIt"] | 2ba7566b9d61866aa430fdbfb92d0464324c4e6e8339282948b38d18622c0dbe |
from test import test_support
import unittest
import codecs
import locale
import sys, StringIO
def coding_checker(self, coder):
def check(input, expect):
self.assertEqual(coder(input), (expect, len(input)))
return check
class Queue(object):
"""
queue: write bytes at one end, read bytes from the other end
"""
def __init__(self):
self._buffer = ""
def write(self, chars):
self._buffer += chars
def read(self, size=-1):
if size<0:
s = self._buffer
self._buffer = ""
return s
else:
s = self._buffer[:size]
self._buffer = self._buffer[size:]
return s
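# Illustrative note (added): Queue is a simple FIFO over a plain byte string:
#   q = Queue(); q.write("ab"); q.read(1) -> "a"; q.read() -> "b"
# The stream tests below use it to feed codec readers one byte at a time.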
class ReadTest(unittest.TestCase):
def check_partial(self, input, partialresults):
# get a StreamReader for the encoding and feed the bytestring version
# of input to the reader byte by byte. Read everything available from
# the StreamReader and check that the results equal the appropriate
# entries from partialresults.
q = Queue()
r = codecs.getreader(self.encoding)(q)
result = u""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
q.write(c)
result += r.read()
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(r.read(), u"")
self.assertEqual(r.bytebuffer, "")
self.assertEqual(r.charbuffer, u"")
        # do the check again, this time using an incremental decoder
d = codecs.getincrementaldecoder(self.encoding)()
result = u""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
result += d.decode(c)
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode("", True), u"")
self.assertEqual(d.buffer, "")
# Check whether the reset method works properly
d.reset()
result = u""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
result += d.decode(c)
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode("", True), u"")
self.assertEqual(d.buffer, "")
# check iterdecode()
encoded = input.encode(self.encoding)
self.assertEqual(
input,
u"".join(codecs.iterdecode(encoded, self.encoding))
)
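        # Illustrative note (added): for "utf-16-le" and input u"\xff", the
        # encoded bytes are "\xff\x00"; partialresults would be [u"", u"\xff"],
        # since nothing decodes after the first byte and the full character
        # appears only after the second.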
def test_readline(self):
def getreader(input):
stream = StringIO.StringIO(input.encode(self.encoding))
return codecs.getreader(self.encoding)(stream)
def readalllines(input, keepends=True, size=None):
reader = getreader(input)
lines = []
while True:
line = reader.readline(size=size, keepends=keepends)
if not line:
break
lines.append(line)
return "|".join(lines)
s = u"foo\nbar\r\nbaz\rspam\u2028eggs"
sexpected = u"foo\n|bar\r\n|baz\r|spam\u2028|eggs"
sexpectednoends = u"foo|bar|baz|spam|eggs"
self.assertEqual(readalllines(s, True), sexpected)
self.assertEqual(readalllines(s, False), sexpectednoends)
self.assertEqual(readalllines(s, True, 10), sexpected)
self.assertEqual(readalllines(s, False, 10), sexpectednoends)
lineends = ("\n", "\r\n", "\r", u"\u2028")
# Test long lines (multiple calls to read() in readline())
vw = []
vwo = []
for (i, lineend) in enumerate(lineends):
vw.append((i*200+200)*u"\u3042" + lineend)
vwo.append((i*200+200)*u"\u3042")
self.assertEqual(readalllines("".join(vw), True), "|".join(vw))
self.assertEqual(readalllines("".join(vw), False), "|".join(vwo))
# Test lines where the first read might end with \r, so the
# reader has to look ahead whether this is a lone \r or a \r\n
for size in xrange(80):
for lineend in lineends:
s = 10*(size*u"a" + lineend + u"xxx\n")
reader = getreader(s)
for i in xrange(10):
self.assertEqual(
reader.readline(keepends=True),
size*u"a" + lineend,
)
self.assertEqual(
reader.readline(keepends=True),
"xxx\n",
)
reader = getreader(s)
for i in xrange(10):
self.assertEqual(
reader.readline(keepends=False),
size*u"a",
)
self.assertEqual(
reader.readline(keepends=False),
"xxx",
)
def test_mixed_readline_and_read(self):
lines = ["Humpty Dumpty sat on a wall,\n",
"Humpty Dumpty had a great fall.\r\n",
"All the king's horses and all the king's men\r",
"Couldn't put Humpty together again."]
data = ''.join(lines)
def getreader():
stream = StringIO.StringIO(data.encode(self.encoding))
return codecs.getreader(self.encoding)(stream)
# Issue #8260: Test readline() followed by read()
f = getreader()
self.assertEqual(f.readline(), lines[0])
self.assertEqual(f.read(), ''.join(lines[1:]))
self.assertEqual(f.read(), '')
# Issue #16636: Test readline() followed by readlines()
f = getreader()
self.assertEqual(f.readline(), lines[0])
self.assertEqual(f.readlines(), lines[1:])
self.assertEqual(f.read(), '')
# Test read() followed by read()
f = getreader()
self.assertEqual(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.read(), data[5:])
self.assertEqual(f.read(), '')
# Issue #12446: Test read() followed by readlines()
f = getreader()
self.assertEqual(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.readlines(), [lines[0][5:]] + lines[1:])
self.assertEqual(f.read(), '')
def test_bug1175396(self):
s = [
'<%!--===================================================\r\n',
' BLOG index page: show recent articles,\r\n',
' today\'s articles, or articles of a specific date.\r\n',
'========================================================--%>\r\n',
'<%@inputencoding="ISO-8859-1"%>\r\n',
'<%@pagetemplate=TEMPLATE.y%>\r\n',
'<%@import=import frog.util, frog%>\r\n',
'<%@import=import frog.objects%>\r\n',
'<%@import=from frog.storageerrors import StorageError%>\r\n',
'<%\r\n',
'\r\n',
'import logging\r\n',
'log=logging.getLogger("Snakelets.logger")\r\n',
'\r\n',
'\r\n',
'user=self.SessionCtx.user\r\n',
'storageEngine=self.SessionCtx.storageEngine\r\n',
'\r\n',
'\r\n',
'def readArticlesFromDate(date, count=None):\r\n',
' entryids=storageEngine.listBlogEntries(date)\r\n',
' entryids.reverse() # descending\r\n',
' if count:\r\n',
' entryids=entryids[:count]\r\n',
' try:\r\n',
' return [ frog.objects.BlogEntry.load(storageEngine, date, Id) for Id in entryids ]\r\n',
' except StorageError,x:\r\n',
' log.error("Error loading articles: "+str(x))\r\n',
' self.abort("cannot load articles")\r\n',
'\r\n',
'showdate=None\r\n',
'\r\n',
'arg=self.Request.getArg()\r\n',
'if arg=="today":\r\n',
' #-------------------- TODAY\'S ARTICLES\r\n',
' self.write("<h2>Today\'s articles</h2>")\r\n',
' showdate = frog.util.isodatestr() \r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'elif arg=="active":\r\n',
' #-------------------- ACTIVE ARTICLES redirect\r\n',
' self.Yredirect("active.y")\r\n',
'elif arg=="login":\r\n',
' #-------------------- LOGIN PAGE redirect\r\n',
' self.Yredirect("login.y")\r\n',
'elif arg=="date":\r\n',
' #-------------------- ARTICLES OF A SPECIFIC DATE\r\n',
' showdate = self.Request.getParameter("date")\r\n',
' self.write("<h2>Articles written on %s</h2>"% frog.util.mediumdatestr(showdate))\r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'else:\r\n',
' #-------------------- RECENT ARTICLES\r\n',
' self.write("<h2>Recent articles</h2>")\r\n',
' dates=storageEngine.listBlogEntryDates()\r\n',
' if dates:\r\n',
' entries=[]\r\n',
' SHOWAMOUNT=10\r\n',
' for showdate in dates:\r\n',
' entries.extend( readArticlesFromDate(showdate, SHOWAMOUNT-len(entries)) )\r\n',
' if len(entries)>=SHOWAMOUNT:\r\n',
' break\r\n',
' \r\n',
]
stream = StringIO.StringIO("".join(s).encode(self.encoding))
reader = codecs.getreader(self.encoding)(stream)
for (i, line) in enumerate(reader):
self.assertEqual(line, s[i])
def test_readlinequeue(self):
q = Queue()
writer = codecs.getwriter(self.encoding)(q)
reader = codecs.getreader(self.encoding)(q)
# No lineends
writer.write(u"foo\r")
self.assertEqual(reader.readline(keepends=False), u"foo")
writer.write(u"\nbar\r")
self.assertEqual(reader.readline(keepends=False), u"")
self.assertEqual(reader.readline(keepends=False), u"bar")
writer.write(u"baz")
self.assertEqual(reader.readline(keepends=False), u"baz")
self.assertEqual(reader.readline(keepends=False), u"")
# Lineends
writer.write(u"foo\r")
self.assertEqual(reader.readline(keepends=True), u"foo\r")
writer.write(u"\nbar\r")
self.assertEqual(reader.readline(keepends=True), u"\n")
self.assertEqual(reader.readline(keepends=True), u"bar\r")
writer.write(u"baz")
self.assertEqual(reader.readline(keepends=True), u"baz")
self.assertEqual(reader.readline(keepends=True), u"")
writer.write(u"foo\r\n")
self.assertEqual(reader.readline(keepends=True), u"foo\r\n")
def test_bug1098990_a(self):
s1 = u"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\r\n"
s2 = u"offending line: ladfj askldfj klasdj fskla dfzaskdj fasklfj laskd fjasklfzzzzaa%whereisthis!!!\r\n"
s3 = u"next line.\r\n"
s = (s1+s2+s3).encode(self.encoding)
stream = StringIO.StringIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), u"")
def test_bug1098990_b(self):
s1 = u"aaaaaaaaaaaaaaaaaaaaaaaa\r\n"
s2 = u"bbbbbbbbbbbbbbbbbbbbbbbb\r\n"
s3 = u"stillokay:bbbbxx\r\n"
s4 = u"broken!!!!badbad\r\n"
s5 = u"againokay.\r\n"
s = (s1+s2+s3+s4+s5).encode(self.encoding)
stream = StringIO.StringIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), s4)
self.assertEqual(reader.readline(), s5)
self.assertEqual(reader.readline(), u"")
class UTF32Test(ReadTest):
encoding = "utf-32"
spamle = ('\xff\xfe\x00\x00'
's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00'
's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00')
spambe = ('\x00\x00\xfe\xff'
'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m'
'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m')
def test_only_one_bom(self):
_,_,reader,writer = codecs.lookup(self.encoding)
# encode some stream
s = StringIO.StringIO()
f = writer(s)
f.write(u"spam")
f.write(u"spam")
d = s.getvalue()
# check whether there is exactly one BOM in it
self.assertTrue(d == self.spamle or d == self.spambe)
# try to read it back
s = StringIO.StringIO(d)
f = reader(s)
self.assertEqual(f.read(), u"spamspam")
def test_badbom(self):
s = StringIO.StringIO(4*"\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
s = StringIO.StringIO(8*"\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
def test_partial(self):
self.check_partial(
u"\x00\xff\u0100\uffff\U00010000",
[
u"", # first byte of BOM read
u"", # second byte of BOM read
u"", # third byte of BOM read
u"", # fourth byte of BOM read => byteorder known
u"",
u"",
u"",
u"\x00",
u"\x00",
u"\x00",
u"\x00",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff\U00010000",
]
)
def test_handlers(self):
self.assertEqual((u'\ufffd', 1),
codecs.utf_32_decode('\x01', 'replace', True))
self.assertEqual((u'', 1),
codecs.utf_32_decode('\x01', 'ignore', True))
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_32_decode,
"\xff", "strict", True)
def test_issue8941(self):
# Issue #8941: insufficient result allocation when decoding into
# surrogate pairs on UCS-2 builds.
encoded_le = '\xff\xfe\x00\x00' + '\x00\x00\x01\x00' * 1024
self.assertEqual(u'\U00010000' * 1024,
codecs.utf_32_decode(encoded_le)[0])
encoded_be = '\x00\x00\xfe\xff' + '\x00\x01\x00\x00' * 1024
self.assertEqual(u'\U00010000' * 1024,
codecs.utf_32_decode(encoded_be)[0])
class UTF32LETest(ReadTest):
encoding = "utf-32-le"
def test_partial(self):
self.check_partial(
u"\x00\xff\u0100\uffff\U00010000",
[
u"",
u"",
u"",
u"\x00",
u"\x00",
u"\x00",
u"\x00",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff\U00010000",
]
)
def test_simple(self):
self.assertEqual(u"\U00010203".encode(self.encoding), "\x03\x02\x01\x00")
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_32_le_decode,
"\xff", "strict", True)
def test_issue8941(self):
# Issue #8941: insufficient result allocation when decoding into
# surrogate pairs on UCS-2 builds.
encoded = '\x00\x00\x01\x00' * 1024
self.assertEqual(u'\U00010000' * 1024,
codecs.utf_32_le_decode(encoded)[0])
class UTF32BETest(ReadTest):
encoding = "utf-32-be"
def test_partial(self):
self.check_partial(
u"\x00\xff\u0100\uffff\U00010000",
[
u"",
u"",
u"",
u"\x00",
u"\x00",
u"\x00",
u"\x00",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff\U00010000",
]
)
def test_simple(self):
self.assertEqual(u"\U00010203".encode(self.encoding), "\x00\x01\x02\x03")
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_32_be_decode,
"\xff", "strict", True)
def test_issue8941(self):
# Issue #8941: insufficient result allocation when decoding into
# surrogate pairs on UCS-2 builds.
encoded = '\x00\x01\x00\x00' * 1024
self.assertEqual(u'\U00010000' * 1024,
codecs.utf_32_be_decode(encoded)[0])
class UTF16Test(ReadTest):
encoding = "utf-16"
spamle = '\xff\xfes\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00'
spambe = '\xfe\xff\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m'
def test_only_one_bom(self):
_,_,reader,writer = codecs.lookup(self.encoding)
# encode some stream
s = StringIO.StringIO()
f = writer(s)
f.write(u"spam")
f.write(u"spam")
d = s.getvalue()
# check whether there is exactly one BOM in it
self.assertTrue(d == self.spamle or d == self.spambe)
# try to read it back
s = StringIO.StringIO(d)
f = reader(s)
self.assertEqual(f.read(), u"spamspam")
def test_badbom(self):
s = StringIO.StringIO("\xff\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
s = StringIO.StringIO("\xff\xff\xff\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
def test_partial(self):
self.check_partial(
u"\x00\xff\u0100\uffff\U00010000",
[
u"", # first byte of BOM read
u"", # second byte of BOM read => byteorder known
u"",
u"\x00",
u"\x00",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff\U00010000",
]
)
def test_handlers(self):
self.assertEqual((u'\ufffd', 1),
codecs.utf_16_decode('\x01', 'replace', True))
self.assertEqual((u'', 1),
codecs.utf_16_decode('\x01', 'ignore', True))
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_16_decode, "\xff", "strict", True)
def test_bug691291(self):
# Files are always opened in binary mode, even if no binary mode was
# specified. This means that no automatic conversion of '\n' is done
# on reading and writing.
s1 = u'Hello\r\nworld\r\n'
s = s1.encode(self.encoding)
self.addCleanup(test_support.unlink, test_support.TESTFN)
with open(test_support.TESTFN, 'wb') as fp:
fp.write(s)
with codecs.open(test_support.TESTFN, 'U', encoding=self.encoding) as reader:
self.assertEqual(reader.read(), s1)
class UTF16LETest(ReadTest):
encoding = "utf-16-le"
def test_partial(self):
self.check_partial(
u"\x00\xff\u0100\uffff\U00010000",
[
u"",
u"\x00",
u"\x00",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff\U00010000",
]
)
def test_errors(self):
tests = [
(b'\xff', u'\ufffd'),
(b'A\x00Z', u'A\ufffd'),
(b'A\x00B\x00C\x00D\x00Z', u'ABCD\ufffd'),
(b'\x00\xd8', u'\ufffd'),
(b'\x00\xd8A', u'\ufffd'),
(b'\x00\xd8A\x00', u'\ufffdA'),
(b'\x00\xdcA\x00', u'\ufffdA'),
]
for raw, expected in tests:
try:
with self.assertRaises(UnicodeDecodeError):
codecs.utf_16_le_decode(raw, 'strict', True)
self.assertEqual(raw.decode('utf-16le', 'replace'), expected)
except:
print 'raw=%r' % raw
raise
class UTF16BETest(ReadTest):
encoding = "utf-16-be"
def test_partial(self):
self.check_partial(
u"\x00\xff\u0100\uffff\U00010000",
[
u"",
u"\x00",
u"\x00",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff",
u"\x00\xff\u0100\uffff\U00010000",
]
)
def test_errors(self):
tests = [
(b'\xff', u'\ufffd'),
(b'\x00A\xff', u'A\ufffd'),
(b'\x00A\x00B\x00C\x00DZ', u'ABCD\ufffd'),
(b'\xd8\x00', u'\ufffd'),
(b'\xd8\x00\xdc', u'\ufffd'),
(b'\xd8\x00\x00A', u'\ufffdA'),
(b'\xdc\x00\x00A', u'\ufffdA'),
]
for raw, expected in tests:
try:
with self.assertRaises(UnicodeDecodeError):
codecs.utf_16_be_decode(raw, 'strict', True)
self.assertEqual(raw.decode('utf-16be', 'replace'), expected)
except:
print 'raw=%r' % raw
raise
class UTF8Test(ReadTest):
encoding = "utf-8"
def test_partial(self):
self.check_partial(
u"\x00\xff\u07ff\u0800\uffff\U00010000",
[
u"\x00",
u"\x00",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff\u07ff",
u"\x00\xff\u07ff",
u"\x00\xff\u07ff",
u"\x00\xff\u07ff\u0800",
u"\x00\xff\u07ff\u0800",
u"\x00\xff\u07ff\u0800",
u"\x00\xff\u07ff\u0800\uffff",
u"\x00\xff\u07ff\u0800\uffff",
u"\x00\xff\u07ff\u0800\uffff",
u"\x00\xff\u07ff\u0800\uffff",
u"\x00\xff\u07ff\u0800\uffff\U00010000",
]
)
class UTF7Test(ReadTest):
encoding = "utf-7"
def test_ascii(self):
# Set D (directly encoded characters)
set_d = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'abcdefghijklmnopqrstuvwxyz'
'0123456789'
'\'(),-./:?')
self.assertEqual(set_d.encode(self.encoding), set_d)
self.assertEqual(set_d.decode(self.encoding), set_d)
# Set O (optional direct characters)
set_o = ' !"#$%&*;<=>@[]^_`{|}'
self.assertEqual(set_o.encode(self.encoding), set_o)
self.assertEqual(set_o.decode(self.encoding), set_o)
# +
self.assertEqual(u'a+b'.encode(self.encoding), 'a+-b')
self.assertEqual('a+-b'.decode(self.encoding), u'a+b')
# White spaces
ws = ' \t\n\r'
self.assertEqual(ws.encode(self.encoding), ws)
self.assertEqual(ws.decode(self.encoding), ws)
# Other ASCII characters
other_ascii = ''.join(sorted(set(chr(i) for i in range(0x80)) -
set(set_d + set_o + '+' + ws)))
self.assertEqual(other_ascii.encode(self.encoding),
'+AAAAAQACAAMABAAFAAYABwAIAAsADAAOAA8AEAARABIAEwAU'
'ABUAFgAXABgAGQAaABsAHAAdAB4AHwBcAH4Afw-')
def test_partial(self):
self.check_partial(
u"a+-b",
[
u"a",
u"a",
u"a+",
u"a+-",
u"a+-b",
]
)
def test_errors(self):
tests = [
('\xe1b', u'\ufffdb'),
('a\xe1b', u'a\ufffdb'),
('a\xe1\xe1b', u'a\ufffd\ufffdb'),
('a+IK', u'a\ufffd'),
('a+IK-b', u'a\ufffdb'),
('a+IK,b', u'a\ufffdb'),
('a+IKx', u'a\u20ac\ufffd'),
('a+IKx-b', u'a\u20ac\ufffdb'),
('a+IKwgr', u'a\u20ac\ufffd'),
('a+IKwgr-b', u'a\u20ac\ufffdb'),
('a+IKwgr,', u'a\u20ac\ufffd'),
('a+IKwgr,-b', u'a\u20ac\ufffd-b'),
('a+IKwgrB', u'a\u20ac\u20ac\ufffd'),
('a+IKwgrB-b', u'a\u20ac\u20ac\ufffdb'),
('a+/,+IKw-b', u'a\ufffd\u20acb'),
('a+//,+IKw-b', u'a\ufffd\u20acb'),
('a+///,+IKw-b', u'a\uffff\ufffd\u20acb'),
('a+////,+IKw-b', u'a\uffff\ufffd\u20acb'),
('a+IKw-b\xe1', u'a\u20acb\ufffd'),
('a+IKw\xe1b', u'a\u20ac\ufffdb'),
]
for raw, expected in tests:
try:
with self.assertRaises(UnicodeDecodeError):
codecs.utf_7_decode(raw, 'strict', True)
self.assertEqual(raw.decode('utf-7', 'replace'), expected)
except:
print 'raw=%r' % raw
raise
def test_nonbmp(self):
self.assertEqual(u'\U000104A0'.encode(self.encoding), '+2AHcoA-')
self.assertEqual(u'\ud801\udca0'.encode(self.encoding), '+2AHcoA-')
self.assertEqual('+2AHcoA-'.decode(self.encoding), u'\U000104A0')
self.assertEqual('+2AHcoA'.decode(self.encoding), u'\U000104A0')
self.assertEqual(u'\u20ac\U000104A0'.encode(self.encoding), '+IKzYAdyg-')
self.assertEqual('+IKzYAdyg-'.decode(self.encoding), u'\u20ac\U000104A0')
self.assertEqual('+IKzYAdyg'.decode(self.encoding), u'\u20ac\U000104A0')
self.assertEqual(u'\u20ac\u20ac\U000104A0'.encode(self.encoding),
'+IKwgrNgB3KA-')
self.assertEqual('+IKwgrNgB3KA-'.decode(self.encoding),
u'\u20ac\u20ac\U000104A0')
self.assertEqual('+IKwgrNgB3KA'.decode(self.encoding),
u'\u20ac\u20ac\U000104A0')
def test_lone_surrogates(self):
tests = [
('a+2AE-b', u'a\ud801b'),
('a+2AE\xe1b', u'a\ufffdb'),
('a+2AE', u'a\ufffd'),
('a+2AEA-b', u'a\ufffdb'),
('a+2AH-b', u'a\ufffdb'),
('a+IKzYAQ-b', u'a\u20ac\ud801b'),
('a+IKzYAQ\xe1b', u'a\u20ac\ufffdb'),
('a+IKzYAQA-b', u'a\u20ac\ufffdb'),
('a+IKzYAd-b', u'a\u20ac\ufffdb'),
('a+IKwgrNgB-b', u'a\u20ac\u20ac\ud801b'),
('a+IKwgrNgB\xe1b', u'a\u20ac\u20ac\ufffdb'),
('a+IKwgrNgB', u'a\u20ac\u20ac\ufffd'),
('a+IKwgrNgBA-b', u'a\u20ac\u20ac\ufffdb'),
]
for raw, expected in tests:
try:
self.assertEqual(raw.decode('utf-7', 'replace'), expected)
except:
print 'raw=%r' % raw
raise
class UTF16ExTest(unittest.TestCase):
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_16_ex_decode, "\xff", "strict", 0, True)
def test_bad_args(self):
self.assertRaises(TypeError, codecs.utf_16_ex_decode)
class ReadBufferTest(unittest.TestCase):
def test_array(self):
import array
self.assertEqual(
codecs.readbuffer_encode(array.array("c", "spam")),
("spam", 4)
)
def test_empty(self):
self.assertEqual(codecs.readbuffer_encode(""), ("", 0))
def test_bad_args(self):
self.assertRaises(TypeError, codecs.readbuffer_encode)
self.assertRaises(TypeError, codecs.readbuffer_encode, 42)
class CharBufferTest(unittest.TestCase):
def test_string(self):
self.assertEqual(codecs.charbuffer_encode("spam"), ("spam", 4))
def test_empty(self):
self.assertEqual(codecs.charbuffer_encode(""), ("", 0))
def test_bad_args(self):
self.assertRaises(TypeError, codecs.charbuffer_encode)
self.assertRaises(TypeError, codecs.charbuffer_encode, 42)
class UTF8SigTest(ReadTest):
encoding = "utf-8-sig"
def test_partial(self):
self.check_partial(
u"\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
[
u"",
u"",
u"", # First BOM has been read and skipped
u"",
u"",
u"\ufeff", # Second BOM has been read and emitted
u"\ufeff\x00", # "\x00" read and emitted
u"\ufeff\x00", # First byte of encoded u"\xff" read
u"\ufeff\x00\xff", # Second byte of encoded u"\xff" read
u"\ufeff\x00\xff", # First byte of encoded u"\u07ff" read
u"\ufeff\x00\xff\u07ff", # Second byte of encoded u"\u07ff" read
u"\ufeff\x00\xff\u07ff",
u"\ufeff\x00\xff\u07ff",
u"\ufeff\x00\xff\u07ff\u0800",
u"\ufeff\x00\xff\u07ff\u0800",
u"\ufeff\x00\xff\u07ff\u0800",
u"\ufeff\x00\xff\u07ff\u0800\uffff",
u"\ufeff\x00\xff\u07ff\u0800\uffff",
u"\ufeff\x00\xff\u07ff\u0800\uffff",
u"\ufeff\x00\xff\u07ff\u0800\uffff",
u"\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
]
)
def test_bug1601501(self):
# SF bug #1601501: check that the codec works with a buffer
unicode("\xef\xbb\xbf", "utf-8-sig")
def test_bom(self):
d = codecs.getincrementaldecoder("utf-8-sig")()
s = u"spam"
self.assertEqual(d.decode(s.encode("utf-8-sig")), s)
def test_stream_bom(self):
unistring = u"ABC\u00A1\u2200XYZ"
bytestring = codecs.BOM_UTF8 + "ABC\xC2\xA1\xE2\x88\x80XYZ"
reader = codecs.getreader("utf-8-sig")
for sizehint in [None] + range(1, 11) + \
[64, 128, 256, 512, 1024]:
istream = reader(StringIO.StringIO(bytestring))
ostream = StringIO.StringIO()
while 1:
if sizehint is not None:
data = istream.read(sizehint)
else:
data = istream.read()
if not data:
break
ostream.write(data)
got = ostream.getvalue()
self.assertEqual(got, unistring)
def test_stream_bare(self):
unistring = u"ABC\u00A1\u2200XYZ"
bytestring = "ABC\xC2\xA1\xE2\x88\x80XYZ"
reader = codecs.getreader("utf-8-sig")
for sizehint in [None] + range(1, 11) + \
[64, 128, 256, 512, 1024]:
istream = reader(StringIO.StringIO(bytestring))
ostream = StringIO.StringIO()
while 1:
if sizehint is not None:
data = istream.read(sizehint)
else:
data = istream.read()
if not data:
break
ostream.write(data)
got = ostream.getvalue()
self.assertEqual(got, unistring)
class EscapeDecodeTest(unittest.TestCase):
def test_empty(self):
self.assertEqual(codecs.escape_decode(""), ("", 0))
def test_raw(self):
decode = codecs.escape_decode
for b in range(256):
b = chr(b)
if b != '\\':
self.assertEqual(decode(b + '0'), (b + '0', 2))
def test_escape(self):
decode = codecs.escape_decode
check = coding_checker(self, decode)
check(b"[\\\n]", b"[]")
check(br'[\"]', b'["]')
check(br"[\']", b"[']")
check(br"[\\]", br"[\]")
check(br"[\a]", b"[\x07]")
check(br"[\b]", b"[\x08]")
check(br"[\t]", b"[\x09]")
check(br"[\n]", b"[\x0a]")
check(br"[\v]", b"[\x0b]")
check(br"[\f]", b"[\x0c]")
check(br"[\r]", b"[\x0d]")
check(br"[\7]", b"[\x07]")
check(br"[\8]", br"[\8]")
check(br"[\78]", b"[\x078]")
check(br"[\41]", b"[!]")
check(br"[\418]", b"[!8]")
check(br"[\101]", b"[A]")
check(br"[\1010]", b"[A0]")
check(br"[\501]", b"[A]")
check(br"[\x41]", b"[A]")
check(br"[\X41]", br"[\X41]")
check(br"[\x410]", b"[A0]")
for b in range(256):
b = chr(b)
if b not in '\n"\'\\abtnvfr01234567x':
check('\\' + b, '\\' + b)
def test_errors(self):
decode = codecs.escape_decode
self.assertRaises(ValueError, decode, br"\x")
self.assertRaises(ValueError, decode, br"[\x]")
self.assertEqual(decode(br"[\x]\x", "ignore"), (b"[]", 6))
self.assertEqual(decode(br"[\x]\x", "replace"), (b"[?]?", 6))
self.assertRaises(ValueError, decode, br"\x0")
self.assertRaises(ValueError, decode, br"[\x0]")
self.assertEqual(decode(br"[\x0]\x0", "ignore"), (b"[]", 8))
self.assertEqual(decode(br"[\x0]\x0", "replace"), (b"[?]?", 8))
class RecodingTest(unittest.TestCase):
def test_recoding(self):
f = StringIO.StringIO()
f2 = codecs.EncodedFile(f, "unicode_internal", "utf-8")
f2.write(u"a")
f2.close()
# Python used to crash on this at exit because of a refcount
# bug in _codecsmodule.c
# From RFC 3492
punycode_testcases = [
    # (A) Arabic (Egyptian):
(u"\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644"
u"\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
"egbpdaj6bu4bxfgehfvwxn"),
    # (B) Chinese (simplified):
(u"\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
"ihqwcrb4cv8a8dqg056pqjye"),
    # (C) Chinese (traditional):
(u"\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
"ihqwctvzc91f659drss3x8bo0yb"),
    # (D) Czech: Pro<ccaron>prost<ecaron>nemluv<iacute><ccaron>esky
(u"\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074"
u"\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D"
u"\u0065\u0073\u006B\u0079",
"Proprostnemluvesky-uyb24dma41a"),
    # (E) Hebrew:
(u"\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8"
u"\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2"
u"\u05D1\u05E8\u05D9\u05EA",
"4dbcagdahymbxekheh6e0a7fei0b"),
    # (F) Hindi (Devanagari):
(u"\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D"
u"\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939"
u"\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947"
u"\u0939\u0948\u0902",
"i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd"),
    # (G) Japanese (kanji and hiragana):
(u"\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092"
u"\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
"n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa"),
# (H) Korean (Hangul syllables):
(u"\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774"
u"\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74"
u"\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
"989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j"
"psd879ccm6fea98c"),
# (I) Russian (Cyrillic):
(u"\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E"
u"\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440"
u"\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A"
u"\u0438",
"b1abfaaepdrnnbgefbaDotcwatmq2g4l"),
# (J) Spanish: Porqu<eacute>nopuedensimplementehablarenEspa<ntilde>ol
(u"\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070"
u"\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070"
u"\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061"
u"\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070"
u"\u0061\u00F1\u006F\u006C",
"PorqunopuedensimplementehablarenEspaol-fmd56a"),
# (K) Vietnamese:
# T<adotbelow>isaoh<odotbelow>kh<ocirc>ngth<ecirchookabove>ch\
# <ihookabove>n<oacute>iti<ecircacute>ngVi<ecircdotbelow>t
(u"\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B"
u"\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068"
u"\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067"
u"\u0056\u0069\u1EC7\u0074",
"TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g"),
    # (L) 3<nen>B<gumi><kinpachi><sensei>
(u"\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
"3B-ww4c5e180e575a65lsy2b"),
# (M) <amuro><namie>-with-SUPER-MONKEYS
(u"\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
u"\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"
u"\u004F\u004E\u004B\u0045\u0059\u0053",
"-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n"),
# (N) Hello-Another-Way-<sorezore><no><basho>
(u"\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F"
u"\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D"
u"\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
"Hello-Another-Way--fc4qua05auwb3674vfr0b"),
# (O) <hitotsu><yane><no><shita>2
(u"\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
"2-u9tlzr9756bt3uc0v"),
# (P) Maji<de>Koi<suru>5<byou><mae>
(u"\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059"
u"\u308B\u0035\u79D2\u524D",
"MajiKoi5-783gue6qz075azm5e"),
# (Q) <pafii>de<runba>
(u"\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
"de-jg4avhby1noc0d"),
# (R) <sono><supiido><de>
(u"\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
"d9juau41awczczp"),
# (S) -> $1.00 <-
(u"\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020"
u"\u003C\u002D",
"-> $1.00 <--")
]
for i in punycode_testcases:
if len(i)!=2:
print repr(i)
class PunycodeTest(unittest.TestCase):
def test_encode(self):
for uni, puny in punycode_testcases:
# Need to convert both strings to lower case, since
# some of the extended encodings use upper case, but our
# code produces only lower case. Converting just puny to
# lower is also insufficient, since some of the input characters
# are upper case.
self.assertEqual(uni.encode("punycode").lower(), puny.lower())
def test_decode(self):
for uni, puny in punycode_testcases:
self.assertEqual(uni, puny.decode("punycode"))
class UnicodeInternalTest(unittest.TestCase):
def test_bug1251300(self):
# Decoding with unicode_internal used to not correctly handle "code
# points" above 0x10ffff on UCS-4 builds.
if sys.maxunicode > 0xffff:
ok = [
("\x00\x10\xff\xff", u"\U0010ffff"),
("\x00\x00\x01\x01", u"\U00000101"),
("", u""),
]
not_ok = [
"\x7f\xff\xff\xff",
"\x80\x00\x00\x00",
"\x81\x00\x00\x00",
"\x00",
"\x00\x00\x00\x00\x00",
]
for internal, uni in ok:
if sys.byteorder == "little":
internal = "".join(reversed(internal))
self.assertEqual(uni, internal.decode("unicode_internal"))
for internal in not_ok:
if sys.byteorder == "little":
internal = "".join(reversed(internal))
self.assertRaises(UnicodeDecodeError, internal.decode,
"unicode_internal")
def test_decode_error_attributes(self):
if sys.maxunicode > 0xffff:
try:
"\x00\x00\x00\x00\x00\x11\x11\x00".decode("unicode_internal")
except UnicodeDecodeError, ex:
self.assertEqual("unicode_internal", ex.encoding)
self.assertEqual("\x00\x00\x00\x00\x00\x11\x11\x00", ex.object)
self.assertEqual(4, ex.start)
self.assertEqual(8, ex.end)
else:
self.fail()
def test_decode_callback(self):
if sys.maxunicode > 0xffff:
codecs.register_error("UnicodeInternalTest", codecs.ignore_errors)
decoder = codecs.getdecoder("unicode_internal")
ab = u"ab".encode("unicode_internal")
ignored = decoder("%s\x22\x22\x22\x22%s" % (ab[:4], ab[4:]),
"UnicodeInternalTest")
self.assertEqual((u"ab", 12), ignored)
def test_encode_length(self):
# Issue 3739
encoder = codecs.getencoder("unicode_internal")
self.assertEqual(encoder(u"a")[1], 1)
self.assertEqual(encoder(u"\xe9\u0142")[1], 2)
encoder = codecs.getencoder("string-escape")
self.assertEqual(encoder(r'\x00')[1], 4)
# From http://www.gnu.org/software/libidn/draft-josefsson-idn-test-vectors.html
nameprep_tests = [
# 3.1 Map to nothing.
('foo\xc2\xad\xcd\x8f\xe1\xa0\x86\xe1\xa0\x8bbar'
'\xe2\x80\x8b\xe2\x81\xa0baz\xef\xb8\x80\xef\xb8\x88\xef'
'\xb8\x8f\xef\xbb\xbf',
'foobarbaz'),
# 3.2 Case folding ASCII U+0043 U+0041 U+0046 U+0045.
('CAFE',
'cafe'),
# 3.3 Case folding 8bit U+00DF (german sharp s).
# The original test case is bogus; it says \xc3\xdf
('\xc3\x9f',
'ss'),
# 3.4 Case folding U+0130 (turkish capital I with dot).
('\xc4\xb0',
'i\xcc\x87'),
# 3.5 Case folding multibyte U+0143 U+037A.
('\xc5\x83\xcd\xba',
'\xc5\x84 \xce\xb9'),
# 3.6 Case folding U+2121 U+33C6 U+1D7BB.
# XXX: skip this as it fails in UCS-2 mode
#('\xe2\x84\xa1\xe3\x8f\x86\xf0\x9d\x9e\xbb',
# 'telc\xe2\x88\x95kg\xcf\x83'),
(None, None),
# 3.7 Normalization of U+006a U+030c U+00A0 U+00AA.
('j\xcc\x8c\xc2\xa0\xc2\xaa',
'\xc7\xb0 a'),
# 3.8 Case folding U+1FB7 and normalization.
('\xe1\xbe\xb7',
'\xe1\xbe\xb6\xce\xb9'),
# 3.9 Self-reverting case folding U+01F0 and normalization.
# The original test case is bogus, it says `\xc7\xf0'
('\xc7\xb0',
'\xc7\xb0'),
# 3.10 Self-reverting case folding U+0390 and normalization.
('\xce\x90',
'\xce\x90'),
# 3.11 Self-reverting case folding U+03B0 and normalization.
('\xce\xb0',
'\xce\xb0'),
# 3.12 Self-reverting case folding U+1E96 and normalization.
('\xe1\xba\x96',
'\xe1\xba\x96'),
# 3.13 Self-reverting case folding U+1F56 and normalization.
('\xe1\xbd\x96',
'\xe1\xbd\x96'),
# 3.14 ASCII space character U+0020.
(' ',
' '),
# 3.15 Non-ASCII 8bit space character U+00A0.
('\xc2\xa0',
' '),
# 3.16 Non-ASCII multibyte space character U+1680.
('\xe1\x9a\x80',
None),
# 3.17 Non-ASCII multibyte space character U+2000.
('\xe2\x80\x80',
' '),
# 3.18 Zero Width Space U+200b.
('\xe2\x80\x8b',
''),
# 3.19 Non-ASCII multibyte space character U+3000.
('\xe3\x80\x80',
' '),
# 3.20 ASCII control characters U+0010 U+007F.
('\x10\x7f',
'\x10\x7f'),
# 3.21 Non-ASCII 8bit control character U+0085.
('\xc2\x85',
None),
# 3.22 Non-ASCII multibyte control character U+180E.
('\xe1\xa0\x8e',
None),
# 3.23 Zero Width No-Break Space U+FEFF.
('\xef\xbb\xbf',
''),
# 3.24 Non-ASCII control character U+1D175.
('\xf0\x9d\x85\xb5',
None),
# 3.25 Plane 0 private use character U+F123.
('\xef\x84\xa3',
None),
# 3.26 Plane 15 private use character U+F1234.
('\xf3\xb1\x88\xb4',
None),
# 3.27 Plane 16 private use character U+10F234.
('\xf4\x8f\x88\xb4',
None),
# 3.28 Non-character code point U+8FFFE.
('\xf2\x8f\xbf\xbe',
None),
# 3.29 Non-character code point U+10FFFF.
('\xf4\x8f\xbf\xbf',
None),
# 3.30 Surrogate code U+DF42.
('\xed\xbd\x82',
None),
# 3.31 Non-plain text character U+FFFD.
('\xef\xbf\xbd',
None),
# 3.32 Ideographic description character U+2FF5.
('\xe2\xbf\xb5',
None),
# 3.33 Display property character U+0341.
('\xcd\x81',
'\xcc\x81'),
# 3.34 Left-to-right mark U+200E.
('\xe2\x80\x8e',
None),
# 3.35 Deprecated U+202A.
('\xe2\x80\xaa',
None),
# 3.36 Language tagging character U+E0001.
('\xf3\xa0\x80\x81',
None),
# 3.37 Language tagging character U+E0042.
('\xf3\xa0\x81\x82',
None),
# 3.38 Bidi: RandALCat character U+05BE and LCat characters.
('foo\xd6\xbebar',
None),
# 3.39 Bidi: RandALCat character U+FD50 and LCat characters.
('foo\xef\xb5\x90bar',
None),
# 3.40 Bidi: RandALCat character U+FB38 and LCat characters.
('foo\xef\xb9\xb6bar',
'foo \xd9\x8ebar'),
# 3.41 Bidi: RandALCat without trailing RandALCat U+0627 U+0031.
('\xd8\xa71',
None),
# 3.42 Bidi: RandALCat character U+0627 U+0031 U+0628.
('\xd8\xa71\xd8\xa8',
'\xd8\xa71\xd8\xa8'),
# 3.43 Unassigned code point U+E0002.
# Skip this test as we allow unassigned
#('\xf3\xa0\x80\x82',
# None),
(None, None),
# 3.44 Larger test (shrinking).
# Original test case reads \xc3\xdf
('X\xc2\xad\xc3\x9f\xc4\xb0\xe2\x84\xa1j\xcc\x8c\xc2\xa0\xc2'
'\xaa\xce\xb0\xe2\x80\x80',
'xssi\xcc\x87tel\xc7\xb0 a\xce\xb0 '),
# 3.45 Larger test (expanding).
# Original test case reads \xc3\x9f
('X\xc3\x9f\xe3\x8c\x96\xc4\xb0\xe2\x84\xa1\xe2\x92\x9f\xe3\x8c'
'\x80',
'xss\xe3\x82\xad\xe3\x83\xad\xe3\x83\xa1\xe3\x83\xbc\xe3'
'\x83\x88\xe3\x83\xabi\xcc\x87tel\x28d\x29\xe3\x82'
'\xa2\xe3\x83\x91\xe3\x83\xbc\xe3\x83\x88')
]
class NameprepTest(unittest.TestCase):
def test_nameprep(self):
from encodings.idna import nameprep
for pos, (orig, prepped) in enumerate(nameprep_tests):
if orig is None:
# Skipped
continue
# The Unicode strings are given in UTF-8
orig = unicode(orig, "utf-8")
if prepped is None:
# Input contains prohibited characters
self.assertRaises(UnicodeError, nameprep, orig)
else:
prepped = unicode(prepped, "utf-8")
try:
self.assertEqual(nameprep(orig), prepped)
except Exception,e:
raise test_support.TestFailed("Test 3.%d: %s" % (pos+1, str(e)))
class IDNACodecTest(unittest.TestCase):
def test_builtin_decode(self):
self.assertEqual(unicode("python.org", "idna"), u"python.org")
self.assertEqual(unicode("python.org.", "idna"), u"python.org.")
self.assertEqual(unicode("xn--pythn-mua.org", "idna"), u"pyth\xf6n.org")
self.assertEqual(unicode("xn--pythn-mua.org.", "idna"), u"pyth\xf6n.org.")
def test_builtin_encode(self):
self.assertEqual(u"python.org".encode("idna"), "python.org")
self.assertEqual("python.org.".encode("idna"), "python.org.")
self.assertEqual(u"pyth\xf6n.org".encode("idna"), "xn--pythn-mua.org")
self.assertEqual(u"pyth\xf6n.org.".encode("idna"), "xn--pythn-mua.org.")
def test_stream(self):
import StringIO
r = codecs.getreader("idna")(StringIO.StringIO("abc"))
r.read(3)
self.assertEqual(r.read(), u"")
def test_incremental_decode(self):
self.assertEqual(
"".join(codecs.iterdecode("python.org", "idna")),
u"python.org"
)
self.assertEqual(
"".join(codecs.iterdecode("python.org.", "idna")),
u"python.org."
)
self.assertEqual(
"".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")),
u"pyth\xf6n.org."
)
self.assertEqual(
"".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")),
u"pyth\xf6n.org."
)
decoder = codecs.getincrementaldecoder("idna")()
self.assertEqual(decoder.decode("xn--xam", ), u"")
self.assertEqual(decoder.decode("ple-9ta.o", ), u"\xe4xample.")
self.assertEqual(decoder.decode(u"rg"), u"")
self.assertEqual(decoder.decode(u"", True), u"org")
decoder.reset()
self.assertEqual(decoder.decode("xn--xam", ), u"")
self.assertEqual(decoder.decode("ple-9ta.o", ), u"\xe4xample.")
self.assertEqual(decoder.decode("rg."), u"org.")
self.assertEqual(decoder.decode("", True), u"")
def test_incremental_encode(self):
self.assertEqual(
"".join(codecs.iterencode(u"python.org", "idna")),
"python.org"
)
self.assertEqual(
"".join(codecs.iterencode(u"python.org.", "idna")),
"python.org."
)
self.assertEqual(
"".join(codecs.iterencode(u"pyth\xf6n.org.", "idna")),
"xn--pythn-mua.org."
)
self.assertEqual(
"".join(codecs.iterencode(u"pyth\xf6n.org.", "idna")),
"xn--pythn-mua.org."
)
encoder = codecs.getincrementalencoder("idna")()
self.assertEqual(encoder.encode(u"\xe4x"), "")
self.assertEqual(encoder.encode(u"ample.org"), "xn--xample-9ta.")
self.assertEqual(encoder.encode(u"", True), "org")
encoder.reset()
self.assertEqual(encoder.encode(u"\xe4x"), "")
self.assertEqual(encoder.encode(u"ample.org."), "xn--xample-9ta.org.")
self.assertEqual(encoder.encode(u"", True), "")
class CodecsModuleTest(unittest.TestCase):
def test_decode(self):
self.assertEqual(codecs.decode('\xe4\xf6\xfc', 'latin-1'),
u'\xe4\xf6\xfc')
self.assertRaises(TypeError, codecs.decode)
self.assertEqual(codecs.decode('abc'), u'abc')
self.assertRaises(UnicodeDecodeError, codecs.decode, '\xff', 'ascii')
def test_encode(self):
self.assertEqual(codecs.encode(u'\xe4\xf6\xfc', 'latin-1'),
'\xe4\xf6\xfc')
self.assertRaises(TypeError, codecs.encode)
self.assertRaises(LookupError, codecs.encode, "foo", "__spam__")
self.assertEqual(codecs.encode(u'abc'), 'abc')
self.assertRaises(UnicodeEncodeError, codecs.encode, u'\xffff', 'ascii')
def test_register(self):
self.assertRaises(TypeError, codecs.register)
self.assertRaises(TypeError, codecs.register, 42)
def test_lookup(self):
self.assertRaises(TypeError, codecs.lookup)
self.assertRaises(LookupError, codecs.lookup, "__spam__")
self.assertRaises(LookupError, codecs.lookup, " ")
def test_getencoder(self):
self.assertRaises(TypeError, codecs.getencoder)
self.assertRaises(LookupError, codecs.getencoder, "__spam__")
def test_getdecoder(self):
self.assertRaises(TypeError, codecs.getdecoder)
self.assertRaises(LookupError, codecs.getdecoder, "__spam__")
def test_getreader(self):
self.assertRaises(TypeError, codecs.getreader)
self.assertRaises(LookupError, codecs.getreader, "__spam__")
def test_getwriter(self):
self.assertRaises(TypeError, codecs.getwriter)
self.assertRaises(LookupError, codecs.getwriter, "__spam__")
def test_lookup_issue1813(self):
# Issue #1813: under Turkish locales, lookup of some codecs failed
# because 'I' is lowercased as a dotless "i"
oldlocale = locale.getlocale(locale.LC_CTYPE)
self.addCleanup(locale.setlocale, locale.LC_CTYPE, oldlocale)
try:
locale.setlocale(locale.LC_CTYPE, 'tr_TR')
except locale.Error:
# Unsupported locale on this system
self.skipTest('test needs Turkish locale')
c = codecs.lookup('ASCII')
self.assertEqual(c.name, 'ascii')
def test_all(self):
api = (
"encode", "decode",
"register", "CodecInfo", "Codec", "IncrementalEncoder",
"IncrementalDecoder", "StreamReader", "StreamWriter", "lookup",
"getencoder", "getdecoder", "getincrementalencoder",
"getincrementaldecoder", "getreader", "getwriter",
"register_error", "lookup_error",
"strict_errors", "replace_errors", "ignore_errors",
"xmlcharrefreplace_errors", "backslashreplace_errors",
"open", "EncodedFile",
"iterencode", "iterdecode",
"BOM", "BOM_BE", "BOM_LE",
"BOM_UTF8", "BOM_UTF16", "BOM_UTF16_BE", "BOM_UTF16_LE",
"BOM_UTF32", "BOM_UTF32_BE", "BOM_UTF32_LE",
"BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE", # Undocumented
"StreamReaderWriter", "StreamRecoder",
)
self.assertEqual(sorted(api), sorted(codecs.__all__))
for api in codecs.__all__:
getattr(codecs, api)
class StreamReaderTest(unittest.TestCase):
def setUp(self):
self.reader = codecs.getreader('utf-8')
self.stream = StringIO.StringIO('\xed\x95\x9c\n\xea\xb8\x80')
def test_readlines(self):
f = self.reader(self.stream)
self.assertEqual(f.readlines(), [u'\ud55c\n', u'\uae00'])
class EncodedFileTest(unittest.TestCase):
def test_basic(self):
f = StringIO.StringIO('\xed\x95\x9c\n\xea\xb8\x80')
ef = codecs.EncodedFile(f, 'utf-16-le', 'utf-8')
self.assertEqual(ef.read(), '\\\xd5\n\x00\x00\xae')
f = StringIO.StringIO()
ef = codecs.EncodedFile(f, 'utf-8', 'latin1')
ef.write('\xc3\xbc')
self.assertEqual(f.getvalue(), '\xfc')
class Str2StrTest(unittest.TestCase):
def test_read(self):
sin = codecs.encode("\x80", "base64_codec")
reader = codecs.getreader("base64_codec")(StringIO.StringIO(sin))
sout = reader.read()
self.assertEqual(sout, "\x80")
self.assertIsInstance(sout, str)
def test_readline(self):
sin = codecs.encode("\x80", "base64_codec")
reader = codecs.getreader("base64_codec")(StringIO.StringIO(sin))
sout = reader.readline()
self.assertEqual(sout, "\x80")
self.assertIsInstance(sout, str)
all_unicode_encodings = [
"ascii",
"base64_codec",
"big5",
"big5hkscs",
"charmap",
"cp037",
"cp1006",
"cp1026",
"cp1140",
"cp1250",
"cp1251",
"cp1252",
"cp1253",
"cp1254",
"cp1255",
"cp1256",
"cp1257",
"cp1258",
"cp424",
"cp437",
"cp500",
"cp720",
"cp737",
"cp775",
"cp850",
"cp852",
"cp855",
"cp856",
"cp857",
"cp858",
"cp860",
"cp861",
"cp862",
"cp863",
"cp864",
"cp865",
"cp866",
"cp869",
"cp874",
"cp875",
"cp932",
"cp949",
"cp950",
"euc_jis_2004",
"euc_jisx0213",
"euc_jp",
"euc_kr",
"gb18030",
"gb2312",
"gbk",
"hex_codec",
"hp_roman8",
"hz",
"idna",
"iso2022_jp",
"iso2022_jp_1",
"iso2022_jp_2",
"iso2022_jp_2004",
"iso2022_jp_3",
"iso2022_jp_ext",
"iso2022_kr",
"iso8859_1",
"iso8859_10",
"iso8859_11",
"iso8859_13",
"iso8859_14",
"iso8859_15",
"iso8859_16",
"iso8859_2",
"iso8859_3",
"iso8859_4",
"iso8859_5",
"iso8859_6",
"iso8859_7",
"iso8859_8",
"iso8859_9",
"johab",
"koi8_r",
"koi8_u",
"latin_1",
"mac_cyrillic",
"mac_greek",
"mac_iceland",
"mac_latin2",
"mac_roman",
"mac_turkish",
"palmos",
"ptcp154",
"punycode",
"raw_unicode_escape",
"rot_13",
"shift_jis",
"shift_jis_2004",
"shift_jisx0213",
"tis_620",
"unicode_escape",
"unicode_internal",
"utf_16",
"utf_16_be",
"utf_16_le",
"utf_7",
"utf_8",
]
if hasattr(codecs, "mbcs_encode"):
all_unicode_encodings.append("mbcs")
# The following encodings work only with str, not unicode
all_string_encodings = [
"quopri_codec",
"string_escape",
"uu_codec",
]
# The following encoding is not tested, because it's not supposed
# to work:
# "undefined"
# The following encodings don't work in stateful mode
broken_unicode_with_streams = [
"base64_codec",
"hex_codec",
"punycode",
"unicode_internal"
]
broken_incremental_coders = broken_unicode_with_streams[:]
if sys.flags.py3k_warning:
broken_unicode_with_streams.append("rot_13")
# The following encodings only support "strict" mode
only_strict_mode = [
"idna",
"zlib_codec",
"bz2_codec",
]
try:
import bz2
except ImportError:
pass
else:
all_unicode_encodings.append("bz2_codec")
broken_unicode_with_streams.append("bz2_codec")
try:
import zlib
except ImportError:
pass
else:
all_unicode_encodings.append("zlib_codec")
broken_unicode_with_streams.append("zlib_codec")
class BasicUnicodeTest(unittest.TestCase):
def test_basics(self):
s = u"abc123" # all codecs should be able to encode these
for encoding in all_unicode_encodings:
name = codecs.lookup(encoding).name
if encoding.endswith("_codec"):
name += "_codec"
elif encoding == "latin_1":
name = "latin_1"
self.assertEqual(encoding.replace("_", "-"), name.replace("_", "-"))
(bytes, size) = codecs.getencoder(encoding)(s)
self.assertEqual(size, len(s), "encoding=%r" % encoding)
(chars, size) = codecs.getdecoder(encoding)(bytes)
self.assertEqual(chars, s, "encoding=%r" % encoding)
if encoding not in broken_unicode_with_streams:
# check stream reader/writer
q = Queue()
writer = codecs.getwriter(encoding)(q)
encodedresult = ""
for c in s:
writer.write(c)
encodedresult += q.read()
q = Queue()
reader = codecs.getreader(encoding)(q)
decodedresult = u""
for c in encodedresult:
q.write(c)
decodedresult += reader.read()
self.assertEqual(decodedresult, s, "encoding=%r" % encoding)
if encoding not in broken_incremental_coders:
# check incremental decoder/encoder and iterencode()/iterdecode()
try:
encoder = codecs.getincrementalencoder(encoding)()
except LookupError: # no IncrementalEncoder
pass
else:
# check incremental decoder/encoder
encodedresult = ""
for c in s:
encodedresult += encoder.encode(c)
encodedresult += encoder.encode(u"", True)
decoder = codecs.getincrementaldecoder(encoding)()
decodedresult = u""
for c in encodedresult:
decodedresult += decoder.decode(c)
decodedresult += decoder.decode("", True)
self.assertEqual(decodedresult, s,
"encoding=%r" % encoding)
# check iterencode()/iterdecode()
result = u"".join(codecs.iterdecode(
codecs.iterencode(s, encoding), encoding))
self.assertEqual(result, s, "encoding=%r" % encoding)
# check iterencode()/iterdecode() with empty string
result = u"".join(codecs.iterdecode(
codecs.iterencode(u"", encoding), encoding))
self.assertEqual(result, u"")
if encoding not in only_strict_mode:
# check incremental decoder/encoder with errors argument
try:
encoder = codecs.getincrementalencoder(encoding)("ignore")
except LookupError: # no IncrementalEncoder
pass
else:
encodedresult = "".join(encoder.encode(c) for c in s)
decoder = codecs.getincrementaldecoder(encoding)("ignore")
decodedresult = u"".join(decoder.decode(c)
for c in encodedresult)
self.assertEqual(decodedresult, s,
"encoding=%r" % encoding)
@test_support.cpython_only
def test_basics_capi(self):
from _testcapi import codec_incrementalencoder, codec_incrementaldecoder
s = u"abc123" # all codecs should be able to encode these
for encoding in all_unicode_encodings:
if encoding not in broken_incremental_coders:
# check incremental decoder/encoder and iterencode()/iterdecode()
try:
cencoder = codec_incrementalencoder(encoding)
except LookupError: # no IncrementalEncoder
pass
else:
# check C API
encodedresult = ""
for c in s:
encodedresult += cencoder.encode(c)
encodedresult += cencoder.encode(u"", True)
cdecoder = codec_incrementaldecoder(encoding)
decodedresult = u""
for c in encodedresult:
decodedresult += cdecoder.decode(c)
decodedresult += cdecoder.decode("", True)
self.assertEqual(decodedresult, s,
"encoding=%r" % encoding)
if encoding not in only_strict_mode:
# check incremental decoder/encoder with errors argument
try:
cencoder = codec_incrementalencoder(encoding, "ignore")
except LookupError: # no IncrementalEncoder
pass
else:
encodedresult = "".join(cencoder.encode(c) for c in s)
cdecoder = codec_incrementaldecoder(encoding, "ignore")
decodedresult = u"".join(cdecoder.decode(c)
for c in encodedresult)
self.assertEqual(decodedresult, s,
"encoding=%r" % encoding)
def test_seek(self):
# all codecs should be able to encode these
s = u"%s\n%s\n" % (100*u"abc123", 100*u"def456")
for encoding in all_unicode_encodings:
if encoding == "idna": # FIXME: See SF bug #1163178
continue
if encoding in broken_unicode_with_streams:
continue
reader = codecs.getreader(encoding)(StringIO.StringIO(s.encode(encoding)))
for t in xrange(5):
# Test that calling seek resets the internal codec state and buffers
reader.seek(0, 0)
line = reader.readline()
self.assertEqual(s[:len(line)], line)
def test_bad_decode_args(self):
for encoding in all_unicode_encodings:
decoder = codecs.getdecoder(encoding)
self.assertRaises(TypeError, decoder)
if encoding not in ("idna", "punycode"):
self.assertRaises(TypeError, decoder, 42)
def test_bad_encode_args(self):
for encoding in all_unicode_encodings:
encoder = codecs.getencoder(encoding)
self.assertRaises(TypeError, encoder)
def test_encoding_map_type_initialized(self):
from encodings import cp1140
# This used to crash; we are only verifying that there's no crash.
table_type = type(cp1140.encoding_table)
self.assertEqual(table_type, table_type)
class BasicStrTest(unittest.TestCase):
def test_basics(self):
s = "abc123"
for encoding in all_string_encodings:
(bytes, size) = codecs.getencoder(encoding)(s)
self.assertEqual(size, len(s))
(chars, size) = codecs.getdecoder(encoding)(bytes)
self.assertEqual(chars, s, "%r != %r (encoding=%r)" % (chars, s, encoding))
class CharmapTest(unittest.TestCase):
def test_decode_with_string_map(self):
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict", u"abc"),
(u"abc", 3)
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict", u"ab"
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, "\x00\x01\x02", "strict", u"ab\ufffe"
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace", u"ab"),
(u"ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace", u"ab\ufffe"),
(u"ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore", u"ab"),
(u"ab", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore", u"ab\ufffe"),
(u"ab", 3)
)
allbytes = "".join(chr(i) for i in xrange(256))
self.assertEqual(
codecs.charmap_decode(allbytes, "ignore", u""),
(u"", len(allbytes))
)
def test_decode_with_int2str_map(self):
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict",
{0: u'a', 1: u'b', 2: u'c'}),
(u"abc", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict",
{0: u'Aa', 1: u'Bb', 2: u'Cc'}),
(u"AaBbCc", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict",
{0: u'\U0010FFFF', 1: u'b', 2: u'c'}),
(u"\U0010FFFFbc", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict",
{0: u'a', 1: u'b', 2: u''}),
(u"ab", 3)
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, "\x00\x01\x02", "strict",
{0: u'a', 1: u'b'}
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, "\x00\x01\x02", "strict",
{0: u'a', 1: u'b', 2: None}
)
# Issue #14850
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, "\x00\x01\x02", "strict",
{0: u'a', 1: u'b', 2: u'\ufffe'}
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace",
{0: u'a', 1: u'b'}),
(u"ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace",
{0: u'a', 1: u'b', 2: None}),
(u"ab\ufffd", 3)
)
# Issue #14850
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace",
{0: u'a', 1: u'b', 2: u'\ufffe'}),
(u"ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore",
{0: u'a', 1: u'b'}),
(u"ab", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore",
{0: u'a', 1: u'b', 2: None}),
(u"ab", 3)
)
# Issue #14850
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore",
{0: u'a', 1: u'b', 2: u'\ufffe'}),
(u"ab", 3)
)
allbytes = "".join(chr(i) for i in xrange(256))
self.assertEqual(
codecs.charmap_decode(allbytes, "ignore", {}),
(u"", len(allbytes))
)
def test_decode_with_int2int_map(self):
a = ord(u'a')
b = ord(u'b')
c = ord(u'c')
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict",
{0: a, 1: b, 2: c}),
(u"abc", 3)
)
# Issue #15379
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict",
{0: 0x10FFFF, 1: b, 2: c}),
(u"\U0010FFFFbc", 3)
)
self.assertRaises(TypeError,
codecs.charmap_decode, "\x00\x01\x02", "strict",
{0: 0x110000, 1: b, 2: c}
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, "\x00\x01\x02", "strict",
{0: a, 1: b},
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, "\x00\x01\x02", "strict",
{0: a, 1: b, 2: 0xFFFE},
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace",
{0: a, 1: b}),
(u"ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace",
{0: a, 1: b, 2: 0xFFFE}),
(u"ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore",
{0: a, 1: b}),
(u"ab", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore",
{0: a, 1: b, 2: 0xFFFE}),
(u"ab", 3)
)
class WithStmtTest(unittest.TestCase):
def test_encodedfile(self):
f = StringIO.StringIO("\xc3\xbc")
with codecs.EncodedFile(f, "latin-1", "utf-8") as ef:
self.assertEqual(ef.read(), "\xfc")
def test_streamreaderwriter(self):
f = StringIO.StringIO("\xc3\xbc")
info = codecs.lookup("utf-8")
with codecs.StreamReaderWriter(f, info.streamreader,
info.streamwriter, 'strict') as srw:
self.assertEqual(srw.read(), u"\xfc")
class UnicodeEscapeTest(unittest.TestCase):
def test_empty(self):
self.assertEqual(codecs.unicode_escape_encode(u""), ("", 0))
self.assertEqual(codecs.unicode_escape_decode(""), (u"", 0))
def test_raw_encode(self):
encode = codecs.unicode_escape_encode
for b in range(32, 127):
if b != ord('\\'):
self.assertEqual(encode(unichr(b)), (chr(b), 1))
def test_raw_decode(self):
decode = codecs.unicode_escape_decode
for b in range(256):
if b != ord('\\'):
self.assertEqual(decode(chr(b) + '0'), (unichr(b) + u'0', 2))
def test_escape_encode(self):
encode = codecs.unicode_escape_encode
check = coding_checker(self, encode)
check(u'\t', r'\t')
check(u'\n', r'\n')
check(u'\r', r'\r')
check(u'\\', r'\\')
for b in range(32):
if chr(b) not in '\t\n\r':
check(unichr(b), '\\x%02x' % b)
for b in range(127, 256):
check(unichr(b), '\\x%02x' % b)
check(u'\u20ac', r'\u20ac')
check(u'\U0001d120', r'\U0001d120')
def test_escape_decode(self):
decode = codecs.unicode_escape_decode
check = coding_checker(self, decode)
check("[\\\n]", u"[]")
check(r'[\"]', u'["]')
check(r"[\']", u"[']")
check(r"[\\]", ur"[\]")
check(r"[\a]", u"[\x07]")
check(r"[\b]", u"[\x08]")
check(r"[\t]", u"[\x09]")
check(r"[\n]", u"[\x0a]")
check(r"[\v]", u"[\x0b]")
check(r"[\f]", u"[\x0c]")
check(r"[\r]", u"[\x0d]")
check(r"[\7]", u"[\x07]")
check(r"[\8]", ur"[\8]")
check(r"[\78]", u"[\x078]")
check(r"[\41]", u"[!]")
check(r"[\418]", u"[!8]")
check(r"[\101]", u"[A]")
check(r"[\1010]", u"[A0]")
check(r"[\x41]", u"[A]")
check(r"[\x410]", u"[A0]")
check(r"\u20ac", u"\u20ac")
check(r"\U0001d120", u"\U0001d120")
for b in range(256):
if chr(b) not in '\n"\'\\abtnvfr01234567xuUN':
check('\\' + chr(b), u'\\' + unichr(b))
def test_decode_errors(self):
decode = codecs.unicode_escape_decode
for c, d in ('x', 2), ('u', 4), ('U', 4):
for i in range(d):
self.assertRaises(UnicodeDecodeError, decode,
"\\" + c + "0"*i)
self.assertRaises(UnicodeDecodeError, decode,
"[\\" + c + "0"*i + "]")
data = "[\\" + c + "0"*i + "]\\" + c + "0"*i
self.assertEqual(decode(data, "ignore"), (u"[]", len(data)))
self.assertEqual(decode(data, "replace"),
(u"[\ufffd]\ufffd", len(data)))
self.assertRaises(UnicodeDecodeError, decode, r"\U00110000")
self.assertEqual(decode(r"\U00110000", "ignore"), (u"", 10))
self.assertEqual(decode(r"\U00110000", "replace"), (u"\ufffd", 10))
class RawUnicodeEscapeTest(unittest.TestCase):
def test_empty(self):
self.assertEqual(codecs.raw_unicode_escape_encode(u""), ("", 0))
self.assertEqual(codecs.raw_unicode_escape_decode(""), (u"", 0))
def test_raw_encode(self):
encode = codecs.raw_unicode_escape_encode
for b in range(256):
self.assertEqual(encode(unichr(b)), (chr(b), 1))
def test_raw_decode(self):
decode = codecs.raw_unicode_escape_decode
for b in range(256):
self.assertEqual(decode(chr(b) + '0'), (unichr(b) + u'0', 2))
def test_escape_encode(self):
encode = codecs.raw_unicode_escape_encode
check = coding_checker(self, encode)
for b in range(256):
if chr(b) not in 'uU':
check(u'\\' + unichr(b), '\\' + chr(b))
check(u'\u20ac', r'\u20ac')
check(u'\U0001d120', r'\U0001d120')
def test_escape_decode(self):
decode = codecs.raw_unicode_escape_decode
check = coding_checker(self, decode)
for b in range(256):
if chr(b) not in 'uU':
check('\\' + chr(b), u'\\' + unichr(b))
check(r"\u20ac", u"\u20ac")
check(r"\U0001d120", u"\U0001d120")
def test_decode_errors(self):
decode = codecs.raw_unicode_escape_decode
for c, d in ('u', 4), ('U', 4):
for i in range(d):
self.assertRaises(UnicodeDecodeError, decode,
"\\" + c + "0"*i)
self.assertRaises(UnicodeDecodeError, decode,
"[\\" + c + "0"*i + "]")
data = "[\\" + c + "0"*i + "]\\" + c + "0"*i
self.assertEqual(decode(data, "ignore"), (u"[]", len(data)))
self.assertEqual(decode(data, "replace"),
(u"[\ufffd]\ufffd", len(data)))
self.assertRaises(UnicodeDecodeError, decode, r"\U00110000")
self.assertEqual(decode(r"\U00110000", "ignore"), (u"", 10))
self.assertEqual(decode(r"\U00110000", "replace"), (u"\ufffd", 10))
class BomTest(unittest.TestCase):
def test_seek0(self):
data = u"1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
self.addCleanup(test_support.unlink, test_support.TESTFN)
for encoding in tests:
# Check if the BOM is written only once
with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
# Check that the BOM is written after a seek(0)
with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
f.write(data[0])
self.assertNotEqual(f.tell(), 0)
f.seek(0)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data)
# (StreamWriter) Check that the BOM is written after a seek(0)
with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
f.writer.write(data[0])
self.assertNotEqual(f.writer.tell(), 0)
f.writer.seek(0)
f.writer.write(data)
f.seek(0)
self.assertEqual(f.read(), data)
# Check that the BOM is not written after a seek() at a position
# different than the start
with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
f.write(data)
f.seek(f.tell())
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
# (StreamWriter) Check that the BOM is not written after a seek()
# at a position different than the start
with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
f.writer.write(data)
f.writer.seek(f.writer.tell())
f.writer.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
class TransformCodecTest(unittest.TestCase):
def test_quopri_stateless(self):
# Should encode with quotetabs=True
encoded = codecs.encode(b"space tab\teol \n", "quopri-codec")
self.assertEqual(encoded, b"space=20tab=09eol=20\n")
# But should still support unescaped tabs and spaces
unescaped = b"space tab eol\n"
self.assertEqual(codecs.decode(unescaped, "quopri-codec"), unescaped)
def test_uu_invalid(self):
# Missing "begin" line
self.assertRaises(ValueError, codecs.decode, "", "uu-codec")
def test_main():
test_support.run_unittest(
UTF32Test,
UTF32LETest,
UTF32BETest,
UTF16Test,
UTF16LETest,
UTF16BETest,
UTF8Test,
UTF8SigTest,
UTF7Test,
UTF16ExTest,
ReadBufferTest,
CharBufferTest,
EscapeDecodeTest,
RecodingTest,
PunycodeTest,
UnicodeInternalTest,
NameprepTest,
IDNACodecTest,
CodecsModuleTest,
StreamReaderTest,
EncodedFileTest,
Str2StrTest,
BasicUnicodeTest,
BasicStrTest,
CharmapTest,
WithStmtTest,
UnicodeEscapeTest,
RawUnicodeEscapeTest,
BomTest,
TransformCodecTest,
)
if __name__ == "__main__":
test_main()
|
Jeff-Tian/mybnb
|
Python27/Lib/test/test_codecs.py
|
Python
|
apache-2.0
| 82,840
|
[
"FEFF"
] |
352d901de7740a6624c839e17f0fe3f88106cadf61446cc4726e4d4521185609
|
# (C) British Crown Copyright 2010 - 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test pickling of Iris objects.
"""
from __future__ import with_statement
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import cPickle
import StringIO
import biggus
import numpy as np
import iris
class TestPickle(tests.IrisTest):
def pickle_then_unpickle(self, obj):
"""Returns a generator of ("cpickle protocol number", object) tuples."""
for protocol in xrange(1 + cPickle.HIGHEST_PROTOCOL):
str_buffer = StringIO.StringIO()
cPickle.dump(obj, str_buffer, protocol)
# move the str_buffer back to the start and reconstruct
str_buffer.seek(0)
reconstructed_obj = cPickle.load(str_buffer)
yield protocol, reconstructed_obj
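    # Typical use, as in the tests below (a sketch):
    #
    #   for protocol, clone in self.pickle_then_unpickle(obj):
    #       self.assertEqual(obj, clone)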
def assertCubeData(self, cube1, cube2):
np.testing.assert_array_equal(cube1.lazy_data().ndarray(),
cube2.lazy_data().ndarray())
@tests.skip_data
def test_cube_pickle(self):
cube = iris.load_cube(tests.get_data_path(('PP', 'globClim1', 'theta.pp')))
self.assertTrue(cube.has_lazy_data())
self.assertCML(cube, ('cube_io', 'pickling', 'theta.cml'), checksum=False)
for _, recon_cube in self.pickle_then_unpickle(cube):
self.assertTrue(recon_cube.has_lazy_data())
self.assertCML(recon_cube, ('cube_io', 'pickling', 'theta.cml'), checksum=False)
self.assertCubeData(cube, recon_cube)
@tests.skip_data
def test_cube_with_deferred_coord_points(self):
# Data with 2d lats and lons that, when loaded, result in points that
# are LazyArray objects.
filename = tests.get_data_path(('NetCDF',
'rotated',
'xy',
'rotPole_landAreaFraction.nc'))
cube = iris.load_cube(filename)
# Pickle and unpickle. Do not perform any CML tests
# to avoid side effects.
_, recon_cube = next(self.pickle_then_unpickle(cube))
self.assertEqual(recon_cube, cube)
@tests.skip_data
def test_cubelist_pickle(self):
cubelist = iris.load(tests.get_data_path(('PP', 'COLPEX', 'theta_and_orog_subset.pp')))
single_cube = cubelist[0]
self.assertCML(cubelist, ('cube_io', 'pickling', 'cubelist.cml'))
self.assertCML(single_cube, ('cube_io', 'pickling', 'single_cube.cml'))
for _, reconstructed_cubelist in self.pickle_then_unpickle(cubelist):
self.assertCML(reconstructed_cubelist, ('cube_io', 'pickling', 'cubelist.cml'))
self.assertCML(reconstructed_cubelist[0], ('cube_io', 'pickling', 'single_cube.cml'))
for cube_orig, cube_reconstruct in zip(cubelist, reconstructed_cubelist):
self.assertArrayEqual(cube_orig.data, cube_reconstruct.data)
self.assertEqual(cube_orig, cube_reconstruct)
def test_pickling_equality_misc(self):
items_to_test = [
iris.unit.Unit("hours since 2007-01-15 12:06:00", calendar=iris.unit.CALENDAR_STANDARD),
iris.unit.as_unit('1'),
iris.unit.as_unit('meters'),
iris.unit.as_unit('no-unit'),
iris.unit.as_unit('unknown')
]
for orig_item in items_to_test:
for protocol, reconstructed_item in self.pickle_then_unpickle(orig_item):
fail_msg = ('Items are different after pickling at protocol %s.'
'\nOrig item: %r\nNew item: %r' % (protocol, orig_item, reconstructed_item)
)
self.assertEqual(orig_item, reconstructed_item, fail_msg)
if __name__ == "__main__":
tests.main()
|
scollis/iris
|
lib/iris/tests/test_pickling.py
|
Python
|
gpl-3.0
| 4,584
|
[
"NetCDF"
] |
c8ea6cdb2fdb6ee7c7926142c26b8d6d8057c20c3ecd5f02c7cb4bcaf4f0ef8b
|
"""feedfinder: Find the Web feed for a Web page
http://www.aaronsw.com/2002/feedfinder/
Usage:
feed(uri) - returns feed found for a URI
feeds(uri) - returns all feeds found for a URI
>>> import feedfinder
>>> feedfinder.feed('scripting.com')
'http://scripting.com/rss.xml'
>>>
>>> feedfinder.feeds('scripting.com')
['http://delong.typepad.com/sdj/atom.xml',
'http://delong.typepad.com/sdj/index.rdf',
'http://delong.typepad.com/sdj/rss.xml']
>>>
Can also use from the command line. Feeds are returned one per line:
$ python feedfinder.py diveintomark.org
http://diveintomark.org/xml/atom.xml
How it works:
0. At every step, feeds are minimally verified to make sure they are really feeds.
1. If the URI points to a feed, it is simply returned; otherwise
the page is downloaded and the real fun begins.
2. Feeds pointed to by LINK tags in the header of the page (autodiscovery)
3. <A> links to feeds on the same server ending in ".rss", ".rdf", ".xml", or
".atom"
4. <A> links to feeds on the same server containing "rss", "rdf", "xml", or "atom"
5. <A> links to feeds on external servers ending in ".rss", ".rdf", ".xml", or
".atom"
6. <A> links to feeds on external servers containing "rss", "rdf", "xml", or "atom"
7. Try some guesses about common places for feeds (index.xml, atom.xml, etc.).
8. As a last ditch effort, we search Syndic8 for feeds matching the URI
"""
__version__ = "1.371"
__date__ = "2006-04-24"
__maintainer__ = "Aaron Swartz (me@aaronsw.com)"
__author__ = "Mark Pilgrim (http://diveintomark.org)"
__copyright__ = "Copyright 2002-4, Mark Pilgrim; 2006 Aaron Swartz"
__license__ = "Python"
__credits__ = """Abe Fettig for a patch to sort Syndic8 feeds by popularity
Also Jason Diamond, Brian Lalor for bug reporting and patches"""
_debug = 0
import sgmllib, urllib, urlparse, re, sys, robotparser
import threading
class TimeoutError(Exception): pass
def timelimit(timeout):
"""borrowed from web.py"""
def _1(function):
def _2(*args, **kw):
class Dispatch(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.result = None
self.error = None
self.setDaemon(True)
self.start()
def run(self):
try:
self.result = function(*args, **kw)
except:
self.error = sys.exc_info()
c = Dispatch()
c.join(timeout)
if c.isAlive():
raise TimeoutError, 'took too long'
if c.error:
raise c.error[0], c.error[1]
return c.result
return _2
return _1
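# A minimal usage sketch (the function name and body here are hypothetical):
# a decorated call that overruns the limit raises TimeoutError instead of
# blocking the caller forever.
#
#   @timelimit(5)
#   def fetch(url):
#       return urllib.urlopen(url).read()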
# XML-RPC support allows feedfinder to query Syndic8 for possible matches.
# Python 2.3 now comes with this module by default, otherwise you can download it
try:
import xmlrpclib # http://www.pythonware.com/products/xmlrpc/
except ImportError:
xmlrpclib = None
try:
    dict
except NameError:
    # ancient Pythons lack the dict() constructor; provide a minimal fallback
    def dict(aList):
        rc = {}
        for k, v in aList:
            rc[k] = v
        return rc
def _debuglog(message):
if _debug: print message
class URLGatekeeper:
"""a class to track robots.txt rules across multiple servers"""
def __init__(self):
self.rpcache = {} # a dictionary of RobotFileParser objects, by domain
self.urlopener = urllib.FancyURLopener()
self.urlopener.version = "feedfinder/" + __version__ + " " + self.urlopener.version + " +http://www.aaronsw.com/2002/feedfinder/"
_debuglog(self.urlopener.version)
self.urlopener.addheaders = [('User-agent', self.urlopener.version)]
robotparser.URLopener.version = self.urlopener.version
robotparser.URLopener.addheaders = self.urlopener.addheaders
def _getrp(self, url):
protocol, domain = urlparse.urlparse(url)[:2]
if self.rpcache.has_key(domain):
return self.rpcache[domain]
baseurl = '%s://%s' % (protocol, domain)
robotsurl = urlparse.urljoin(baseurl, 'robots.txt')
_debuglog('fetching %s' % robotsurl)
rp = robotparser.RobotFileParser(robotsurl)
try:
rp.read()
except:
pass
self.rpcache[domain] = rp
return rp
def can_fetch(self, url):
rp = self._getrp(url)
allow = rp.can_fetch(self.urlopener.version, url)
_debuglog("gatekeeper of %s says %s" % (url, allow))
return allow
@timelimit(15) # seconds: the limit is passed to Thread.join(), which takes seconds
def get(self, url, check=True):
if check and not self.can_fetch(url): return ''
try:
return self.urlopener.open(url).read()
except:
return ''
_gatekeeper = URLGatekeeper()
class BaseParser(sgmllib.SGMLParser):
def __init__(self, baseuri):
sgmllib.SGMLParser.__init__(self)
self.links = []
self.baseuri = baseuri
def normalize_attrs(self, attrs):
def cleanattr(v):
v = sgmllib.charref.sub(lambda m: unichr(int(m.groups()[0])), v)
v = v.strip()
v = v.replace('&lt;', '<').replace('&gt;', '>').replace('&apos;', "'").replace('&quot;', '"').replace('&amp;', '&')
return v
attrs = [(k.lower(), cleanattr(v)) for k, v in attrs]
attrs = [(k, k in ('rel','type') and v.lower() or v) for k, v in attrs]
return attrs
def do_base(self, attrs):
attrsD = dict(self.normalize_attrs(attrs))
if not attrsD.has_key('href'): return
self.baseuri = attrsD['href']
def error(self, *a, **kw): pass # we're not picky
class LinkParser(BaseParser):
FEED_TYPES = ('application/rss+xml',
'text/xml',
'application/atom+xml',
'application/x.atom+xml',
'application/x-atom+xml')
def do_link(self, attrs):
attrsD = dict(self.normalize_attrs(attrs))
if not attrsD.has_key('rel'): return
rels = attrsD['rel'].split()
if 'alternate' not in rels: return
if attrsD.get('type') not in self.FEED_TYPES: return
if not attrsD.has_key('href'): return
self.links.append(urlparse.urljoin(self.baseuri, attrsD['href']))
class ALinkParser(BaseParser):
def start_a(self, attrs):
attrsD = dict(self.normalize_attrs(attrs))
if not attrsD.has_key('href'): return
self.links.append(urlparse.urljoin(self.baseuri, attrsD['href']))
def makeFullURI(uri):
uri = uri.strip()
if uri.startswith('feed://'):
uri = 'http://' + uri.split('feed://', 1).pop()
for x in ['http', 'https']:
if uri.startswith('%s://' % x):
return uri
return 'http://%s' % uri
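# e.g. (example.com inputs are illustrative):
#   makeFullURI('feed://example.com/rss') -> 'http://example.com/rss'
#   makeFullURI('scripting.com')          -> 'http://scripting.com'
#   makeFullURI('https://example.com/')   -> 'https://example.com/' (unchanged)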
def getLinks(data, baseuri):
p = LinkParser(baseuri)
p.feed(data)
return p.links
def getALinks(data, baseuri):
p = ALinkParser(baseuri)
p.feed(data)
return p.links
def getLocalLinks(links, baseuri):
baseuri = baseuri.lower()
return [l for l in links if l.lower().startswith(baseuri)]
def isFeedLink(link):
    # note: '.atom' is five characters, so checking link[-4:] alone would miss it
    link = link.lower()
    return link[-4:] in ('.rss', '.rdf', '.xml') or link[-5:] == '.atom'
def isXMLRelatedLink(link):
link = link.lower()
return link.count('rss') + link.count('rdf') + link.count('xml') + link.count('atom')
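# e.g. (illustrative URLs):
#   isFeedLink('http://example.com/index.rss')     -> True  (suffix match)
#   isFeedLink('http://example.com/page.html')     -> False
#   isXMLRelatedLink('http://example.com/rssfeed') -> 1 (truthy substring count)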
r_brokenRedirect = re.compile('<newLocation[^>]*>(.*?)</newLocation>', re.S)
def tryBrokenRedirect(data):
if '<newLocation' in data:
newuris = r_brokenRedirect.findall(data)
if newuris: return newuris[0].strip()
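# e.g. tryBrokenRedirect('<newLocation>http://example.com/feed</newLocation>')
#      -> 'http://example.com/feed' (and None when no <newLocation> is present)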
def couldBeFeedData(data):
data = data.lower()
if data.count('<html'): return 0
return data.count('<rss') + data.count('<rdf') + data.count('<feed')
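# e.g. couldBeFeedData('<rss version="2.0">...')  -> 1 (truthy: looks like a feed)
#      couldBeFeedData('<html><head>...</head>')  -> 0 (an HTML page, not a feed)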
def isFeed(uri):
_debuglog('seeing if %s is a feed' % uri)
    scheme = urlparse.urlparse(uri)[0]
    if scheme not in ('http', 'https'): return 0
data = _gatekeeper.get(uri)
return couldBeFeedData(data)
def sortFeeds(feed1Info, feed2Info):
return cmp(feed2Info['headlines_rank'], feed1Info['headlines_rank'])
def getFeedsFromSyndic8(uri):
feeds = []
try:
server = xmlrpclib.Server('http://www.syndic8.com/xmlrpc.php')
feedids = server.syndic8.FindFeeds(uri)
infolist = server.syndic8.GetFeedInfo(feedids, ['headlines_rank','status','dataurl'])
infolist.sort(sortFeeds)
feeds = [f['dataurl'] for f in infolist if f['status']=='Syndicated']
_debuglog('found %s feeds through Syndic8' % len(feeds))
except:
pass
return feeds
def feeds(uri, all=False, querySyndic8=False, _recurs=None):
if _recurs is None: _recurs = [uri]
fulluri = makeFullURI(uri)
try:
data = _gatekeeper.get(fulluri, check=False)
except:
return []
# is this already a feed?
if couldBeFeedData(data):
return [fulluri]
newuri = tryBrokenRedirect(data)
if newuri and newuri not in _recurs:
_recurs.append(newuri)
return feeds(newuri, all=all, querySyndic8=querySyndic8, _recurs=_recurs)
# nope, it's a page, try LINK tags first
_debuglog('looking for LINK tags')
try:
outfeeds = getLinks(data, fulluri)
except:
outfeeds = []
_debuglog('found %s feeds through LINK tags' % len(outfeeds))
outfeeds = filter(isFeed, outfeeds)
if all or not outfeeds:
# no LINK tags, look for regular <A> links that point to feeds
_debuglog('no LINK tags, looking at A tags')
try:
links = getALinks(data, fulluri)
except:
links = []
locallinks = getLocalLinks(links, fulluri)
# look for obvious feed links on the same server
outfeeds.extend(filter(isFeed, filter(isFeedLink, locallinks)))
if all or not outfeeds:
# look harder for feed links on the same server
outfeeds.extend(filter(isFeed, filter(isXMLRelatedLink, locallinks)))
if all or not outfeeds:
# look for obvious feed links on another server
outfeeds.extend(filter(isFeed, filter(isFeedLink, links)))
if all or not outfeeds:
# look harder for feed links on another server
outfeeds.extend(filter(isFeed, filter(isXMLRelatedLink, links)))
if all or not outfeeds:
_debuglog('no A tags, guessing')
suffixes = [ # filenames used by popular software:
'atom.xml', # blogger, TypePad
'index.atom', # MT, apparently
'index.rdf', # MT
'rss.xml', # Dave Winer/Manila
'index.xml', # MT
'index.rss' # Slash
]
outfeeds.extend(filter(isFeed, [urlparse.urljoin(fulluri, x) for x in suffixes]))
if (all or not outfeeds) and querySyndic8:
# still no luck, search Syndic8 for feeds (requires xmlrpclib)
_debuglog('still no luck, searching Syndic8')
outfeeds.extend(getFeedsFromSyndic8(uri))
if hasattr(__builtins__, 'set') or __builtins__.has_key('set'):
outfeeds = list(set(outfeeds))
return outfeeds
getFeeds = feeds # backwards-compatibility
def feed(uri):
#todo: give preference to certain feed formats
feedlist = feeds(uri)
if feedlist:
return feedlist[0]
else:
return None
##### test harness ######
def test():
uri = 'http://diveintomark.org/tests/client/autodiscovery/html4-001.html'
failed = []
count = 0
while 1:
data = _gatekeeper.get(uri)
if data.find('Atom autodiscovery test') == -1: break
sys.stdout.write('.')
sys.stdout.flush()
count += 1
links = getLinks(data, uri)
if not links:
print '\n*** FAILED ***', uri, 'could not find link'
failed.append(uri)
elif len(links) > 1:
print '\n*** FAILED ***', uri, 'found too many links'
failed.append(uri)
else:
atomdata = urllib.urlopen(links[0]).read()
if atomdata.find('<link rel="alternate"') == -1:
print '\n*** FAILED ***', uri, 'retrieved something that is not a feed'
failed.append(uri)
else:
backlink = atomdata.split('href="').pop().split('"')[0]
if backlink != uri:
print '\n*** FAILED ***', uri, 'retrieved wrong feed'
failed.append(uri)
if data.find('<link rel="next" href="') == -1: break
uri = urlparse.urljoin(uri, data.split('<link rel="next" href="').pop().split('"')[0])
print
print count, 'tests executed,', len(failed), 'failed'
if __name__ == '__main__':
args = sys.argv[1:]
if args and args[0] == '--debug':
_debug = 1
args.pop(0)
if args:
uri = args[0]
else:
uri = 'http://diveintomark.org/'
if uri == 'test':
test()
else:
print "\n".join(getFeeds(uri))
|
lerouxb/seymour
|
thirdparty/feedfinder.py
|
Python
|
mit
| 12,984
|
[
"Brian"
] |
bdaab14acc7b580e789f457a908ec0c48dd3ec770293dcc708f03055be408829
|
"""
Author: Eric J. Ma
Purpose:
This Python module provides a dictionary for retrieving plasmid backbones as a
list of BioPython SeqRecord object.
"""
import os
from Bio import SeqIO
plasmid_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'plasmid_backbones')
plasmids = {f.replace('.fasta', ''): [s for s in
                                      SeqIO.parse(os.path.join(plasmid_dir, f),
                                                  'fasta')]
            for f in os.listdir(plasmid_dir) if f.endswith('.fasta')}
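# Example usage (a sketch; 'pUC19' is a hypothetical backbone name -- any
# FASTA file placed in plasmid_backbones/ becomes a key here):
#
#   from FluGibson.plasmids import plasmids
#   records = plasmids['pUC19']  # list of Bio.SeqRecord.SeqRecord objects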
|
ericmjl/flu-gibson
|
FluGibson/plasmids.py
|
Python
|
mit
| 541
|
[
"Biopython"
] |
09445572f4c8e26b87c6337e83d7ced97d7b956fca7bd1fbbc77f4b0c6f6bc50
|
#
# Copyright (c) 2016, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
import mx
import mx_fastr
import os, string, shutil
from os.path import join
class FastRProjectAdapter(mx.ArchivableProject):
def __init__(self, suite, name, deps, workingSets, theLicense, **args):
mx.ArchivableProject.__init__(self, suite, name, deps, workingSets, theLicense)
self.dir = join(suite.dir, name)
def output_dir(self):
return self.dir
def archive_prefix(self):
return ""
def _get_files(self, d, results, filterfun=None):
for root, _, files in os.walk(join(self.dir, d)):
for f in files:
if not filterfun or filterfun(f):
results.append(join(root, f))
class FastRNativeProject(FastRProjectAdapter):
'''
Custom class for building the com.oracle.truffle.r.native project.
The customization is to support the creation of an exact FASTR_NATIVE_DEV distribution.
'''
def __init__(self, suite, name, deps, workingSets, theLicense, **args):
FastRProjectAdapter.__init__(self, suite, name, deps, workingSets, theLicense)
def getBuildTask(self, args):
return mx.NativeBuildTask(args, self)
def _get_gnur_files(self, gnur_dir, files, results):
for f in files:
results.append(join(self.dir, gnur_dir, f))
def getResults(self):
'''
Capture all the files from the com.oracle.truffle.r.native project that are needed
in an alternative implementation of the R FFI. This includes some files from GNU R.
This code has to be kept in sync with the FFI implementation.
'''
# plain files
results = [join(self.dir, "platform.mk")]
gnur = join('gnur', mx_fastr.r_version())
gnur_appl = join(gnur, 'src', 'appl')
self._get_gnur_files(gnur_appl, ['pretty.c', 'interv.c'], results)
gnur_main = join(gnur, 'src', 'main')
self._get_gnur_files(gnur_main, ['colors.c', 'devices.c', 'engine.c', 'format.c', 'graphics.c',
'plot.c', 'plot3d.c', 'plotmath.c', 'rlocale.c', 'sort.c'], results)
# these files are not compiled, just "included"
self._get_gnur_files(gnur_main, ['xspline.c', 'rlocale_data.h'], results)
# directories
for d in ["fficall/src/common", "fficall/src/include", "fficall/src/variable_defs"]:
self._get_files(d, results)
def is_dot_h(f):
ext = os.path.splitext(f)[1]
return ext == '.h'
# just the .h files from 'include'
self._get_files('include', results, is_dot_h)
# tools for alternate impl of gramRd.c
gnur_tools = join(gnur, 'library', 'tools')
self._get_files(gnur_tools, results)
gnur_tools_src = join(gnur, 'src', 'library', 'tools', 'src')
for f in ['gramRd.c', 'init.c', 'tools.h']:
results.append(join(self.dir, gnur_tools_src, f))
for f in ['lib.mk', 'Makefile', 'tools/src/tools_dummy.c', 'tools/src/gramRd_fastr.h', 'tools/Makefile']:
results.append(join(self.dir, 'library', f))
# selected headers from GNU R source
with open(join(self.dir, 'fficall/src/include/gnurheaders.mk')) as f:
lines = f.readlines()
for line in lines:
if '$(GNUR_HOME)' in line:
parts = line.split(' ')
results.append(join(self.dir, parts[2].rstrip().replace('$(GNUR_HOME)', gnur)))
def is_ddot_o(f):
ext = os.path.splitext(f)[1]
return f[0] == 'd' and ext == '.o'
# binary files from GNU R
self._get_files(gnur_appl, results, is_ddot_o)
return results
class FastRTestNativeProject(FastRProjectAdapter):
'''
Custom class for building the com.oracle.truffle.r.test.native project.
The customization is to support the creation of an exact FASTR_NATIVE_DEV distribution.
'''
def __init__(self, suite, name, deps, workingSets, theLicense, **args):
FastRProjectAdapter.__init__(self, suite, name, deps, workingSets, theLicense)
def getBuildTask(self, args):
return mx.NativeBuildTask(args, self)
def getResults(self):
'''
Capture all the files from the com.oracle.truffle.r.test.native project that are needed
for running unit tests in an alternate implementation.
'''
# plain files
results = []
self._get_files(join('packages', 'recommended'), results)
fastr_packages = []
fastr_packages_dir = join(self.dir, 'packages')
for root, dirs, _ in os.walk(fastr_packages_dir):
for d in dirs:
if d == 'recommended':
continue
if os.path.isdir(join(root, d)):
fastr_packages.append(d)
break
for p in fastr_packages:
results.append(join(fastr_packages_dir, p, 'lib', p + '.tar'))
results.append(join(self.dir, 'urand', 'lib', 'liburand.so'))
return results
class FastRReleaseProject(FastRProjectAdapter):
'''
Custom class for creating the FastR release project, which supports the
FASTR_RELEASE distribution.
'''
def __init__(self, suite, name, deps, workingSets, theLicense, **args):
FastRProjectAdapter.__init__(self, suite, name, deps, workingSets, theLicense)
def getResults(self):
results = []
if os.environ.has_key('FASTR_RELEASE'):
for rdir in ['bin', 'include', 'lib', 'library', 'etc', 'share', 'doc']:
self._get_files(rdir, results)
results.append(join(self.dir, 'LICENSE'))
results.append(join(self.dir, 'COPYRIGHT'))
results.append(join(self.dir, 'README.md'))
return results
def getBuildTask(self, args):
return ReleaseBuildTask(self, args)
class ReleaseBuildTask(mx.NativeBuildTask):
def __init__(self, project, args):
mx.NativeBuildTask.__init__(self, args, project)
def _template(self, source, target, dictionary):
class LauncherTemplate(string.Template):
delimiter = '%%'
with open(target, "w") as targetFile:
targetFile.write(LauncherTemplate(open(source).read()).substitute(dictionary))
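    # For example (a sketch; the launcher line is hypothetical), with
    # dictionary = {'CLASSPATH': '/a.jar:/b.jar'} a template line such as
    #   java -cp %%CLASSPATH ...
    # is written to the target file as
    #   java -cp /a.jar:/b.jar ...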
def build(self):
if not os.environ.has_key('FASTR_RELEASE'):
mx.log('FastR: set FASTR_RELEASE to update release project')
return
# copy the release directories
output_dir = self.subject.dir
fastr_dir = mx_fastr._fastr_suite.dir
for d in ['bin', 'include', 'lib', 'library', 'etc', 'share', 'doc']:
target_dir = join(output_dir, d)
if os.path.exists(target_dir):
shutil.rmtree(target_dir)
shutil.copytree(join(fastr_dir, d), target_dir)
# copyrights
copyrights_dir = join(fastr_dir, 'mx.fastr', 'copyrights')
with open(join(output_dir, 'COPYRIGHT'), 'w') as outfile:
for copyright_file in os.listdir(copyrights_dir):
basename = os.path.basename(copyright_file)
if basename.endswith('copyright.star'):
with open(join(copyrights_dir, copyright_file)) as infile:
data = infile.read()
outfile.write(data)
# license/README
shutil.copy(join(fastr_dir, 'LICENSE'), output_dir)
shutil.copy(join(fastr_dir, 'README.md'), output_dir)
# canonicalize R_HOME_DIR in bin/R
bin_dir = join(output_dir, 'bin')
rcmd = join(bin_dir, 'R')
# R is the generic shell script (taken essentially verbatim from GNU R)
with open(rcmd) as f:
lines = f.readlines()
with open(rcmd, 'w') as f:
for line in lines:
if line.startswith('R_HOME_DIR='):
f.write('R_HOME_DIR="$(dirname $0)/.."\n')
# produces a canonical path
line = 'R_HOME_DIR="$(unset CDPATH && cd ${R_HOME_DIR} && pwd)"\n'
f.write(line)
# jar files for the launchers
jars_dir = join(bin_dir, 'fastr_jars')
if not os.path.exists(jars_dir):
os.mkdir(jars_dir)
fastr_classfiles = dict()
# visitor to collect/copy all the classes/jar files needed by the launchers
def dep_visit(dep, edge):
if isinstance(dep, mx.JARDistribution):
shutil.copy(join(dep.suite.dir, dep.path), jars_dir)
elif isinstance(dep, mx.Library):
if not dep.name.lower() == 'jdk_tools':
jar_name = dep.name.lower() + '.jar'
shutil.copyfile(join(dep.suite.dir, dep.path), join(jars_dir, jar_name))
elif isinstance(dep, mx.JavaProject):
if 'com.oracle.truffle.r' in dep.name:
classfiles_dir = dep.output_dir()
for root, _, classfiles in os.walk(classfiles_dir):
for classfile in classfiles:
fastr_classfiles[os.path.relpath(join(root, classfile), classfiles_dir)] = join(root, classfile)
self.subject.walk_deps(visit=dep_visit)
# create the fastr.jar file
with mx.Archiver(join(jars_dir, 'fastr.jar')) as arc:
arc.zf.writestr("META-INF/MANIFEST.MF", "Manifest-Version: 1.0\n")
for arcname, path in fastr_classfiles.iteritems():
with open(path, 'r') as f:
contents = f.read()
arc.zf.writestr(arcname, contents)
# create the classpath string
classpath = []
for _, _, jars in os.walk(jars_dir):
for jar in jars:
classpath.append(join("$R_HOME/bin/fastr_jars", jar))
classpath_string = ":".join(classpath)
# replace the mx exec scripts with native Java launchers, setting the classpath from above
bin_exec_dir = join(bin_dir, 'exec')
r_launcher = join(self.subject.dir, 'src', 'R_launcher')
template_dict = {'CLASSPATH': classpath_string}
self._template(r_launcher, join(bin_exec_dir, 'R'), template_dict)
shutil.rmtree(join(bin_dir, 'execRextras'))
rscript_launcher = join(self.subject.dir, 'src', 'Rscript_launcher')
self._template(rscript_launcher, join(bin_dir, 'Rscript'), template_dict)
class FastRNativeRecommendedProject(mx.NativeProject):
'''
This finesses an ordering problem on installing the recommended R packages.
These must be installed by FastR using bin/R CMD INSTALL. That will invoke a
nested 'mx R' invocation, which requires the FASTR distribution to be available.
However, this dependency cannot be specified in the suite.py file, so we achieve
it here by ensuring that it is built prior to the native.recommended project.
'''
def __init__(self, suite, name, deps, workingSets, theLicense, **args):
mx.NativeProject.__init__(self, suite, name, None, [], deps, workingSets, None, None, join(suite.dir, name), theLicense)
def getBuildTask(self, args):
return NativeRecommendedBuildTask(self, args)
class NativeRecommendedBuildTask(mx.NativeBuildTask):
def __init__(self, project, args):
mx.NativeBuildTask.__init__(self, args, project)
def build(self):
# must archive FASTR before build so that nested mx R CMD INSTALL can execute
mx.archive(['@FASTR'])
mx.NativeBuildTask.build(self)
class FastRArchiveParticipant:
def __init__(self, dist):
self.dist = dist
def __opened__(self, arc, srcArc, services):
# The release project states dependencies on the java projects in order
# to ensure they are built first. Therefore, the JarDistribution code
# will include all their class files at the top-level of the jar by default.
# Since we have already encapsulated the class files in 'fastr_jars/fastr.jar' we
# suppress their inclusion here by resetting the deps field. A bit of a hack.
if self.dist.name == "FASTR_RELEASE":
assert isinstance(self.dist.deps[0], FastRReleaseProject)
self.release_project = self.dist.deps[0]
self.dist.deps[0].deps = []
def __add__(self, arcname, contents):
return False
def __addsrc__(self, arcname, contents):
return False
def __closing__(self):
if self.dist.name == "FASTR_RELEASE" and os.environ.has_key('FASTR_RELEASE'):
# the files copied in can be confused as source files by
# e.g., mx copyright, so delete them, specifically the
# include dir
include_dir = join(self.release_project.dir, 'include')
shutil.rmtree(include_dir)
def mx_post_parse_cmd_line(opts):
for dist in mx_fastr._fastr_suite.dists:
dist.set_archiveparticipant(FastRArchiveParticipant(dist))
|
jjfumero/fastr
|
mx.fastr/mx_fastr_dists.py
|
Python
|
gpl-2.0
| 13,996
|
[
"VisIt"
] |
39fc0231fe6569506cf709d30ca3d4b09364aba29ccd424ba555ca4a8560629c
|
#!/usr/bin/env python3
"""
Copyright 2020 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import dbShared
import cgi
import pymysql
from xml.dom import minidom
import ghShared
#
form = cgi.FieldStorage()
spawnName = form.getfirst('spawn', '')
galaxy = form.getfirst('galaxy', '')
# escape input to prevent sql injection
spawnName = dbShared.dbInsertSafe(spawnName)
galaxy = dbShared.dbInsertSafe(galaxy)
# Main program
print('Content-type: text/xml\n')
doc = minidom.Document()
eRoot = doc.createElement("result")
doc.appendChild(eRoot)
eName = doc.createElement("spawnName")
tName = doc.createTextNode(spawnName)
eName.appendChild(tName)
eRoot.appendChild(eName)
conn = None
cursor = None
try:
    conn = dbShared.ghConn()
    cursor = conn.cursor()
except Exception:
    result = "Error: could not connect to database"
if (cursor):
    # Find another available spawn of the same resource type (flora or creature
    # resources only) in the same galaxy, excluding the spawn being checked.
    cursor.execute('SELECT spawnID, spawnName, resourceType, entered FROM tResources WHERE galaxy=' + galaxy + ' AND spawnName != \'' + spawnName + '\' AND unavailable IS NULL AND resourceType=(SELECT resourceType FROM tResources WHERE spawnName=\'' + spawnName + '\' AND galaxy=' + galaxy + ') AND resourceType IN (SELECT resourceType FROM tResourceTypeGroup WHERE resourceGroup IN (\'flora_resources\',\'creature_resources\') GROUP BY resourceType);')
row = cursor.fetchone()
if (row != None):
spawnID = str(row[0])
eSpawn = doc.createElement("oldSpawnID")
tSpawn = doc.createTextNode(spawnID)
eSpawn.appendChild(tSpawn)
eRoot.appendChild(eSpawn)
eOldName = doc.createElement("oldSpawnName")
tOldName = doc.createTextNode(row[1])
eOldName.appendChild(tOldName)
eRoot.appendChild(eOldName)
eType = doc.createElement("resourceType")
tType = doc.createTextNode(row[2])
eType.appendChild(tType)
eRoot.appendChild(eType)
eAge = doc.createElement("resAge")
tAge = doc.createTextNode(ghShared.timeAgo(row[3]))
eAge.appendChild(tAge)
eRoot.appendChild(eAge)
result = "found"
else:
result = "new"
cursor.close()
conn.close()
else:
result = "Error: could not connect to database"
eText = doc.createElement("resultText")
tText = doc.createTextNode(result)
eText.appendChild(tText)
eRoot.appendChild(eText)
print(doc.toxml())
if (result.find("Error:") > -1):
sys.exit(500)
else:
sys.exit(200)
|
pwillworth/galaxyharvester
|
html/checkOldResource.py
|
Python
|
gpl-3.0
| 2,945
|
[
"Galaxy"
] |
eefaa1e91e3f7dce05e1763f37fcf3292bb1d2481c5ba809db8c0592c1dd53bf
|
# 1-text becomes instructions (as chunks) becomes/generates text
# 2-also as genetic algo.
# so: how to train on wounds...? tag it first
# and what we expect to achieve? by training rather than just recognition of injury phrases
# ballardian noun phrase?
#catting texts such as crash excerpts with paget report, ripper letters and so on
#///
# how to have like chunk + chunk options - relook at chunking
# how to search around key terms in grammars
#////
# desc /injuries // carried out by/sustained in/formed by/caused by-within-during/to
# in accidents involving the --- collisions (between)
#loc: at//at the (junctions) between --- and ---
#deformed by/sectioned by
#////
#merging concordances somehow//with phrasing also
#see concord.py
#///
#get nounphrase chunk ordering from one text and use as model for
#others automatically
#///
# Diana our lady of
# Diana princess of
# Diana queen of
import itertools
import re

import nltk

def reader():
ttt = open('/root/collect2012-3/diana/documents/paget2.txt','r')
# ttt = open('shortpaget','r')
# ttt = open('crash.txt','r')
# ttt = open('crashchap1','r')
# ttt = open('wounds/crashwounds','r')
# uuu=rrr.read()+ttt.read()
return ttt.read()
def plaintaggedtree(text):
sentences = nltk.sent_tokenize(text)
sentences = [nltk.word_tokenize(sent) for sent in sentences]
sentences = [nltk.pos_tag(sent) for sent in sentences]
# sentences = [nltk.ne_chunk(sent) for sent in sentences]
return sentences
def tokenize_text_and_tag_named_entities(text):
tokens = []
for sentence in nltk.sent_tokenize(text):
for chunk in nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(sentence))):
if hasattr(chunk, 'node'):
# print chunk
if chunk.node != 'GPE':
tmp_tree = nltk.Tree(chunk.node, [(' '.join(c[0] for c in chunk.leaves()))])
else:
tmp_tree = nltk.Tree('LOCATION', [(' '.join(c[0] for c in chunk.leaves()))])
tokens.append(tmp_tree)
else:
tokens.append(chunk[0])
return tokens
def extract_people_in_locations():
for rel in nltk.sem.extract_rels('PERSON','LOCATION',doc,corpus='ieer',pattern=IN):
#filler_tokens = dict(nltk.pos_tag(nltk.word_tokenize(rel['filler'])))
print rel
class doc():
pass
doc.headline = ['this is expected by nltk.sem.extract_rels but not used in this script']
# compile expressions to use to identify relations between named entities
#IN = re.compile (r'.*\bin\b')
#TO = re.compile (r'.*\bto\b')
# a list of verb tags for reference
#verbs = ['VB', 'VBG', 'VBD', 'VBN', 'VBP', 'VBZ']
# def storepickle(doc):
# out = open("crash_tagged003.pickle", 'wb')
# pickle.dump(doc.text, out)
# out.close()
# def recallpickle(doc):
# out = open("crash_tagged001.pickle", 'rb')
# doc.text=pickle.load(out)
# out.close()
#ttt=recallpickle("crash_taggedwounds.pickle")
#004 is full crash... paget_tagged2 is full paget
# ? =preceding is optional
# * =repeat previous zero or more times
# + =repeat one or more times
#grammar = "NP: {<DT>?<N.*|JJ.*><IN|N.*|VBN><VBN|IN|N.*><JJ.*|N.*><N.*>}" # from crash wounds text
# {<.*>+} # Chunk everything
# grammar = """
# NP: {<DT>?<N.*|JJ.*>+<IN|N.*|VBN>+<VBN|IN|N.*>?<JJ.*|N.*>?<N.*>}
# }<NNS>+<VBN>+<IN>{ # Chink sequences of VBD and IN
# """
#grammar = "NP: {<DT>?<JJ.*>*<N.*>*<N.*>}"
# grammar = """
# NP: {<JJ.*>?<CC>?<JJ.*>?<INJ><V.*>?<IN>?<V.*|JJ.*|N.*>+<V.*|JJ.*|CC|POS|IN|N.*>+}
# }<INJ><V.*><IN>{
# """
# text=reader()
# ttt=nltk.word_tokenize(text)
# ttt=plaintaggedtree(text)
# patterns = [
# ('injur*|wound*', 'INJ'),
# ('crash*', 'CHR'),
# ('collision*', 'COL')
# ]
# tag1= nltk.data.load(nltk.tag._POS_TAGGER)
# regexp_tagger = nltk.RegexpTagger(patterns,backoff=tag1)
# ttt=[regexp_tagger.tag(sent) for sent in ttt]
# #print ttt
# storepickle(ttt,"pagetsentenced.pickle")
#ttt=recallpickle("pagetsentenced.pickle")+recallpickle("customcrashsentenced.pickle")
#ttt=recallpickle("crash_taggedwounds.pickle")
#004 is full crash... paget_tagged2 is full paget
# ? =preceding is optional
# * =repeat previous zero or more times
# + =repeat one or more times
#grammar = "NP: {<DT>?<N.*|JJ.*><IN|N.*|VBN><VBN|IN|N.*><JJ.*|N.*><N.*>}" # from crash wounds text
# {<.*>+} # Chunk everything
# grammar = """
# NP: {<DT>?<N.*|JJ.*>+<IN|N.*|VBN>+<VBN|IN|N.*>?<JJ.*|N.*>?<N.*>}
# }<NNS>+<VBN>+<IN>{ # Chink sequences of VBD and IN
# """
#grammar = "NP: {<DT>?<JJ.*>*<N.*>*<N.*>}"
# grammar = """
# NP: {<JJ.*>?<CC>?<JJ.*>?<INJ><V.*>?<IN>?<V.*|JJ.*|N.*>+<V.*|JJ.*|CC|POS|IN|N.*>+}
# }<INJ><V.*><IN>{
# """
sentence=""
# for sent in result:
# for subtree in sent.subtrees():
# # if len(subtree) == 5:
# np=""
# if subtree.node == 'IP':
# x=' '.join(nltk.tag.untag(subtree))
# # print x+",",
# for words in sent:
# if isinstance(words[0], tuple):
# sentence+= a(nounphrase)+" "
# else:
# if words[0]=="," or words[0]==".":
# sentence= sentence[:-1] + words[0]+" "
# else:
# sentence+=words[0]+" "
# print sentence[:-1]
# print
# sentence=""
# for i in x:
# np += i + " "
# np += i
# print np
# npp += np[:-1] +" "
# nounphrase.append(np[:-1])
#nounphrase = list(set(nounphrase))
#print nounphrase
#print npp
# g=ttt
# verblist = []
# for i in g:
# if i[1] == "VB":
# verblist.append(i[0])
# verblist = list(set(verblist))
# noun = []
# nounlist = []
# ing = []
# lowernoun = []
# for i in g:
# if i[1] == "NN":
# if i[0].endswith("ing"):
# ing.append(i[0])
# if i[1] == "NN":
# if i[0][0].islower():
# lowernoun.append(i[0])
# if i[1] == "VBG":
# if i[0].endswith("ing"):
# ing.append(i[0])
# else:
# for x in g:
# #print x
# if (x[0].isupper()):
# #print i[0]
# nounlist.append(x[0])
# nounlist = list(set(nounlist))
# nounlist = list(set(lowernoun))
# #print nounlist
# #print ing
# #print noun
# #foo=[["Visit"], ["our"], ["new"], ["and"], ["improved"], ["Career"], ["Tools"]]
# #for x in ing:
# # print x
# # z = nsyl(x)
# # if z[0] == 3:
# # print x
# determiner = []
# for i in g:
# if i[1] == "DT":
# determiner.append(i[0])
# determiner = list(set(determiner))
# #print determiner
# posperspronoun = []
# for i in g:
# if i[1] == "PRP$":
# posperspronoun.append(i[0])
# posperspronoun = list(set(posperspronoun))
# propnoun = []
# for i in g:
# if i[1] == "NNP":
# if i[0].istitle():
# propnoun.append(i[0])
# propnoun = list(set(propnoun))
# #print propnoun
# propnounpl = []
# for i in g:
# if i[1] == "NNPS":
# propnounpl.append(i[0])
# propnounpl = list(set(propnounpl))
# #print propnounpl
# possesive = []
# for i in g:
# if i[1] == "POS":
# possesive.append(i[0])
# possesive = list(set(possesive))
# adjective = []
# for i in g:
# if i[1] == "JJ":
# adjective.append(i[0])
# adjective = list(set(adjective))
# compadjective = []
# for i in g:
# if i[1] == "JJR":
# compadjective.append(i[0])
# compadjective = list(set(compadjective))
# keywords = ["self","selves","us","them","me","him","her"]
# perspronoun = []
# ppronoun = []
# for i in g:
# if i[1] == "PRP":
# ppronoun.append(i[0])
# for k in keywords:
# if k in i[0]:
# perspronoun.append(i[0])
# ppronoun = list(set(ppronoun))
# perspronoun = list(set(perspronoun))
# for bla in perspronoun:
# ppronoun.remove(bla)
# verb = []
# for i in g:
# if i[1] == "VB":
# verb.append(i[0])
# verb = list(set(verb))
# vbz = []
# for i in g:
# if i[1] == "VBZ":
# vbz.append(i[0])
# vbz = list(set(vbz))
# adverb = []
# adverbly = []
# for i in g:
# if i[1] == "RB":
# if i[0].endswith("ly"):
# adverbly.append(i[0])
# else:
# adverb.append(i[0])
# adverb = list(set(adverb))
# adverbly = list(set(adverbly))
# #print adverbly
# #print adverb
# ppverb = []
# for i in g:
# if i[1] == "VBN":
# ppverb.append(i[0])
# ppverb = list(set(ppverb))
# modalverb = []
# for i in g:
# if i[1] == "MD":
# modalverb.append(i[0])
# modalverb = list(set(modalverb))
# howwherewhy = []
# for i in g:
# if i[1] == "WRB":
# howwherewhy.append(i[0])
# howwherewhy = list(set(howwherewhy))
# cconj = []
# for i in g:
# if i[1] == "CC":
# cconj.append(i[0])
# cconj = list(set(cconj))
# c = [ppronoun, verblist, posperspronoun, nounlist, perspronoun, pastverb, cconj, perspronoun, pastverb, ppverb, adverb, howwherewhy,ing,propnoun]
# v = [verb,pastverb,ing,adverb,ppverb,vbz,adverbly]
# adj = [adjective,compadjective]
# noun = [propnoun,possesive,nounlist,lowernoun]
# dt = [determiner]
# components = [ppronoun, pastverb]
# def recitea():
# # x = a(adj[0]) + " " + a(noun[0]) + " " + a(v[0]) + " " + a(adj[1]) + " " + a(v[3]) + " " + a(noun[1])
# # x = a(dt[0]) + " " + a(adj[0]) + " "+ a(noun[3]) + " - "+ a(v[2]) + " " + a(v[2]) + " - " +a(noun[2]) + " and "+ a(noun[2]) + ", " + a(dt[0]) + " " + a(adj[0]) + " "+ a(noun[3]) + " "+ a(v[5]) + ": " + a(v[2]) + " "+ a(v[2]) + " "+ a(v[2]) + " - " #+ a(v[6]) + " "
# x = "spray and actress" + ", " + a(dt[0]) + " " + a(adj[0]) + " "+ a(noun[3]) + " "+ a(v[5]) + ": " + "morning dying dying"
# # x = a(adj[0])
# # x = a(adj[0]) + " " + a(noun[0]) + " " + a(adj[0]) + " " + a(noun[0]) + " "
# # x = a(c[13]) + " " + a(v[0]) + " " + a(v[0]) +" " + a(c[0]) + " " + a(c[5]) + " "+ a(c[13]) + " "+ a(v[2]) + " - " + a(v[2]) +" - "+ a(v[2]) + " " + a(adj[0]) + " " + a(noun[0]) + "! " + a(adj[0]) + " " + a(noun[0]) + "! "
# # y = a(c[5]) + ", and " + a(c[5]) + ", " + a(c[0]) + " " + a(c[5]) + " and " + a(c[5]) + " a " + a(c[3]) + " of " + a(c[3]) + ". "
# # x = x.capitalize()
# #y = y.lower().capitalize()
# #z = z.capitalize()
# print x
#for x in range(2000):
# print a(("Diana our lady of","Diana princess of", "Diana queen of")), a(nounphrase)+"."
# Diana our lady of
# Diana princess of
# Diana queen of
#print nounphrasei
#print "Diana",
#print "INJURIES"
#for phrase in nounphrasei:
# print phrase
# #def machine():
# # how to run text as a set of instructions which generates another?
def permute(inputlist):
x= [' '.join(s) for s in itertools.product(*inputlist)]
for sent in x:
# print sent[:-1]+"."
print sent+"."
#x = ["Diana"],pastverb,nounphrasei
#permute(x)
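# e.g. (illustrative word lists) permute([["the"], ["wounded", "chromium"], ["body"]]) prints:
#   the wounded body.
#   the chromium body.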
|
microresearch/diana
|
discard.py
|
Python
|
gpl-2.0
| 10,970
|
[
"VisIt"
] |
2ec91e063e567dff0a5b578dd00b875b88d11e293d683e5490f2beed84d89ce9
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import netaddr
import testtools
from neutron.agent.common import utils # noqa
from neutron.agent.linux import ip_lib
from neutron.common import exceptions
from neutron.tests import base
NETNS_SAMPLE = [
'12345678-1234-5678-abcd-1234567890ab',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'cccccccc-cccc-cccc-cccc-cccccccccccc']
LINK_SAMPLE = [
'1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN \\'
'link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 promiscuity 0',
'2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP '
'qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff'
'\ alias openvswitch',
'3: br-int: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN '
'\ link/ether aa:bb:cc:dd:ee:ff brd ff:ff:ff:ff:ff:ff promiscuity 0',
'4: gw-ddc717df-49: <BROADCAST,MULTICAST> mtu 1500 qdisc noop '
'state DOWN \ link/ether fe:dc:ba:fe:dc:ba brd ff:ff:ff:ff:ff:ff '
'promiscuity 0',
'5: foo:foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state '
'UP qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff '
'promiscuity 0',
'6: foo@foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state '
'UP qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff '
'promiscuity 0',
'7: foo:foo@foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0',
'8: foo@foo:foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0',
'9: bar.9@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc '
' noqueue master brq0b24798c-07 state UP mode DEFAULT'
'\ link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1q id 9 <REORDER_HDR>',
'10: bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc '
' noqueue master brq0b24798c-07 state UP mode DEFAULT'
'\ link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1Q id 10 <REORDER_HDR>',
'11: bar:bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan id 11 <REORDER_HDR>',
'12: bar@bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan id 12 <REORDER_HDR>',
'13: bar:bar@bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 '
'qdisc mq state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1q id 13 <REORDER_HDR>',
'14: bar@bar:bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 '
'qdisc mq state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1Q id 14 <REORDER_HDR>']
ADDR_SAMPLE = ("""
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
inet 172.16.77.240/24 brd 172.16.77.255 scope global eth0
inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::3023:39ff:febc:22ae/64 scope link tentative
valid_lft forever preferred_lft forever
inet6 fe80::3023:39ff:febc:22af/64 scope link tentative dadfailed
valid_lft forever preferred_lft forever
inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
valid_lft forever preferred_lft forever
""")
ADDR_SAMPLE2 = ("""
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
inet 172.16.77.240/24 scope global eth0
inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::3023:39ff:febc:22ae/64 scope link tentative
valid_lft forever preferred_lft forever
inet6 fe80::3023:39ff:febc:22af/64 scope link tentative dadfailed
valid_lft forever preferred_lft forever
inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
valid_lft forever preferred_lft forever
""")
GATEWAY_SAMPLE1 = ("""
default via 10.35.19.254 metric 100
10.35.16.0/22 proto kernel scope link src 10.35.17.97
""")
GATEWAY_SAMPLE2 = ("""
default via 10.35.19.254 metric 100
""")
GATEWAY_SAMPLE3 = ("""
10.35.16.0/22 proto kernel scope link src 10.35.17.97
""")
GATEWAY_SAMPLE4 = ("""
default via 10.35.19.254
""")
GATEWAY_SAMPLE5 = ("""
default via 192.168.99.1 proto static
""")
GATEWAY_SAMPLE6 = ("""
default via 192.168.99.1 proto static metric 100
""")
IPv6_GATEWAY_SAMPLE1 = ("""
default via 2001:470:9:1224:4508:b885:5fb:740b metric 100
2001:db8::/64 proto kernel scope link src 2001:470:9:1224:dfcc:aaff:feb9:76ce
""")
IPv6_GATEWAY_SAMPLE2 = ("""
default via 2001:470:9:1224:4508:b885:5fb:740b metric 100
""")
IPv6_GATEWAY_SAMPLE3 = ("""
2001:db8::/64 proto kernel scope link src 2001:470:9:1224:dfcc:aaff:feb9:76ce
""")
IPv6_GATEWAY_SAMPLE4 = ("""
default via fe80::dfcc:aaff:feb9:76ce
""")
IPv6_GATEWAY_SAMPLE5 = ("""
default via 2001:470:9:1224:4508:b885:5fb:740b metric 1024
""")
DEVICE_ROUTE_SAMPLE = ("10.0.0.0/24 scope link src 10.0.0.2")
SUBNET_SAMPLE1 = ("10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1\n"
"10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2")
SUBNET_SAMPLE2 = ("10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2\n"
"10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1")
RULE_V4_SAMPLE = ("""
0: from all lookup local
32766: from all lookup main
32767: from all lookup default
101: from 192.168.45.100 lookup 2
""")
RULE_V6_SAMPLE = ("""
0: from all lookup local
32766: from all lookup main
32767: from all lookup default
201: from 2001:db8::1 lookup 3
""")
class TestSubProcessBase(base.BaseTestCase):
def setUp(self):
super(TestSubProcessBase, self).setUp()
self.execute_p = mock.patch('neutron.agent.common.utils.execute')
self.execute = self.execute_p.start()
def test_execute_wrapper(self):
ip_lib.SubProcessBase._execute(['o'], 'link', ('list',),
run_as_root=True)
self.execute.assert_called_once_with(['ip', '-o', 'link', 'list'],
run_as_root=True,
log_fail_as_error=True)
def test_execute_wrapper_int_options(self):
ip_lib.SubProcessBase._execute([4], 'link', ('list',))
self.execute.assert_called_once_with(['ip', '-4', 'link', 'list'],
run_as_root=False,
log_fail_as_error=True)
def test_execute_wrapper_no_options(self):
ip_lib.SubProcessBase._execute([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'link', 'list'],
run_as_root=False,
log_fail_as_error=True)
def test_run_no_namespace(self):
base = ip_lib.SubProcessBase()
base._run([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'link', 'list'],
run_as_root=False,
log_fail_as_error=True)
def test_run_namespace(self):
base = ip_lib.SubProcessBase(namespace='ns')
base._run([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns',
'ip', 'link', 'list'],
run_as_root=True,
log_fail_as_error=True)
def test_as_root_namespace(self):
base = ip_lib.SubProcessBase(namespace='ns')
base._as_root([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns',
'ip', 'link', 'list'],
run_as_root=True,
log_fail_as_error=True)
class TestIpWrapper(base.BaseTestCase):
def setUp(self):
super(TestIpWrapper, self).setUp()
self.execute_p = mock.patch.object(ip_lib.IPWrapper, '_execute')
self.execute = self.execute_p.start()
@mock.patch('os.path.islink')
@mock.patch('os.listdir', return_value=['lo'])
def test_get_devices(self, mocked_listdir, mocked_islink):
retval = ip_lib.IPWrapper().get_devices()
mocked_islink.assert_called_once_with('/sys/class/net/lo')
self.assertEqual(retval, [ip_lib.IPDevice('lo')])
@mock.patch('neutron.agent.common.utils.execute')
def test_get_devices_namespaces(self, mocked_execute):
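        # Stand-in for the string returned by 'find' inside the namespace;
        # only its split() result matters to get_devices().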
fake_str = mock.Mock()
fake_str.split.return_value = ['lo']
mocked_execute.return_value = fake_str
retval = ip_lib.IPWrapper(namespace='foo').get_devices()
mocked_execute.assert_called_once_with(
['ip', 'netns', 'exec', 'foo', 'find', '/sys/class/net',
'-maxdepth', '1', '-type', 'l', '-printf', '%f '],
run_as_root=True, log_fail_as_error=True)
self.assertTrue(fake_str.split.called)
self.assertEqual(retval, [ip_lib.IPDevice('lo', namespace='foo')])
def test_get_namespaces(self):
self.execute.return_value = '\n'.join(NETNS_SAMPLE)
retval = ip_lib.IPWrapper.get_namespaces()
self.assertEqual(retval,
['12345678-1234-5678-abcd-1234567890ab',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'cccccccc-cccc-cccc-cccc-cccccccccccc'])
self.execute.assert_called_once_with([], 'netns', ('list',))
def test_add_tuntap(self):
ip_lib.IPWrapper().add_tuntap('tap0')
self.execute.assert_called_once_with([], 'tuntap',
('add', 'tap0', 'mode', 'tap'),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_add_veth(self):
ip_lib.IPWrapper().add_veth('tap0', 'tap1')
self.execute.assert_called_once_with([], 'link',
('add', 'tap0', 'type', 'veth',
'peer', 'name', 'tap1'),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_del_veth(self):
ip_lib.IPWrapper().del_veth('fpr-1234')
self.execute.assert_called_once_with([], 'link',
('del', 'fpr-1234'),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_add_veth_with_namespaces(self):
ns2 = 'ns2'
with mock.patch.object(ip_lib.IPWrapper, 'ensure_namespace') as en:
ip_lib.IPWrapper().add_veth('tap0', 'tap1', namespace2=ns2)
en.assert_has_calls([mock.call(ns2)])
self.execute.assert_called_once_with([], 'link',
('add', 'tap0', 'type', 'veth',
'peer', 'name', 'tap1',
'netns', ns2),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_get_device(self):
dev = ip_lib.IPWrapper(namespace='ns').device('eth0')
self.assertEqual(dev.namespace, 'ns')
self.assertEqual(dev.name, 'eth0')
def test_ensure_namespace(self):
with mock.patch.object(ip_lib, 'IPDevice') as ip_dev:
ip = ip_lib.IPWrapper()
with mock.patch.object(ip.netns, 'exists') as ns_exists:
with mock.patch('neutron.agent.common.utils.execute'):
ns_exists.return_value = False
ip.ensure_namespace('ns')
self.execute.assert_has_calls(
[mock.call([], 'netns', ('add', 'ns'),
run_as_root=True, namespace=None,
log_fail_as_error=True)])
ip_dev.assert_has_calls([mock.call('lo', namespace='ns'),
mock.call().link.set_up()])
def test_ensure_namespace_existing(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd:
ip_ns_cmd.exists.return_value = True
ns = ip_lib.IPWrapper().ensure_namespace('ns')
self.assertFalse(self.execute.called)
self.assertEqual(ns.namespace, 'ns')
def test_namespace_is_empty_no_devices(self):
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'get_devices') as get_devices:
get_devices.return_value = []
self.assertTrue(ip.namespace_is_empty())
get_devices.assert_called_once_with(exclude_loopback=True)
def test_namespace_is_empty(self):
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'get_devices') as get_devices:
get_devices.return_value = [mock.Mock()]
self.assertFalse(ip.namespace_is_empty())
get_devices.assert_called_once_with(exclude_loopback=True)
def test_garbage_collect_namespace_does_not_exist(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = False
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
self.assertFalse(ip.garbage_collect_namespace())
ip_ns_cmd_cls.assert_has_calls([mock.call().exists('ns')])
self.assertNotIn(mock.call().delete('ns'),
ip_ns_cmd_cls.return_value.mock_calls)
self.assertEqual(mock_is_empty.mock_calls, [])
def test_garbage_collect_namespace_existing_empty_ns(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = True
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
mock_is_empty.return_value = True
self.assertTrue(ip.garbage_collect_namespace())
mock_is_empty.assert_called_once_with()
expected = [mock.call().exists('ns'),
mock.call().delete('ns')]
ip_ns_cmd_cls.assert_has_calls(expected)
def test_garbage_collect_namespace_existing_not_empty(self):
lo_device = mock.Mock()
lo_device.name = 'lo'
tap_device = mock.Mock()
tap_device.name = 'tap1'
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = True
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
mock_is_empty.return_value = False
self.assertFalse(ip.garbage_collect_namespace())
mock_is_empty.assert_called_once_with()
expected = [mock.call(ip),
mock.call().exists('ns')]
self.assertEqual(ip_ns_cmd_cls.mock_calls, expected)
self.assertNotIn(mock.call().delete('ns'),
ip_ns_cmd_cls.mock_calls)
def test_add_vxlan_valid_port_length(self):
retval = ip_lib.IPWrapper().add_vxlan('vxlan0', 'vni0',
group='group0',
dev='dev0', ttl='ttl0',
tos='tos0',
local='local0', proxy=True,
port=('1', '2'))
self.assertIsInstance(retval, ip_lib.IPDevice)
self.assertEqual(retval.name, 'vxlan0')
self.execute.assert_called_once_with([], 'link',
['add', 'vxlan0', 'type',
'vxlan', 'id', 'vni0', 'group',
'group0', 'dev', 'dev0',
'ttl', 'ttl0', 'tos', 'tos0',
'local', 'local0', 'proxy',
'port', '1', '2'],
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_add_vxlan_invalid_port_length(self):
wrapper = ip_lib.IPWrapper()
self.assertRaises(exceptions.NetworkVxlanPortRangeError,
wrapper.add_vxlan, 'vxlan0', 'vni0', group='group0',
dev='dev0', ttl='ttl0', tos='tos0',
local='local0', proxy=True,
port=('1', '2', '3'))
def test_add_device_to_namespace(self):
dev = mock.Mock()
ip_lib.IPWrapper(namespace='ns').add_device_to_namespace(dev)
dev.assert_has_calls([mock.call.link.set_netns('ns')])
def test_add_device_to_namespace_is_none(self):
dev = mock.Mock()
ip_lib.IPWrapper().add_device_to_namespace(dev)
self.assertEqual(dev.mock_calls, [])
class TestIPDevice(base.BaseTestCase):
def test_eq_same_name(self):
dev1 = ip_lib.IPDevice('tap0')
dev2 = ip_lib.IPDevice('tap0')
self.assertEqual(dev1, dev2)
def test_eq_diff_name(self):
dev1 = ip_lib.IPDevice('tap0')
dev2 = ip_lib.IPDevice('tap1')
self.assertNotEqual(dev1, dev2)
def test_eq_same_namespace(self):
dev1 = ip_lib.IPDevice('tap0', 'ns1')
dev2 = ip_lib.IPDevice('tap0', 'ns1')
self.assertEqual(dev1, dev2)
def test_eq_diff_namespace(self):
dev1 = ip_lib.IPDevice('tap0', namespace='ns1')
dev2 = ip_lib.IPDevice('tap0', namespace='ns2')
self.assertNotEqual(dev1, dev2)
def test_eq_other_is_none(self):
dev1 = ip_lib.IPDevice('tap0', namespace='ns1')
self.assertIsNotNone(dev1)
def test_str(self):
self.assertEqual(str(ip_lib.IPDevice('tap0')), 'tap0')
class TestIPCommandBase(base.BaseTestCase):
def setUp(self):
super(TestIPCommandBase, self).setUp()
self.ip = mock.Mock()
self.ip.namespace = 'namespace'
self.ip_cmd = ip_lib.IpCommandBase(self.ip)
self.ip_cmd.COMMAND = 'foo'
def test_run(self):
self.ip_cmd._run([], ('link', 'show'))
self.ip.assert_has_calls([mock.call._run([], 'foo', ('link', 'show'))])
def test_run_with_options(self):
self.ip_cmd._run(['o'], ('link'))
self.ip.assert_has_calls([mock.call._run(['o'], 'foo', ('link'))])
def test_as_root_namespace_false(self):
self.ip_cmd._as_root([], ('link'))
self.ip.assert_has_calls(
[mock.call._as_root([],
'foo',
('link'),
use_root_namespace=False)])
def test_as_root_namespace_true(self):
self.ip_cmd._as_root([], ('link'), use_root_namespace=True)
self.ip.assert_has_calls(
[mock.call._as_root([],
'foo',
('link'),
use_root_namespace=True)])
def test_as_root_namespace_true_with_options(self):
self.ip_cmd._as_root('o', 'link', use_root_namespace=True)
self.ip.assert_has_calls(
[mock.call._as_root('o',
'foo',
('link'),
use_root_namespace=True)])
class TestIPDeviceCommandBase(base.BaseTestCase):
def setUp(self):
super(TestIPDeviceCommandBase, self).setUp()
self.ip_dev = mock.Mock()
self.ip_dev.name = 'eth0'
self.ip_dev._execute = mock.Mock(return_value='executed')
self.ip_cmd = ip_lib.IpDeviceCommandBase(self.ip_dev)
self.ip_cmd.COMMAND = 'foo'
def test_name_property(self):
self.assertEqual(self.ip_cmd.name, 'eth0')
class TestIPCmdBase(base.BaseTestCase):
def setUp(self):
super(TestIPCmdBase, self).setUp()
self.parent = mock.Mock()
self.parent.name = 'eth0'
def _assert_call(self, options, args):
self.parent.assert_has_calls([
mock.call._run(options, self.command, args)])
def _assert_sudo(self, options, args, use_root_namespace=False):
self.parent.assert_has_calls(
[mock.call._as_root(options, self.command, args,
use_root_namespace=use_root_namespace)])
class TestIpRuleCommand(TestIPCmdBase):
def setUp(self):
super(TestIpRuleCommand, self).setUp()
self.parent._as_root.return_value = ''
self.command = 'rule'
self.rule_cmd = ip_lib.IpRuleCommand(self.parent)
def _test_add_rule(self, ip, table, priority):
ip_version = netaddr.IPNetwork(ip).version
self.rule_cmd.add(ip, table, priority)
self._assert_sudo([ip_version], (['show']))
self._assert_sudo([ip_version], ('add', 'from', ip,
'table', table, 'priority', priority))
def _test_add_rule_exists(self, ip, table, priority, output):
self.parent._as_root.return_value = output
ip_version = netaddr.IPNetwork(ip).version
self.rule_cmd.add(ip, table, priority)
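        # The rule is already present in the mocked 'show' output, so add()
        # should only run the existence check and never issue an 'add'.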
self._assert_sudo([ip_version], (['show']))
def _test_delete_rule(self, ip, table, priority):
ip_version = netaddr.IPNetwork(ip).version
self.rule_cmd.delete(ip, table, priority)
self._assert_sudo([ip_version],
('del', 'table', table,
'priority', priority))
def test_add_rule_v4(self):
self._test_add_rule('192.168.45.100', 2, 100)
def test_add_rule_v4_exists(self):
self._test_add_rule_exists('192.168.45.100', 2, 101, RULE_V4_SAMPLE)
def test_add_rule_v6(self):
self._test_add_rule('2001:db8::1', 3, 200)
def test_add_rule_v6_exists(self):
self._test_add_rule_exists('2001:db8::1', 3, 201, RULE_V6_SAMPLE)
def test_delete_rule_v4(self):
self._test_delete_rule('192.168.45.100', 2, 100)
def test_delete_rule_v6(self):
self._test_delete_rule('2001:db8::1', 3, 200)
class TestIpLinkCommand(TestIPCmdBase):
def setUp(self):
super(TestIpLinkCommand, self).setUp()
self.parent._run.return_value = LINK_SAMPLE[1]
self.command = 'link'
self.link_cmd = ip_lib.IpLinkCommand(self.parent)
def test_set_address(self):
self.link_cmd.set_address('aa:bb:cc:dd:ee:ff')
self._assert_sudo([], ('set', 'eth0', 'address', 'aa:bb:cc:dd:ee:ff'))
def test_set_mtu(self):
self.link_cmd.set_mtu(1500)
self._assert_sudo([], ('set', 'eth0', 'mtu', 1500))
def test_set_up(self):
self.link_cmd.set_up()
self._assert_sudo([], ('set', 'eth0', 'up'))
def test_set_down(self):
self.link_cmd.set_down()
self._assert_sudo([], ('set', 'eth0', 'down'))
def test_set_netns(self):
self.link_cmd.set_netns('foo')
self._assert_sudo([], ('set', 'eth0', 'netns', 'foo'))
self.assertEqual(self.parent.namespace, 'foo')
def test_set_name(self):
self.link_cmd.set_name('tap1')
self._assert_sudo([], ('set', 'eth0', 'name', 'tap1'))
self.assertEqual(self.parent.name, 'tap1')
def test_set_alias(self):
self.link_cmd.set_alias('openvswitch')
self._assert_sudo([], ('set', 'eth0', 'alias', 'openvswitch'))
def test_delete(self):
self.link_cmd.delete()
self._assert_sudo([], ('delete', 'eth0'))
def test_address_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.address, 'cc:dd:ee:ff:ab:cd')
def test_mtu_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.mtu, 1500)
def test_qdisc_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.qdisc, 'mq')
def test_qlen_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.qlen, 1000)
def test_alias_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.alias, 'openvswitch')
def test_state_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.state, 'UP')
def test_settings_property(self):
expected = {'mtu': 1500,
'qlen': 1000,
'state': 'UP',
'qdisc': 'mq',
'brd': 'ff:ff:ff:ff:ff:ff',
'link/ether': 'cc:dd:ee:ff:ab:cd',
'alias': 'openvswitch'}
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.attributes, expected)
self._assert_call(['o'], ('show', 'eth0'))
class TestIpAddrCommand(TestIPCmdBase):
def setUp(self):
super(TestIpAddrCommand, self).setUp()
self.parent.name = 'tap0'
self.command = 'addr'
self.addr_cmd = ip_lib.IpAddrCommand(self.parent)
def test_add_address(self):
self.addr_cmd.add('192.168.45.100/24')
self._assert_sudo([4],
('add', '192.168.45.100/24',
'scope', 'global',
'dev', 'tap0',
'brd', '192.168.45.255'))
def test_add_address_scoped(self):
self.addr_cmd.add('192.168.45.100/24', scope='link')
self._assert_sudo([4],
('add', '192.168.45.100/24',
'scope', 'link',
'dev', 'tap0',
'brd', '192.168.45.255'))
def test_del_address(self):
self.addr_cmd.delete('192.168.45.100/24')
self._assert_sudo([4],
('del', '192.168.45.100/24', 'dev', 'tap0'))
def test_flush(self):
self.addr_cmd.flush(6)
self._assert_sudo([6], ('flush', 'tap0'))
def test_list(self):
expected = [
dict(scope='global', dadfailed=False, tentative=False,
dynamic=False, cidr='172.16.77.240/24'),
dict(scope='global', dadfailed=False, tentative=False,
dynamic=True, cidr='2001:470:9:1224:5595:dd51:6ba2:e788/64'),
dict(scope='link', dadfailed=False, tentative=True,
dynamic=False, cidr='fe80::3023:39ff:febc:22ae/64'),
dict(scope='link', dadfailed=True, tentative=True,
dynamic=False, cidr='fe80::3023:39ff:febc:22af/64'),
dict(scope='global', dadfailed=False, tentative=False,
dynamic=True, cidr='2001:470:9:1224:fd91:272:581e:3a32/64'),
dict(scope='global', dadfailed=False, tentative=False,
dynamic=True, cidr='2001:470:9:1224:4508:b885:5fb:740b/64'),
dict(scope='global', dadfailed=False, tentative=False,
dynamic=True, cidr='2001:470:9:1224:dfcc:aaff:feb9:76ce/64'),
dict(scope='link', dadfailed=False, tentative=False,
dynamic=False, cidr='fe80::dfcc:aaff:feb9:76ce/64')]
test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]
for test_case in test_cases:
self.parent._run = mock.Mock(return_value=test_case)
self.assertEqual(expected, self.addr_cmd.list())
self._assert_call([], ('show', 'tap0'))
def test_wait_until_address_ready(self):
self.parent._run.return_value = ADDR_SAMPLE
# this address is not tentative or failed so it should return
self.assertIsNone(self.addr_cmd.wait_until_address_ready(
'2001:470:9:1224:fd91:272:581e:3a32'))
def test_wait_until_address_ready_non_existent_address(self):
self.addr_cmd.list = mock.Mock(return_value=[])
with testtools.ExpectedException(ip_lib.AddressNotReady):
self.addr_cmd.wait_until_address_ready('abcd::1234')
def test_wait_until_address_ready_timeout(self):
tentative_address = 'fe80::3023:39ff:febc:22ae'
self.addr_cmd.list = mock.Mock(return_value=[
dict(scope='link', dadfailed=False, tentative=True, dynamic=False,
cidr=tentative_address + '/64')])
with testtools.ExpectedException(ip_lib.AddressNotReady):
self.addr_cmd.wait_until_address_ready(tentative_address,
wait_time=1)
def test_list_filtered(self):
expected = [
dict(scope='global', tentative=False, dadfailed=False,
dynamic=False, cidr='172.16.77.240/24')]
test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]
for test_case in test_cases:
output = '\n'.join(test_case.split('\n')[0:4])
self.parent._run.return_value = output
self.assertEqual(self.addr_cmd.list('global',
filters=['permanent']), expected)
self._assert_call([], ('show', 'tap0', 'permanent', 'scope',
'global'))
class TestIpRouteCommand(TestIPCmdBase):
def setUp(self):
super(TestIpRouteCommand, self).setUp()
self.parent.name = 'eth0'
self.command = 'route'
self.route_cmd = ip_lib.IpRouteCommand(self.parent)
self.ip_version = 4
self.table = 14
self.metric = 100
self.cidr = '192.168.45.100/24'
self.ip = '10.0.0.1'
self.gateway = '192.168.45.100'
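        # Each case pairs raw 'ip route' output with the dict that
        # get_gateway() is expected to parse from it.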
self.test_cases = [{'sample': GATEWAY_SAMPLE1,
'expected': {'gateway': '10.35.19.254',
'metric': 100}},
{'sample': GATEWAY_SAMPLE2,
'expected': {'gateway': '10.35.19.254',
'metric': 100}},
{'sample': GATEWAY_SAMPLE3,
'expected': None},
{'sample': GATEWAY_SAMPLE4,
'expected': {'gateway': '10.35.19.254'}},
{'sample': GATEWAY_SAMPLE5,
'expected': {'gateway': '192.168.99.1'}},
{'sample': GATEWAY_SAMPLE6,
'expected': {'gateway': '192.168.99.1',
'metric': 100}}]
def test_add_gateway(self):
self.route_cmd.add_gateway(self.gateway, self.metric, self.table)
self._assert_sudo([self.ip_version],
('replace', 'default',
'via', self.gateway,
'metric', self.metric,
'dev', self.parent.name,
'table', self.table))
def test_del_gateway(self):
self.route_cmd.delete_gateway(self.gateway, table=self.table)
self._assert_sudo([self.ip_version],
('del', 'default',
'via', self.gateway,
'dev', self.parent.name,
'table', self.table))
def test_get_gateway(self):
for test_case in self.test_cases:
self.parent._run = mock.Mock(return_value=test_case['sample'])
self.assertEqual(self.route_cmd.get_gateway(),
test_case['expected'])
def test_pullup_route(self):
        # NOTE(brian-haley) Currently we do not have any IPv6-specific use case
# for pullup_route, hence skipping. Revisit, if required, in future.
if self.ip_version == 6:
return
# interface is not the first in the list - requires
# deleting and creating existing entries
output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE1]
def pullup_side_effect(self, *args):
result = output.pop(0)
return result
self.parent._run = mock.Mock(side_effect=pullup_side_effect)
self.route_cmd.pullup_route('tap1d7888a7-10', ip_version=4)
self._assert_sudo([4], ('del', '10.0.0.0/24', 'dev', 'qr-23380d11-d2'))
self._assert_sudo([4], ('append', '10.0.0.0/24', 'proto', 'kernel',
'src', '10.0.0.1', 'dev', 'qr-23380d11-d2'))
def test_pullup_route_first(self):
        # NOTE(brian-haley) Currently we do not have any IPv6-specific use case
# for pullup_route, hence skipping. Revisit, if required, in future.
if self.ip_version == 6:
return
# interface is first in the list - no changes
output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE2]
def pullup_side_effect(self, *args):
result = output.pop(0)
return result
self.parent._run = mock.Mock(side_effect=pullup_side_effect)
self.route_cmd.pullup_route('tap1d7888a7-10', ip_version=4)
# Check two calls - device get and subnet get
self.assertEqual(len(self.parent._run.mock_calls), 2)
def test_add_route(self):
self.route_cmd.add_route(self.cidr, self.ip, self.table)
self._assert_sudo([self.ip_version],
('replace', self.cidr,
'via', self.ip,
'dev', self.parent.name,
'table', self.table))
def test_delete_route(self):
self.route_cmd.delete_route(self.cidr, self.ip, self.table)
self._assert_sudo([self.ip_version],
('del', self.cidr,
'via', self.ip,
'dev', self.parent.name,
'table', self.table))
class TestIPv6IpRouteCommand(TestIpRouteCommand):
def setUp(self):
super(TestIPv6IpRouteCommand, self).setUp()
self.ip_version = 6
self.cidr = '2001:db8::/64'
self.ip = '2001:db8::100'
self.gateway = '2001:db8::1'
self.test_cases = [{'sample': IPv6_GATEWAY_SAMPLE1,
'expected':
{'gateway': '2001:470:9:1224:4508:b885:5fb:740b',
'metric': 100}},
{'sample': IPv6_GATEWAY_SAMPLE2,
'expected':
{'gateway': '2001:470:9:1224:4508:b885:5fb:740b',
'metric': 100}},
{'sample': IPv6_GATEWAY_SAMPLE3,
'expected': None},
{'sample': IPv6_GATEWAY_SAMPLE4,
'expected':
{'gateway': 'fe80::dfcc:aaff:feb9:76ce'}},
{'sample': IPv6_GATEWAY_SAMPLE5,
'expected':
{'gateway': '2001:470:9:1224:4508:b885:5fb:740b',
'metric': 1024}}]
class TestIpNetnsCommand(TestIPCmdBase):
def setUp(self):
super(TestIpNetnsCommand, self).setUp()
self.command = 'netns'
self.netns_cmd = ip_lib.IpNetnsCommand(self.parent)
def test_add_namespace(self):
with mock.patch('neutron.agent.common.utils.execute') as execute:
ns = self.netns_cmd.add('ns')
self._assert_sudo([], ('add', 'ns'), use_root_namespace=True)
self.assertEqual(ns.namespace, 'ns')
execute.assert_called_once_with(
['ip', 'netns', 'exec', 'ns',
'sysctl', '-w', 'net.ipv4.conf.all.promote_secondaries=1'],
run_as_root=True, check_exit_code=True, extra_ok_codes=None)
def test_delete_namespace(self):
with mock.patch('neutron.agent.common.utils.execute'):
self.netns_cmd.delete('ns')
self._assert_sudo([], ('delete', 'ns'), use_root_namespace=True)
def test_namespace_exists_use_helper(self):
self.config(group='AGENT', use_helper_for_ns_read=True)
retval = '\n'.join(NETNS_SAMPLE)
# need another instance to avoid mocking
netns_cmd = ip_lib.IpNetnsCommand(ip_lib.SubProcessBase())
with mock.patch('neutron.agent.common.utils.execute') as execute:
execute.return_value = retval
self.assertTrue(
netns_cmd.exists('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'))
execute.assert_called_once_with(['ip', '-o', 'netns', 'list'],
run_as_root=True,
log_fail_as_error=True)
    def test_namespace_does_not_exist_no_helper(self):
self.config(group='AGENT', use_helper_for_ns_read=False)
retval = '\n'.join(NETNS_SAMPLE)
# need another instance to avoid mocking
netns_cmd = ip_lib.IpNetnsCommand(ip_lib.SubProcessBase())
with mock.patch('neutron.agent.common.utils.execute') as execute:
execute.return_value = retval
self.assertFalse(
netns_cmd.exists('bbbbbbbb-1111-2222-3333-bbbbbbbbbbbb'))
execute.assert_called_once_with(['ip', '-o', 'netns', 'list'],
run_as_root=False,
log_fail_as_error=True)
def test_execute(self):
self.parent.namespace = 'ns'
with mock.patch('neutron.agent.common.utils.execute') as execute:
self.netns_cmd.execute(['ip', 'link', 'list'])
execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip',
'link', 'list'],
run_as_root=True,
check_exit_code=True,
extra_ok_codes=None)
def test_execute_env_var_prepend(self):
self.parent.namespace = 'ns'
with mock.patch('neutron.agent.common.utils.execute') as execute:
env = dict(FOO=1, BAR=2)
self.netns_cmd.execute(['ip', 'link', 'list'], env)
execute.assert_called_once_with(
['ip', 'netns', 'exec', 'ns', 'env'] +
['%s=%s' % (k, v) for k, v in env.items()] +
['ip', 'link', 'list'],
run_as_root=True, check_exit_code=True, extra_ok_codes=None)
def test_execute_nosudo_with_no_namespace(self):
with mock.patch('neutron.agent.common.utils.execute') as execute:
self.parent.namespace = None
self.netns_cmd.execute(['test'])
execute.assert_called_once_with(['test'],
check_exit_code=True,
extra_ok_codes=None,
run_as_root=False)
class TestDeviceExists(base.BaseTestCase):
def test_device_exists(self):
with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
_execute.return_value = LINK_SAMPLE[1]
self.assertTrue(ip_lib.device_exists('eth0'))
_execute.assert_called_once_with(['o'], 'link', ('show', 'eth0'),
log_fail_as_error=False)
def test_device_does_not_exist(self):
with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
_execute.return_value = ''
_execute.side_effect = RuntimeError
self.assertFalse(ip_lib.device_exists('eth0'))
def test_ensure_device_is_ready(self):
ip_lib_mock = mock.Mock()
with mock.patch.object(ip_lib, 'IPDevice', return_value=ip_lib_mock):
self.assertTrue(ip_lib.ensure_device_is_ready("eth0"))
self.assertTrue(ip_lib_mock.link.set_up.called)
ip_lib_mock.reset_mock()
        # device doesn't exist
ip_lib_mock.link.set_up.side_effect = RuntimeError
self.assertFalse(ip_lib.ensure_device_is_ready("eth0"))
class TestIpNeighCommand(TestIPCmdBase):
def setUp(self):
super(TestIpNeighCommand, self).setUp()
self.parent.name = 'tap0'
self.command = 'neigh'
self.neigh_cmd = ip_lib.IpNeighCommand(self.parent)
def test_add_entry(self):
self.neigh_cmd.add('192.168.45.100', 'cc:dd:ee:ff:ab:cd')
self._assert_sudo([4],
('replace', '192.168.45.100',
'lladdr', 'cc:dd:ee:ff:ab:cd',
'nud', 'permanent',
'dev', 'tap0'))
def test_delete_entry(self):
self.neigh_cmd.delete('192.168.45.100', 'cc:dd:ee:ff:ab:cd')
self._assert_sudo([4],
('del', '192.168.45.100',
'lladdr', 'cc:dd:ee:ff:ab:cd',
'dev', 'tap0'))
class TestArpPing(TestIPCmdBase):
def _test_arping(self, function, address, spawn_n, mIPWrapper):
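        # Make the mocked eventlet.spawn_n run its target synchronously so
        # the arping command fires before the assertions below.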
spawn_n.side_effect = lambda f: f()
ARPING_COUNT = 3
function(mock.sentinel.ns_name,
mock.sentinel.iface_name,
address,
ARPING_COUNT)
self.assertTrue(spawn_n.called)
mIPWrapper.assert_called_once_with(namespace=mock.sentinel.ns_name)
ip_wrapper = mIPWrapper(namespace=mock.sentinel.ns_name)
# Just test that arping is called with the right arguments
arping_cmd = ['arping', '-A',
'-I', mock.sentinel.iface_name,
'-c', ARPING_COUNT,
'-w', mock.ANY,
address]
ip_wrapper.netns.execute.assert_any_call(arping_cmd,
check_exit_code=True)
@mock.patch.object(ip_lib, 'IPWrapper')
@mock.patch('eventlet.spawn_n')
def test_send_gratuitous_arp(self, spawn_n, mIPWrapper):
self._test_arping(
ip_lib.send_gratuitous_arp, '20.0.0.1', spawn_n, mIPWrapper)
class TestAddNamespaceToCmd(base.BaseTestCase):
def test_add_namespace_to_cmd_with_namespace(self):
cmd = ['ping', '8.8.8.8']
self.assertEqual(['ip', 'netns', 'exec', 'tmp'] + cmd,
ip_lib.add_namespace_to_cmd(cmd, 'tmp'))
def test_add_namespace_to_cmd_without_namespace(self):
cmd = ['ping', '8.8.8.8']
self.assertEqual(cmd, ip_lib.add_namespace_to_cmd(cmd, None))
|
JioCloud/neutron
|
neutron/tests/unit/agent/linux/test_ip_lib.py
|
Python
|
apache-2.0
| 45,248
|
[
"Brian"
] |
1ae31512f5a86fdbfae81f0cf37ba6d9cc44100e3347e53f16d188762e0aa944
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This tests the scafacos p2nfft dipolar calculations by matching against
# reference data from direct summation. In 2d, reference data from the mdlc
# test case is used
import espressomd
import espressomd.magnetostatics
import espressomd.magnetostatic_extensions
import numpy as np
import unittest as ut
import unittest_decorators as utx
import tests_common
DIPOLAR_PREFACTOR = 1.1
@utx.skipIfMissingFeatures(["DIPOLES", "FFTW"])
class Dipolar_p3m_mdlc_p2nfft(ut.TestCase):
"""Tests mdlc (2d) as well as dipolar p3m and dipolar p2nfft (3d) against
stored data. Validity of the stored data:
    2d: as long as this test AND the scafacos_dipolar_1d_2d test pass, we are safe.
3d: as long as the independently written p3m and p2nfft agree, we are safe.
"""
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
system.time_step = 0.01
system.cell_system.skin = .4
system.periodicity = [1, 1, 1]
def tearDown(self):
self.system.part.clear()
self.system.actors.clear()
def vector_error(self, a, b):
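        # Aggregate deviation between two (N, 3) arrays: the per-row
        # Euclidean norms are summed and scaled by 1/sqrt(N).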
return np.sum(np.linalg.norm(a - b, axis=1)) / np.sqrt(a.shape[0])
def test_mdlc(self):
s = self.system
rho = 0.3
# This is only for box size calculation. The actual particle number is
# lower, because particles are removed from the mdlc gap region
n_particle = 100
particle_radius = 0.5
box_l = np.cbrt(4 * n_particle * np.pi / (3 * rho)) * particle_radius
s.box_l = 3 * [box_l]
ref_E_path = tests_common.abspath(
"data/mdlc_reference_data_energy.dat")
ref_E = float(np.genfromtxt(ref_E_path)) * DIPOLAR_PREFACTOR
gap_size = 2.0
# Particles
data = np.genfromtxt(
tests_common.abspath("data/mdlc_reference_data_forces_torques.dat"))
partcls = s.part.add(pos=data[:, 1:4], dip=data[:, 4:7])
partcls.rotation = 3 * [True]
p3m = espressomd.magnetostatics.DipolarP3M(
prefactor=DIPOLAR_PREFACTOR, mesh=32, accuracy=1E-4)
dlc = espressomd.magnetostatic_extensions.DLC(
maxPWerror=1E-5, gap_size=gap_size)
s.actors.add(p3m)
s.actors.add(dlc)
s.integrator.run(0)
err_f = self.vector_error(
partcls.f, data[:, 7:10] * DIPOLAR_PREFACTOR)
err_t = self.vector_error(
partcls.torque_lab, data[:, 10:13] * DIPOLAR_PREFACTOR)
err_e = s.analysis.energy()["dipolar"] - ref_E
tol_f = 2E-3
tol_t = 2E-3
tol_e = 1E-3
self.assertLessEqual(abs(err_e), tol_e, "Energy difference too large")
self.assertLessEqual(abs(err_t), tol_t, "Torque difference too large")
self.assertLessEqual(abs(err_f), tol_f, "Force difference too large")
# Check if error is thrown when particles enter the MDLC gap
# positive direction
p0 = s.part.by_id(0)
p0.pos = [
s.box_l[0] / 2,
s.box_l[1] / 2,
s.box_l[2] - gap_size / 2]
with self.assertRaises(Exception):
self.system.analysis.energy()
with self.assertRaises(Exception):
            self.system.integrator.run(2)
# negative direction
p0.pos = [s.box_l[0] / 2, s.box_l[1] / 2, -gap_size / 2]
with self.assertRaises(Exception):
self.system.analysis.energy()
with self.assertRaises(Exception):
            self.system.integrator.run(2)
def test_p3m(self):
s = self.system
rho = 0.09
# This is only for box size calculation. The actual particle number is
# lower, because particles are removed from the mdlc gap region
n_particle = 1000
particle_radius = 1
box_l = np.cbrt(4 * n_particle * np.pi / (3 * rho)) * particle_radius
s.box_l = 3 * [box_l]
# Particles
data = np.genfromtxt(
tests_common.abspath("data/p3m_magnetostatics_system.data"))
partcls = s.part.add(pos=data[:, 1:4], dip=data[:, 4:7])
partcls.rotation = 3 * [True]
p3m = espressomd.magnetostatics.DipolarP3M(
prefactor=DIPOLAR_PREFACTOR, mesh=32, accuracy=1E-6, epsilon="metallic")
s.actors.add(p3m)
s.integrator.run(0)
expected = np.genfromtxt(
tests_common.abspath("data/p3m_magnetostatics_expected.data"))[:, 1:]
err_f = self.vector_error(
partcls.f, expected[:, 0:3] * DIPOLAR_PREFACTOR)
err_t = self.vector_error(
partcls.torque_lab, expected[:, 3:6] * DIPOLAR_PREFACTOR)
ref_E = 5.570 * DIPOLAR_PREFACTOR
err_e = s.analysis.energy()["dipolar"] - ref_E
tol_f = 2E-3
tol_t = 2E-3
tol_e = 1E-3
self.assertLessEqual(abs(err_e), tol_e, "Energy difference too large")
self.assertLessEqual(abs(err_t), tol_t, "Torque difference too large")
self.assertLessEqual(abs(err_f), tol_f, "Force difference too large")
@utx.skipIfMissingFeatures("SCAFACOS_DIPOLES")
def test_scafacos_dipoles(self):
s = self.system
rho = 0.09
# This is only for box size calculation. The actual particle number is
# lower, because particles are removed from the mdlc gap region
n_particle = 1000
particle_radius = 1
box_l = np.cbrt(4 * n_particle * np.pi / (3 * rho)) * particle_radius
s.box_l = 3 * [box_l]
# Particles
data = np.genfromtxt(
tests_common.abspath("data/p3m_magnetostatics_system.data"))
partcls = s.part.add(pos=data[:, 1:4], dip=data[:, 4:7])
partcls.rotation = 3 * [True]
scafacos = espressomd.magnetostatics.Scafacos(
prefactor=DIPOLAR_PREFACTOR,
method_name="p2nfft",
method_params={
"p2nfft_verbose_tuning": 0,
"pnfft_N": "32,32,32",
"pnfft_n": "32,32,32",
"pnfft_window_name": "bspline",
"pnfft_m": "4",
"p2nfft_ignore_tolerance": "1",
"pnfft_diff_ik": "0",
"p2nfft_r_cut": "11",
"p2nfft_alpha": "0.31"})
s.actors.add(scafacos)
s.integrator.run(0)
expected = np.genfromtxt(
tests_common.abspath("data/p3m_magnetostatics_expected.data"))[:, 1:]
err_f = self.vector_error(
partcls.f, expected[:, 0:3] * DIPOLAR_PREFACTOR)
err_t = self.vector_error(
partcls.torque_lab, expected[:, 3:6] * DIPOLAR_PREFACTOR)
ref_E = 5.570 * DIPOLAR_PREFACTOR
err_e = s.analysis.energy()["dipolar"] - ref_E
tol_f = 2E-3
tol_t = 2E-3
tol_e = 1E-3
self.assertLessEqual(abs(err_e), tol_e, "Energy difference too large")
self.assertLessEqual(abs(err_t), tol_t, "Torque difference too large")
self.assertLessEqual(abs(err_f), tol_f, "Force difference too large")
if __name__ == "__main__":
ut.main()
|
pkreissl/espresso
|
testsuite/python/dipolar_mdlc_p3m_scafacos_p2nfft.py
|
Python
|
gpl-3.0
| 7,707
|
[
"ESPResSo"
] |
13007b86bdcb55aa60838619fb0837a2b264fa6907cae2ed10f519ad5a46a576
|
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2003-2006 Donald N. Allingham
# Copyright (C) 2008,2012 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2012 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
""" custom text for the book report """
# Written by Alex Roitman,
# largely based on the SimpleBookTitle.py by Don Allingham
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
#------------------------------------------------------------------------
#
# gtk
#
#------------------------------------------------------------------------
#------------------------------------------------------------------------
#
# gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.plug.menu import TextOption
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.docgen import (FontStyle, ParagraphStyle,
FONT_SANS_SERIF, PARA_ALIGN_CENTER,
IndexMark, INDEX_TYPE_TOC)
#------------------------------------------------------------------------
#
# CustomText
#
#------------------------------------------------------------------------
class CustomText(Report):
""" CustomText """
def __init__(self, database, options, user):
"""
Create CustomText object that produces the report.
The arguments are:
database - the GRAMPS database instance
options - instance of the Options class for this report
user - a gen.user.User() instance
This report needs the following parameters (class variables)
that come in the options class.
top - Text on the top.
mid - Text in the middle.
bot - Text on the bottom.
"""
Report.__init__(self, database, options, user)
menu = options.menu
self.top_text = menu.get_option_by_name('top').get_value()
self.middle_text = menu.get_option_by_name('mid').get_value()
self.bottom_text = menu.get_option_by_name('bot').get_value()
def write_report(self):
mark_text = _("Custom Text")
if self.top_text[0]:
mark_text = "%s (%s)" % (_("Custom Text"), self.top_text[0])
elif self.middle_text[0]:
mark_text = "%s (%s)" % (_("Custom Text"), self.middle_text[0])
elif self.bottom_text[0]:
mark_text = "%s (%s)" % (_("Custom Text"), self.bottom_text[0])
mark = IndexMark(mark_text, INDEX_TYPE_TOC, 1)
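        # Register the report under this label in the document's table of
        # contents (TOC index, level 1).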
self.doc.start_paragraph('CBT-Initial')
for line in self.top_text:
self.doc.write_text(line, mark)
self.doc.write_text("\n")
self.doc.end_paragraph()
self.doc.start_paragraph('CBT-Middle')
for line in self.middle_text:
self.doc.write_text(line)
self.doc.write_text("\n")
self.doc.end_paragraph()
self.doc.start_paragraph('CBT-Final')
for line in self.bottom_text:
self.doc.write_text(line)
self.doc.write_text("\n")
self.doc.end_paragraph()
#------------------------------------------------------------------------
#
# CustomTextOptions
#
#------------------------------------------------------------------------
class CustomTextOptions(MenuReportOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, dbase):
self.__top = None
self.__mid = None
self.__bot = None
MenuReportOptions.__init__(self, name, dbase)
def add_menu_options(self, menu):
category_name = _("Text")
self.__top = TextOption(_("Initial Text"), [""])
self.__top.set_help(_("Text to display at the top."))
menu.add_option(category_name, "top", self.__top)
self.__mid = TextOption(_("Middle Text"), [""])
self.__mid.set_help(_("Text to display in the middle"))
menu.add_option(category_name, "mid", self.__mid)
self.__bot = TextOption(_("Final Text"), [""])
self.__bot.set_help(_("Text to display last."))
menu.add_option(category_name, "bot", self.__bot)
def get_subject(self):
""" Return a string that describes the subject of the report. """
if len(self.__top.get_value()[0]) > 0:
return self.__top.get_value()[0]
if len(self.__mid.get_value()[0]) > 0:
return self.__mid.get_value()[0]
if len(self.__bot.get_value()[0]) > 0:
return self.__bot.get_value()[0]
return ""
def make_default_style(self, default_style):
"""Make the default output style for the Custom Text report."""
font = FontStyle()
font.set(face=FONT_SANS_SERIF, size=12, bold=0, italic=0)
para = ParagraphStyle()
para.set_font(font)
para.set_alignment(PARA_ALIGN_CENTER)
para.set(pad=0.5)
para.set_description(
_('The style used for the first portion of the custom text.'))
default_style.add_paragraph_style("CBT-Initial", para)
font = FontStyle()
font.set(face=FONT_SANS_SERIF, size=12, bold=0, italic=0)
para = ParagraphStyle()
para.set_font(font)
para.set(pad=0.5)
para.set_alignment(PARA_ALIGN_CENTER)
para.set_description(
_('The style used for the middle portion of the custom text.'))
default_style.add_paragraph_style("CBT-Middle", para)
font = FontStyle()
font.set(face=FONT_SANS_SERIF, size=12, bold=0, italic=0)
para = ParagraphStyle()
para.set_font(font)
para.set_alignment(PARA_ALIGN_CENTER)
para.set(pad=0.5)
para.set_description(
_('The style used for the last portion of the custom text.'))
default_style.add_paragraph_style("CBT-Final", para)
|
beernarrd/gramps
|
gramps/plugins/textreport/custombooktext.py
|
Python
|
gpl-2.0
| 6,829
|
[
"Brian"
] |
73b3e6982f7d957a5a71ad34a657a2aecca83bb4f27a3eea0eade3aad1c7a150
|
# -*- coding: utf-8 -*-
import os
import sys
import array
import numpy as np
import scipy.ndimage
import scipy.interpolate
from scipy.interpolate import UnivariateSpline
from matplotlib.pyplot import *
from matplotlib.mlab import *
####### Check for h5py to Read AMR data ######
#try:
# import h5py as h5
# hasH5 = True
#except ImportError:
# hasH5 = False
hasH5 = False
def curdir():
""" Get the current working directory.
"""
curdir = os.getcwd()+'/'
return curdir
def get_nstepstr(ns):
""" Convert the float input *ns* into a string that would match the data file name.
**Inputs**:
ns -- Integer number that represents the time step number. E.g., The ns for data.0001.dbl is 1.\n
**Outputs**:
Returns the string that would be used to complete the data file name. E.g., for data.0001.dbl, ns = 1 and pyPLUTO.get_nstepstr(1) returns '0001'
"""
nstepstr = str(ns)
while len(nstepstr) < 4:
nstepstr= '0'+nstepstr
return nstepstr
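# For example, get_nstepstr(1) returns '0001' and get_nstepstr(25) returns '0025'.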
def nlast_info(w_dir=None,datatype=None):
""" Prints the information of the last step of the simulation as obtained from out files
**Inputs**:
w_dir -- path to the directory which has the dbl.out(or flt.out) and the data\n
datatype -- If the data is of 'float' type then datatype = 'float' else by default the datatype is set to 'double'.
**Outputs**:
This function returns a dictionary with following keywords - \n
nlast -- The ns for the last file saved.\n
time -- The simulation time for the last file saved.\n
dt -- The time step dt for the last file. \n
Nstep -- The Nstep value for the last file saved.
**Usage**:
In case the data is 'float'.
``wdir = /path/to/data/directory``\n
``import pyPLUTO as pp``\n
``A = pp.nlast_info(w_dir=wdir,datatype='float')``
"""
if w_dir is None: w_dir=curdir()
if datatype == 'float':
fname_v = w_dir+"flt.out"
elif datatype == 'vtk':
fname_v = w_dir+"vtk.out"
else:
fname_v = w_dir+"dbl.out"
    last_line = open(fname_v, "r").readlines()[-1].split()
nlast = int(last_line[0])
SimTime = float(last_line[1])
Dt = float(last_line[2])
Nstep = int(last_line[3])
print "------------TIME INFORMATION--------------"
print 'nlast =',nlast
print 'time =',SimTime
print 'dt =', Dt
print 'Nstep =',Nstep
print "-------------------------------------------"
return {'nlast':nlast,'time':SimTime,'dt':Dt,'Nstep':Nstep}
class pload(object):
def __init__(self, ns, w_dir=None, datatype=None, level = 0, x1range=None, x2range=None, x3range=None):
"""Loads the data.
**Inputs**:
ns -- Step Number of the data file\n
w_dir -- path to the directory which has the data files\n
datatype -- Datatype (default = 'double')
**Outputs**:
pyPLUTO pload object whose keys are arrays of data values.
"""
self.NStep = ns
self.Dt = 0.0
self.n1 = 0
self.n2 = 0
self.n3 = 0
self.x1 = []
self.x2 = []
self.x3 = []
self.dx1 = []
self.dx2 = []
self.dx3 = []
self.x1range = x1range
self.x2range = x2range
self.x3range = x3range
self.NStepStr = str(self.NStep)
while len(self.NStepStr) < 4:
self.NStepStr = '0'+self.NStepStr
if datatype is None:
datatype = "double"
self.datatype = datatype
if ((not hasH5) and (datatype == 'hdf5')):
print 'To read AMR hdf5 files with python'
print 'Please install h5py (Python HDF5 Reader)'
return
self.level = level
if w_dir is None:
w_dir = os.getcwd() + '/'
self.wdir = w_dir
Data_dictionary = self.ReadDataFile(self.NStepStr)
for keys in Data_dictionary:
object.__setattr__(self, keys, Data_dictionary.get(keys))
def ReadTimeInfo(self, timefile):
""" Read time info from the outfiles.
**Inputs**:
timefile -- name of the out file which has timing information.
"""
if (self.datatype == 'hdf5'):
fh5 = h5.File(timefile,'r')
self.SimTime = fh5.attrs.get('time')
#self.Dt = 1.e-2 # Should be erased later given the level in AMR
fh5.close()
else:
ns = self.NStep
f_var = open(timefile, "r")
tlist = []
for line in f_var.readlines():
tlist.append(line.split())
self.SimTime = float(tlist[ns][1])
self.Dt = float(tlist[ns][2])
def ReadVarFile(self, varfile):
""" Read variable names from the outfiles.
**Inputs**:
varfile -- name of the out file which has variable information.
"""
if (self.datatype == 'hdf5'):
fh5 = h5.File(varfile,'r')
self.filetype = 'single_file'
self.endianess = '>' # not used with AMR, kept for consistency
self.vars = []
for iv in range(fh5.attrs.get('num_components')):
self.vars.append(fh5.attrs.get('component_'+str(iv)))
fh5.close()
else:
vfp = open(varfile, "r")
varinfo = vfp.readline().split()
self.filetype = varinfo[4]
self.endianess = varinfo[5]
self.vars = varinfo[6:]
vfp.close()
def ReadGridFile(self, gridfile):
""" Read grid values from the grid.out file.
**Inputs**:
gridfile -- name of the grid.out file which has information about the grid.
"""
xL = []
xR = []
nmax = []
gfp = open(gridfile, "r")
for i in gfp.readlines():
if len(i.split()) == 1:
try:
int(i.split()[0])
nmax.append(int(i.split()[0]))
except:
pass
if len(i.split()) == 3:
try:
int(i.split()[0])
xL.append(float(i.split()[1]))
xR.append(float(i.split()[2]))
except:
if (i.split()[1] == 'GEOMETRY:'):
self.geometry=i.split()[2]
pass
self.n1, self.n2, self.n3 = nmax
n1 = self.n1
n1p2 = self.n1 + self.n2
n1p2p3 = self.n1 + self.n2 + self.n3
self.x1 = np.asarray([0.5*(xL[i]+xR[i]) for i in range(n1)])
self.dx1 = np.asarray([(xR[i]-xL[i]) for i in range(n1)])
self.x2 = np.asarray([0.5*(xL[i]+xR[i]) for i in range(n1, n1p2)])
self.dx2 = np.asarray([(xR[i]-xL[i]) for i in range(n1, n1p2)])
self.x3 = np.asarray([0.5*(xL[i]+xR[i]) for i in range(n1p2, n1p2p3)])
self.dx3 = np.asarray([(xR[i]-xL[i]) for i in range(n1p2, n1p2p3)])
# Stores the total number of points in '_tot' variable in case only
# a portion of the domain is loaded. Redefine the x and dx arrays
# to match the requested ranges
self.n1_tot = self.n1 ; self.n2_tot = self.n2 ; self.n3_tot = self.n3
if (self.x1range != None):
self.n1_tot = self.n1
self.irange = range(abs(self.x1-self.x1range[0]).argmin(),abs(self.x1-self.x1range[1]).argmin()+1)
self.n1 = len(self.irange)
self.x1 = self.x1[self.irange]
self.dx1 = self.dx1[self.irange]
else:
self.irange = range(self.n1)
if (self.x2range != None):
self.n2_tot = self.n2
self.jrange = range(abs(self.x2-self.x2range[0]).argmin(),abs(self.x2-self.x2range[1]).argmin()+1)
self.n2 = len(self.jrange)
self.x2 = self.x2[self.jrange]
self.dx2 = self.dx2[self.jrange]
else:
self.jrange = range(self.n2)
if (self.x3range != None):
self.n3_tot = self.n3
self.krange = range(abs(self.x3-self.x3range[0]).argmin(),abs(self.x3-self.x3range[1]).argmin()+1)
self.n3 = len(self.krange)
self.x3 = self.x3[self.krange]
self.dx3 = self.dx3[self.krange]
else:
self.krange = range(self.n3)
self.Slice=(self.x1range != None) or (self.x2range != None) or (self.x3range != None)
# Create the xr arrays containing the edges positions
# Useful for pcolormesh which should use those
self.x1r = np.zeros(len(self.x1)+1) ; self.x1r[1:] = self.x1 + self.dx1/2.0 ; self.x1r[0] = self.x1r[1]-self.dx1[0]
self.x2r = np.zeros(len(self.x2)+1) ; self.x2r[1:] = self.x2 + self.dx2/2.0 ; self.x2r[0] = self.x2r[1]-self.dx2[0]
self.x3r = np.zeros(len(self.x3)+1) ; self.x3r[1:] = self.x3 + self.dx3/2.0 ; self.x3r[0] = self.x3r[1]-self.dx3[0]
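        # Pick the shape used to reshape flat data arrays for this dataset's
        # dimensionality; axes are ordered (x3, x2, x1).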
prodn = self.n1*self.n2*self.n3
if prodn == self.n1:
self.nshp = (self.n1)
elif prodn == self.n1*self.n2:
self.nshp = (self.n2, self.n1)
else:
self.nshp = (self.n3, self.n2, self.n1)
def DataScanVTK(self, fp, n1, n2, n3, endian, dtype):
""" Scans the VTK data files.
**Inputs**:
fp -- Data file pointer\n
n1 -- No. of points in X1 direction\n
n2 -- No. of points in X2 direction\n
n3 -- No. of points in X3 direction\n
endian -- Endianess of the data\n
dtype -- datatype
**Output**:
Dictionary consisting of variable names as keys and its values.
"""
ks = []
vtkvar = []
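        # Walk the VTK file: record each SCALARS variable name, then read the
        # raw binary block that follows its LOOKUP_TABLE line.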
while True:
l = fp.readline()
try:
l.split()[0]
except IndexError:
pass
else:
if l.split()[0] == 'SCALARS':
ks.append(l.split()[1])
elif l.split()[0] == 'LOOKUP_TABLE':
A = array.array(dtype)
fmt = endian+str(n1*n2*n3)+dtype
nb = np.dtype(fmt).itemsize
A.fromstring(fp.read(nb))
if (self.Slice):
darr = np.zeros((n1*n2*n3))
indxx = np.sort([self.n3_tot*self.n2_tot*k + j*self.n2_tot + i for i in self.irange for j in self.jrange for k in self.krange])
if (sys.byteorder != self.endianess):
A.byteswap()
for ii,iii in enumerate(indxx):
darr[ii] = A[iii]
vtkvar_buf = [darr]
else:
vtkvar_buf = np.frombuffer(A,dtype=np.dtype(fmt))
vtkvar.append(np.reshape(vtkvar_buf,self.nshp).transpose())
else:
pass
if l == '':
break
vtkvardict = dict(zip(ks,vtkvar))
return vtkvardict
def DataScanHDF5(self, fp, myvars, ilev):
""" Scans the Chombo HDF5 data files for AMR in PLUTO.
**Inputs**:
fp -- Data file pointer\n
myvars -- Names of the variables to read\n
ilev -- required AMR level
**Output**:
Dictionary consisting of variable names as keys and its values.
**Note**:
Due to the particularity of AMR, the grid arrays loaded in ReadGridFile are overwritten here.
"""
# Read the grid information
dim = fp['Chombo_global'].attrs.get('SpaceDim')
nlev = fp.attrs.get('num_levels')
il = min(nlev-1,ilev)
lev = []
for i in range(nlev):
lev.append('level_'+str(i))
freb = np.zeros(nlev,dtype='int')
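        # freb[i] is the refinement factor mapping level i onto the requested
        # level il (the product of ref_ratio values between the two levels).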
for i in range(il+1)[::-1]:
fl = fp[lev[i]]
if (i == il):
pdom = fl.attrs.get('prob_domain')
dx = fl.attrs.get('dx')
dt = fl.attrs.get('dt')
ystr = 1. ; zstr = 1. ; logr = 0
try:
geom = fl.attrs.get('geometry')
logr = fl.attrs.get('logr')
if (dim == 2):
ystr = fl.attrs.get('g_x2stretch')
elif (dim == 3):
zstr = fl.attrs.get('g_x3stretch')
except:
print 'Old HDF5 file, not reading stretch and logr factors'
freb[i] = 1
x1b = fl.attrs.get('domBeg1')
if (dim == 1):
x2b = 0
else:
x2b = fl.attrs.get('domBeg2')
if (dim == 1 or dim == 2):
x3b = 0
else:
x3b = fl.attrs.get('domBeg3')
jbeg = 0 ; jend = 0 ; ny = 1
kbeg = 0 ; kend = 0 ; nz = 1
if (dim == 1):
ibeg = pdom[0] ; iend = pdom[1] ; nx = iend-ibeg+1
elif (dim == 2):
ibeg = pdom[0] ; iend = pdom[2] ; nx = iend-ibeg+1
jbeg = pdom[1] ; jend = pdom[3] ; ny = jend-jbeg+1
elif (dim == 3):
ibeg = pdom[0] ; iend = pdom[3] ; nx = iend-ibeg+1
jbeg = pdom[1] ; jend = pdom[4] ; ny = jend-jbeg+1
kbeg = pdom[2] ; kend = pdom[5] ; nz = kend-kbeg+1
else:
rat = fl.attrs.get('ref_ratio')
freb[i] = rat*freb[i+1]
dx0 = dx*freb[0]
## Allow to load only a portion of the domain
if (self.x1range != None):
if logr == 0:
self.x1range = self.x1range-x1b
else:
self.x1range = [log(self.x1range[0]/x1b),log(self.x1range[1]/x1b)]
ibeg0 = min(self.x1range)/dx0 ; iend0 = max(self.x1range)/dx0
ibeg = max([ibeg, int(ibeg0*freb[0])]) ; iend = min([iend,int(iend0*freb[0]-1)])
nx = iend-ibeg+1
if (self.x2range != None):
self.x2range = (self.x2range-x2b)/ystr
jbeg0 = min(self.x2range)/dx0 ; jend0 = max(self.x2range)/dx0
jbeg = max([jbeg, int(jbeg0*freb[0])]) ; jend = min([jend,int(jend0*freb[0]-1)])
ny = jend-jbeg+1
if (self.x3range != None):
self.x3range = (self.x3range-x3b)/zstr
kbeg0 = min(self.x3range)/dx0 ; kend0 = max(self.x3range)/dx0
kbeg = max([kbeg, int(kbeg0*freb[0])]) ; kend = min([kend,int(kend0*freb[0]-1)])
nz = kend-kbeg+1
## Create uniform grids at the required level
if logr == 0:
x1 = x1b + (ibeg+np.array(range(nx))+0.5)*dx
else:
x1 = x1b*(exp((ibeg+np.array(range(nx))+1)*dx)+exp((ibeg+np.array(range(nx)))*dx))*0.5
x2 = x2b + (jbeg+np.array(range(ny))+0.5)*dx*ystr
x3 = x3b + (kbeg+np.array(range(nz))+0.5)*dx*zstr
if logr == 0:
dx1 = np.ones(nx)*dx
else:
dx1 = x1b*(exp((ibeg+np.array(range(nx))+1)*dx)-exp((ibeg+np.array(range(nx)))*dx))
dx2 = np.ones(ny)*dx*ystr
dx3 = np.ones(nz)*dx*zstr
# Create the xr arrays containing the edges positions
# Useful for pcolormesh which should use those
x1r = np.zeros(len(x1)+1) ; x1r[1:] = x1 + dx1/2.0 ; x1r[0] = x1r[1]-dx1[0]
x2r = np.zeros(len(x2)+1) ; x2r[1:] = x2 + dx2/2.0 ; x2r[0] = x2r[1]-dx2[0]
x3r = np.zeros(len(x3)+1) ; x3r[1:] = x3 + dx3/2.0 ; x3r[0] = x3r[1]-dx3[0]
NewGridDict = dict([('n1',nx),('n2',ny),('n3',nz),\
('x1',x1),('x2',x2),('x3',x3),\
('x1r',x1r),('x2r',x2r),('x3r',x3r),\
('dx1',dx1),('dx2',dx2),('dx3',dx3),\
('Dt',dt)])
# Variables table
nvar = len(myvars)
vars = np.zeros((nx,ny,nz,nvar))
LevelDic = {'nbox':0,'ibeg':ibeg,'iend':iend,'jbeg':jbeg,'jend':jend,'kbeg':kbeg,'kend':kend}
AMRLevel = []
AMRBoxes = np.zeros((nx,ny,nz))
for i in range(il+1):
AMRLevel.append(LevelDic.copy())
fl = fp[lev[i]]
data = fl['data:datatype=0']
boxes = fl['boxes']
nbox = len(boxes['lo_i'])
AMRLevel[i]['nbox'] = nbox
ncount = 0L
AMRLevel[i]['box']=[]
for j in range(nbox): # loop on all boxes of a given level
AMRLevel[i]['box'].append({'x0':0.,'x1':0.,'ib':0L,'ie':0L,\
'y0':0.,'y1':0.,'jb':0L,'je':0L,\
'z0':0.,'z1':0.,'kb':0L,'ke':0L})
# Box indexes
ib = boxes[j]['lo_i'] ; ie = boxes[j]['hi_i'] ; nbx = ie-ib+1
jb = 0 ; je = 0 ; nby = 1
kb = 0 ; ke = 0 ; nbz = 1
if (dim > 1):
jb = boxes[j]['lo_j'] ; je = boxes[j]['hi_j'] ; nby = je-jb+1
if (dim > 2):
kb = boxes[j]['lo_k'] ; ke = boxes[j]['hi_k'] ; nbz = ke-kb+1
szb = nbx*nby*nbz*nvar
# Rescale to current level
kb = kb*freb[i] ; ke = (ke+1)*freb[i] - 1
jb = jb*freb[i] ; je = (je+1)*freb[i] - 1
ib = ib*freb[i] ; ie = (ie+1)*freb[i] - 1
# Skip boxes lying outside ranges
if ((ib > iend) or (ie < ibeg) or \
(jb > jend) or (je < jbeg) or \
(kb > kend) or (ke < kbeg)):
ncount = ncount + szb
else:
### Read data
q = data[ncount:ncount+szb].reshape((nvar,nbz,nby,nbx)).T
### Find boxes intersections with current domain ranges
ib0 = max([ibeg,ib]) ; ie0 = min([iend,ie])
jb0 = max([jbeg,jb]) ; je0 = min([jend,je])
kb0 = max([kbeg,kb]) ; ke0 = min([kend,ke])
### Store box corners in the AMRLevel structure
if logr == 0:
AMRLevel[i]['box'][j]['x0'] = x1b + dx*(ib0)
AMRLevel[i]['box'][j]['x1'] = x1b + dx*(ie0+1)
else:
AMRLevel[i]['box'][j]['x0'] = x1b*exp(dx*(ib0))
AMRLevel[i]['box'][j]['x1'] = x1b*exp(dx*(ie0+1))
AMRLevel[i]['box'][j]['y0'] = x2b + dx*(jb0)*ystr
AMRLevel[i]['box'][j]['y1'] = x2b + dx*(je0+1)*ystr
AMRLevel[i]['box'][j]['z0'] = x3b + dx*(kb0)*zstr
AMRLevel[i]['box'][j]['z1'] = x3b + dx*(ke0+1)*zstr
AMRLevel[i]['box'][j]['ib'] = ib0 ; AMRLevel[i]['box'][j]['ie'] = ie0
AMRLevel[i]['box'][j]['jb'] = jb0 ; AMRLevel[i]['box'][j]['je'] = je0
AMRLevel[i]['box'][j]['kb'] = kb0 ; AMRLevel[i]['box'][j]['ke'] = ke0
AMRBoxes[ib0-ibeg:ie0-ibeg+1, jb0-jbeg:je0-jbeg+1, kb0-kbeg:ke0-kbeg+1] = il
### Extract the box intersection from data stored in q
cib0 = (ib0-ib)/freb[i] ; cie0 = (ie0-ib)/freb[i]
cjb0 = (jb0-jb)/freb[i] ; cje0 = (je0-jb)/freb[i]
ckb0 = (kb0-kb)/freb[i] ; cke0 = (ke0-kb)/freb[i]
q1 = np.zeros((cie0-cib0+1, cje0-cjb0+1, cke0-ckb0+1,nvar))
q1 = q[cib0:cie0+1,cjb0:cje0+1,ckb0:cke0+1,:]
# Remap the extracted portion
if (dim == 1):
new_shape = (ie0-ib0+1,1)
elif (dim == 2):
new_shape = (ie0-ib0+1,je0-jb0+1)
else:
new_shape = (ie0-ib0+1,je0-jb0+1,ke0-kb0+1)
stmp = list(new_shape)
while stmp.count(1) > 0:
stmp.remove(1)
new_shape = tuple(stmp)
myT = Tools()
for iv in range(nvar):
vars[ib0-ibeg:ie0-ibeg+1,jb0-jbeg:je0-jbeg+1,kb0-kbeg:ke0-kbeg+1,iv] = \
myT.congrid(q1[:,:,:,iv].squeeze(),new_shape,method='linear',minusone=True).reshape((ie0-ib0+1,je0-jb0+1,ke0-kb0+1))
ncount = ncount+szb
h5vardict={}
for iv in range(nvar):
h5vardict[myvars[iv]] = vars[:,:,:,iv].squeeze()
AMRdict = dict([('AMRBoxes',AMRBoxes),('AMRLevel',AMRLevel)])
OutDict = dict(NewGridDict)
OutDict.update(AMRdict)
OutDict.update(h5vardict)
return OutDict
def DataScan(self, fp, n1, n2, n3, endian, dtype, off=None):
""" Scans the data files in all formats.
**Inputs**:
fp -- Data file pointer\n
n1 -- No. of points in X1 direction\n
n2 -- No. of points in X2 direction\n
n3 -- No. of points in X3 direction\n
endian -- Endianness of the data\n
dtype -- datatype, e.g., double, float, vtk, hdf5\n
off -- offset (for avoiding staggered B fields)
**Output**:
Dictionary consisting of variable names as keys and its values.
"""
if off is not None:
off_fmt = endian+str(off)+dtype
nboff = np.dtype(off_fmt).itemsize
fp.read(nboff)
n1_tot = self.n1_tot ; n2_tot = self.n2_tot; n3_tot = self.n3_tot
A = array.array(dtype)
fmt = endian+str(n1_tot*n2_tot*n3_tot)+dtype
nb = np.dtype(fmt).itemsize
A.fromstring(fp.read(nb))
if (self.Slice):
darr = np.zeros((n1*n2*n3))
indxx = np.sort([n3_tot*n2_tot*k + j*n2_tot + i for i in self.irange for j in self.jrange for k in self.krange])
if (sys.byteorder != self.endianess):
A.byteswap()
for ii,iii in enumerate(indxx):
darr[ii] = A[iii]
darr = [darr]
else:
darr = np.frombuffer(A,dtype=np.dtype(fmt))
return np.reshape(darr[0],self.nshp).transpose()
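# Hedged usage sketch (grid sizes and endianness are illustrative): reading a
# staggered bx1s field skips n2*n3 leading values via the off keyword, as
# ReadSingleFile below does:
#   var = self.DataScan(fp, 64, 32, 1, '<', 'd', off=32*1)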
def ReadSingleFile(self, datafilename, myvars, n1, n2, n3, endian,
dtype, ddict):
"""Reads a single data file, data.****.dtype.
**Inputs**:
datafilename -- Data file name\n
myvars -- List of variable names to be read\n
n1 -- No. of points in X1 direction\n
n2 -- No. of points in X2 direction\n
n3 -- No. of points in X3 direction\n
endian -- Endianness of the data\n
dtype -- datatype\n
ddict -- Dictionary containing Grid and Time Information
which is updated
**Output**:
Updated Dictionary consisting of variable names as keys and its values.
"""
if self.datatype == 'hdf5':
fp = h5.File(datafilename,'r')
else:
fp = open(datafilename, "rb")
print "Reading Data file : %s"%datafilename
if self.datatype == 'vtk':
vtkd = self.DataScanVTK(fp, n1, n2, n3, endian, dtype)
ddict.update(vtkd)
elif self.datatype == 'hdf5':
h5d = self.DataScanHDF5(fp,myvars,self.level)
ddict.update(h5d)
else:
for i in range(len(myvars)):
if myvars[i] == 'bx1s':
ddict.update({myvars[i]: self.DataScan(fp, n1, n2, n3, endian,
dtype, off=n2*n3)})
elif myvars[i] == 'bx2s':
ddict.update({myvars[i]: self.DataScan(fp, n1, n2, n3, endian,
dtype, off=n3*n1)})
elif myvars[i] == 'bx3s':
ddict.update({myvars[i]: self.DataScan(fp, n1, n2, n3, endian,
dtype, off=n1*n2)})
else:
ddict.update({myvars[i]: self.DataScan(fp, n1, n2, n3, endian,
dtype)})
fp.close()
def ReadMultipleFiles(self, nstr, dataext, myvars, n1, n2, n3, endian,
dtype, ddict):
"""Reads a multiple data files, varname.****.dataext.
**Inputs**:
nstr -- File number in form of a string\n
dataext -- Data type of the file, e.g., 'dbl', 'flt' or 'vtk' \n
myvars -- List of variable names to be read\n
n1 -- No. of points in X1 direction\n
n2 -- No. of points in X2 direction\n
n3 -- No. of points in X3 direction\n
endian -- Endianness of the data\n
dtype -- datatype\n
ddict -- Dictionary containing Grid and Time Information
which is updated.
**Output**:
Updated Dictionary consisting of variable names as keys and its values.
"""
for i in range(len(myvars)):
datafilename = self.wdir+myvars[i]+"."+nstr+dataext
fp = open(datafilename, "rb")
if self.datatype == 'vtk':
ddict.update(self.DataScanVTK(fp, n1, n2, n3, endian, dtype))
else:
ddict.update({myvars[i]: self.DataScan(fp, n1, n2, n3, endian,
dtype)})
fp.close()
def ReadDataFile(self, num):
"""Reads the data file generated from PLUTO code.
**Inputs**:
num -- Data file number in form of an Integer.
**Outputs**:
Dictionary that contains all information about Grid, Time and
variables.
"""
gridfile = self.wdir+"grid.out"
if self.datatype == "float":
dtype = "f"
varfile = self.wdir+"flt.out"
dataext = ".flt"
elif self.datatype == "vtk":
dtype = "f"
varfile = self.wdir+"vtk.out"
dataext=".vtk"
elif self.datatype == 'hdf5':
dtype = 'd'
dataext = '.hdf5'
nstr = num
varfile = self.wdir+"data."+nstr+dataext
else:
dtype = "d"
varfile = self.wdir+"dbl.out"
dataext = ".dbl"
self.ReadVarFile(varfile)
self.ReadGridFile(gridfile)
self.ReadTimeInfo(varfile)
nstr = num
if self.endianess == 'big':
endian = ">"
elif self.datatype == 'vtk':
endian = ">"
else:
endian = "<"
D = [('NStep', self.NStep), ('SimTime', self.SimTime), ('Dt', self.Dt),
('n1', self.n1), ('n2', self.n2), ('n3', self.n3),
('x1', self.x1), ('x2', self.x2), ('x3', self.x3),
('dx1', self.dx1), ('dx2', self.dx2), ('dx3', self.dx3),
('endianess', self.endianess), ('datatype', self.datatype),
('filetype', self.filetype)]
ddict = dict(D)
if self.filetype == "single_file":
datafilename = self.wdir+"data."+nstr+dataext
self.ReadSingleFile(datafilename, self.vars, self.n1, self.n2,
self.n3, endian, dtype, ddict)
elif self.filetype == "multiple_files":
self.ReadMultipleFiles(nstr, dataext, self.vars, self.n1, self.n2,
self.n3, endian, dtype, ddict)
else:
print "Wrong file type : CHECK pluto.ini for file type."
print "Only supported are .dbl, .flt, .vtk, .hdf5"
sys.exit()
return ddict
class Tools(object):
"""
This Class has all the functions doing basic mathematical
operations to the vector or scalar fields.
It is called after pyPLUTO.pload object is defined.
"""
def deriv(self,Y,X=None):
"""
Calculates the derivative of Y with respect to X.
**Inputs:**
Y : 1-D array to be differentiated.\n
X : 1-D array with len(X) = len(Y).\n
If X is not specified then by default X is chosen to be an equally spaced array having the same number of elements
as Y.
**Outputs:**
This returns a 1-D array having the same no. of elements as Y (or X) and contains the values of dY/dX.
"""
n = len(Y)
n2 = n-2
if X is None: X = np.arange(n)
Xarr = np.asarray(X,dtype='float')
Yarr = np.asarray(Y,dtype='float')
x12 = Xarr - np.roll(Xarr,-1) #x1 - x2
x01 = np.roll(Xarr,1) - Xarr #x0 - x1
x02 = np.roll(Xarr,1) - np.roll(Xarr,-1) #x0 - x2
DfDx = np.roll(Yarr,1) * (x12 / (x01*x02)) + Yarr * (1./x12 - 1./x01) - np.roll(Yarr,-1) * (x01 / (x02 * x12))
# Formulae for the first and last points:
DfDx[0] = Yarr[0] * (x01[1]+x02[1])/(x01[1]*x02[1]) - Yarr[1] * x02[1]/(x01[1]*x12[1]) + Yarr[2] * x01[1]/(x02[1]*x12[1])
DfDx[n-1] = -Yarr[n-3] * x12[n2]/(x01[n2]*x02[n2]) + Yarr[n-2]*x02[n2]/(x01[n2]*x12[n2]) - Yarr[n-1]*(x02[n2]+x12[n2])/(x02[n2]*x12[n2])
return DfDx
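# Hedged usage sketch (the grid below is illustrative):
#   x = np.linspace(0.0, np.pi, 50) ** 2       # a stretched, non-uniform grid
#   dydx = Tools().deriv(np.sin(x), x)         # three-point estimate of cos(x)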
def Grad(self,phi,x1,x2,dx1,dx2,polar=False):
""" This method calculates the gradient of the 2D scalar phi.
**Inputs:**
phi -- 2D scalar whose gradient is to be determined.\n
x1 -- The 'x' array\n
x2 -- The 'y' array\n
dx1 -- The grid spacing in 'x' direction.\n
dx2 -- The grid spacing in 'y' direction.\n
polar -- Set this keyword to True in order to estimate the gradient in polar co-ordinates. By default it is set to False.
**Outputs:**
This routine outputs a 3D array with shape = (len(x1),len(x2),2), such that the [:,:,0] element corresponds to the gradient values of phi with respect to x1 and [:,:,1] are the gradient values of phi with respect to x2.
"""
(n1, n2) = phi.shape
grad_phi = np.zeros(shape=(n1,n2,2))
h2 = np.ones(shape=(n1,n2))
if polar:
for j in range(n2):
h2[:,j] = x1
for i in range(n1):
scrh1 = phi[i,:]
grad_phi[i,:,1] = self.deriv(scrh1,x2)/h2[i,:]
for j in range(n2):
scrh2 = phi[:,j]
grad_phi[:,j,0] = self.deriv(scrh2,x1)
return grad_phi
def Div(self,u1,u2,x1,x2,dx1,dx2,geometry=None):
""" This method calculates the divergence of the 2D vector fields u1 and u2.
**Inputs:**
u1 -- 2D vector along x1 whose divergence is to be determined.\n
u2 -- 2D vector along x2 whose divergence is to be determined.\n
x1 -- The 'x' array\n
x2 -- The 'y' array\n
dx1 -- The grid spacing in 'x' direction.\n
dx2 -- The grid spacing in 'y' direction.\n
geometry -- The keyword *geometry* is by default set to 'cartesian'. It can be set to either one of the following : *cartesian*, *cylindrical*, *spherical* or *polar*. To calculate the divergence of the vector fields, respective geometric corrections are taken into account based on the value of this keyword.
**Outputs:**
A 2D array with same shape as u1(or u2) having the values of divergence.
"""
(n1, n2) = u1.shape
Divergence = np.zeros(shape=(n1,n2))
du1 = np.zeros(shape=(n1,n2))
du2 = np.zeros(shape=(n1,n2))
A1 = np.zeros(shape=n1)
A2 = np.zeros(shape=n2)
dV1 = np.zeros(shape=(n1,n2))
dV2 = np.zeros(shape=(n1,n2))
if geometry is None: geometry = 'cartesian'
#------------------------------------------------
# define area and volume elements for the
# different coordinate systems
#------------------------------------------------
if geometry == 'cartesian' :
A1[:] = 1.0
A2[:] = 1.0
dV1 = np.outer(dx1,A2)
dV2 = np.outer(A1,dx2)
if geometry == 'cylindrical' :
A1 = x1
A2[:] = 1.0
dV1 = np.meshgrid(x1*dx1,A2)[0].T*np.meshgrid(x1*dx1,A2)[1].T
for i in range(n1) : dV2[i,:] = dx2[:]
if geometry == 'polar' :
A1 = x1
A2[:] = 1.0
dV1 = np.meshgrid(x1,A2)[0].T*np.meshgrid(x1,A2)[1].T
dV2 = np.meshgrid(x1,dx2)[0].T*np.meshgrid(x1,dx2)[1].T
if geometry == 'spherical' :
A1 = x1*x1
A2 = np.sin(x2)
for j in range(n2): dV1[:,j] = A1*dx1
dV2 = np.meshgrid(x1,np.sin(x2)*dx2)[0].T*np.meshgrid(x1,np.sin(x2)*dx2)[1].T
# ------------------------------------------------
# Make divergence
# ------------------------------------------------
for i in range(1,n1-1):
du1[i,:] = 0.5*(A1[i+1]*u1[i+1,:] - A1[i-1]*u1[i-1,:])/dV1[i,:]
for j in range(1,n2-1):
du2[:,j] = 0.5*(A2[j+1]*u2[:,j+1] - A2[j-1]*u2[:,j-1])/dV2[:,j]
Divergence = du1 + du2
return Divergence
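# Hedged usage sketch (assumes a 2D pload object D; names follow the docstrings above):
#   ppt = Tools()
#   div_v = ppt.Div(D.vx1, D.vx2, D.x1, D.x2, D.dx1, D.dx2, geometry='cartesian')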
def RTh2Cyl(self,R,Th,X1,X2):
""" This method does the transformation from spherical coordinates to cylindrical ones.
**Inputs:**
R - 2D array of spherical radius coordinates.\n
Th - 2D array of spherical theta-angle coordinates.\n
X1 - 2D array of radial component of given vector\n
X2 - 2D array of the theta component of the given vector\n
**Outputs:**
This routine outputs two 2D arrays after transformation.
**Usage:**
``import pyPLUTO as pp``\n
``import numpy as np``\n
``D = pp.pload(0)``\n
``ppt=pp.Tools()``\n
``TH,R=np.meshgrid(D.x2,D.x1)``\n
``Br,Bz=ppt.RTh2Cyl(R,TH,D.bx1,D.bx2)``
D.bx1 and D.bx2 should be vectors in spherical coordinates. After transformation (Br,Bz) corresponds to the vector in cylindrical coordinates.
"""
Y1=X1*np.sin(Th)+X2*np.cos(Th)
Y2=X1*np.cos(Th)-X2*np.sin(Th)
return Y1,Y2
def myInterpol(self,RR,N):
""" This method interpolates (linear interpolation) vector 1D vector RR to 1D N-length vector. Useful for stretched grid calculations.
**Inputs:**
RR - 1D array to interpolate.\n
N - Number of grids to interpolate to.\n
**Outputs:**
This routine outputs interpolated 1D array to the new grid (len=N).
**Usage:**
``import pyPLUTO as pp``\n
``import numpy as np``\n
``D = pp.pload(0)``\n
``ppt=pp.Tools()``\n
``x=linspace(0,1,10) #len(x)=10``\n
``y=x*x``\n
``Ri,Ni=ppt.myInterpol(y,100) #len(Ri)=100``
Ri - interpolated numbers;
Ni - grid for Ri
"""
NN=np.linspace(0,len(RR)-1,len(RR))
spline_fit=UnivariateSpline(RR,NN,k=3,s=0)
RRi=np.linspace(RR[0],RR[-1],N)
NNi=spline_fit(RRi)
NNi[0]=NN[0]+0.00001
NNi[-1]=NN[-1]-0.00001
return RRi,NNi
def getUniformGrid(self,r,th,rho,Nr,Nth):
""" This method transforms data with non-uniform grid (stretched) to uniform. Useful for stretched grid calculations.
**Inputs:**
r - 1D vector of X1 coordinate (could be any, e.g D.x1).\n
th - 1D vector of X2 coordinate (could be any, e.g D.x2).\n
rho- 2D array of data.\n
Nr - new size of X1 vector.\n
Nth- new size of X2 vector.\n
**Outputs:**
This routine outputs 2D uniform array Nr x Nth dimension
**Usage:**
``import pyPLUTO as pp``\n
``import numpy as np``\n
``D = pp.pload(0)``\n
``ppt=pp.Tools()``\n
``X1new, X2new, res = ppt.getUniformGrid(D.x1,D.x2,D.rho,20,30)``
X1new - X1 interpolated grid len(X1new)=20
X2new - X2 interpolated grid len(X2new)=30
res - 2D array of interpolated variable
"""
Ri,NRi=self.myInterpol(r,Nr)
Ra=np.int32(NRi);Wr=NRi-Ra
YY=np.ones([Nr,len(th)])
for i in range(len(th)):
YY[:,i]=(1-Wr)*rho[Ra,i] + Wr*rho[Ra+1,i]
THi,NTHi=self.myInterpol(th,Nth)
THa=np.int32(NTHi);Wth=NTHi-THa
ZZ=np.ones([Nr,Nth])
for i in range(Nr):
ZZ[i,:]=(1-Wth)*YY[i,THa] + Wth*YY[i,THa+1]
return Ri,THi,ZZ
def sph2cyl(self,D,Dx,rphi=None,theta0=None):
""" This method transforms spherical data into cylindrical applying interpolation. Works for stretched grid as well, transforms poloidal (R-Theta) data by default. Fix theta and set rphi=True to get (R-Phi) transformation.
**Inputs:**
D - structure from 'pload' method.\n
Dx - variable to be transformed (D.rho for example).\n
**Outputs:**
This routine outputs transformed (sph->cyl) variable and grid.
**Usage:**
``import pyPLUTO as pp``\n
``import numpy as np``\n
``D = pp.pload(0)``\n
``ppt=pp.Tools()``\n
``R,Z,res = ppt.sph2cyl(D,D.rho.transpose())``
R - 2D array with cylindrical radius values
Z - 2D array with cylindrical Z values
res - 2D array of transformed variable
"""
if rphi is None or rphi == False:
rx=D.x1
th=D.x2
else:
rx=D.x1*np.sin(theta0)
th=D.x3
rx,th,Dx=self.getUniformGrid(rx,th,Dx.T,200,200)
Dx=Dx.T
if rphi is None or rphi == False:
r0=np.min(np.sin(th)*rx[0])
rN=rx[-1]
dr=rN-r0
z0=np.min(np.cos(th)*rN)
zN=np.max(np.cos(th)*rN)
dz=zN-z0
dth=th[-1]-th[0]
rl=np.int32(len(rx)*dr/(rx[-1]-rx[0]))
zl=np.int32(rl* dz/dr)
thl=len(th)
r=np.linspace(r0, rN, rl)
z=np.linspace(z0, zN, zl)
else:
r0=np.min([np.sin(th)*rx[0] , np.sin(th)*rx[-1]])
rN=np.max([np.sin(th)*rx[0] , np.sin(th)*rx[-1]])
dr=rN-r0
z0=np.min(np.cos(th)*rN)
zN=np.max(np.cos(th)*rN)
dz=zN-z0
dth=th[-1]-th[0]
rl=np.int32(len(rx)*dr/(rx[-1]-rx[0]))
zl=np.int32(rl* dz/dr)
thl=len(th)
r=np.linspace(r0, rN, rl)
z=np.linspace(z0, zN, zl)
R,Z = np.meshgrid(r, z)
Rs = np.sqrt(R*R + Z*Z)
Th = np.arccos(Z/Rs)
kv_34=find(R<0)
Th.flat[kv_34]=2*np.pi - Th.flat[kv_34]
ddr=rx[1]-rx[0]
ddth=th[1]-th[0]
Rs_copy=Rs.copy()
Th_copy=Th.copy()
nR1=find(Rs<rx[0])
Rs.flat[nR1]=rx[0]
nR2=find(Rs>rN)
Rs.flat[nR2]=rN
nTh1=find(Th>th[-1])
Th.flat[nTh1]=th[-1]
nTh2=find(Th<th[0])
Th.flat[nTh2]=th[0]
ra = ((len(rx)-1.001)/(np.max(Rs.flat)-np.min(Rs.flat)) *(Rs-np.min(Rs.flat)))
tha = ((thl-1.001)/dth *(Th-th[0]))
rn = np.int32(ra)
thn = np.int32(tha)
dra=ra-rn
dtha=tha-thn
w1=1-dra
w2=dra
w3=1-dtha
w4=dtha
lrx=len(rx)
NN1=np.int32(rn+thn*lrx)
NN2=np.int32((rn+1)+thn*lrx)
NN3=np.int32(rn+(thn+1)*lrx)
NN4=np.int32((rn+1)+(thn+1)*lrx)
n=np.transpose(np.arange(0,np.product(np.shape(R))))
DD=Dx.copy()
F=R.copy()
F.flat[n]=w1.flat[n]*(w3.flat[n]*Dx.flat[NN1.flat[n]] + w4.flat[n]*Dx.flat[NN3.flat[n]] )+\
w2.flat[n]*(w3.flat[n]*Dx.flat[NN2.flat[n]] + w4.flat[n]*Dx.flat[NN4.flat[n]] )
nR1=find(Rs_copy<rx[0]-ddr/1.5)
nR2=find(Rs_copy>rN+ddr/1.5)
nTh1=find(Th_copy>th[-1]+ddth/1.5)
nTh2=find(Th_copy<th[0]-ddth/1.5)
nmask=np.concatenate((nR1,nR2,nTh1,nTh2))
F.flat[nmask] = np.nan
return R,Z,F
def congrid(self, a, newdims, method='linear', centre=False, minusone=False):
"""
Arbitrary resampling of source array to new dimension sizes.
Currently only supports maintaining the same number of dimensions.
To use 1-D arrays, first promote them to shape (x,1).
Uses the same parameters and creates the same co-ordinate lookup points
as IDL's congrid routine, which apparently originally came from a VAX/VMS
routine of the same name.
**Inputs:**
a -- The 2D array that needs resampling into new dimensions.\n
newdims -- A tuple which represents the shape of the resampled data\n
method -- This keyword decides the method used for interpolation.\n
neighbour - closest value from original data\n
nearest and linear - uses n x 1-D interpolations using scipy.interpolate.interp1d
(see Numerical Recipes for validity of use of n 1-D interpolations)\n
spline - uses ndimage.map_coordinates\n
centre -- This keyword decides the positions of interpolation points.\n
True - interpolation points are at the centres of the bins\n
False - points are at the front edge of the bin\n
minusone -- This prevents extrapolation one element beyond bounds of input array\n
For example- inarray.shape = (i,j) & new dimensions = (x,y)\n
False - inarray is resampled by factors of (i/x) * (j/y)\n
True - inarray is resampled by(i-1)/(x-1) * (j-1)/(y-1)\n
**Outputs:**
A 2D array with resampled data having a shape corresponding to newdims.
"""
if a.dtype not in [np.float64, np.float32]:
a = np.cast[float](a)
m1 = np.cast[int](minusone)
ofs = np.cast[int](centre) * 0.5
old = np.array( a.shape )
ndims = len( a.shape )
if len( newdims ) != ndims:
print "[congrid] dimensions error. " \
"This routine currently only support " \
"rebinning to the same number of dimensions."
return None
newdims = np.asarray( newdims, dtype=float )
dimlist = []
if method == 'neighbour':
for i in range( ndims ):
base = np.indices(newdims)[i]
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
cd = np.array( dimlist ).round().astype(int)
newa = a[list( cd )]
return newa
elif method in ['nearest','linear']:
# calculate new dims
for i in range( ndims ):
base = np.arange( newdims[i] )
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
# specify old dims
olddims = [np.arange(i, dtype = np.float) for i in list( a.shape )]
# first interpolation - for ndims = any
mint = scipy.interpolate.interp1d( olddims[-1], a, kind=method )
newa = mint( dimlist[-1] )
trorder = [ndims - 1] + range( ndims - 1 )
for i in range( ndims - 2, -1, -1 ):
newa = newa.transpose( trorder )
mint = scipy.interpolate.interp1d( olddims[i], newa, kind=method )
newa = mint( dimlist[i] )
if ndims > 1:
# need one more transpose to return to original dimensions
newa = newa.transpose( trorder )
return newa
elif method in ['spline']:
oslices = [ slice(0,j) for j in old ]
oldcoords = np.ogrid[oslices]
nslices = [ slice(0,j) for j in list(newdims) ]
newcoords = np.mgrid[nslices]
newcoords_dims = range(np.rank(newcoords))
#make first index last
newcoords_dims.append(newcoords_dims.pop(0))
newcoords_tr = newcoords.transpose(newcoords_dims)
# makes a view that affects newcoords
newcoords_tr += ofs
deltas = (np.asarray(old) - m1) / (newdims - m1)
newcoords_tr *= deltas
newcoords_tr -= ofs
newa = scipy.ndimage.map_coordinates(a, newcoords)
return newa
else:
print "Congrid error: Unrecognized interpolation type.\n", \
"Currently only \'neighbour\', \'nearest\',\'linear\',", \
"and \'spline\' are supported."
return None
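# Hedged usage sketch (shapes are illustrative):
#   small = Tools().congrid(np.random.rand(200, 100), (50, 25), method='linear')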
class Image(object):
''' This class has all the routines for imaging the data
and plotting various contours and fieldlines on these images.
It is called after the pyPLUTO.pload object is defined.
'''
def pldisplay(self, D, var,**kwargs):
""" This method allows the user to display a 2D data using the
matplotlib's pcolormesh.
**Inputs:**
D -- pyPLUTO pload object.\n
var -- 2D array that needs to be displayed.
*Required Keywords:*
x1 -- The 'x' array\n
x2 -- The 'y' array
*Optional Keywords:*
vmin -- The minimum value of the 2D array (Default : min(var))\n
vmax -- The maximum value of the 2D array (Default : max(var))\n
title -- Sets the title of the image.\n
label1 -- Sets the X Label (Default: 'XLabel')\n
label2 -- Sets the Y Label (Default: 'YLabel')\n
polar -- A list to project Polar data on Cartesian Grid.\n
polar = [True, True] -- Projects r-phi plane.\n
polar = [True, False] -- Project r-theta plane.\n
polar = [False, False] -- No polar plot (Default)\n
cbar -- A tuple that sets the colorbar on or off. \n
cbar = (True,'vertical') -- Displays a vertical colorbar\n
cbar = (True,'horizontal') -- Displays a horizontal colorbar\n
cbar = (False,'') -- Displays no colorbar.
**Usage:**
``import pyPLUTO as pp``\n
``wdir = '/path/to/the data files/'``\n
``D = pp.pload(1,w_dir=wdir)``\n
``I = pp.Image()``\n
``I.pldisplay(D, D.v2, x1=D.x1, x2=D.x2, cbar=(True,'vertical'),\
title='Velocity',label1='Radius',label2='Height')``
"""
x1 = kwargs.get('x1')
x2 = kwargs.get('x2')
var = var.T
f1 = figure(kwargs.get('fignum',1), figsize=kwargs.get('figsize',[10,10]),
dpi=80, facecolor='w', edgecolor='k')
ax1 = f1.add_subplot(111)
ax1.set_aspect('equal')
if kwargs.get('polar',[False,False])[0]:
xx, yy = self.getPolarData(D,kwargs.get('x2'),rphi=kwargs.get('polar')[1])
pcolormesh(xx,yy,var,vmin=kwargs.get('vmin',np.min(var)),vmax=kwargs.get('vmax',np.max(var)))
else:
ax1.axis([np.min(x1),np.max(x1),np.min(x2),np.max(x2)])
pcolormesh(x1,x2,var,vmin=kwargs.get('vmin',np.min(var)),vmax=kwargs.get('vmax',np.max(var)))
title(kwargs.get('title',"Title"),size=kwargs.get('size'))
xlabel(kwargs.get('label1',"Xlabel"),size=kwargs.get('size'))
ylabel(kwargs.get('label2',"Ylabel"),size=kwargs.get('size'))
if kwargs.get('cbar',(False,''))[0]:
colorbar(orientation=kwargs.get('cbar')[1])
def multi_disp(self,*args,**kwargs):
mvar = []
for arg in args:
mvar.append(arg.T)
xmin = np.min(kwargs.get('x1'))
xmax = np.max(kwargs.get('x1'))
ymin = np.min(kwargs.get('x2'))
ymax = np.max(kwargs.get('x2'))
mfig = figure(kwargs.get('fignum',1),figsize=kwargs.get('figsize',[10,10]))
Ncols = kwargs.get('Ncols')
Nrows = len(args)/Ncols
mprod = Nrows*Ncols
dictcbar=kwargs.get('cbar',(False,'','each'))
for j in range(mprod):
mfig.add_subplot(Nrows,Ncols,j+1)
pcolormesh(kwargs.get('x1'),kwargs.get('x2'), mvar[j])
axis([xmin,xmax,ymin,ymax])
gca().set_aspect('equal')
xlabel(kwargs.get('label1',mprod*['Xlabel'])[j])
ylabel(kwargs.get('label2',mprod*['Ylabel'])[j])
title(kwargs.get('title',mprod*['Title'])[j])
if (dictcbar[0] == True) and (dictcbar[2] =='each'):
colorbar(orientation=kwargs.get('cbar')[1])
if dictcbar[0] == True and dictcbar[2]=='last':
if (j == np.max(range(mprod))):colorbar(orientation=kwargs.get('cbar')[1])
def oplotbox(self, AMRLevel, lrange=[0,0], cval=['b','r','g','m','w','k'],\
islice=-1, jslice=-1, kslice=-1,geom='CARTESIAN'):
"""
This method overplots the AMR boxes up to the specified level.
**Input:**
AMRLevel -- AMR object loaded during the reading and stored in the pload object
*Optional Keywords:*
lrange -- [level_min,level_max] to be overplotted. By default it shows all the loaded levels\n
cval -- list of colors for the levels to be overplotted.\n
[ijk]slice -- Index of the 2D slice to look for so that the adequate box limits are plotted.
By default oplotbox considers you are plotting a 2D slice of the z=min(x3) plane.\n
geom -- Specifies the geometry. Currently, CARTESIAN (default) and POLAR geometries are handled.
"""
nlev = len(AMRLevel)
lrange = [lrange[0], min(lrange[1],nlev-1)] # avoid mutating the shared default list argument
npl = lrange[1]-lrange[0]+1
lpls = [lrange[0]+v for v in range(npl)]
cols = cval[0:nlev]
# Get the offset and the type of slice
Slice = 0 ; inds = 'k'
xx = 'x' ; yy ='y'
if (islice >= 0):
Slice = islice + AMRLevel[0]['ibeg'] ; inds = 'i'
xx = 'y' ; yy ='z'
if (jslice >= 0):
Slice = jslice + AMRLevel[0]['jbeg'] ; inds = 'j'
xx = 'x' ; yy ='z'
if (kslice >= 0):
Slice = kslice + AMRLevel[0]['kbeg'] ; inds = 'k'
xx = 'x' ; yy ='y'
# Overplot the boxes
hold(True)
for il in lpls:
level = AMRLevel[il]
for ib in range(level['nbox']):
box = level['box'][ib]
if ((Slice-box[inds+'b'])*(box[inds+'e']-Slice) >= 0):
if (geom == 'CARTESIAN'):
x0 = box[xx+'0'] ; x1 = box[xx+'1']
y0 = box[yy+'0'] ; y1 = box[yy+'1']
plot([x0,x1,x1,x0,x0],[y0,y0,y1,y1,y0],color=cols[il])
elif (geom == 'POLAR') or (geom == 'SPHERICAL'):
dn = np.pi/50.
x0 = box[xx+'0'] ; x1 = box[xx+'1']
y0 = box[yy+'0'] ; y1 = box[yy+'1']
if y0 == y1:
y1 = 2*np.pi+y0 - 1.e-3
xb = np.concatenate([
[x0*np.cos(y0),x1*np.cos(y0)],\
x1*np.cos(np.linspace(y0,y1,num=int(abs(y0-y1)/dn) )),\
[x1*np.cos(y1),x0*np.cos(y1)],\
x0*np.cos(np.linspace(y1,y0,num=int(abs(y0-y1)/dn)))])
yb = np.concatenate([
[x0*np.sin(y0),x1*np.sin(y0)],\
x1*np.sin(np.linspace(y0,y1,num=int(abs(y0-y1)/dn))),\
[x1*np.sin(y1),x0*np.sin(y1)],\
x0*np.sin(np.linspace(y1,y0,num=int(abs(y0-y1)/dn)))])
plot(xb,yb,color=cols[il])
hold(False)
def field_interp(self,var1,var2,x,y,dx,dy,xp,yp):
""" This method interpolates value of vector fields (var1 and var2) on field points (xp and yp).
The field points are obtained from the method field_line.
**Inputs:**
var1 -- 2D Vector field in X direction\n
var2 -- 2D Vector field in Y direction\n
x -- 1D X array\n
y -- 1D Y array\n
dx -- 1D grid spacing array in X direction\n
dy -- 1D grid spacing array in Y direction\n
xp -- field point in X direction\n
yp -- field point in Y direction\n
**Outputs:**
A list with 2 elements where the first element corresponds to the interpolate field
point in 'x' direction and the second element is the field point in 'y' direction.
"""
q=[]
U = var1
V = var2
i0 = np.abs(xp-x).argmin()
j0 = np.abs(yp-y).argmin()
scrhUx = np.interp(xp,x,U[:,j0])
scrhUy = np.interp(yp,y,U[i0,:])
q.append(scrhUx + scrhUy - U[i0,j0])
scrhVx = np.interp(xp,x,V[:,j0])
scrhVy = np.interp(yp,y,V[i0,:])
q.append(scrhVx + scrhVy - V[i0,j0])
return q
def field_line(self,var1,var2,x,y,dx,dy,x0,y0):
""" This method is used to obtain field lines (same as fieldline.pro in PLUTO IDL tools).
**Inputs:**
var1 -- 2D Vector field in X direction\n
var2 -- 2D Vector field in Y direction\n
x -- 1D X array\n
y -- 1D Y array\n
dx -- 1D grid spacing array in X direction\n
dy -- 1D grid spacing array in Y direction\n
x0 -- foot point of the field line in X direction\n
y0 -- foot point of the field line in Y direction\n
**Outputs:**
This routine returns a dictionary with keys - \n
qx -- list of the field points along the 'x' direction.
qy -- list of the field points along the 'y' direction.
**Usage:**
See the myfieldlines routine for the same.
"""
xbeg = x[0] - 0.5*dx[0]
xend = x[-1] + 0.5*dx[-1]
ybeg = y[0] - 0.5*dy[0]
yend = y[-1] + 0.5*dy[-1]
inside_domain = x0 > xbeg and x0 < xend and y0 > ybeg and y0 < yend
MAX_STEPS = 5000
xln_fwd = [x0]
yln_fwd = [y0]
xln_bck = [x0]
yln_bck = [y0]
rhs = []
k = 0
while inside_domain == True:
R1 = self.field_interp(var1,var2,x,y,dx,dy,xln_fwd[k],yln_fwd[k])
dl = 0.5*np.max(np.concatenate((dx,dy)))/(np.sqrt(R1[0]*R1[0] + R1[1]*R1[1] + 1.e-14))
xscrh = xln_fwd[k] + 0.5*dl*R1[0]
yscrh = yln_fwd[k] + 0.5*dl*R1[1]
R2 = self.field_interp(var1,var2,x,y,dx,dy,xscrh,yscrh) # evaluate at the midpoint computed above (the original re-used the start point, defeating the RK2 step)
xln_one = xln_fwd[k] + dl*R2[0]
yln_one = yln_fwd[k] + dl*R2[1]
xln_fwd.append(xln_one)
yln_fwd.append(yln_one)
inside_domain = xln_one > xbeg and xln_one < xend and yln_one > ybeg and yln_one < yend
inside_domain = inside_domain and (k < MAX_STEPS-3)
k = k + 1
k_fwd = k
qx = np.asarray(xln_fwd[0:k_fwd])
qy = np.asarray(yln_fwd[0:k_fwd])
flines={'qx':qx,'qy':qy}
return flines
def myfieldlines(self,Data,x0arr,y0arr,stream=False,**kwargs):
""" This method overplots the magnetic field lines at the footpoints given by (x0arr[i],y0arr[i]).
**Inputs:**
Data -- pyPLUTO.pload object\n
x0arr -- array of x co-ordinates of the footpoints\n
y0arr -- array of y co-ordinates of the footpoints\n
stream -- keyword for two different ways of calculating the field lines.\n
True -- plots contours of rAphi (needs to store vector potential)\n
False -- plots the fieldlines obtained from the field_line routine. (Default option)
*Optional Keywords:*
colors -- A list of matplotlib colors to represent the lines. The length of this list should be same as that of x0arr.\n
lw -- Integer value that determines the linewidth of each line.\n
ls -- Determines the linestyle of each line.
**Usage:**
Assume that the magnetic field is given as **B** = B0$\hat{y}$.
Then to show this field lines we have to define the x and y arrays of field foot points.\n
``x0arr = linspace(0.0,10.0,20)``\n
``y0arr = linspace(0.0,0.0,20)``\n
``import pyPLUTO as pp``\n
``D = pp.pload(45)``\n
``I = pp.Image()``\n
``I.myfieldlines(D,x0arr,y0arr,colors='k',ls='--',lw=1.0)``
"""
if len(x0arr) != len(y0arr): print "Input arrays should have the same size"
QxList=[]
QyList=[]
StreamFunction = []
levels =[]
if stream == True:
X, Y = np.meshgrid(Data.x1,Data.x2.T)
StreamFunction = X*(Data.Ax3.T)
for i in range(len(x0arr)):
nx = np.abs(X[0,:]-x0arr[i]).argmin()
ny = np.abs(X[:,0]-y0arr[i]).argmin()
levels.append(X[ny,nx]*Data.Ax3.T[ny,nx])
contour(X,Y,StreamFunction,levels,colors=kwargs.get('colors'),linewidths=kwargs.get('lw',1),linestyles=kwargs.get('ls','solid'))
else:
for i in range(len(x0arr)):
fl = self.field_line(Data.bx1,Data.bx2,Data.x1,Data.x2,Data.dx1,Data.dx2,x0arr[i],y0arr[i]) # compute once; pass dx2 (not dx1 twice) as the y spacing
QxList.append(fl.get('qx'))
QyList.append(fl.get('qy'))
plot(QxList[i],QyList[i],color=kwargs.get('colors'))
axis([min(Data.x1),max(Data.x1),min(Data.x2),max(Data.x2)])
def getSphData(self,Data,w_dir=None,datatype=None,**kwargs):
"""This method transforms the vector and scalar fields from Spherical co-ordinates to Cylindrical.
**Inputs**:
Data -- pyPLUTO.pload object\n
w_dir -- /path/to/the/working/directory/\n
datatype -- Set this to 'float' if the data is single precision; by default the datatype is 'double'.
*Optional Keywords*:
rphi -- [Default] is set to False implies that the r-theta plane is transformed. If set True then the r-phi plane is transformed.\n
x2cut -- Applicable for 3D data and it determines the co-ordinate of the x2 plane while r-phi is set to True.\n
x3cut -- Applicable for 3D data and it determines the co-ordinate of the x3 plane while r-phi is set to False.
"""
Tool = Tools()
key_value_pairs = []
allvars = []
if w_dir is None: w_dir = curdir()
for v in Data.vars:
allvars.append(v)
if kwargs.get('rphi',False)==True:
R,TH = np.meshgrid(Data.x1,Data.x3)
if Data.n3 != 1:
for variable in allvars:
key_value_pairs.append([variable,getattr(Data,variable)[:,kwargs.get('x2cut',0),:].T])
SphData = dict(key_value_pairs)
if ('bx1' in allvars) or ('bx2' in allvars):
(SphData['b1c'],SphData['b3c']) = Tool.RTh2Cyl(R,TH,SphData.get('bx1'),SphData.get('bx3'))
allvars.append('b1c')
allvars.append('b3c')
if ('vx1' in allvars) or ('vx2' in allvars):
(SphData['v1c'],SphData['v3c']) = Tool.RTh2Cyl(R,TH,SphData.get('vx1'),SphData.get('vx3'))
allvars.append('v1c')
allvars.append('v3c')
else:
print "No x3 plane for 2D data"
else:
R,TH = np.meshgrid(Data.x1,Data.x2)
if Data.n3 != 1:
for variable in allvars:
key_value_pairs.append([variable,getattr(Data,variable)[:,:,kwargs.get('x3cut',0)].T])
SphData = dict(key_value_pairs)
if ('bx1' in allvars) or ('bx2' in allvars):
(SphData['b1c'],SphData['b2c']) = Tool.RTh2Cyl(R,TH,SphData.get('bx1'),SphData.get('bx2'))
allvars.append('b1c')
allvars.append('b2c')
if ('vx1' in allvars) or ('vx2' in allvars):
(SphData['v1c'],SphData['v2c']) = Tool.RTh2Cyl(R,TH,SphData.get('vx1'),SphData.get('vx2'))
allvars.append('v1c')
allvars.append('v2c')
else:
for variable in allvars:
key_value_pairs.append([variable,getattr(Data,variable)[:,:].T])
SphData = dict(key_value_pairs)
if ('bx1' in allvars) or ('bx2' in allvars):
(SphData['b1c'],SphData['b2c']) = Tool.RTh2Cyl(R,TH,SphData.get('bx1'),SphData.get('bx2'))
allvars.append('b1c')
allvars.append('b2c')
if ('vx1' in allvars) or ('vx2' in allvars):
(SphData['v1c'],SphData['v2c']) = Tool.RTh2Cyl(R,TH,SphData.get('vx1'),SphData.get('vx2'))
allvars.append('v1c')
allvars.append('v2c')
for variable in allvars:
if kwargs.get('rphi',False)==True:
R,Z,SphData[variable]= Tool.sph2cyl(Data,SphData.get(variable),rphi=True,theta0=Data.x2[kwargs.get('x2cut',0)])
else:
R,Z,SphData[variable] = Tool.sph2cyl(Data,SphData.get(variable),rphi=False) # identical for 2D and 3D data
return R,Z,SphData
def getPolarData(self, Data, ang_coord, rphi=False):
"""To get the Cartesian Co-ordinates from Polar.
**Inputs:**
Data -- pyPLUTO pload Object\n
ang_coord -- The Angular co-ordinate (theta or Phi)
*Optional Keywords:*
rphi -- Default value FALSE is for R-THETA data,
Set TRUE for R-PHI data.\n
**Outputs**:
2D Arrays of X, Y from the Radius and Angular co-ordinates.\n
They are used in pcolormesh in the Image.pldisplay functions.
"""
D = Data
if ang_coord is D.x2:
x2r = D.x2r
elif ang_coord is D.x3:
x2r = D.x3r
else:
print "Angular co-ordinate must be given"
rcos = np.outer(np.cos(x2r), D.x1r)
rsin = np.outer(np.sin(x2r), D.x1r)
if rphi:
xx = rcos
yy = rsin
else:
xx = rsin
yy = rcos
return xx, yy
def pltSphData(self,Data,w_dir=None,datatype=None,**kwargs):
"""This method plots the transformed data obtained from getSphData using the matplotlib's imshow
**Inputs:**
Data -- pyPLUTO.pload object\n
w_dir -- /path/to/the/working/directory/\n
datatype -- Datatype.
*Required Keywords*:
plvar -- A string which represents the plot variable.\n
*Optional Keywords*:
logvar -- [Default = False] Set it True for plotting the log of a variable.\n
rphi -- [Default = False - for plotting in r-theta plane] Set it True for plotting the variable in r-phi plane.
"""
if w_dir is None: w_dir=curdir()
R,Z,SphData = self.getSphData(Data,w_dir=w_dir,datatype=datatype,**kwargs)
extent=(np.min(R.flat),max(R.flat),np.min(Z.flat),max(Z.flat))
dRR=max(R.flat)-np.min(R.flat)
dZZ=max(Z.flat)-np.min(Z.flat)
isnotnan = ~np.isnan(SphData[kwargs.get('plvar')]) # '~' is the boolean negation; unary '-' on bool arrays is deprecated
maxPl=max(SphData[kwargs.get('plvar')][isnotnan].flat)
minPl=np.min(SphData[kwargs.get('plvar')][isnotnan].flat)
normrange=False
if minPl<0:
normrange=True
if maxPl>-minPl:
minPl=-maxPl
else:
maxPl=-minPl
if (normrange and kwargs.get('plvar')!='rho' and kwargs.get('plvar')!='prs'):
SphData[kwargs.get('plvar')][-1][-1]=maxPl
SphData[kwargs.get('plvar')][-1][-2]=minPl
if (kwargs.get('logvar') == True):
SphData[kwargs.get('plvar')] = np.log10(SphData[kwargs.get('plvar')])
imshow(SphData[kwargs.get('plvar')], aspect='equal', origin='lower', cmap=cm.jet,extent=extent, interpolation='nearest')
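# A minimal end-to-end sketch, guarded so that importing the module stays
# side-effect free. The run directory is hypothetical; pload and its usage
# follow the docstring examples given above.
if __name__ == '__main__':
    D = pload(1, w_dir='/path/to/run/')
    I = Image()
    I.pldisplay(D, D.rho, x1=D.x1, x2=D.x2, cbar=(True, 'vertical'), title='Density')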
|
aywander/pluto-outflows
|
Tools/pyPLUTO/pyPLUTO/pyPLUTO.py
|
Python
|
gpl-2.0
| 63,895
|
[
"VTK"
] |
534176c0856b2c4a0f3faf78137df27e98fe9f8072fa6e3c541120932f1df01a
|
from django.db import models
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser, PermissionsMixin,
)
from django.contrib.auth.models import Group
from django.core.validators import RegexValidator
from django.utils import timezone
from datetime import datetime, timedelta
from django.conf import settings
# Create your models here.
class CustomUserManager( BaseUserManager ):
def create_user( self, username, surname, email, identify, phone_number, password, position, person_in_charge):
"""
"""
user = self.model(
email = self.normalize_email(email),
surname = surname,
identify = identify,
phone_number = phone_number,
person_in_charge= person_in_charge,
position = position
)
user.set_password(password)
user.username = username
user.save(using=self._db)
return user
def create_superuser( self, username, surname, email, identify, phone_number, password ):
position = "staff"
orgnization = Orgnization()
orgnization.name = "厦门大学"
orgnization.department = "化学化工学院"
orgnization.address = "厦门市思明区思明南路220号"
orgnization.save()
person_in_charge = PersonInCharge()
person_in_charge.surname = surname
person_in_charge.phone_number = phone_number
person_in_charge.email = email
person_in_charge.orgnization = orgnization
person_in_charge.save()
user = self.create_user( username, surname, email, identify, phone_number, password, position, person_in_charge )
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class Orgnization( models.Model ):
name = models.CharField( verbose_name='单位名称', max_length=250 )
department = models.CharField( verbose_name='部门', max_length=100 )
address = models.CharField( verbose_name='地址', max_length=250 )
def __str__(self):
full_name = self.name+self.department
return full_name
class PersonInCharge( models.Model ):
surname0 = models.CharField( unique=True, verbose_name='姓名', help_text='用户的真实姓名', max_length=30)
phonenumeric = RegexValidator(r'^\+?1?\d{9,15}$', message='格式为05922186874或者手机号码')
phone_number0 = models.CharField( verbose_name='电话号码', help_text='格式为05922186874或者手机号码', max_length=18, default='05920000000', validators=[phonenumeric] )
email0 = models.EmailField( verbose_name='邮箱', help_text='有效邮箱,用于认证通知', max_length=255, default='unknown@xmu.edu.cn')
titles_choice = (
('PI', '课题组负责人'),
('FL', '经费负责人'),
('OL', '公司领导')
)
titles = models.CharField( verbose_name='职务', max_length=3, choices=titles_choice, default='PI')
orgnization = models.ForeignKey( Orgnization, verbose_name='单位', null=True, blank=True )
def __str__(self):
return self.surname0
class CustomUser( AbstractBaseUser, PermissionsMixin ):
alphanumeric = RegexValidator(r'^[0-9a-zA-Z]*$', message='只有[0-9a-z-A-Z]字符可以.')
username = models.CharField( verbose_name='用户名', help_text='纯数字和字母', unique=True, max_length=30, validators=[alphanumeric])
surname = models.CharField( verbose_name='姓名', help_text='用户的真实姓名', max_length=30)
identify = models.CharField( verbose_name='学号/教工号', help_text='厦大学生号或者教工号,非厦大学生或者教师不用填', max_length=30, unique=True, null=True, blank=True )
phonenumeric = RegexValidator(r'^\+?1?\d{9,15}$', message='格式为05922186874或者手机号码')
phone_number = models.CharField( verbose_name='电话号码', help_text='格式为05922186874或者手机号码', max_length=18, default="05920000000", validators=[phonenumeric] )
website = models.URLField( verbose_name='个人网页', blank=True, null=True )
position_choice = (
('student', "厦大学生"),
('staff', "厦大教师"),
('temp', "临时卡"),
('visit', "其他"),
)
position = models.CharField( verbose_name='身份', max_length=7, choices=position_choice, default='student' )
person_in_charge = models.ForeignKey( PersonInCharge, verbose_name='负责人', help_text='课题组负责人, 导师或者领导,里面找不到暂时不用填写,登录以后请修改个人资料!', null=True, blank=True )
create_time = models.DateTimeField(auto_now_add=True)
expired_time = models.DateField( default=(timezone.now()+timedelta(days=100*365)).date(), verbose_name='失效日期' ) # NOTE: this default is evaluated once at import time; a callable default would be evaluated per row
email = models.EmailField( verbose_name='邮箱', help_text='有效邮箱,用于认证通知', max_length=255, default="unknown@xmu.edu.cn")
email_code = models.CharField( max_length=50, null=True, blank=True )
objects = CustomUserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['surname', 'identify', 'phone_number', 'email']
is_active = models.BooleanField(verbose_name='活跃', default=False, null=False)
is_staff = models.BooleanField(verbose_name='NMRCEN_Man', default=False, null=False)
title = models.CharField(verbose_name='职称', max_length=50, blank=True, null=False)
profile_image = models.ImageField(upload_to="profile", blank=False, null=False, verbose_name='个人照片', default="profile/default.png")
user_bio = models.TextField(verbose_name='自我介绍', max_length=600,blank=True)
def get_full_name(self):
return self.surname
@property
def get_absolute_url(self):
return '%s%s' % (settings.BASE_DIR, self.profile_image.url)
def get_position_name(self):
return dict(self.position_choice)[self.position]
def is_expired(self):
if not self.expired_time:
self.expired_time = (timezone.now() + timedelta(days=100*365)).date()
return timezone.now().date() > self.expired_time
is_expired.boolean = True
is_expired.short_description= '过期'
def get_short_name(self):
return self.surname
def __str__(self):
return self.surname
class ImageAttachment(models.Model):
image = models.ImageField(upload_to="upload")
@property
def get_absolute_url(self):
return '%s%s' % (settings.BASE_DIR, self.image.url)
def __str__(self):
return self.image.name
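# Hedged usage sketch (Django shell; all field values are illustrative):
#   >>> org = Orgnization.objects.create(name='XMU', department='Chem', address='Xiamen')
#   >>> pic = PersonInCharge.objects.create(surname0='PI Name', orgnization=org)
#   >>> CustomUser.objects.create_user('user1', 'Name', 'a@xmu.edu.cn', '2020001',
#   ...                                '05920000001', 'secret', 'student', pic)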
|
fenglb/mysite
|
accounts/models.py
|
Python
|
cc0-1.0
| 6,595
|
[
"VisIt"
] |
d48387a9035c7c686db42ddbb089a90eae41df45180f54ec97ed0aadaa540641
|
def sizeof_fmt(num, suffix='B'):
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def _check_for_local_file(name, path):
"""Checks if the file exists locally somewhere first, even if we
aren't checking in the "copy_to_local" directory.
Keyword Arguments:
name -- the file name to look for
path -- the directory tree to search
Paul J. Gierz, Sun Jul 19 11:43:35 2015
"""
import os
# print "Looking for:", name, path
# print "---------------------------------"
foundname = False
for r, d, f in os.walk(path):
# print r, d, f
if name in f:
foundname = os.path.join(r, name)
break
else:
# foundname = False
pass # foundname will still be false, not reassigned...
return foundname
def _refile_local_file_correctly(filepath, from_where):
import os
# Check if this directory exists locally:
if not os.path.exists("/Users/pgierz/Research/" + '/'.join(filepath.split("/")[4:-1])):
os.makedirs(
"/Users/pgierz/Research/" + '/'.join(filepath.split("/")[4:-1]))
# TODO: Check if the run is registered in ~/.all_runs:
os.rename(from_where + os.path.basename(filepath),
"/Users/pgierz/Research/" + '/'.join(filepath.split("/")[4:]))
return "/Users/pgierz/Research/" + '/'.join(filepath.split("/")[4:-1]) + "/", os.path.basename(filepath)
def _copy_remote_file(filepath, remote_dump="/tmp/remote_data/"):
"""
'Private' function that copies a remote file to /tmp/remote_data
for use in get remote data
Keyword Arguments:
filepath -- the path of the file with username@hostname
Paul J. Gierz, Sun Feb 15 13:52:38 2015
"""
# --------------------------------------------------------------------------------
# CHANGELOG:
#
# FEATURE: There is now a progressbar when copying to the local file system! Hooray!
# Paul J. Gierz, Sat Jun 27 12:58:13 2015
#
# FEATURE: Now, we look for the file in multiple local locations
# based upon a file in ${HOME}/.all_runs.
# Paul J. Gierz, Sun Jul 19 14:08:28 2015
#
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
# KNOWN ISSUES:
#
# BUG: This function copies to /tmp/remote_data by default, which
# might not be available on all computers, depending on the /tmp/
# folder settings. |==> Paul J. Gierz, Sun Feb 15 14:24:39 2015
# --------------------------------------------------------------------------------
import os
import paramiko
import progressbar
if not os.path.exists(remote_dump):
os.makedirs(remote_dump)
if not os.path.exists(remote_dump + os.path.basename(filepath)):
test = False # stays False if ~/.all_runs.dat lists no runs
for r in [line.strip() for line in open(os.path.expanduser("~/.all_runs.dat")) if "#" not in line]:
test = _check_for_local_file(os.path.basename(filepath), r)
if test:
break
if not test:
print "Couldn't find the file in any organized way, copying again..."
user = filepath.split(':')[0].split('@')[0]
host = filepath.split(':')[0].split('@')[1]
rfile = filepath.split(':')[1]
print "Trying to copy: ", filepath
# FIXME: This function only works if .ssh/id_rsa exists and is properly
# configured
privatekeyfile = os.path.expanduser('~/.ssh/id_rsa')
mykey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(host, username=user, pkey=mykey)
sftp = client.open_sftp()
info_rfile = sftp.stat(rfile)
widgetlist = [rfile.split("/")[-1], ' (' + sizeof_fmt(info_rfile.st_size) + ')', progressbar.Percentage(
), ' ', progressbar.FileTransferSpeed(), ' ', progressbar.Bar(), ' ', progressbar.ETA(), ' ', progressbar.Timer()]
pbar = progressbar.ProgressBar(
widgets=widgetlist, maxval=info_rfile.st_size)
def _progress_cb(done, total):
if pbar.start_time is None:
pbar.start()
pbar.update(done)
print "starting transfer..."
sftp.get(
rfile, remote_dump + os.path.basename(filepath),
callback=_progress_cb)
pbar.finish()
sftp.close()
client.close()
pre_s = "Copied " + \
os.path.basename(filepath) + " to " + remote_dump
remote_dump, new_filepath = _refile_local_file_correctly(
filepath, remote_dump)
else:
pre_s = "Found " + os.path.basename(filepath) + " in " + test
remote_dump, new_filepath = "/".join(
test.split("/")[:-1]) + "/", os.path.basename(test)
else:
pre_s = "Loaded from " + remote_dump
remote_dump, new_filepath = _refile_local_file_correctly(
filepath, remote_dump)
return pre_s, remote_dump + new_filepath
def _check_for_local(name, path):
"""Checks if the file exists locally somewhere first, even if we
aren't checking in the "copy_to_local" directory.
Keyword Arguments:
name -- the file name to look for
path -- the directory tree to search
Paul J. Gierz, Sun Jul 19 11:43:35 2015
"""
import os
for root, dirs, files in os.walk(path):
if name in files:
return os.path.join(root, name)
return False # reached only after the whole tree has been walked (the old 'else' returned False on the first directory)
def get_remote_data(filepath, time=False, info=False, copy_to_local=False):
"""
A paramiko wrapper that gets file from a remote computer. Parses
hostname from filepath. Works only for netcdf files!
Keyword Arguments:
filepath -- the path of the file with hostname.
time -- print time required for loading(default False)
info -- print some information about the file after loading
(default False)
copy_to_local -- copies file to local /tmp/remote_data/ and checks
if this file already exists there (default False)
Example:
>>> get_remote_data('pgierz@rayo3:/csys/paleo2/pgierz/GR30s.nc')
Paul J. Gierz, Sat Feb 14 14:20:43 2015
"""
# --------------------------------------------------------------------------------
# CHANGELOG:
#
# First port of this function to the repository that will be
# shared with the AWI Paleodyn group later. I have also added
# options to print out some file information and the amount of
# time required during transfer
#
# Paul J. Gierz, Sat Feb 14 14:21:19 2015
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
# ROADMAP:
#
# A couple of things could be added:
# 1) option to copy to some local folder to avoid long load times
# due to ssh transfers -- DONE!
# 2) Support for non nc files
# 3) Checking a local datasystem first to see if this file already
# exists somewhere to prevent needless copying
# 4) The info statement printing could be cleaner
# 5) hostname check...
#
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
# KNOWN ISSUES:
#
# BUG: For some reason, this function breaks horribly when I do
# not directly return the netcdf file. I suspect that it has
# something to do with the namespaces/scopes: The main program
# probably doesn't know about the ssh-socket. Or something...I'm
# not even remotely close to what you would call a network
# protocol expert; so that's an "educated guess" Since we
# generally work with netcdf data exclusively, I will simply use
# this note as a caution for anyone trying to open other data
# files with this function. I'll also probably post something on
# an online forum later on, maybe someone can help!
#
# BUG: This function currently depends on the ~/.ssh/id_rsa
# file. It would be elegant if the user did not necessearily have
# to have this file in place (some people prefer to type their
# password often, I guess). By not having this file in place, the
# entire function breaks needlessly
# |==> Paul J. Gierz, Sat Feb 14 15:12:24 2015
# --------------------------------------------------------------------------------
# Import stuff from your own library:
from UI_Stuff import print_colors
if time:
import time
import paramiko
import os
from scipy.io import netcdf
if time:
now = time.time()
print "Trying to load ".ljust(40) \
+ \
print_colors.WARNING("{f}").format(
f=os.path.basename(filepath)).ljust(100)
if ":" not in filepath:
# local file
file = netcdf.netcdf_file(filepath)
else:
if not copy_to_local:
# We wish to split the filepath to get the username, hostname, and
# path on the remote machine.
user = filepath.split(':')[0].split('@')[0]
host = filepath.split(':')[0].split('@')[1]
rfile = filepath.split(':')[1]
# FIXME: This function only works if .ssh/id_rsa exists and is
# properly configured
privatekeyfile = os.path.expanduser('~/.ssh/id_rsa')
mykey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(host, username=user, pkey=mykey)
sftp = client.open_sftp()
fileObject = sftp.file(rfile)
file = netcdf.netcdf_file(fileObject)
pre_s = "Loaded from " + host
else:
if type(copy_to_local) is str:
pre_s, fileObject = _copy_remote_file(
filepath, remote_dump=copy_to_local)
else:
pre_s, fileObject = _copy_remote_file(filepath)
file = netcdf.netcdf_file(fileObject)
if time:
print pre_s.ljust(40) \
+ print_colors.OKGREEN("{filepath}").format(filepath=os.path.basename(filepath)).ljust(100) \
+ " in ".rjust(0) \
+ print_colors.OKBLUE("{time}").format(time=round(time.time() - now)) \
+ " seconds"
if info:
s = print_colors.HEADER(
"#" * 30 + " INFO of " + os.path.basename(filepath) + " " + "#" * 30)
print s
print "Variables: \n"
for k, v in file.variables.iteritems():
print k, ": dimensions -" + str(v.dimensions) + " shape - " + str(v.shape)
print "Dimensions: \n"
print file.dimensions
print print_colors.HEADER("#" * len(s))
return file
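# Hedged usage sketch (host and path reuse the docstring example above):
#   nc = get_remote_data('pgierz@rayo3:/csys/paleo2/pgierz/GR30s.nc',
#                        time=True, info=True, copy_to_local=True)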
|
AWI-Paleodyn/Python_Helpers
|
custom_io/get_remote_data.py
|
Python
|
gpl-2.0
| 11,352
|
[
"NetCDF"
] |
a70006951ea33a9bf002c73ed550d5bc095953d7bbeea98a9b3e838931e3bf20
|
'''
@author Ivehui
@time 2016/05/18
@function copy caffenet model to other net
'''
import caffe
from caffe import layers as L
from caffe.proto import caffe_pb2
import parameters as pms
import random
weight_param = dict(lr_mult=1, decay_mult=1)
bias_param = dict(lr_mult=1, decay_mult=0)
learned_param = [weight_param, bias_param]
frozen_param = [dict(lr_mult=0)] * 2
def conv_relu(bottom, ks, nout, stride=1, pad=0, group=1,
param=learned_param,
weight_filler=dict(type='gaussian', std=0.01),
bias_filler=dict(type='constant', value=0)):
conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
num_output=nout, pad=pad, group=group,
param=param, weight_filler=weight_filler,
bias_filler=bias_filler)
return conv, L.ReLU(conv, in_place=True)
def fc_relu(bottom, nout, param=learned_param,
weight_filler=dict(type='gaussian', std=0.005),
bias_filler=dict(type='constant', value=1)):
fc = L.InnerProduct(bottom, num_output=nout, param=param,
weight_filler=weight_filler,
bias_filler=bias_filler)
return fc, L.ReLU(fc, in_place=True)
def overall_net(batch_size, channels, height, width, action_size, net_type):
# param = learned_param
n=caffe.NetSpec()
# action
n.frames = L.Input(shape=dict(dim=[batch_size, channels, height, width]))
# Image feature
if net_type == 'action':
param = learned_param
else:
param = frozen_param
n.conv1, n.relu1 = conv_relu(n.frames, 8, 32, stride=4, param=param)
n.conv2, n.relu2 = conv_relu(n.relu1, 4, 64, stride=2, param=param)
n.conv3, n.relu3 = conv_relu(n.relu2, 3, 64, stride=1, param=param)
n.fc4, n.relu4 = fc_relu(n.relu3, 512, param=param)
n.value_q = L.InnerProduct(n.relu4, num_output=action_size, param=param,
weight_filler=dict(type='gaussian', std=0.005),
bias_filler=dict(type='constant', value=1))
if net_type == 'test':
return n.to_proto()
n.filter = L.Input(shape=dict(dim=[batch_size, action_size]))
# operation 0: PROD
n.filtered_value_q = L.Eltwise(n.value_q, n.filter, operation=0)
n.target = L.Input(shape=dict(dim=[batch_size, action_size]))
n.loss = L.EuclideanLoss(n.filtered_value_q, n.target)
return n.to_proto()
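# Sketch of the filtered Q-learning loss above (sizes are illustrative): with
# action_size=4 and filter=[0,1,0,0], the PROD Eltwise zeroes every Q-value
# except the taken action's, so the Euclidean loss only penalises that entry
# against its target.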
### define solver
def solver(train_net_path, net_type, test_net_path=None, base_lr=0.001):
s = caffe_pb2.SolverParameter()
# Specify locations of the train and (maybe) test networks.
s.train_net = train_net_path
if test_net_path is not None:
s.test_net.append(test_net_path)
s.test_interval = pms.maxIter # Test only after maxIter iterations, i.e., once at the end of training.
s.test_iter.append(1) # Test on 1 batches each time we test.
# The number of iterations over which to average the gradient.
# Effectively boosts the training batch size by the given factor, without
# affecting memory utilization.
s.iter_size = 1
s.max_iter = pms.maxIter # # of times to update the net (training iterations)
# Solve using the stochastic gradient descent (SGD) algorithm.
# Other choices include 'Adam' and 'RMSProp'.
s.type = 'SGD'
# Set the initial learning rate for SGD.
s.base_lr = base_lr
# Set `lr_policy` to define how the learning rate changes during training.
# Here, we 'step' the learning rate by multiplying it by a factor `gamma`
# every `stepsize` iterations.
s.lr_policy = 'fixed'
# Set other SGD hyperparameters. Setting a non-zero `momentum` takes a
# weighted average of the current gradient and previous gradients to make
# learning more stable. L2 weight decay regularizes learning, to help prevent
# the model from overfitting.
s.momentum = 0.95
s.weight_decay = 5e-4
# Display the current training loss and accuracy every 50,000 iterations.
s.display = 50000
# Snapshots are files used to store networks we've trained.
# Here, we'll snapshot every 100K iterations.
s.snapshot = 100000
s.snapshot_prefix = 'models/'+net_type
# Train on the GPU. Using the CPU to train large networks is very slow.
s.solver_mode = caffe_pb2.SolverParameter.GPU
# Return the solver parameters; the caller serializes them to a file.
return s
# Write the net definitions and the solver configuration to prototxt files.
with open(pms.actionTestNetPath, 'w') as f:
f.write('name: "action test net"\n')
f.write(str(overall_net(pms.batchSize, pms.frameChannel, pms.frameHeight,
pms.frameWidth, pms.actionSize, 'test')))
with open(pms.actionTrainNetPath, 'w') as f:
f.write('name: "action train net"\n')
f.write(str(overall_net(pms.batchSize, pms.frameChannel, pms.frameHeight,
pms.frameWidth, pms.actionSize, 'action')))
with open(pms.actionSolverPath, 'w') as f:
f.write(str(solver(pms.actionTrainNetPath, 'action',
test_net_path=pms.actionTestNetPath,
base_lr=pms.baseLr)))
# create new net
random.seed(0)
newNet = caffe.Net(pms.actionTrainNetPath, caffe.TEST)
# save the model; weights of the last 3 layers are randomly initialized
newNet.save(pms.newModel)
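# --- Hedged usage sketch (not part of the original script) ---
# The saved prototxt would typically be combined with pretrained CaffeNet
# weights; 'caffenet.caffemodel' below is a hypothetical path. Net.copy_from
# matches layers by name, so shared layers inherit weights while the
# remaining layers keep their random initialization.
#
#   net = caffe.Net(pms.actionTrainNetPath, caffe.TEST)
#   net.copy_from('caffenet.caffemodel')
#   net.save(pms.newModel)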
|
Ivehui/DQN
|
creatNet.py
|
Python
|
mit
| 5,380
|
[
"Gaussian"
] |
c19940d3da7265e107943360b861fa9e9202b9376926d2ca430d681747142ac9
|
#!/usr/bin/env python
"""
Do the initial installation and configuration of a DIRAC component
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DIRAC import exit as DIRACexit
from DIRAC import gConfig, gLogger, S_OK
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
from DIRAC.Core.Utilities.Extensions import extensionsByPriority
from DIRAC.FrameworkSystem.Utilities import MonitoringUtilities
__RCSID__ = "$Id$"
overwrite = False
module = ""
specialOptions = {}
def setOverwrite(opVal):
global overwrite
overwrite = True
return S_OK()
def setModule(optVal):
global specialOptions, module
specialOptions["Module"] = optVal
module = optVal
return S_OK()
def setSpecialOption(optVal):
global specialOptions
option, value = optVal.split("=")
specialOptions[option] = value
return S_OK()
@Script()
def main():
global overwrite
global specialOptions
global module
from DIRAC.FrameworkSystem.Client.ComponentInstaller import gComponentInstaller
gComponentInstaller.exitOnError = True
Script.registerSwitch("w", "overwrite", "Overwrite the configuration in the global CS", setOverwrite)
Script.registerSwitch("m:", "module=", "Python module name for the component code", setModule)
Script.registerSwitch("p:", "parameter=", "Special component option ", setSpecialOption)
# Registering arguments will automatically add their description to the help menu
Script.registerArgument(
(
"System/Component: Full component name (ie: WorkloadManagement/Matcher)",
"System: Name of the DIRAC system (ie: WorkloadManagement)",
)
)
Script.registerArgument(" Component: Name of the DIRAC service (ie: Matcher)", mandatory=False)
Script.parseCommandLine()
args = Script.getPositionalArgs()
if len(args) == 1:
args = args[0].split("/")
if len(args) != 2:
Script.showHelp(exitCode=1)
system = args[0]
component = args[1]
compOrMod = module or component
result = gComponentInstaller.getSoftwareComponents(extensionsByPriority())
if not result["OK"]:
gLogger.error(result["Message"])
DIRACexit(1)
availableComponents = result["Value"]
for compType in availableComponents:
if system in availableComponents[compType] and compOrMod in availableComponents[compType][system]:
cType = compType[:-1].lower()
break
else:
gLogger.error("Component %s/%s is not available for installation" % (system, component))
DIRACexit(1)
if module:
result = gComponentInstaller.addDefaultOptionsToCS(
gConfig, cType, system, module, extensionsByPriority(), overwrite=overwrite
)
result = gComponentInstaller.addDefaultOptionsToCS(
gConfig,
cType,
system,
component,
extensionsByPriority(),
specialOptions=specialOptions,
overwrite=overwrite,
addDefaultOptions=False,
)
else:
result = gComponentInstaller.addDefaultOptionsToCS(
gConfig,
cType,
system,
component,
extensionsByPriority(),
specialOptions=specialOptions,
overwrite=overwrite,
)
if not result["OK"]:
gLogger.error(result["Message"])
DIRACexit(1)
result = gComponentInstaller.installComponent(cType, system, component, extensionsByPriority(), module)
if not result["OK"]:
gLogger.error(result["Message"])
DIRACexit(1)
gLogger.notice("Successfully installed component %s in %s system, now setting it up" % (component, system))
result = gComponentInstaller.setupComponent(cType, system, component, extensionsByPriority(), module)
if not result["OK"]:
gLogger.error(result["Message"])
DIRACexit(1)
if component == "ComponentMonitoring":
result = MonitoringUtilities.monitorInstallation("DB", system, "InstalledComponentsDB")
if not result["OK"]:
gLogger.error(result["Message"])
DIRACexit(1)
result = MonitoringUtilities.monitorInstallation(cType, system, component, module)
if not result["OK"]:
gLogger.error(result["Message"])
DIRACexit(1)
gLogger.notice("Successfully completed the installation of %s/%s" % (system, component))
DIRACexit()
if __name__ == "__main__":
main()
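# Hedged usage sketch (component names are hypothetical; the console name is
# assumed to follow DIRAC's script-naming convention):
#   dirac-install-component WorkloadManagement/Matcher
#   dirac-install-component WorkloadManagement Matcher -m MatcherModule -p MaxThreads=8
# Both spellings of the positional arguments are accepted, as registered above.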
|
ic-hep/DIRAC
|
src/DIRAC/FrameworkSystem/scripts/dirac_install_component.py
|
Python
|
gpl-3.0
| 4,598
|
[
"DIRAC"
] |
b2cf98f91b4b8902af774f035aaef7c3f3dd31b9eb1686dfba4d1a72ba8bd1af
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
************************************
**Storage** - Storage Object
************************************
This is the base class for all storage objects.
All derived classes implement at least the following methods:
* `decompose()`
Send all particles to their corresponding cell/cpu
* `addParticle(pid, pos)`:
Add a particle to the storage
* `removeParticle(pid)`:
Remove a particle with id number *pid* from the storage.
>>> system.storage.removeParticle(4)
There is an example in the *examples* folder.
* `getParticle(pid)`:
Get a particle object.
This can be used to get specific particle information:
>>> particle = system.storage.getParticle(15)
>>> print "Particle ID is : ", particle.id
>>> print "Particle position is : ", particle.pos
You cannot use this particle object to modify particle data.
You have to use the modifyParticle command for that (see below).
* `addAdrATParticle(pid, pos, last_pos)`:
Add an AdResS Particle to the storage
* `setFixedTuplesAdress(fixed_tuple_list)`:
* `addParticles(particleList, *properties)`:
This routine adds particles with certain properties to the storage.
:param particleList: list of particles (and properties) to be added
:param properties: property strings
Each particle in the list must be itself a list where each entry corresponds
to the property specified in properties.
Example:
>>> addParticles([[id, pos, type, ... ], ...], 'id', 'pos', 'type', ...)
* `modifyParticle(pid, property, value, decompose='yes')`:
This routine allows to modify any properties of an already existing particle.
Example:
>>> modifyParticle(pid, 'pos', Real3D(new_x, new_y, new_z))
* `removeAllParticles()`:
This routine removes all particles from the storage.
* `system`:
The property 'system' returns the System object of the storage.
Examples:
>>> s.storage.addParticles([[1, espresso.Real3D(3,3,3)], [2, espresso.Real3D(4,4,4)]],'id','pos')
>>> s.storage.decompose()
>>> s.storage.modifyParticle(15, 'pos', Real3D(new_x, new_y, new_z))
"""
from espresso import pmi
from espresso import Int3D
import mpi4py.MPI as MPI
import logging
from espresso import toReal3DFromVector, ParticleLocal, Particle
from espresso.Exceptions import ParticleDoesNotExistHere
class StorageLocal(object):
logger = logging.getLogger("Storage")
def particleExists(self, pid):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
if self.cxxclass.lookupRealParticle(self, pid):
return True
else:
return False
def addParticle(self, pid, pos):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.addParticle(self, pid, toReal3DFromVector(pos))
def removeParticle(self, pid):
'remove a particle; returns 1 if the particle existed on this worker, 0 otherwise'
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.removeParticle(self, pid)
def removeAllParticles(self):
'remove all particles'
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.removeAllParticles(self)
def addAdrATParticle(self, pid, *args):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.addAdrATParticle(
self, pid, toReal3DFromVector(*args)
)
def setFixedTuplesAdress(self, fixedtuples):
if pmi.workerIsActive():
self.cxxclass.setFixedTuplesAdress(self, fixedtuples)
def getParticle(self, pid):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return ParticleLocal(pid, self)
def addParticles(self, particleList, *properties):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
index_id = -1
index_pos = -1
index_v = -1
index_f = -1
index_q = -1
index_radius = -1
index_fradius = -1
index_vradius = -1
index_type = -1
index_mass = -1
index_adrAT = -1 # AdResS AT particle if 1
index_lambda_adr = -1
index_lambda_adrd = -1
last_pos = toReal3DFromVector([-99,-99,-99])
if not properties: # *properties is an empty tuple when no names are given
# default properties = (id, pos)
index_id = 0
index_pos = 1
nindex = 2
else:
nindex = 0
for val in properties:
if val.lower() == "id": index_id = nindex
elif val.lower() == "pos": index_pos = nindex
elif val.lower() == "type": index_type = nindex
elif val.lower() == "mass": index_mass = nindex
elif val.lower() == "v": index_v = nindex
elif val.lower() == "f": index_f = nindex
elif val.lower() == "q": index_q = nindex
elif val.lower() == "radius": index_radius = nindex
elif val.lower() == "fradius": index_fradius = nindex
elif val.lower() == "vradius": index_vradius = nindex
elif val.lower() == "adrat": index_adrAT = nindex
elif val.lower() == "lambda_adr": index_lambda_adr = nindex
elif val.lower() == "lambda_adrd": index_lambda_adrd = nindex
else: raise SyntaxError("unknown particle property: %s"%val)
nindex += 1
if index_id < 0 : raise SyntaxError("particle property id is mandatory")
if index_pos < 0 : raise SyntaxError("particle property pos is mandatory")
# we should check at the begin whether all the particles do not exist.
doWeAddParticles = True
for particle in particleList:
pid = particle[index_id]
if( self.particleExists(pid) ):
doWeAddParticles = False
print "WARNING: Particle ", pid, " already exists"
if(not doWeAddParticles):
print 'WARNING: Some particles already exist. The list of particles was not added.'
return
for particle in particleList:
# verify that each particle has enough entries, avoids index errors
if len(particle) != nindex:
raise SyntaxError("particle has %d entries, but %d expected"%(len(particle), nindex))
id = particle[index_id]
pos = particle[index_pos]
if index_adrAT >= 0:
if particle[index_adrAT] == 0:
#print "%d: addParticle %d, last_pos=pos %f, %f, %f"%(pmi._MPIcomm.rank,id,pos[0], pos[1], pos[2])
storedParticle = self.cxxclass.addParticle(self, id, pos)
last_pos = pos
else:
#print "%d: addAdrATparticle %d, pos %f, %f, %f, last_pos %f, %f, %f"%(pmi._MPIcomm.rank,id,pos[0],pos[1],pos[2],last_pos[0], last_pos[1], last_pos[2])
storedParticle = self.cxxclass.addAdrATParticle(self, id, pos, last_pos)
else:
#print "%d: addParticle %d, last_pos=pos %f, %f, %f"%(pmi._MPIcomm.rank,id,pos[0], pos[1], pos[2])
storedParticle = self.cxxclass.addParticle(self, id, pos)
if storedParticle is not None:
self.logger.debug("Processor %d stores particle id = %d"%(pmi.rank, id))
self.logger.debug("particle property indexes: id=%i pos=%i type=%i mass=%i v=%i f=%i q=%i radius=%i lambda_adr=%i lambda_adrd=%i"%(index_id,index_pos,index_type,index_mass,index_v,index_f,index_q,index_radius,index_lambda_adr,index_lambda_adrd))
# only the owner processor writes other properties
if index_v >= 0:
storedParticle.v = particle[index_v]
if index_f >= 0:
storedParticle.f = particle[index_f]
if index_q >= 0:
storedParticle.q = particle[index_q]
if index_radius >= 0:
storedParticle.radius = particle[index_radius]
if index_fradius >= 0:
storedParticle.fradius = particle[index_fradius]
if index_vradius >= 0:
storedParticle.vradius = particle[index_vradius]
if index_type >= 0:
storedParticle.type = particle[index_type]
if index_mass >= 0:
storedParticle.mass = particle[index_mass]
if index_lambda_adr >= 0:
storedParticle.lambda_adr = particle[index_lambda_adr]
if index_lambda_adrd >= 0:
storedParticle.lambda_adrd = particle[index_lambda_adrd]
def modifyParticle(self, pid, property, value):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
if (self.particleExists(pid)):
#try:
#if not particle.isGhost:
particle = self.getParticle(pid)
self.logger.info("particle pid=%i rank=%i" % (pid, pmi.rank))
if property.lower() == "id" : raise "particles pid cannot be modified !"
elif property.lower() == "pos" : # alway assume unfolded coordinates
particle.pos = value
particle.imageBox = Int3D(0, 0, 0)
elif property.lower() == "img" : particle.imageBox = value
elif property.lower() == "type" : particle.type = value
elif property.lower() == "mass" : particle.mass = value
elif property.lower() == "v" : particle.v = value
elif property.lower() == "f" : particle.f = value
elif property.lower() == "q" : particle.q = value
elif property.lower() == "radius" : particle.radius = value
elif property.lower() == "fradius" : particle.fradius = value
elif property.lower() == "vradius" : particle.vradius = value
elif property.lower() == "lambda_adr" : particle.lambda_adr = value
elif property.lower() == "lambda_adrd" : particle.lambda_adrd = value
else: raise SyntaxError( 'unknown particle property: %s' % property) # UnknownParticleProperty exception is not implemented
#except ParticleDoesNotExistHere:
# self.logger.debug("ParticleDoesNotExistHere pid=% rank=%i" % (pid, pmi.rank))
# pass
#else:
# print "WARNING: Particle ", pid, " does not exist and was not modified"
def savePositions(self, idList):
'it is the responsibility of the user to save only existing particles, otherwise nothing will be saved'
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
for pid in idList:
self.cxxclass.savePosition(self, pid)
def restorePositions(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.restorePositions(self)
def clearSavedPositions(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.clearSavedPositions(self)
if pmi.isController:
class Storage(object):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
pmicall = [ "decompose", "addParticles", "setFixedTuplesAdress", "removeAllParticles"],
pmiproperty = [ "system" ]
)
def particleExists(self, pid):
return pmi.reduce(pmi.BOR, self.pmiobject, 'particleExists', pid)
def addParticle(self, pid, pos, checkexist=True):
if checkexist:
if( self.particleExists(pid) ):
print "WARNING: Particle ", pid, " already exists. Therefore it was not added."
return None
else:
pmi.call(self.pmiobject, 'addParticle', pid, pos)
return Particle(pid, self)
else:
pmi.call(self.pmiobject, 'addParticle', pid, pos)
def removeParticle(self, pid):
n = pmi.reduce(pmi.SUM, self.pmiobject, 'removeParticle', pid)
if n == 0:
print "WARNING: Particle ", pid, " does not exist"
elif n > 1:
print "ERROR: Particle ",pid, " did exist more than once !"
print " This should never happen !!!"
def modifyParticle(self, pid, property, value):
if (self.particleExists(pid)):
pmi.call(self.pmiobject, 'modifyParticle', pid, property, value)
else:
print "WARNING: Particle ", pid, " does not exist and was not modified"
def addAdrATParticle(self, pid, *args):
if( self.particleExists(pid) ):
print "WARNING: Particle ", pid, " already exists. Therefore it was not added."
return None
else:
pmi.call(self.pmiobject, 'addAdrATParticle', pid, *args)
return Particle(pid, self)
#def setFixedTuples(self, tuples):
# pmi.call(self.pmiobject, 'setFixedTuples', tuples)
def getParticle(self, pid):
if( self.particleExists(pid) ):
return Particle(pid, self)
else:
print "WARNING: Particle ", pid, " does not exist"
return None
def savePositions(self, idList):
pmi.call(self.pmiobject, 'clearSavedPositions')
pmi.call(self.pmiobject, 'savePositions', idList)
def restorePositions(self):
pmi.call(self.pmiobject, 'restorePositions')
def clearSavedPositions(self):
pmi.call(self.pmiobject, 'clearSavedPositions')
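# Hedged usage sketch (assumes a configured system object `s` with this
# storage attached, mirroring the examples in the module docstring):
#   s.storage.addParticle(1, Real3D(1.0, 1.0, 1.0))
#   s.storage.savePositions([1])  # snapshot selected particles
#   s.storage.modifyParticle(1, 'pos', Real3D(2.0, 2.0, 2.0))
#   s.storage.restorePositions()  # roll back to the saved positions
#   s.storage.decompose()         # redistribute particles across CPUs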
|
BackupTheBerlios/espressopp
|
src/storage/Storage.py
|
Python
|
gpl-3.0
| 15,816
|
[
"ESPResSo"
] |
2e33ab93d7086ab9c3a1be2ca6d0dc13272a0765509687ba0ad41d01affabecc
|
#!/usr/bin/env python
"""
Remove the given file replica or a list of file replicas from the File Catalog
This script should be used with great care as it may leave dark data in the storage!
Use dirac-dms-remove-replicas instead
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
import os
from DIRAC import exit as dexit
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
from DIRAC import gLogger
@Script()
def main():
# Registering arguments will automatically add their description to the help menu
Script.registerArgument(("LocalFile: Path to local file containing LFNs", "LFN: Logical File Names"))
Script.registerArgument(" SE: Storage element")
Script.parseCommandLine()
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
allowUsers = Operations().getValue("DataManagement/AllowUserReplicaManagement", False)
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
res = getProxyInfo()
if not res["OK"]:
gLogger.fatal("Can't get proxy info", res["Message"])
dexit(1)
properties = res["Value"].get("groupProperties", [])
if not allowUsers:
if "FileCatalogManagement" not in properties:
gLogger.error("You need to use a proxy from a group with FileCatalogManagement")
dexit(5)
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
dm = DataManager()
# parseCommandLine shows help when mandatory arguments are missing or an argument is incorrect
inputFileName, storageElementName = Script.getPositionalArgs(group=True)
if os.path.exists(inputFileName):
with open(inputFileName, "r") as inputFile:
lfns = [lfn.strip() for lfn in inputFile.read().splitlines()]
else:
lfns = [inputFileName]
res = dm.removeReplicaFromCatalog(storageElementName, lfns)
if not res["OK"]:
print(res["Message"])
dexit(1)
for lfn in sorted(res["Value"]["Failed"]):
message = res["Value"]["Failed"][lfn]
print("Failed to remove %s replica of %s: %s" % (storageElementName, lfn, message))
print("Successfully remove %d catalog replicas at %s" % (len(res["Value"]["Successful"]), storageElementName))
if __name__ == "__main__":
main()
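# Hedged usage sketch (the LFN file and SE name are hypothetical):
#   dirac-dms-remove-catalog-replicas ./lfns.txt SOME-DISK-SE
# The first argument may be a local file with one LFN per line, or a single LFN.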
|
ic-hep/DIRAC
|
src/DIRAC/DataManagementSystem/scripts/dirac_dms_remove_catalog_replicas.py
|
Python
|
gpl-3.0
| 2,410
|
[
"DIRAC"
] |
9b0efd5220630076549242db3eec487c56946a5886f063e085cd95b72ce0d137
|
r"""
This module is a VTK Web server application.
The following command line illustrates how to use it::
$ vtkpython .../vtk_web_phylogenetic_tree.py --tree /.../data.phy --table /.../data.csv
Set of expected arguments:
--tree /file/path/to/anolis.phy
--table /file/path/to/anolisDataAppended.csv
Sample data files can be found in the following git repository:
http://visomics.org/visomicsdata.git
- Data/treeData/anolis.phy
- Data/treeData/anolisDataAppended.csv
Any VTK Web executable script comes with a set of standard arguments that
can be overridden if need be::
--host localhost
Interface on which the HTTP server will listen.
--port 8080
Port number on which the HTTP server will listen.
--content /path-to-web-content/
Directory that you want to serve as static web content.
By default, this variable is empty, which means that we rely on another
server to deliver the static content and the current process focuses only
on the WebSocket connectivity of clients.
--authKey vtk-secret
Secret key that should be provided by the client to allow it to make any
WebSocket communication. If none is given, the client will assume that the
server expects "vtk-secret" as the secret key.
"""
# import to process args
import sys
import os
# import vtk modules.
import vtk
from vtk.web import server, protocols
from vtk.web import wamp as vtk_wamp
try:
import argparse
except ImportError:
# since Python 2.6 and earlier don't have argparse, we simply provide
# the source for the same as _argparse and we use it instead.
import _argparse as argparse
# =============================================================================
# Create custom File Opener class to handle clients requests
# =============================================================================
class _PhylogeneticTree(vtk_wamp.ServerProtocol):
# Application configuration
view = None
authKey = "vtkweb-secret"
treeFilePath = None
csvFilePath = None
def initialize(self):
# Bring used components
self.registerVtkWebProtocol(protocols.vtkWebMouseHandler())
self.registerVtkWebProtocol(protocols.vtkWebViewPort())
self.registerVtkWebProtocol(protocols.vtkWebViewPortImageDelivery())
self.registerVtkWebProtocol(protocols.vtkWebViewPortGeometryDelivery())
# Update authentication key to use
self.updateSecret(_PhylogeneticTree.authKey)
# Create default pipeline (Only once for all the session)
if not _PhylogeneticTree.view:
# read in a tree
treeReader = vtk.vtkNewickTreeReader()
treeReader.SetFileName(_PhylogeneticTree.treeFilePath)
treeReader.Update()
reader = treeReader.GetOutput()
# read in a table
tableReader = vtk.vtkDelimitedTextReader()
tableReader.SetFileName(_PhylogeneticTree.csvFilePath)
tableReader.SetHaveHeaders(True)
tableReader.DetectNumericColumnsOn()
tableReader.Update()
table = tableReader.GetOutput()
# play with the heatmap vis
treeHeatmapItem = vtk.vtkTreeHeatmapItem()
treeHeatmapItem.SetTree(reader)
treeHeatmapItem.SetTable(table)
# setup the window
view = vtk.vtkContextView()
view.GetRenderer().SetBackground(1,1,1)
view.GetRenderWindow().SetSize(800,600)
iren = view.GetInteractor()
iren.SetRenderWindow(view.GetRenderWindow())
transformItem = vtk.vtkContextTransform()
transformItem.AddItem(treeHeatmapItem)
transformItem.SetInteractive(1)
view.GetScene().AddItem(transformItem)
view.GetRenderWindow().SetMultiSamples(0)
iren.Initialize()
view.GetRenderWindow().Render()
# VTK Web application specific
_PhylogeneticTree.view = view.GetRenderWindow()
self.Application.GetObjectIdMap().SetActiveObject("VIEW", view.GetRenderWindow())
# =============================================================================
# Main: Parse args and start server
# =============================================================================
if __name__ == "__main__":
# Create argument parser
parser = argparse.ArgumentParser(description="VTK/Web Tree web-application")
# Add default arguments
server.add_arguments(parser)
# Add local arguments
parser.add_argument("--tree", help="path to phy tree file", dest="tree")
parser.add_argument("--table", help="path to csv file", dest="table")
# Extract arguments
args = parser.parse_args()
# Configure our current application
_PhylogeneticTree.authKey = args.authKey
_PhylogeneticTree.treeFilePath = args.tree
_PhylogeneticTree.csvFilePath = args.table
# Start server
server.start_webserver(options=args, protocol=_PhylogeneticTree)
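# Hedged launch sketch (file paths are hypothetical; see the module docstring):
#   vtkpython vtk_web_phylogenetic_tree.py --tree anolis.phy \
#       --table anolisDataAppended.csv --port 8080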
|
timkrentz/SunTracker
|
IMU/VTK-6.2.0/Web/Applications/PhylogeneticTree/server/vtk_web_phylogenetic_tree.py
|
Python
|
mit
| 5,415
|
[
"VTK"
] |
d41cba8676b53b9444679d425d4324885bdddbcbdbbab304e7d6c69710d14ea2
|
#!/usr/bin/env python
from numpy import *
import pylo
import visit
import sys
import os
from glob import glob
filelist = sys.argv[1:]
zipped = False
name,sep,ext = filelist[0].rpartition('.')
if ext == 'lzma':
os.system('tar -xaf '+filelist[0])
filelist = glob('*.silo')
zipped = True
#create a .visit file
filelist.sort()
filelist = [fname+'\n' for fname in filelist]
fid = open('db.visit','w')
fid.writelines(filelist)
fid.close()
#plot the database
visit.Launch()
visit.OpenDatabase('db.visit')
visit.AddPlot("Pseudocolor","/psmFpeSolver/data/phi")
visit.DrawPlots()
visit.OpenGUI()
raw_input("Press Enter to exit...")
#cleanup
os.system('rm db.visit')
if zipped: os.system('rm *.silo')
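# Hedged usage sketch: pass one or more .silo files, or a single .lzma
# tarball containing them (file names below are hypothetical):
#   ./plot_visit.py run0000.silo run0001.silo
#   ./plot_visit.py results.tar.lzma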
|
rogersce/fts
|
scripts/plot_visit.py
|
Python
|
mit
| 708
|
[
"VisIt"
] |
048378b3a317ed4dd765332a96931e2c6215e16a805f22d548639ddbd207328c
|
import os
import glob
import warnings
import logging
import re
import atexit
logger = logging.getLogger(__name__)
@atexit.register
def cleanup():
for f in glob.glob('/sys/class/tacho-motor/motor*/command'):
with open(f, 'w') as f:
f.write('stop')
for f in glob.glob('/sys/class/leds/*/trigger'):
with open(f, 'w') as f:
f.write('none')
for f in glob.glob('/sys/class/leds/*/brightness'):
with open(f, 'w') as f:
f.write('0')
class NoSuchSensorError(Exception):
def __init__(self, port, name=None):
self.port = port
self.name = name
def __str__(self):
return "No such sensor port=%d name=%s" % (self.port, self.name)
class NoSuchMotorError(Exception):
def __init__(self, port, _type):
self.port = port
self._type = _type
def __str__(self):
return "No such motor port=%s type=%s" % (self.port, self._type)
class NoSuchLibraryError(Exception):
def __init__(self, lib=""):
self.lib = lib
def __str__(self):
return "No such library %s" % self.lib
class Ev3StringType(object):
@staticmethod
def post_read(value):
return value
@staticmethod
def pre_write(value):
return value
class Ev3IntType(object):
@staticmethod
def post_read(value):
return int(value)
@staticmethod
def pre_write(value):
return str(value)
class Ev3BoolType(object):
@staticmethod
def post_read(value):
return bool(value)
@staticmethod
def pre_write(value):
return '1' if value else '0'
class Ev3OnOffType(object):
@staticmethod
def post_read(value):
return value == 'on'
@staticmethod
def pre_write(value):
if (value == 'on' or value == 'off'):
return value
else:
return 'on' if bool(value) else 'off'
class create_ev3_property(object):
def __init__(self, **kwargs):
self.kwargs = kwargs
def __call__(self, cls):
for name, args in self.kwargs.items():
def ev3_property(name, read_only=False, write_only=False, property_type=Ev3StringType):
def fget(self):
if not write_only:
return property_type.post_read(self.read_value(name))
else:
return None
def fset(self, value):
self.write_value(
name, property_type.pre_write(value))
return property(fget, None if read_only else fset)
setattr(cls, name, ev3_property(name, **args))
return cls
def get_battery_percentage():
"""
Return an int() of the percentage of battery life remaining
"""
voltage_max = None
voltage_min = None
voltage_now = None
with open('/sys/devices/platform/legoev3-battery/power_supply/legoev3-battery/uevent', 'r') as fh:
for line in fh:
if not voltage_max:
re_voltage_max = re.search(
'POWER_SUPPLY_VOLTAGE_MAX_DESIGN=(\d+)', line)
if re_voltage_max:
voltage_max = int(re_voltage_max.group(1))
continue
if not voltage_min:
re_voltage_min = re.search(
'POWER_SUPPLY_VOLTAGE_MIN_DESIGN=(\d+)', line)
if re_voltage_min:
voltage_min = int(re_voltage_min.group(1))
continue
if not voltage_now:
re_voltage_now = re.search(
'POWER_SUPPLY_VOLTAGE_NOW=(\d+)', line)
if re_voltage_now:
voltage_now = int(re_voltage_now.group(1))
if re_voltage_max and re_voltage_min and re_voltage_now:
break
if voltage_max and voltage_min and voltage_now:
# This happens with the EV3 rechargeable battery if it is fully charged
if voltage_now >= voltage_max:
return 100
# Haven't seen this scenario but it can't hurt to check for it
elif voltage_now <= voltage_min:
return 0
# voltage_now is between the min and max
else:
voltage_max -= voltage_min
voltage_now -= voltage_min
return int(voltage_now / float(voltage_max) * 100)
else:
logger.error('voltage_max %s, voltage_min %s, voltage_now %s' %
(voltage_max, voltage_min, voltage_now))
return 0
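# Hedged worked example (hypothetical uevent readings): with
# voltage_min=6000000, voltage_max=7500000 and voltage_now=6750000 the
# scaled value is (6750000-6000000)/(7500000-6000000) = 0.5, so 50 is returned.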
class Ev3Dev(object):
def __init__(self):
self.sys_path = ""
def read_value(self, name):
attr_file = os.path.join(self.sys_path, name)
if os.path.isfile(attr_file):
with open(attr_file) as f:
value = f.read().strip()
return value
else:
return None
def write_value(self, name, value):
attr_file = os.path.join(self.sys_path, name)
if os.path.isfile(attr_file):
with open(attr_file, 'w') as f:
f.write(str(value))
else:
return
@create_ev3_property(
bin_data={'read_only': True},
bin_data_format={'read_only': True},
decimals={'read_only': True},
#mode={ 'read_only': False},
fw_version={'read_only': True},
modes={'read_only': True},
name={'read_only': True},
port_name={'read_only': True},
uevent={'read_only': True},
units={'read_only': True},
value0={'read_only': True, 'property_type': Ev3IntType},
value1={'read_only': True, 'property_type': Ev3IntType},
value2={'read_only': True, 'property_type': Ev3IntType},
value3={'read_only': True, 'property_type': Ev3IntType},
value4={'read_only': True, 'property_type': Ev3IntType},
value5={'read_only': True, 'property_type': Ev3IntType},
value6={'read_only': True, 'property_type': Ev3IntType},
value7={'read_only': True, 'property_type': Ev3IntType}
)
class LegoSensor(Ev3Dev):
def __init__(self, port=-1, name=None):
Ev3Dev.__init__(self)
sensor_existing = False
if (port > 0):
self.port = port
for p in glob.glob('/sys/class/lego-sensor/sensor*/port_name'):
with open(p) as f:
value = f.read().strip()
port_len = len(str(port))
if (value[:port_len + 2] == 'in' + str(port)):
self.sys_path = os.path.dirname(p)
sensor_existing = True
break
if (len(glob.glob('/sys/class/lego-sensor/sensor*/driver_name')) > 0 and name is not None and port == -1):
for p in glob.glob('/sys/class/lego-sensor/sensor*/driver_name'):
with open(p) as f:
value = f.read().strip()
if (name in value):
self.sys_path = os.path.dirname(p)
self.port = int(self.port_name.split(':')[0][2:])
sensor_existing = True
break
if (not sensor_existing):
raise NoSuchSensorError(port, name)
self._mode = self.read_value('mode')
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, value):
if (self._mode != value):
self._mode = value
self.write_value('mode', value)
def mode_force_flush(self, value):
self._mode = value
self.write_value('mode', value)
class Enum(object):
def __init__(self, *args, **kwargs):
for arg in args:
kwargs[arg] = arg
self.enum_dict = kwargs
def __getattr__(self, name):
if (name in self.enum_dict.keys()):
return self.enum_dict[name]
else:
raise NameError("no such item %s" % name)
@create_ev3_property(
commands={'read_only': True},
command={'read_only': True, 'write_only': True},
count_per_rot={'read_only': True, 'property_type': Ev3IntType},
driver_name={'read_only': True},
duty_cycle={'read_only': True, 'property_type': Ev3IntType},
duty_cycle_sp={'read_only': False, 'property_type': Ev3IntType},
encoder_polarity={'read_only': False},
polarity_mode={'read_only': False},
port_name={'read_only': True},
position={'read_only': False, 'property_type': Ev3IntType},
position_sp={'read_only': False, 'property_type': Ev3IntType},
ramp_down_sp={'read_only': False, 'property_type': Ev3IntType},
ramp_up_sp={'read_only': False, 'property_type': Ev3IntType},
speed={'read_only': True, 'property_type': Ev3IntType},
speed_regulation={'read_only': False, 'property_type': Ev3OnOffType},
speed_sp={'read_only': False, 'property_type': Ev3IntType},
state={'read_only': True},
stop_command={'read_only': False},
stop_commands={'read_only': True},
time_sp={'read_only': False, 'property_type': Ev3IntType},
uevent={'read_only': True}
)
class Motor(Ev3Dev):
STOP_MODE = Enum(COAST='coast', BRAKE='brake', HOLD='hold')
POSITION_MODE = Enum(RELATIVE='relative', ABSOLUTE='absolute')
PORT = Enum('A', 'B', 'C', 'D')
def __init__(self, port='', _type=''):
Ev3Dev.__init__(self)
motor_existing = False
searchpath = '/sys/class/tacho-motor/motor*/'
if (port != ''):
self.port = port
for p in glob.glob(searchpath + 'port_name'):
with open(p) as f:
value = f.read().strip()
if (value.lower() == ('out' + port).lower()):
self.sys_path = os.path.dirname(p)
motor_existing = True
break
if (_type != '' and port == ''):
for p in glob.glob(searchpath + 'driver_name'):
with open(p) as f:
value = f.read().strip()
if (value.lower() == _type.lower()):
self.sys_path = os.path.dirname(p)
self.port = self.port_name[3:]
motor_existing = True
break
if (not motor_existing):
raise NoSuchMotorError(port, _type)
def stop(self):
self.write_value('command', 'stop')
def start(self):
self.write_value('command', self.mode)
def reset(self):
self.write_value('command', 'reset')
# setup functions just set up all the values, run calls start (run=1)
# these are separated so that multiple motors can be started at the same time
def setup_forever(self, speed_sp, **kwargs):
self.mode = 'run-forever'
for k in kwargs:
v = kwargs[k]
if (v is not None):
setattr(self, k, v)
speed_regulation = self.speed_regulation
if (speed_regulation):
self.speed_sp = int(speed_sp)
else:
self.duty_cycle_sp = int(speed_sp)
def run_forever(self, speed_sp, **kwargs):
self.setup_forever(speed_sp, **kwargs)
self.start()
def setup_direct(self, duty_cycle_sp, **kwargs):
self.mode = 'run-forever'
for k in kwargs:
v = kwargs[k]
if (v is not None):
setattr(self, k, v)
self.duty_cycle_sp = int(duty_cycle_sp)
def run_direct(self, duty_cycle_sp, **kwargs):
self.setup_direct(duty_cycle_sp, **kwargs)
self.start()
def setup_time_limited(self, time_sp, speed_sp, **kwargs):
self.mode = 'run-timed'
for k in kwargs:
v = kwargs[k]
if (v is not None):
setattr(self, k, v)
speed_regulation = self.speed_regulation
if (speed_regulation):
self.speed_sp = int(speed_sp)
else:
self.duty_cycle_sp = int(speed_sp)
self.time_sp = int(time_sp)
def run_time_limited(self, time_sp, speed_sp, **kwargs):
self.setup_time_limited(time_sp, speed_sp, **kwargs)
self.start()
def setup_position_limited(self, position_sp, speed_sp, absolute=True, **kwargs):
if absolute:
self.mode = 'run-to-abs-pos'
else:
self.mode = 'run-to-rel-pos'
kwargs['speed_regulation'] = True
for k in kwargs:
v = kwargs[k]
if (v is not None):
setattr(self, k, v)
self.speed_sp = int(speed_sp)
self.position_sp = int(position_sp)
def run_position_limited(self, position_sp, speed_sp, **kwargs):
self.setup_position_limited(position_sp, speed_sp, **kwargs)
self.start()
def I2CSMBusProxy(cls):
try:
from smbus import SMBus
smbus_proxied_methods = [
m for m in dir(SMBus) if (m.startswith('read') or m.startswith('write'))]
for m in smbus_proxied_methods:
def create_proxied_smb_method(method):
def proxied_smb_method(self, *args, **kwargs):
return getattr(self.b, method)(self.addr, *args, **kwargs)
return proxied_smb_method
setattr(cls, m, create_proxied_smb_method(m))
return cls
except ImportError:
warnings.warn('python-smbus binding not found!')
return cls
@I2CSMBusProxy
class I2CS(object):
def __init__(self, port, addr):
self.port = port
self.i2c_port = port + 2
self.sys_path = '/dev/i2c-%s' % self.i2c_port
if (not os.path.exists(self.sys_path)):
raise NoSuchSensorError(port)
try:
from smbus import SMBus
self.b = SMBus(self.i2c_port)
self.addr = addr
except ImportError:
raise NoSuchLibraryError('smbus')
def read_byte_array(self, reg, _len):
return [self.read_byte_data(reg + r) for r in range(_len)]
def read_byte_array_as_string(self, reg, _len):
return ''.join(chr(r) for r in self.read_byte_array(reg, _len))
class create_i2c_property(object):
def __init__(self, **kwargs):
self.kwargs = kwargs
def __call__(self, cls):
for name, reg_address_and_read_only in self.kwargs.items():
def i2c_property(reg, read_only=True):
def fget(self):
return self.read_byte_data(reg)
def fset(self, value):
return self.write_byte_data(reg, value)
return property(fget, None if read_only else fset)
if isinstance(reg_address_and_read_only, int):
prop = i2c_property(reg_address_and_read_only)
else:
prop = i2c_property(
reg_address_and_read_only[0], **reg_address_and_read_only[1])
setattr(cls, name, prop)
return cls
@create_ev3_property(
brightness={'read_only': False, 'property_type': Ev3IntType},
max_brightness={'read_only': True, 'property_type': Ev3IntType},
trigger={'read_only': False},
delay_on={'read_only': False, 'property_type': Ev3IntType},
delay_off={'read_only': False, 'property_type': Ev3IntType}
)
class LEDLight(Ev3Dev):
def __init__(self, light_path):
super(Ev3Dev, self).__init__()
self.sys_path = '/sys/class/leds/' + light_path
class LEDSide (object):
def __init__(self, left_or_right):
self.green = LEDLight('ev3-%s1:green:ev3dev' % left_or_right)
self.red = LEDLight('ev3-%s0:red:ev3dev' % left_or_right)
self._color = (0, 0)
@property
def color(self):
"""LED color (RED, GREEN), where RED and GREEN are integers
between 0 and 255."""
return self._color
@color.setter
def color(self, value):
assert len(value) == 2
assert 0 <= value[0] <= self.red.max_brightness
assert 0 <= value[1] <= self.green.max_brightness
self._color = (
self.red.brightness, self.green.brightness) = tuple(value)
def blink(self, color=(0, 0), **kwargs):
if (color != (0, 0)):
self.color = color
for index, light in enumerate((self.red, self.green)):
if (not self._color[index]):
continue
light.trigger = 'timer'
for p, v in kwargs.items():
setattr(light, p, v)
def on(self):
self.green.trigger, self.red.trigger = 'none', 'none'
self.red.brightness, self.green.brightness = self._color
def off(self):
self.green.trigger, self.red.trigger = 'none', 'none'
self.red.brightness, self.green.brightness = 0, 0
class LED(object):
class COLOR:
NONE = (0, 0)
RED = (255, 0)
GREEN = (0, 255)
YELLOW = (25, 255)
ORANGE = (120, 255)
AMBER = (255, 255)
left = LEDSide('left')
right = LEDSide('right')
@create_ev3_property(
tone={'read_only': False},
mode={'read_only': True},
volume={'read_only': False, 'property_type': Ev3IntType}
)
class Tone(Ev3Dev):
def __init__(self):
super(Ev3Dev, self).__init__()
self.sys_path = '/sys/devices/platform/snd-legoev3'
def play(self, frequency, milliseconds=1000):
self.tone = '%d %d' % (frequency, milliseconds)
def stop(self):
self.tone = '0'
class Lcd(object):
def __init__(self):
try:
from PIL import Image, ImageDraw
SCREEN_WIDTH = 178
SCREEN_HEIGHT = 128
HW_MEM_WIDTH = int((SCREEN_WIDTH + 31) / 32) * 4
SCREEN_MEM_WIDTH = int((SCREEN_WIDTH + 7) / 8)
LCD_BUFFER_LENGTH = SCREEN_MEM_WIDTH * SCREEN_HEIGHT
LCD_HW_BUFFER_LENGTH = HW_MEM_WIDTH * SCREEN_HEIGHT
self._buffer = Image.new(
"1", (HW_MEM_WIDTH * 8, SCREEN_HEIGHT), "white")
self._draw = ImageDraw.Draw(self._buffer)
except ImportError:
raise NoSuchLibraryError('PIL')
def update(self):
f = os.open('/dev/fb0', os.O_RDWR)
os.write(f, self._buffer.tobytes("raw", "1;IR"))
os.close(f)
@property
def buffer(self):
return self._buffer
@property
def draw(self):
return self._draw
def reset(self):
self._draw.rectangle(
(0, 0) + self._buffer.size, outline='white', fill='white')
class attach_ev3_keys(object):
def __init__(self, **kwargs):
self.kwargs = kwargs
def __call__(self, cls):
key_const = {}
for key_name, key_code in self.kwargs.items():
def attach_key(key_name, key_code):
def fget(self):
buf = self.polling()
return self.test_bit(key_code, buf)
return property(fget)
setattr(cls, key_name, attach_key(key_name, key_code))
key_const[key_name.upper()] = key_code
setattr(cls, 'CODE', Enum(**key_const))
return cls
import array
import fcntl
@attach_ev3_keys(
up=103,
down=108,
left=105,
right=106,
enter=28,
backspace=14
)
class Key(object):
def __init__(self):
pass
def EVIOCGKEY(self, length):
return 2 << (14 + 8 + 8) | length << (8 + 8) | ord('E') << 8 | 0x18
def test_bit(self, bit, bytes):
# bit in bytes is 1 when released and 0 when pressed
return not bool(bytes[int(bit / 8)] & 1 << bit % 8)
def polling(self):
KEY_MAX = 0x2ff
BUF_LEN = int((KEY_MAX + 7) / 8)
buf = array.array('B', [0] * BUF_LEN)
with open('/dev/input/by-path/platform-gpio-keys.0-event', 'r') as fd:
ret = fcntl.ioctl(fd, self.EVIOCGKEY(len(buf)), buf)
if (ret < 0):
return None
else:
return buf
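# Hedged usage sketch (requires an EV3 running ev3dev; the port and the
# setpoint values below are assumptions):
#   m = Motor(port=Motor.PORT.A)
#   m.run_forever(50, speed_regulation=False)  # 50% duty cycle
#   m.stop()
#   k = Key()
#   if k.enter: print 'enter pressed'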
|
MaxNoe/python-ev3
|
ev3/ev3dev.py
|
Python
|
apache-2.0
| 19,911
|
[
"Amber"
] |
e48576a95c44d4542d409793ffaef2edb0ac9173f56ce034142484ed6a3bbd93
|
import espressopp as sxx
from momo import osio, endl, flush
import mpi4py.MPI as MPI
import os
osio << "Running on %d cores ..." % MPI.COMM_WORLD.size << endl
# COLLECT CONFIG FILES FROM FOLDER
config_file_dir = './configs'
osio.cd(config_file_dir)
config_files = sorted(os.listdir('./'))
config_list = sxx.tools.convert.aseread.AseConfigList(config_files, log=osio)
osio.cd(-1)
# SETUP SYSTEMS FOR CONFIGURATIONS
systems = []
for config in config_list:
osio << config.atoms << endl
system = sxx.tools.convert.aseread.setup_sxx_system(config.atoms, config.config_file)
systems.append(system)
# SET OPTIONS
options = sxx.soap.Options()
options.configureCenters(1., [1,77,119])
options.configureRealBasis(12,9,10.)
options.configureReciprocalBasis(10,7,8.)
b1 = sxx.Real3D(2.,0,0)
b2 = sxx.Real3D(0,2.,0)
b3 = sxx.Real3D(0,0,2.)
options.configureReciprocalLattice(b1, b2, b3)
osio << options.summarizeOptions() << endl
spectra = []
for system in systems:
osio << osio.mg << system.label << endl
spectrum = sxx.soap.Spectrum(system, options)
spectrum.compute()
spectrum.saveAndClean()
spectra.append(spectrum)
pair_spectrum = sxx.soap.PairSpectrum(systems[0], systems[1], options)
pair_spectrum.compute()
osio.okquit()
for system in systems:
tmp = sxx.soap.Soap(system)
print "soap::compute"
tmp.compute()
portal = sxx.soap.Portal(system)
print "portal::initialise"
portal.initialise()
# COMPUTE
# spectrum = sxx.soap.Spectrum(system, options)
# spectrum.compute()
# spectrum.serialize('config.soap')
# SAVE & LOAD
# sxx.soap.saveSpectrum(spectrum, 'config.soap')
# spectrum = sxx.soap.loadSpectrum('config.soap')
# OVERLAP
# overlap = sxx.soap.dot(spectrum, spectrum)
L = 16
box = (L, L, L)
rc = 2 * pow(2, 1./6.)
skin = 0.3
dt = 0.001
epsilon = 1.
sigma = 0.
temperature = 1.2
bonds, angles, x, y, z, Lx, Ly, Lz = sxx.tools.convert.lammps.read('polymer_melt.lammps')
bonds, angles, x, y, z, Lx, Ly, Lz = sxx.tools.replicate(bonds, angles, x, y, z, Lx, Ly, Lz, xdim=1, ydim=1, zdim=1)
num_particles = len(x)
density = num_particles / (Lx * Ly * Lz)
box = (Lx, Ly, Lz)
system, integrator = sxx.standard_system.Default(
box=box,
rc=rc,
skin=skin,
dt=dt,
temperature=temperature)
# add particles to the system and then decompose
# do this in chunks of 1000 particles to speed it up
props = ['id', 'type', 'mass', 'pos']
new_particles = []
for i in range(num_particles):
part = [i + 1, 0, 1.0, sxx.Real3D(x[i], y[i], z[i])]
new_particles.append(part)
if i % 1000 == 0:
system.storage.addParticles(new_particles, *props)
system.storage.decompose()
new_particles = []
system.storage.addParticles(new_particles, *props)
system.storage.decompose()
# SYSTEM
system = sxx.System()
system.rng = sxx.esutil.RNG()
system.bc = sxx.bc.OrthorhombicBC(system.rng, box)
system.skin = skin
# GRID
nodeGrid = sxx.tools.decomp.nodeGrid(sxx.MPI.COMM_WORLD.size)
cellGrid = sxx.tools.decomp.cellGrid(box, nodeGrid, rc, skin)
system.storage = sxx.storage.DomainDecomposition(system, nodeGrid, cellGrid)
# INTERACTIONS
interaction = sxx.interaction.VerletListLennardJones(sxx.VerletList(system, cutoff=rc))
potLJ = sxx.interaction.LennardJones(epsilon, sigma, rc)
interaction.setPotential(type1=0, type2=0, potential=potLJ)
system.addInteraction(interaction)
# INTEGRATOR
integrator = sxx.integrator.VelocityVerlet(system)
integrator.dt = dt
thermostat = sxx.integrator.LangevinThermostat(system)
thermostat.gamma = 1.0
thermostat.temperature = temperature
integrator.addExtension(thermostat)
props = ['id', 'type', 'mass', 'pos', 'v']
vel_zero = sxx.Real3D(0.0, 0.0, 0.0)
bondlist = sxx.FixedPairList(system.storage)
pid = 1
p_type = 0
mass = 1.0
chain = []
n_chains = 1
monomers_per_chain = 10
bondlen = 1.
system.storage.decompose()
potFENE = sxx.interaction.FENE(K=30.0, r0=0.0, rMax=1.5)
interFENE = sxx.interaction.FixedPairListFENE(system, bondlist, potFENE)
system.addInteraction(interFENE)
print "dt =", integrator.dt
print "gamma =", thermostat.gamma
print "T =", thermostat.temperature
tmp = sxx.analysis.Temperature(system)
print "temp::compute", tmp.compute()
tmp = sxx.soap.Soap(system)
print "soap::compute", tmp.compute()
|
capoe/espressopp.soap
|
examples/soap/test.py
|
Python
|
gpl-3.0
| 4,413
|
[
"LAMMPS"
] |
12b5ab13c938f8a54a88151b79cd9b99f049396e432314072214d29a3ee0a286
|
"""Header value parser implementing various email-related RFC parsing rules.
The parsing methods defined in this module implement various email related
parsing rules. Principal among them is RFC 5322, which is the follow-on
to RFC 2822 and primarily a clarification of the former. It also implements
RFC 2047 encoded word decoding.
RFC 5322 goes to considerable trouble to maintain backward compatibility with
RFC 822 in the parse phase, while cleaning up the structure on the generation
phase. This parser supports correct RFC 5322 generation by tagging white space
as folding white space only when folding is allowed in the non-obsolete rule
sets. Actually, the parser is even more generous when accepting input than RFC
5322 mandates, following the spirit of Postel's Law, which RFC 5322 encourages.
Where possible deviations from the standard are annotated on the 'defects'
attribute of tokens that deviate.
The general structure of the parser follows RFC 5322, and uses its terminology
where there is a direct correspondence. Where the implementation requires a
somewhat different structure than that used by the formal grammar, new terms
that mimic the closest existing terms are used. Thus, it really helps to have
a copy of RFC 5322 handy when studying this code.
Input to the parser is a string that has already been unfolded according to
RFC 5322 rules. According to the RFC this unfolding is the very first step, and
this parser leaves the unfolding step to a higher level message parser, which
will have already detected the line breaks that need unfolding while
determining the beginning and end of each header.
The output of the parser is a TokenList object, which is a list subclass. A
TokenList is a recursive data structure. The terminal nodes of the structure
are Terminal objects, which are subclasses of str. These do not correspond
directly to terminal objects in the formal grammar, but are instead more
practical higher level combinations of true terminals.
All TokenList and Terminal objects have a 'value' attribute, which produces the
semantically meaningful value of that part of the parse subtree. The value of
all whitespace tokens (no matter how many sub-tokens they may contain) is a
single space, as per the RFC rules. This includes 'CFWS', which is herein
included in the general class of whitespace tokens. There is one exception to
the rule that whitespace tokens are collapsed into single spaces in values: in
the value of a 'bare-quoted-string' (a quoted-string with no leading or
trailing whitespace), any whitespace that appeared between the quotation marks
is preserved in the returned value. Note that in all Terminal strings quoted
pairs are turned into their unquoted values.
All TokenList and Terminal objects also have a string value, which attempts to
be a "canonical" representation of the RFC-compliant form of the substring that
produced the parsed subtree, including minimal use of quoted pair quoting.
Whitespace runs are not collapsed.
Comment tokens also have a 'content' attribute providing the string found
between the parens (including any nested comments) with whitespace preserved.
All TokenList and Terminal objects have a 'defects' attribute which is a
possibly empty list all of the defects found while creating the token. Defects
may appear on any token in the tree, and a composite list of all defects in the
subtree is available through the 'all_defects' attribute of any node. (For
Terminal nodes x.defects == x.all_defects.)
Each object in a parse tree is called a 'token', and each has a 'token_type'
attribute that gives the name from the RFC 5322 grammar that it represents.
Not all RFC 5322 nodes are produced, and there is one non-RFC 5322 node that
may be produced: 'ptext'. A 'ptext' is a string of printable ascii characters.
It is returned in place of lists of (ctext/quoted-pair) and
(qtext/quoted-pair).
XXX: provide complete list of token types.
"""
import re
import urllib # For urllib.parse.unquote
from string import hexdigits
from collections import OrderedDict
from email import _encoded_words as _ew
from email import errors
from email import utils
#
# Useful constants and functions
#
WSP = set(' \t')
CFWS_LEADER = WSP | set('(')
SPECIALS = set(r'()<>@,:;.\"[]')
ATOM_ENDS = SPECIALS | WSP
DOT_ATOM_ENDS = ATOM_ENDS - set('.')
# '.', '"', and '(' do not end phrases in order to support obs-phrase
PHRASE_ENDS = SPECIALS - set('."(')
TSPECIALS = (SPECIALS | set('/?=')) - set('.')
TOKEN_ENDS = TSPECIALS | WSP
ASPECIALS = TSPECIALS | set("*'%")
ATTRIBUTE_ENDS = ASPECIALS | WSP
EXTENDED_ATTRIBUTE_ENDS = ATTRIBUTE_ENDS - set('%')
def quote_string(value):
return '"'+str(value).replace('\\', '\\\\').replace('"', r'\"')+'"'
#
# Accumulator for header folding
#
class _Folded:
def __init__(self, maxlen, policy):
self.maxlen = maxlen
self.policy = policy
self.lastlen = 0
self.stickyspace = None
self.firstline = True
self.done = []
self.current = []
def newline(self):
self.done.extend(self.current)
self.done.append(self.policy.linesep)
self.current.clear()
self.lastlen = 0
def finalize(self):
if self.current:
self.newline()
def __str__(self):
return ''.join(self.done)
def append(self, stoken):
self.current.append(stoken)
def append_if_fits(self, token, stoken=None):
if stoken is None:
stoken = str(token)
l = len(stoken)
if self.stickyspace is not None:
stickyspace_len = len(self.stickyspace)
if self.lastlen + stickyspace_len + l <= self.maxlen:
self.current.append(self.stickyspace)
self.lastlen += stickyspace_len
self.current.append(stoken)
self.lastlen += l
self.stickyspace = None
self.firstline = False
return True
if token.has_fws:
ws = token.pop_leading_fws()
if ws is not None:
self.stickyspace += str(ws)
stickyspace_len += len(ws)
token._fold(self)
return True
if stickyspace_len and l + 1 <= self.maxlen:
margin = self.maxlen - l
if 0 < margin < stickyspace_len:
trim = stickyspace_len - margin
self.current.append(self.stickyspace[:trim])
self.stickyspace = self.stickyspace[trim:]
stickyspace_len = trim
self.newline()
self.current.append(self.stickyspace)
self.current.append(stoken)
self.lastlen = l + stickyspace_len
self.stickyspace = None
self.firstline = False
return True
if not self.firstline:
self.newline()
self.current.append(self.stickyspace)
self.current.append(stoken)
self.stickyspace = None
self.firstline = False
return True
if self.lastlen + l <= self.maxlen:
self.current.append(stoken)
self.lastlen += l
return True
if l < self.maxlen:
self.newline()
self.current.append(stoken)
self.lastlen = l
return True
return False
#
# TokenList and its subclasses
#
class TokenList(list):
token_type = None
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self.defects = []
def __str__(self):
return ''.join(str(x) for x in self)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__,
super().__repr__())
@property
def value(self):
return ''.join(x.value for x in self if x.value)
@property
def all_defects(self):
return sum((x.all_defects for x in self), self.defects)
#
# Folding API
#
# parts():
#
# return a list of objects that constitute the "higher level syntactic
# objects" specified by the RFC as the best places to fold a header line.
# The returned objects must include leading folding white space, even if
# this means mutating the underlying parse tree of the object. Each object
# is only responsible for returning *its* parts, and should not drill down
# to any lower level except as required to meet the leading folding white
# space constraint.
#
# _fold(folded):
#
# folded: the result accumulator. This is an instance of _Folded.
# (XXX: I haven't finished factoring this out yet, the folding code
# pretty much uses this as a state object.) When the folded.current
# contains as much text as will fit, the _fold method should call
# folded.newline.
# folded.lastlen: the current length of the test stored in folded.current.
# folded.maxlen: The maximum number of characters that may appear on a
# folded line. Differs from the policy setting in that "no limit" is
# represented by +inf, which means it can be used in the trivially
# logical fashion in comparisons.
#
# Currently no subclasses implement parts, and I think this will remain
# true. A subclass only needs to implement _fold when the generic version
# isn't sufficient. _fold will need to be implemented primarily when it is
# possible for encoded words to appear in the specialized token-list, since
# there is no generic algorithm that can know where exactly the encoded
# words are allowed. A _fold implementation is responsible for filling
# lines in the same general way that the top level _fold does. It may, and
# should, call the _fold method of sub-objects in a similar fashion to that
# of the top level _fold.
#
# XXX: I'm hoping it will be possible to factor the existing code further
# to reduce redundancy and make the logic clearer.
@property
def parts(self):
klass = self.__class__
this = []
for token in self:
if token.startswith_fws():
if this:
yield this[0] if len(this)==1 else klass(this)
this.clear()
end_ws = token.pop_trailing_ws()
this.append(token)
if end_ws:
yield klass(this)
this = [end_ws]
if this:
yield this[0] if len(this)==1 else klass(this)
def startswith_fws(self):
return self[0].startswith_fws()
def pop_leading_fws(self):
if self[0].token_type == 'fws':
return self.pop(0)
return self[0].pop_leading_fws()
def pop_trailing_ws(self):
if self[-1].token_type == 'cfws':
return self.pop(-1)
return self[-1].pop_trailing_ws()
@property
def has_fws(self):
for part in self:
if part.has_fws:
return True
return False
def has_leading_comment(self):
return self[0].has_leading_comment()
@property
def comments(self):
comments = []
for token in self:
comments.extend(token.comments)
return comments
def fold(self, *, policy):
# max_line_length 0/None means no limit, ie: infinitely long.
maxlen = policy.max_line_length or float("+inf")
folded = _Folded(maxlen, policy)
self._fold(folded)
folded.finalize()
return str(folded)
def as_encoded_word(self, charset):
# This works only for things returned by 'parts', which include
# the leading fws, if any, that should be used.
res = []
ws = self.pop_leading_fws()
if ws:
res.append(ws)
trailer = self.pop(-1) if self[-1].token_type=='fws' else ''
res.append(_ew.encode(str(self), charset))
res.append(trailer)
return ''.join(res)
def cte_encode(self, charset, policy):
res = []
for part in self:
res.append(part.cte_encode(charset, policy))
return ''.join(res)
def _fold(self, folded):
for part in self.parts:
tstr = str(part)
tlen = len(tstr)
try:
str(part).encode('us-ascii')
except UnicodeEncodeError:
if any(isinstance(x, errors.UndecodableBytesDefect)
for x in part.all_defects):
charset = 'unknown-8bit'
else:
# XXX: this should be a policy setting
charset = 'utf-8'
tstr = part.cte_encode(charset, folded.policy)
tlen = len(tstr)
if folded.append_if_fits(part, tstr):
continue
# Peel off the leading whitespace if any and make it sticky, to
# avoid infinite recursion.
ws = part.pop_leading_fws()
if ws is not None:
# Peel off the leading whitespace and make it sticky, to
# avoid infinite recursion.
folded.stickyspace = str(part.pop(0))
if folded.append_if_fits(part):
continue
if part.has_fws:
part._fold(folded)
continue
# There are no fold points in this one; it is too long for a single
# line and can't be split...we just have to put it on its own line.
folded.append(tstr)
folded.newline()
    def pprint(self, indent=''):
        print('\n'.join(self._pp(indent=indent)))
    def ppstr(self, indent=''):
        return '\n'.join(self._pp(indent=indent))
def _pp(self, indent=''):
yield '{}{}/{}('.format(
indent,
self.__class__.__name__,
self.token_type)
for token in self:
if not hasattr(token, '_pp'):
yield (indent + ' !! invalid element in token '
'list: {!r}'.format(token))
else:
yield from token._pp(indent+' ')
if self.defects:
extra = ' Defects: {}'.format(self.defects)
else:
extra = ''
yield '{}){}'.format(indent, extra)
class WhiteSpaceTokenList(TokenList):
@property
def value(self):
return ' '
@property
def comments(self):
return [x.content for x in self if x.token_type=='comment']
class UnstructuredTokenList(TokenList):
token_type = 'unstructured'
def _fold(self, folded):
last_ew = None
for part in self.parts:
tstr = str(part)
is_ew = False
try:
str(part).encode('us-ascii')
except UnicodeEncodeError:
if any(isinstance(x, errors.UndecodableBytesDefect)
for x in part.all_defects):
charset = 'unknown-8bit'
else:
charset = 'utf-8'
if last_ew is not None:
# We've already done an EW, combine this one with it
# if there's room.
chunk = get_unstructured(
''.join(folded.current[last_ew:]+[tstr])).as_encoded_word(charset)
oldlastlen = sum(len(x) for x in folded.current[:last_ew])
schunk = str(chunk)
lchunk = len(schunk)
if oldlastlen + lchunk <= folded.maxlen:
del folded.current[last_ew:]
folded.append(schunk)
folded.lastlen = oldlastlen + lchunk
continue
tstr = part.as_encoded_word(charset)
is_ew = True
if folded.append_if_fits(part, tstr):
if is_ew:
last_ew = len(folded.current) - 1
continue
            if is_ew or last_ew is not None:
# It's too big to fit on the line, but since we've
# got encoded words we can use encoded word folding.
part._fold_as_ew(folded)
continue
# Peel off the leading whitespace if any and make it sticky, to
# avoid infinite recursion.
ws = part.pop_leading_fws()
if ws is not None:
folded.stickyspace = str(ws)
if folded.append_if_fits(part):
continue
if part.has_fws:
                part._fold(folded)
continue
# It can't be split...we just have to put it on its own line.
folded.append(tstr)
folded.newline()
last_ew = None
def cte_encode(self, charset, policy):
res = []
last_ew = None
for part in self:
spart = str(part)
try:
spart.encode('us-ascii')
res.append(spart)
except UnicodeEncodeError:
                if last_ew is None:
                    last_ew = len(res)
                    res.append(part.cte_encode(charset, policy))
                else:
                    tl = get_unstructured(''.join(res[last_ew:] + [spart]))
                    res[last_ew:] = [tl.as_encoded_word(charset)]
return ''.join(res)
class Phrase(TokenList):
token_type = 'phrase'
def _fold(self, folded):
# As with Unstructured, we can have pure ASCII with or without
# surrogateescape encoded bytes, or we could have unicode. But this
# case is more complicated, since we have to deal with the various
# sub-token types and how they can be composed in the face of
        # unicode-that-needs-CTE-encoding, and the fact that if a token
        # contains a comment, that comment becomes a barrier across which
        # we can't compose encoded words.
last_ew = None
for part in self.parts:
tstr = str(part)
tlen = len(tstr)
has_ew = False
try:
str(part).encode('us-ascii')
except UnicodeEncodeError:
if any(isinstance(x, errors.UndecodableBytesDefect)
for x in part.all_defects):
charset = 'unknown-8bit'
else:
charset = 'utf-8'
if last_ew is not None and not part.has_leading_comment():
# We've already done an EW, let's see if we can combine
# this one with it. The last_ew logic ensures that all we
# have at this point is atoms, no comments or quoted
# strings. So we can treat the text between the last
# encoded word and the content of this token as
# unstructured text, and things will work correctly. But
# we have to strip off any trailing comment on this token
# first, and if it is a quoted string we have to pull out
# the content (we're encoding it, so it no longer needs to
# be quoted).
if part[-1].token_type == 'cfws' and part.comments:
remainder = part.pop(-1)
else:
remainder = ''
for i, token in enumerate(part):
if token.token_type == 'bare-quoted-string':
part[i] = UnstructuredTokenList(token[:])
chunk = get_unstructured(
''.join(folded.current[last_ew:]+[tstr])).as_encoded_word(charset)
schunk = str(chunk)
lchunk = len(schunk)
                    oldlastlen = sum(len(x) for x in folded.current[:last_ew])
                    if oldlastlen + lchunk <= folded.maxlen:
del folded.current[last_ew:]
folded.append(schunk)
folded.lastlen = sum(len(x) for x in folded.current)
continue
tstr = part.as_encoded_word(charset)
tlen = len(tstr)
has_ew = True
if folded.append_if_fits(part, tstr):
if has_ew and not part.comments:
last_ew = len(folded.current) - 1
elif part.comments or part.token_type == 'quoted-string':
# If a comment is involved we can't combine EWs. And if a
# quoted string is involved, it's not worth the effort to
# try to combine them.
last_ew = None
continue
part._fold(folded)
def cte_encode(self, charset, policy):
res = []
last_ew = None
is_ew = False
for part in self:
spart = str(part)
try:
spart.encode('us-ascii')
res.append(spart)
except UnicodeEncodeError:
is_ew = True
if last_ew is None:
if not part.comments:
last_ew = len(res)
res.append(part.cte_encode(charset, policy))
elif not part.has_leading_comment():
if part[-1].token_type == 'cfws' and part.comments:
remainder = part.pop(-1)
else:
remainder = ''
for i, token in enumerate(part):
if token.token_type == 'bare-quoted-string':
part[i] = UnstructuredTokenList(token[:])
tl = get_unstructured(''.join(res[last_ew:] + [spart]))
res[last_ew:] = [tl.as_encoded_word(charset)]
if part.comments or (not is_ew and part.token_type == 'quoted-string'):
last_ew = None
return ''.join(res)
class Word(TokenList):
token_type = 'word'
class CFWSList(WhiteSpaceTokenList):
token_type = 'cfws'
def has_leading_comment(self):
return bool(self.comments)
class Atom(TokenList):
token_type = 'atom'
class Token(TokenList):
token_type = 'token'
class EncodedWord(TokenList):
token_type = 'encoded-word'
cte = None
charset = None
lang = None
@property
def encoded(self):
if self.cte is not None:
return self.cte
        return _ew.encode(str(self), self.charset)
class QuotedString(TokenList):
token_type = 'quoted-string'
@property
def content(self):
for x in self:
if x.token_type == 'bare-quoted-string':
return x.value
@property
def quoted_value(self):
res = []
for x in self:
if x.token_type == 'bare-quoted-string':
res.append(str(x))
else:
res.append(x.value)
return ''.join(res)
@property
def stripped_value(self):
for token in self:
if token.token_type == 'bare-quoted-string':
return token.value
class BareQuotedString(QuotedString):
token_type = 'bare-quoted-string'
def __str__(self):
return quote_string(''.join(str(x) for x in self))
@property
def value(self):
return ''.join(str(x) for x in self)
class Comment(WhiteSpaceTokenList):
token_type = 'comment'
def __str__(self):
return ''.join(sum([
["("],
[self.quote(x) for x in self],
[")"],
], []))
def quote(self, value):
if value.token_type == 'comment':
return str(value)
        return str(value).replace('\\', '\\\\').replace(
            '(', r'\(').replace(
            ')', r'\)')
@property
def content(self):
return ''.join(str(x) for x in self)
@property
def comments(self):
return [self.content]
class AddressList(TokenList):
token_type = 'address-list'
@property
def addresses(self):
return [x for x in self if x.token_type=='address']
@property
def mailboxes(self):
return sum((x.mailboxes
for x in self if x.token_type=='address'), [])
@property
def all_mailboxes(self):
return sum((x.all_mailboxes
for x in self if x.token_type=='address'), [])
class Address(TokenList):
token_type = 'address'
@property
def display_name(self):
if self[0].token_type == 'group':
return self[0].display_name
@property
def mailboxes(self):
if self[0].token_type == 'mailbox':
return [self[0]]
elif self[0].token_type == 'invalid-mailbox':
return []
return self[0].mailboxes
@property
def all_mailboxes(self):
if self[0].token_type == 'mailbox':
return [self[0]]
elif self[0].token_type == 'invalid-mailbox':
return [self[0]]
return self[0].all_mailboxes
class MailboxList(TokenList):
token_type = 'mailbox-list'
@property
def mailboxes(self):
return [x for x in self if x.token_type=='mailbox']
@property
def all_mailboxes(self):
return [x for x in self
if x.token_type in ('mailbox', 'invalid-mailbox')]
class GroupList(TokenList):
token_type = 'group-list'
@property
def mailboxes(self):
if not self or self[0].token_type != 'mailbox-list':
return []
return self[0].mailboxes
@property
def all_mailboxes(self):
if not self or self[0].token_type != 'mailbox-list':
return []
return self[0].all_mailboxes
class Group(TokenList):
token_type = "group"
@property
def mailboxes(self):
if self[2].token_type != 'group-list':
return []
return self[2].mailboxes
@property
def all_mailboxes(self):
if self[2].token_type != 'group-list':
return []
return self[2].all_mailboxes
@property
def display_name(self):
return self[0].display_name
class NameAddr(TokenList):
token_type = 'name-addr'
@property
def display_name(self):
if len(self) == 1:
return None
return self[0].display_name
@property
def local_part(self):
return self[-1].local_part
@property
def domain(self):
return self[-1].domain
@property
def route(self):
return self[-1].route
@property
def addr_spec(self):
return self[-1].addr_spec
class AngleAddr(TokenList):
token_type = 'angle-addr'
@property
def local_part(self):
for x in self:
if x.token_type == 'addr-spec':
return x.local_part
@property
def domain(self):
for x in self:
if x.token_type == 'addr-spec':
return x.domain
@property
def route(self):
for x in self:
if x.token_type == 'obs-route':
return x.domains
@property
def addr_spec(self):
for x in self:
if x.token_type == 'addr-spec':
return x.addr_spec
else:
return '<>'
class ObsRoute(TokenList):
token_type = 'obs-route'
@property
def domains(self):
return [x.domain for x in self if x.token_type == 'domain']
class Mailbox(TokenList):
token_type = 'mailbox'
@property
def display_name(self):
if self[0].token_type == 'name-addr':
return self[0].display_name
@property
def local_part(self):
return self[0].local_part
@property
def domain(self):
return self[0].domain
@property
def route(self):
if self[0].token_type == 'name-addr':
return self[0].route
@property
def addr_spec(self):
return self[0].addr_spec
class InvalidMailbox(TokenList):
token_type = 'invalid-mailbox'
@property
def display_name(self):
return None
local_part = domain = route = addr_spec = display_name
class Domain(TokenList):
token_type = 'domain'
@property
def domain(self):
return ''.join(super().value.split())
class DotAtom(TokenList):
token_type = 'dot-atom'
class DotAtomText(TokenList):
token_type = 'dot-atom-text'
class AddrSpec(TokenList):
token_type = 'addr-spec'
@property
def local_part(self):
return self[0].local_part
@property
def domain(self):
if len(self) < 3:
return None
return self[-1].domain
@property
def value(self):
if len(self) < 3:
return self[0].value
return self[0].value.rstrip()+self[1].value+self[2].value.lstrip()
@property
def addr_spec(self):
nameset = set(self.local_part)
if len(nameset) > len(nameset-DOT_ATOM_ENDS):
lp = quote_string(self.local_part)
else:
lp = self.local_part
if self.domain is not None:
return lp + '@' + self.domain
return lp
class ObsLocalPart(TokenList):
token_type = 'obs-local-part'
class DisplayName(Phrase):
token_type = 'display-name'
@property
def display_name(self):
res = TokenList(self)
if res[0].token_type == 'cfws':
res.pop(0)
else:
if res[0][0].token_type == 'cfws':
res[0] = TokenList(res[0][1:])
if res[-1].token_type == 'cfws':
res.pop()
else:
if res[-1][-1].token_type == 'cfws':
res[-1] = TokenList(res[-1][:-1])
return res.value
@property
def value(self):
quote = False
if self.defects:
quote = True
else:
for x in self:
if x.token_type == 'quoted-string':
quote = True
if quote:
pre = post = ''
if self[0].token_type=='cfws' or self[0][0].token_type=='cfws':
pre = ' '
if self[-1].token_type=='cfws' or self[-1][-1].token_type=='cfws':
post = ' '
return pre+quote_string(self.display_name)+post
else:
return super().value
class LocalPart(TokenList):
token_type = 'local-part'
@property
def value(self):
if self[0].token_type == "quoted-string":
return self[0].quoted_value
else:
return self[0].value
@property
def local_part(self):
# Strip whitespace from front, back, and around dots.
res = [DOT]
last = DOT
last_is_tl = False
for tok in self[0] + [DOT]:
if tok.token_type == 'cfws':
continue
if (last_is_tl and tok.token_type == 'dot' and
last[-1].token_type == 'cfws'):
res[-1] = TokenList(last[:-1])
is_tl = isinstance(tok, TokenList)
if (is_tl and last.token_type == 'dot' and
tok[0].token_type == 'cfws'):
res.append(TokenList(tok[1:]))
else:
res.append(tok)
last = res[-1]
last_is_tl = is_tl
res = TokenList(res[1:-1])
return res.value
class DomainLiteral(TokenList):
token_type = 'domain-literal'
@property
def domain(self):
return ''.join(super().value.split())
@property
def ip(self):
for x in self:
if x.token_type == 'ptext':
return x.value
class MIMEVersion(TokenList):
token_type = 'mime-version'
major = None
minor = None
class Parameter(TokenList):
token_type = 'parameter'
sectioned = False
extended = False
charset = 'us-ascii'
@property
def section_number(self):
# Because the first token, the attribute (name) eats CFWS, the second
# token is always the section if there is one.
return self[1].number if self.sectioned else 0
@property
def param_value(self):
# This is part of the "handle quoted extended parameters" hack.
for token in self:
if token.token_type == 'value':
return token.stripped_value
if token.token_type == 'quoted-string':
for token in token:
if token.token_type == 'bare-quoted-string':
for token in token:
if token.token_type == 'value':
return token.stripped_value
return ''
class InvalidParameter(Parameter):
token_type = 'invalid-parameter'
class Attribute(TokenList):
token_type = 'attribute'
@property
def stripped_value(self):
for token in self:
if token.token_type.endswith('attrtext'):
return token.value
class Section(TokenList):
token_type = 'section'
number = None
class Value(TokenList):
token_type = 'value'
@property
def stripped_value(self):
token = self[0]
if token.token_type == 'cfws':
token = self[1]
if token.token_type.endswith(
('quoted-string', 'attribute', 'extended-attribute')):
return token.stripped_value
return self.value
class MimeParameters(TokenList):
token_type = 'mime-parameters'
@property
def params(self):
# The RFC specifically states that the ordering of parameters is not
# guaranteed and may be reordered by the transport layer. So we have
# to assume the RFC 2231 pieces can come in any order. However, we
# output them in the order that we first see a given name, which gives
# us a stable __str__.
params = OrderedDict()
for token in self:
if not token.token_type.endswith('parameter'):
continue
if token[0].token_type != 'attribute':
continue
name = token[0].value.strip()
if name not in params:
params[name] = []
params[name].append((token.section_number, token))
for name, parts in params.items():
parts = sorted(parts)
# XXX: there might be more recovery we could do here if, for
# example, this is really a case of a duplicate attribute name.
value_parts = []
charset = parts[0][1].charset
for i, (section_number, param) in enumerate(parts):
if section_number != i:
param.defects.append(errors.InvalidHeaderDefect(
"inconsistent multipart parameter numbering"))
value = param.param_value
if param.extended:
try:
value = urllib.parse.unquote_to_bytes(value)
except UnicodeEncodeError:
# source had surrogate escaped bytes. What we do now
# is a bit of an open question. I'm not sure this is
# the best choice, but it is what the old algorithm did
value = urllib.parse.unquote(value, encoding='latin-1')
else:
try:
value = value.decode(charset, 'surrogateescape')
except LookupError:
# XXX: there should really be a custom defect for
# unknown character set to make it easy to find,
# because otherwise unknown charset is a silent
# failure.
value = value.decode('us-ascii', 'surrogateescape')
if utils._has_surrogates(value):
param.defects.append(errors.UndecodableBytesDefect())
value_parts.append(value)
value = ''.join(value_parts)
yield name, value
def __str__(self):
params = []
for name, value in self.params:
if value:
params.append('{}={}'.format(name, quote_string(value)))
else:
params.append(name)
params = '; '.join(params)
return ' ' + params if params else ''
class ParameterizedHeaderValue(TokenList):
@property
def params(self):
for token in reversed(self):
if token.token_type == 'mime-parameters':
return token.params
return {}
@property
def parts(self):
if self and self[-1].token_type == 'mime-parameters':
# We don't want to start a new line if all of the params don't fit
# after the value, so unwrap the parameter list.
return TokenList(self[:-1] + self[-1])
return TokenList(self).parts
class ContentType(ParameterizedHeaderValue):
token_type = 'content-type'
maintype = 'text'
subtype = 'plain'
class ContentDisposition(ParameterizedHeaderValue):
token_type = 'content-disposition'
content_disposition = None
class ContentTransferEncoding(TokenList):
token_type = 'content-transfer-encoding'
cte = '7bit'
class HeaderLabel(TokenList):
token_type = 'header-label'
class Header(TokenList):
token_type = 'header'
def _fold(self, folded):
folded.append(str(self.pop(0)))
folded.lastlen = len(folded.current[0])
# The first line of the header is different from all others: we don't
# want to start a new object on a new line if it has any fold points in
# it that would allow part of it to be on the first header line.
# Further, if the first fold point would fit on the new line, we want
# to do that, but if it doesn't we want to put it on the first line.
# Folded supports this via the stickyspace attribute. If this
# attribute is not None, it does the special handling.
folded.stickyspace = str(self.pop(0)) if self[0].token_type == 'cfws' else ''
rest = self.pop(0)
if self:
raise ValueError("Malformed Header token list")
rest._fold(folded)
#
# Terminal classes and instances
#
class Terminal(str):
def __new__(cls, value, token_type):
self = super().__new__(cls, value)
self.token_type = token_type
self.defects = []
return self
def __repr__(self):
return "{}({})".format(self.__class__.__name__, super().__repr__())
@property
def all_defects(self):
return list(self.defects)
def _pp(self, indent=''):
return ["{}{}/{}({}){}".format(
indent,
self.__class__.__name__,
self.token_type,
super().__repr__(),
'' if not self.defects else ' {}'.format(self.defects),
)]
def cte_encode(self, charset, policy):
value = str(self)
try:
value.encode('us-ascii')
return value
except UnicodeEncodeError:
return _ew.encode(value, charset)
def pop_trailing_ws(self):
# This terminates the recursion.
return None
def pop_leading_fws(self):
# This terminates the recursion.
return None
@property
def comments(self):
return []
def has_leading_comment(self):
return False
def __getnewargs__(self):
        return (str(self), self.token_type)
class WhiteSpaceTerminal(Terminal):
@property
def value(self):
return ' '
def startswith_fws(self):
return True
has_fws = True
class ValueTerminal(Terminal):
@property
def value(self):
return self
def startswith_fws(self):
return False
has_fws = False
def as_encoded_word(self, charset):
return _ew.encode(str(self), charset)
class EWWhiteSpaceTerminal(WhiteSpaceTerminal):
@property
def value(self):
return ''
@property
def encoded(self):
return self[:]
def __str__(self):
return ''
has_fws = True
# XXX these need to become classes and used as instances so
# that a program can't change them in a parse tree and screw
# up other parse trees. Maybe should have tests for that, too.
DOT = ValueTerminal('.', 'dot')
ListSeparator = ValueTerminal(',', 'list-separator')
RouteComponentMarker = ValueTerminal('@', 'route-component-marker')
#
# Parser
#
# Parse strings according to RFC822/2047/2822/5322 rules.
#
# This is a stateless parser. Each get_XXX function accepts a string and
# returns either a Terminal or a TokenList representing the RFC object named
# by the method and a string containing the remaining unparsed characters
# from the input. Thus a parser method consumes the next syntactic construct
# of a given type and returns a token representing the construct plus the
# unparsed remainder of the input string.
#
# For example, if the first element of a structured header is a 'phrase',
# then:
#
# phrase, value = get_phrase(value)
#
# returns the complete phrase from the start of the string value, plus any
# characters left in the string after the phrase is removed.
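#
# For example (illustrative):
#
#   >>> phrase, value = get_phrase('Foo Bar <foo@example.com>')
#   >>> str(phrase)
#   'Foo Bar '
#   >>> value
#   '<foo@example.com>'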
_wsp_splitter = re.compile(r'([{}]+)'.format(''.join(WSP))).split
_non_atom_end_matcher = re.compile(r"[^{}]+".format(
    ''.join(ATOM_ENDS).replace('\\', '\\\\').replace(']', r'\]'))).match
_non_printable_finder = re.compile(r"[\x00-\x20\x7F]").findall
_non_token_end_matcher = re.compile(r"[^{}]+".format(
    ''.join(TOKEN_ENDS).replace('\\', '\\\\').replace(']', r'\]'))).match
_non_attribute_end_matcher = re.compile(r"[^{}]+".format(
    ''.join(ATTRIBUTE_ENDS).replace('\\', '\\\\').replace(']', r'\]'))).match
_non_extended_attribute_end_matcher = re.compile(r"[^{}]+".format(
    ''.join(EXTENDED_ATTRIBUTE_ENDS).replace(
        '\\', '\\\\').replace(']', r'\]'))).match
def _validate_xtext(xtext):
"""If input token contains ASCII non-printables, register a defect."""
non_printables = _non_printable_finder(xtext)
if non_printables:
xtext.defects.append(errors.NonPrintableDefect(non_printables))
if utils._has_surrogates(xtext):
xtext.defects.append(errors.UndecodableBytesDefect(
"Non-ASCII characters found in header token"))
def _get_ptext_to_endchars(value, endchars):
"""Scan printables/quoted-pairs until endchars and return unquoted ptext.
This function turns a run of qcontent, ccontent-without-comments, or
dtext-with-quoted-printables into a single string by unquoting any
quoted printables. It returns the string, the remaining value, and
a flag that is True iff there were any quoted printables decoded.
"""
fragment, *remainder = _wsp_splitter(value, 1)
vchars = []
escape = False
had_qp = False
for pos in range(len(fragment)):
if fragment[pos] == '\\':
if escape:
escape = False
had_qp = True
else:
escape = True
continue
        if escape:
            escape = False
            had_qp = True
elif fragment[pos] in endchars:
break
vchars.append(fragment[pos])
else:
pos = pos + 1
return ''.join(vchars), ''.join([fragment[pos:]] + remainder), had_qp
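# For example (illustrative): the quoted-pair '\(' is unquoted, the scan
# stops at the unescaped ')', and the flag reports that unquoting happened.
#
#   >>> _get_ptext_to_endchars(r'foo\(bar) tail', '()')
#   ('foo(bar', ') tail', True)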
def get_fws(value):
"""FWS = 1*WSP
This isn't the RFC definition. We're using fws to represent tokens where
folding can be done, but when we are parsing the *un*folding has already
been done so we don't need to watch out for CRLF.
"""
newvalue = value.lstrip()
fws = WhiteSpaceTerminal(value[:len(value)-len(newvalue)], 'fws')
return fws, newvalue
def get_encoded_word(value):
""" encoded-word = "=?" charset "?" encoding "?" encoded-text "?="
"""
ew = EncodedWord()
if not value.startswith('=?'):
raise errors.HeaderParseError(
"expected encoded word but found {}".format(value))
tok, *remainder = value[2:].split('?=', 1)
if tok == value[2:]:
raise errors.HeaderParseError(
"expected encoded word but found {}".format(value))
remstr = ''.join(remainder)
if len(remstr) > 1 and remstr[0] in hexdigits and remstr[1] in hexdigits:
# The ? after the CTE was followed by an encoded word escape (=XX).
rest, *remainder = remstr.split('?=', 1)
tok = tok + '?=' + rest
if len(tok.split()) > 1:
ew.defects.append(errors.InvalidHeaderDefect(
"whitespace inside encoded word"))
ew.cte = value
value = ''.join(remainder)
try:
text, charset, lang, defects = _ew.decode('=?' + tok + '?=')
except ValueError:
raise errors.HeaderParseError(
"encoded word format invalid: '{}'".format(ew.cte))
ew.charset = charset
ew.lang = lang
ew.defects.extend(defects)
while text:
if text[0] in WSP:
token, text = get_fws(text)
ew.append(token)
continue
chars, *remainder = _wsp_splitter(text, 1)
vtext = ValueTerminal(chars, 'vtext')
_validate_xtext(vtext)
ew.append(vtext)
text = ''.join(remainder)
return ew, value
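# For example (illustrative):
#
#   >>> ew, value = get_encoded_word('=?utf-8?q?caf=C3=A9?= tail')
#   >>> str(ew), value
#   ('café', ' tail')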
def get_unstructured(value):
"""unstructured = (*([FWS] vchar) *WSP) / obs-unstruct
    obs-unstruct = *((*LF *CR *(obs-utext *LF *CR)) / FWS)
obs-utext = %d0 / obs-NO-WS-CTL / LF / CR
obs-NO-WS-CTL is control characters except WSP/CR/LF.
So, basically, we have printable runs, plus control characters or nulls in
the obsolete syntax, separated by whitespace. Since RFC 2047 uses the
obsolete syntax in its specification, but requires whitespace on either
side of the encoded words, I can see no reason to need to separate the
non-printable-non-whitespace from the printable runs if they occur, so we
parse this into xtext tokens separated by WSP tokens.
Because an 'unstructured' value must by definition constitute the entire
value, this 'get' routine does not return a remaining value, only the
parsed TokenList.
"""
# XXX: but what about bare CR and LF? They might signal the start or
# end of an encoded word. YAGNI for now, since our current parsers
# will never send us strings with bare CR or LF.
unstructured = UnstructuredTokenList()
while value:
if value[0] in WSP:
token, value = get_fws(value)
unstructured.append(token)
continue
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: Need to figure out how to register defects when
# appropriate here.
pass
else:
have_ws = True
if len(unstructured) > 0:
if unstructured[-1].token_type != 'fws':
unstructured.defects.append(errors.InvalidHeaderDefect(
"missing whitespace before encoded word"))
have_ws = False
if have_ws and len(unstructured) > 1:
if unstructured[-2].token_type == 'encoded-word':
unstructured[-1] = EWWhiteSpaceTerminal(
unstructured[-1], 'fws')
unstructured.append(token)
continue
tok, *remainder = _wsp_splitter(value, 1)
vtext = ValueTerminal(tok, 'vtext')
_validate_xtext(vtext)
unstructured.append(vtext)
value = ''.join(remainder)
return unstructured
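# For example (illustrative): encoded words are decoded in place.
#
#   >>> str(get_unstructured('Hello =?utf-8?q?W=C3=B6rld?='))
#   'Hello Wörld'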
def get_qp_ctext(value):
"""ctext = <printable ascii except \ ( )>
This is not the RFC ctext, since we are handling nested comments in comment
and unquoting quoted-pairs here. We allow anything except the '()'
characters, but if we find any ASCII other than the RFC defined printable
ASCII an NonPrintableDefect is added to the token's defects list. Since
quoted pairs are converted to their unquoted values, what is returned is
a 'ptext' token. In this case it is a WhiteSpaceTerminal, so it's value
is ' '.
"""
ptext, value, _ = _get_ptext_to_endchars(value, '()')
ptext = WhiteSpaceTerminal(ptext, 'ptext')
_validate_xtext(ptext)
return ptext, value
def get_qcontent(value):
"""qcontent = qtext / quoted-pair
We allow anything except the DQUOTE character, but if we find any ASCII
    other than the RFC defined printable ASCII, a NonPrintableDefect is
added to the token's defects list. Any quoted pairs are converted to their
unquoted values, so what is returned is a 'ptext' token. In this case it
is a ValueTerminal.
"""
ptext, value, _ = _get_ptext_to_endchars(value, '"')
ptext = ValueTerminal(ptext, 'ptext')
_validate_xtext(ptext)
return ptext, value
def get_atext(value):
"""atext = <matches _atext_matcher>
    We allow any non-ATOM_ENDS in atext, but add defects to the token's
    defects list if we find non-atext characters.
"""
m = _non_atom_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected atext but found '{}'".format(value))
atext = m.group()
value = value[len(atext):]
atext = ValueTerminal(atext, 'atext')
_validate_xtext(atext)
return atext, value
def get_bare_quoted_string(value):
"""bare-quoted-string = DQUOTE *([FWS] qcontent) [FWS] DQUOTE
A quoted-string without the leading or trailing white space. Its
value is the text between the quote marks, with whitespace
preserved and quoted pairs decoded.
"""
if value[0] != '"':
raise errors.HeaderParseError(
"expected '\"' but found '{}'".format(value))
bare_quoted_string = BareQuotedString()
value = value[1:]
while value and value[0] != '"':
if value[0] in WSP:
token, value = get_fws(value)
elif value[:2] == '=?':
try:
token, value = get_encoded_word(value)
bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
"encoded word inside quoted string"))
except errors.HeaderParseError:
token, value = get_qcontent(value)
else:
token, value = get_qcontent(value)
bare_quoted_string.append(token)
if not value:
bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
"end of header inside quoted string"))
return bare_quoted_string, value
return bare_quoted_string, value[1:]
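# For example (illustrative): the value excludes the quote marks, and str()
# re-quotes it.
#
#   >>> bqs, value = get_bare_quoted_string('"foo bar" tail')
#   >>> bqs.value, str(bqs), value
#   ('foo bar', '"foo bar"', ' tail')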
def get_comment(value):
"""comment = "(" *([FWS] ccontent) [FWS] ")"
ccontent = ctext / quoted-pair / comment
We handle nested comments here, and quoted-pair in our qp-ctext routine.
"""
if value and value[0] != '(':
raise errors.HeaderParseError(
"expected '(' but found '{}'".format(value))
comment = Comment()
value = value[1:]
while value and value[0] != ")":
if value[0] in WSP:
token, value = get_fws(value)
elif value[0] == '(':
token, value = get_comment(value)
else:
token, value = get_qp_ctext(value)
comment.append(token)
if not value:
comment.defects.append(errors.InvalidHeaderDefect(
"end of header inside comment"))
return comment, value
return comment, value[1:]
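# For example (illustrative): nested comments parse recursively and the
# content property exposes the text between the outer parentheses.
#
#   >>> comment, value = get_comment('(a (nested) comment) tail')
#   >>> comment.content, value
#   ('a (nested) comment', ' tail')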
def get_cfws(value):
"""CFWS = (1*([FWS] comment) [FWS]) / FWS
"""
cfws = CFWSList()
while value and value[0] in CFWS_LEADER:
if value[0] in WSP:
token, value = get_fws(value)
else:
token, value = get_comment(value)
cfws.append(token)
return cfws, value
def get_quoted_string(value):
"""quoted-string = [CFWS] <bare-quoted-string> [CFWS]
'bare-quoted-string' is an intermediate class defined by this
parser and not by the RFC grammar. It is the quoted string
without any attached CFWS.
"""
quoted_string = QuotedString()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
quoted_string.append(token)
token, value = get_bare_quoted_string(value)
quoted_string.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
quoted_string.append(token)
return quoted_string, value
def get_atom(value):
"""atom = [CFWS] 1*atext [CFWS]
An atom could be an rfc2047 encoded word.
"""
atom = Atom()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
if value and value[0] in ATOM_ENDS:
raise errors.HeaderParseError(
"expected atom but found '{}'".format(value))
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: need to figure out how to register defects when
# appropriate here.
token, value = get_atext(value)
else:
token, value = get_atext(value)
atom.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
return atom, value
def get_dot_atom_text(value):
""" dot-text = 1*atext *("." 1*atext)
"""
dot_atom_text = DotAtomText()
if not value or value[0] in ATOM_ENDS:
raise errors.HeaderParseError("expected atom at a start of "
"dot-atom-text but found '{}'".format(value))
while value and value[0] not in ATOM_ENDS:
token, value = get_atext(value)
dot_atom_text.append(token)
if value and value[0] == '.':
dot_atom_text.append(DOT)
value = value[1:]
if dot_atom_text[-1] is DOT:
raise errors.HeaderParseError("expected atom at end of dot-atom-text "
"but found '{}'".format('.'+value))
return dot_atom_text, value
def get_dot_atom(value):
""" dot-atom = [CFWS] dot-atom-text [CFWS]
Any place we can have a dot atom, we could instead have an rfc2047 encoded
word.
"""
dot_atom = DotAtom()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
dot_atom.append(token)
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: need to figure out how to register defects when
# appropriate here.
token, value = get_dot_atom_text(value)
else:
token, value = get_dot_atom_text(value)
dot_atom.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
dot_atom.append(token)
return dot_atom, value
def get_word(value):
"""word = atom / quoted-string
Either atom or quoted-string may start with CFWS. We have to peel off this
CFWS first to determine which type of word to parse. Afterward we splice
the leading CFWS, if any, into the parsed sub-token.
    If neither an atom nor a quoted-string is found before the next special, a
HeaderParseError is raised.
The token returned is either an Atom or a QuotedString, as appropriate.
This means the 'word' level of the formal grammar is not represented in the
parse tree; this is because having that extra layer when manipulating the
parse tree is more confusing than it is helpful.
"""
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
else:
leader = None
if value[0]=='"':
token, value = get_quoted_string(value)
elif value[0] in SPECIALS:
raise errors.HeaderParseError("Expected 'atom' or 'quoted-string' "
"but found '{}'".format(value))
else:
token, value = get_atom(value)
if leader is not None:
token[:0] = [leader]
return token, value
def get_phrase(value):
""" phrase = 1*word / obs-phrase
obs-phrase = word *(word / "." / CFWS)
This means a phrase can be a sequence of words, periods, and CFWS in any
order as long as it starts with at least one word. If anything other than
words is detected, an ObsoleteHeaderDefect is added to the token's defect
list. We also accept a phrase that starts with CFWS followed by a dot;
this is registered as an InvalidHeaderDefect, since it is not supported by
even the obsolete grammar.
"""
phrase = Phrase()
try:
token, value = get_word(value)
phrase.append(token)
except errors.HeaderParseError:
phrase.defects.append(errors.InvalidHeaderDefect(
"phrase does not start with word"))
while value and value[0] not in PHRASE_ENDS:
if value[0]=='.':
phrase.append(DOT)
phrase.defects.append(errors.ObsoleteHeaderDefect(
"period in 'phrase'"))
value = value[1:]
else:
try:
token, value = get_word(value)
except errors.HeaderParseError:
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
phrase.defects.append(errors.ObsoleteHeaderDefect(
"comment found without atom"))
else:
raise
phrase.append(token)
return phrase, value
def get_local_part(value):
""" local-part = dot-atom / quoted-string / obs-local-part
"""
local_part = LocalPart()
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected local-part but found '{}'".format(value))
try:
token, value = get_dot_atom(value)
except errors.HeaderParseError:
try:
token, value = get_word(value)
except errors.HeaderParseError:
if value[0] != '\\' and value[0] in PHRASE_ENDS:
raise
token = TokenList()
if leader is not None:
token[:0] = [leader]
local_part.append(token)
if value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
obs_local_part, value = get_obs_local_part(str(local_part) + value)
if obs_local_part.token_type == 'invalid-obs-local-part':
local_part.defects.append(errors.InvalidHeaderDefect(
"local-part is not dot-atom, quoted-string, or obs-local-part"))
else:
local_part.defects.append(errors.ObsoleteHeaderDefect(
"local-part is not a dot-atom (contains CFWS)"))
local_part[0] = obs_local_part
try:
local_part.value.encode('ascii')
except UnicodeEncodeError:
local_part.defects.append(errors.NonASCIILocalPartDefect(
"local-part contains non-ASCII characters)"))
return local_part, value
def get_obs_local_part(value):
""" obs-local-part = word *("." word)
"""
obs_local_part = ObsLocalPart()
last_non_ws_was_dot = False
while value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
if value[0] == '.':
if last_non_ws_was_dot:
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"invalid repeated '.'"))
obs_local_part.append(DOT)
last_non_ws_was_dot = True
value = value[1:]
continue
elif value[0]=='\\':
obs_local_part.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"'\\' character outside of quoted-string/ccontent"))
last_non_ws_was_dot = False
continue
if obs_local_part and obs_local_part[-1].token_type != 'dot':
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"missing '.' between words"))
try:
token, value = get_word(value)
last_non_ws_was_dot = False
except errors.HeaderParseError:
if value[0] not in CFWS_LEADER:
raise
token, value = get_cfws(value)
obs_local_part.append(token)
if (obs_local_part[0].token_type == 'dot' or
obs_local_part[0].token_type=='cfws' and
obs_local_part[1].token_type=='dot'):
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"Invalid leading '.' in local part"))
if (obs_local_part[-1].token_type == 'dot' or
obs_local_part[-1].token_type=='cfws' and
obs_local_part[-2].token_type=='dot'):
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"Invalid trailing '.' in local part"))
if obs_local_part.defects:
obs_local_part.token_type = 'invalid-obs-local-part'
return obs_local_part, value
def get_dtext(value):
""" dtext = <printable ascii except \ [ ]> / obs-dtext
obs-dtext = obs-NO-WS-CTL / quoted-pair
We allow anything except the excluded characters, but if we find any
ASCII other than the RFC defined printable ASCII an NonPrintableDefect is
added to the token's defects list. Quoted pairs are converted to their
unquoted values, so what is returned is a ptext token, in this case a
ValueTerminal. If there were quoted-printables, an ObsoleteHeaderDefect is
added to the returned token's defect list.
"""
ptext, value, had_qp = _get_ptext_to_endchars(value, '[]')
ptext = ValueTerminal(ptext, 'ptext')
if had_qp:
ptext.defects.append(errors.ObsoleteHeaderDefect(
"quoted printable found in domain-literal"))
_validate_xtext(ptext)
return ptext, value
def _check_for_early_dl_end(value, domain_literal):
if value:
return False
    domain_literal.defects.append(errors.InvalidHeaderDefect(
        "end of input inside domain-literal"))
domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
return True
def get_domain_literal(value):
""" domain-literal = [CFWS] "[" *([FWS] dtext) [FWS] "]" [CFWS]
"""
domain_literal = DomainLiteral()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
domain_literal.append(token)
if not value:
raise errors.HeaderParseError("expected domain-literal")
if value[0] != '[':
raise errors.HeaderParseError("expected '[' at start of domain-literal "
"but found '{}'".format(value))
value = value[1:]
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
domain_literal.append(ValueTerminal('[', 'domain-literal-start'))
if value[0] in WSP:
token, value = get_fws(value)
domain_literal.append(token)
token, value = get_dtext(value)
domain_literal.append(token)
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
if value[0] in WSP:
token, value = get_fws(value)
domain_literal.append(token)
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
if value[0] != ']':
raise errors.HeaderParseError("expected ']' at end of domain-literal "
"but found '{}'".format(value))
domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
domain_literal.append(token)
return domain_literal, value
def get_domain(value):
""" domain = dot-atom / domain-literal / obs-domain
obs-domain = atom *("." atom))
"""
domain = Domain()
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected domain but found '{}'".format(value))
if value[0] == '[':
token, value = get_domain_literal(value)
if leader is not None:
token[:0] = [leader]
domain.append(token)
return domain, value
try:
token, value = get_dot_atom(value)
except errors.HeaderParseError:
token, value = get_atom(value)
if leader is not None:
token[:0] = [leader]
domain.append(token)
if value and value[0] == '.':
domain.defects.append(errors.ObsoleteHeaderDefect(
"domain is not a dot-atom (contains CFWS)"))
if domain[0].token_type == 'dot-atom':
domain[:] = domain[0]
while value and value[0] == '.':
domain.append(DOT)
token, value = get_atom(value[1:])
domain.append(token)
return domain, value
def get_addr_spec(value):
""" addr-spec = local-part "@" domain
"""
addr_spec = AddrSpec()
token, value = get_local_part(value)
addr_spec.append(token)
if not value or value[0] != '@':
addr_spec.defects.append(errors.InvalidHeaderDefect(
"add-spec local part with no domain"))
return addr_spec, value
addr_spec.append(ValueTerminal('@', 'address-at-symbol'))
token, value = get_domain(value[1:])
addr_spec.append(token)
return addr_spec, value
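# For example (illustrative):
#
#   >>> addr_spec, value = get_addr_spec('dinsdale@python.org')
#   >>> addr_spec.local_part, addr_spec.domain, addr_spec.addr_spec
#   ('dinsdale', 'python.org', 'dinsdale@python.org')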
def get_obs_route(value):
""" obs-route = obs-domain-list ":"
obs-domain-list = *(CFWS / ",") "@" domain *("," [CFWS] ["@" domain])
Returns an obs-route token with the appropriate sub-tokens (that is,
there is no obs-domain-list in the parse tree).
"""
obs_route = ObsRoute()
while value and (value[0]==',' or value[0] in CFWS_LEADER):
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
obs_route.append(token)
elif value[0] == ',':
obs_route.append(ListSeparator)
value = value[1:]
if not value or value[0] != '@':
raise errors.HeaderParseError(
"expected obs-route domain but found '{}'".format(value))
obs_route.append(RouteComponentMarker)
token, value = get_domain(value[1:])
obs_route.append(token)
while value and value[0]==',':
obs_route.append(ListSeparator)
value = value[1:]
if not value:
break
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
obs_route.append(token)
if value[0] == '@':
obs_route.append(RouteComponentMarker)
token, value = get_domain(value[1:])
obs_route.append(token)
if not value:
raise errors.HeaderParseError("end of header while parsing obs-route")
if value[0] != ':':
raise errors.HeaderParseError( "expected ':' marking end of "
"obs-route but found '{}'".format(value))
obs_route.append(ValueTerminal(':', 'end-of-obs-route-marker'))
return obs_route, value[1:]
def get_angle_addr(value):
""" angle-addr = [CFWS] "<" addr-spec ">" [CFWS] / obs-angle-addr
obs-angle-addr = [CFWS] "<" obs-route addr-spec ">" [CFWS]
"""
angle_addr = AngleAddr()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
angle_addr.append(token)
if not value or value[0] != '<':
raise errors.HeaderParseError(
"expected angle-addr but found '{}'".format(value))
angle_addr.append(ValueTerminal('<', 'angle-addr-start'))
value = value[1:]
# Although it is not legal per RFC5322, SMTP uses '<>' in certain
# circumstances.
if value[0] == '>':
angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
angle_addr.defects.append(errors.InvalidHeaderDefect(
"null addr-spec in angle-addr"))
value = value[1:]
return angle_addr, value
try:
token, value = get_addr_spec(value)
except errors.HeaderParseError:
try:
token, value = get_obs_route(value)
angle_addr.defects.append(errors.ObsoleteHeaderDefect(
"obsolete route specification in angle-addr"))
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected addr-spec or obs-route but found '{}'".format(value))
angle_addr.append(token)
token, value = get_addr_spec(value)
angle_addr.append(token)
if value and value[0] == '>':
value = value[1:]
else:
angle_addr.defects.append(errors.InvalidHeaderDefect(
"missing trailing '>' on angle-addr"))
angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
angle_addr.append(token)
return angle_addr, value
def get_display_name(value):
""" display-name = phrase
Because this is simply a name-rule, we don't return a display-name
token containing a phrase, but rather a display-name token with
the content of the phrase.
"""
display_name = DisplayName()
token, value = get_phrase(value)
display_name.extend(token[:])
display_name.defects = token.defects[:]
return display_name, value
def get_name_addr(value):
""" name-addr = [display-name] angle-addr
"""
name_addr = NameAddr()
# Both the optional display name and the angle-addr can start with cfws.
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(leader))
if value[0] != '<':
if value[0] in PHRASE_ENDS:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(value))
token, value = get_display_name(value)
if not value:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(token))
if leader is not None:
token[0][:0] = [leader]
leader = None
name_addr.append(token)
token, value = get_angle_addr(value)
if leader is not None:
token[:0] = [leader]
name_addr.append(token)
return name_addr, value
def get_mailbox(value):
""" mailbox = name-addr / addr-spec
"""
# The only way to figure out if we are dealing with a name-addr or an
# addr-spec is to try parsing each one.
mailbox = Mailbox()
try:
token, value = get_name_addr(value)
except errors.HeaderParseError:
try:
token, value = get_addr_spec(value)
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected mailbox but found '{}'".format(value))
if any(isinstance(x, errors.InvalidHeaderDefect)
for x in token.all_defects):
mailbox.token_type = 'invalid-mailbox'
mailbox.append(token)
return mailbox, value
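# For example (illustrative):
#
#   >>> mailbox, value = get_mailbox('Fred Bloggs <fred@example.com>')
#   >>> mailbox.display_name, mailbox.addr_spec
#   ('Fred Bloggs', 'fred@example.com')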
def get_invalid_mailbox(value, endchars):
""" Read everything up to one of the chars in endchars.
This is outside the formal grammar. The InvalidMailbox TokenList that is
returned acts like a Mailbox, but the data attributes are None.
"""
invalid_mailbox = InvalidMailbox()
while value and value[0] not in endchars:
if value[0] in PHRASE_ENDS:
invalid_mailbox.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
invalid_mailbox.append(token)
return invalid_mailbox, value
def get_mailbox_list(value):
""" mailbox-list = (mailbox *("," mailbox)) / obs-mbox-list
obs-mbox-list = *([CFWS] ",") mailbox *("," [mailbox / CFWS])
For this routine we go outside the formal grammar in order to improve error
handling. We recognize the end of the mailbox list only at the end of the
value or at a ';' (the group terminator). This is so that we can turn
invalid mailboxes into InvalidMailbox tokens and continue parsing any
remaining valid mailboxes. We also allow all mailbox entries to be null,
and this condition is handled appropriately at a higher level.
"""
mailbox_list = MailboxList()
while value and value[0] != ';':
try:
token, value = get_mailbox(value)
mailbox_list.append(token)
except errors.HeaderParseError:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value or value[0] in ',;':
mailbox_list.append(leader)
mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in mailbox-list"))
else:
token, value = get_invalid_mailbox(value, ',;')
if leader is not None:
token[:0] = [leader]
mailbox_list.append(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
elif value[0] == ',':
mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in mailbox-list"))
else:
token, value = get_invalid_mailbox(value, ',;')
if leader is not None:
token[:0] = [leader]
mailbox_list.append(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
if value and value[0] not in ',;':
# Crap after mailbox; treat it as an invalid mailbox.
# The mailbox info will still be available.
mailbox = mailbox_list[-1]
mailbox.token_type = 'invalid-mailbox'
token, value = get_invalid_mailbox(value, ',;')
mailbox.extend(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
if value and value[0] == ',':
mailbox_list.append(ListSeparator)
value = value[1:]
return mailbox_list, value
def get_group_list(value):
""" group-list = mailbox-list / CFWS / obs-group-list
obs-group-list = 1*([CFWS] ",") [CFWS]
"""
group_list = GroupList()
if not value:
group_list.defects.append(errors.InvalidHeaderDefect(
"end of header before group-list"))
return group_list, value
leader = None
if value and value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
# This should never happen in email parsing, since CFWS-only is a
# legal alternative to group-list in a group, which is the only
# place group-list appears.
group_list.defects.append(errors.InvalidHeaderDefect(
"end of header in group-list"))
group_list.append(leader)
return group_list, value
if value[0] == ';':
group_list.append(leader)
return group_list, value
token, value = get_mailbox_list(value)
if len(token.all_mailboxes)==0:
if leader is not None:
group_list.append(leader)
group_list.extend(token)
group_list.defects.append(errors.ObsoleteHeaderDefect(
"group-list with empty entries"))
return group_list, value
if leader is not None:
token[:0] = [leader]
group_list.append(token)
return group_list, value
def get_group(value):
""" group = display-name ":" [group-list] ";" [CFWS]
"""
group = Group()
token, value = get_display_name(value)
if not value or value[0] != ':':
raise errors.HeaderParseError("expected ':' at end of group "
"display name but found '{}'".format(value))
group.append(token)
group.append(ValueTerminal(':', 'group-display-name-terminator'))
value = value[1:]
if value and value[0] == ';':
group.append(ValueTerminal(';', 'group-terminator'))
return group, value[1:]
token, value = get_group_list(value)
group.append(token)
if not value:
group.defects.append(errors.InvalidHeaderDefect(
"end of header in group"))
    elif value[0] != ';':
raise errors.HeaderParseError(
"expected ';' at end of group but found {}".format(value))
group.append(ValueTerminal(';', 'group-terminator'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
group.append(token)
return group, value
def get_address(value):
""" address = mailbox / group
Note that counter-intuitively, an address can be either a single address or
a list of addresses (a group). This is why the returned Address object has
a 'mailboxes' attribute which treats a single address as a list of length
    one. When you need to differentiate between the two cases, extract the single
element, which is either a mailbox or a group token.
"""
# The formal grammar isn't very helpful when parsing an address. mailbox
# and group, especially when allowing for obsolete forms, start off very
# similarly. It is only when you reach one of @, <, or : that you know
# what you've got. So, we try each one in turn, starting with the more
# likely of the two. We could perhaps make this more efficient by looking
# for a phrase and then branching based on the next character, but that
# would be a premature optimization.
address = Address()
try:
token, value = get_group(value)
except errors.HeaderParseError:
try:
token, value = get_mailbox(value)
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected address but found '{}'".format(value))
address.append(token)
return address, value
def get_address_list(value):
""" address_list = (address *("," address)) / obs-addr-list
obs-addr-list = *([CFWS] ",") address *("," [address / CFWS])
We depart from the formal grammar here by continuing to parse until the end
of the input, assuming the input to be entirely composed of an
address-list. This is always true in email parsing, and allows us
to skip invalid addresses to parse additional valid ones.
"""
address_list = AddressList()
while value:
try:
token, value = get_address(value)
address_list.append(token)
except errors.HeaderParseError as err:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value or value[0] == ',':
address_list.append(leader)
address_list.defects.append(errors.ObsoleteHeaderDefect(
"address-list entry with no content"))
else:
token, value = get_invalid_mailbox(value, ',')
if leader is not None:
token[:0] = [leader]
address_list.append(Address([token]))
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
elif value[0] == ',':
address_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in address-list"))
else:
token, value = get_invalid_mailbox(value, ',')
if leader is not None:
token[:0] = [leader]
address_list.append(Address([token]))
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
if value and value[0] != ',':
# Crap after address; treat it as an invalid mailbox.
# The mailbox info will still be available.
mailbox = address_list[-1][0]
mailbox.token_type = 'invalid-mailbox'
token, value = get_invalid_mailbox(value, ',')
mailbox.extend(token)
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
if value: # Must be a , at this point.
address_list.append(ValueTerminal(',', 'list-separator'))
value = value[1:]
return address_list, value
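# For example (illustrative):
#
#   >>> address_list, value = get_address_list(
#   ...     'dinsdale@python.org, Fred Bloggs <fred@example.com>')
#   >>> [mb.addr_spec for mb in address_list.mailboxes]
#   ['dinsdale@python.org', 'fred@example.com']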
#
# XXX: As I begin to add additional header parsers, I'm realizing we probably
# have two levels of parser routines: the get_XXX methods that get a token in
# the grammar, and parse_XXX methods that parse an entire field value. So
# get_address_list above should really be a parse_ method, as probably should
# be get_unstructured.
#
def parse_mime_version(value):
""" mime-version = [CFWS] 1*digit [CFWS] "." [CFWS] 1*digit [CFWS]
"""
# The [CFWS] is implicit in the RFC 2045 BNF.
# XXX: This routine is a bit verbose, should factor out a get_int method.
mime_version = MIMEVersion()
if not value:
mime_version.defects.append(errors.HeaderMissingRequiredValue(
"Missing MIME version number (eg: 1.0)"))
return mime_version
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value:
mime_version.defects.append(errors.HeaderMissingRequiredValue(
"Expected MIME version number but found only CFWS"))
digits = ''
while value and value[0] != '.' and value[0] not in CFWS_LEADER:
digits += value[0]
value = value[1:]
if not digits.isdigit():
mime_version.defects.append(errors.InvalidHeaderDefect(
"Expected MIME major version number but found {!r}".format(digits)))
mime_version.append(ValueTerminal(digits, 'xtext'))
else:
mime_version.major = int(digits)
mime_version.append(ValueTerminal(digits, 'digits'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value or value[0] != '.':
if mime_version.major is not None:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Incomplete MIME version; found only major number"))
if value:
mime_version.append(ValueTerminal(value, 'xtext'))
return mime_version
mime_version.append(ValueTerminal('.', 'version-separator'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value:
if mime_version.major is not None:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Incomplete MIME version; found only major number"))
return mime_version
digits = ''
while value and value[0] not in CFWS_LEADER:
digits += value[0]
value = value[1:]
if not digits.isdigit():
mime_version.defects.append(errors.InvalidHeaderDefect(
"Expected MIME minor version number but found {!r}".format(digits)))
mime_version.append(ValueTerminal(digits, 'xtext'))
else:
mime_version.minor = int(digits)
mime_version.append(ValueTerminal(digits, 'digits'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if value:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Excess non-CFWS text after MIME version"))
mime_version.append(ValueTerminal(value, 'xtext'))
return mime_version
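# For example (illustrative):
#
#   >>> mime_version = parse_mime_version('1.0')
#   >>> mime_version.major, mime_version.minor
#   (1, 0)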
def get_invalid_parameter(value):
""" Read everything up to the next ';'.
This is outside the formal grammar. The InvalidParameter TokenList that is
returned acts like a Parameter, but the data attributes are None.
"""
invalid_parameter = InvalidParameter()
while value and value[0] != ';':
if value[0] in PHRASE_ENDS:
invalid_parameter.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
invalid_parameter.append(token)
return invalid_parameter, value
def get_ttext(value):
"""ttext = <matches _ttext_matcher>
We allow any non-TOKEN_ENDS in ttext, but add defects to the token's
defects list if we find non-ttext characters. We also register defects for
*any* non-printables even though the RFC doesn't exclude all of them,
because we follow the spirit of RFC 5322.
"""
m = _non_token_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected ttext but found '{}'".format(value))
ttext = m.group()
value = value[len(ttext):]
ttext = ValueTerminal(ttext, 'ttext')
_validate_xtext(ttext)
return ttext, value
def get_token(value):
"""token = [CFWS] 1*ttext [CFWS]
The RFC equivalent of ttext is any US-ASCII chars except space, ctls, or
tspecials. We also exclude tabs even though the RFC doesn't.
The RFC implies the CFWS but is not explicit about it in the BNF.
"""
mtoken = Token()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mtoken.append(token)
if value and value[0] in TOKEN_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_ttext(value)
mtoken.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mtoken.append(token)
return mtoken, value
def get_attrtext(value):
"""attrtext = 1*(any non-ATTRIBUTE_ENDS character)
We allow any non-ATTRIBUTE_ENDS in attrtext, but add defects to the
token's defects list if we find non-attrtext characters. We also register
defects for *any* non-printables even though the RFC doesn't exclude all of
them, because we follow the spirit of RFC 5322.
"""
m = _non_attribute_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected attrtext but found {!r}".format(value))
attrtext = m.group()
value = value[len(attrtext):]
attrtext = ValueTerminal(attrtext, 'attrtext')
_validate_xtext(attrtext)
return attrtext, value
def get_attribute(value):
""" [CFWS] 1*attrtext [CFWS]
This version of the BNF makes the CFWS explicit, and as usual we use a
value terminal for the actual run of characters. The RFC equivalent of
attrtext is the token characters, with the subtraction of '*', "'", and '%'.
We include tab in the excluded set just as we do for token.
"""
attribute = Attribute()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
if value and value[0] in ATTRIBUTE_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_attrtext(value)
attribute.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
return attribute, value
def get_extended_attrtext(value):
"""attrtext = 1*(any non-ATTRIBUTE_ENDS character plus '%')
This is a special parsing routine so that we get a value that
includes % escapes as a single string (which we decode as a single
string later).
"""
m = _non_extended_attribute_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected extended attrtext but found {!r}".format(value))
attrtext = m.group()
value = value[len(attrtext):]
attrtext = ValueTerminal(attrtext, 'extended-attrtext')
_validate_xtext(attrtext)
return attrtext, value
def get_extended_attribute(value):
""" [CFWS] 1*extended_attrtext [CFWS]
This is like the non-extended version except we allow % characters, so that
we can pick up an encoded value as a single string.
"""
# XXX: should we have an ExtendedAttribute TokenList?
attribute = Attribute()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
if value and value[0] in EXTENDED_ATTRIBUTE_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_extended_attrtext(value)
attribute.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
return attribute, value
def get_section(value):
""" '*' digits
The formal BNF is more complicated because leading 0s are not allowed. We
check for that and add a defect. We also assume no CFWS is allowed between
the '*' and the digits, though the RFC is not crystal clear on that.
The caller should already have dealt with leading CFWS.
"""
section = Section()
if not value or value[0] != '*':
raise errors.HeaderParseError("Expected section but found {}".format(
value))
section.append(ValueTerminal('*', 'section-marker'))
value = value[1:]
if not value or not value[0].isdigit():
raise errors.HeaderParseError("Expected section number but "
"found {}".format(value))
digits = ''
while value and value[0].isdigit():
digits += value[0]
value = value[1:]
if digits[0] == '0' and digits != '0':
        section.defects.append(errors.InvalidHeaderDefect(
            "section number has an invalid leading 0"))
section.number = int(digits)
section.append(ValueTerminal(digits, 'digits'))
return section, value
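# A minimal usage sketch (hypothetical helper, not part of this module):
# RFC 2231 section markers like '*1' parse into a numbered Section.
def _demo_get_section():
    section, rest = get_section('*1; x')
    assert section.number == 1 and rest == '; x'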
def get_value(value):
""" quoted-string / attribute
"""
v = Value()
if not value:
raise errors.HeaderParseError("Expected value but found end of string")
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError("Expected value but found "
"only {}".format(leader))
if value[0] == '"':
token, value = get_quoted_string(value)
else:
token, value = get_extended_attribute(value)
if leader is not None:
token[:0] = [leader]
v.append(token)
return v, value
def get_parameter(value):
""" attribute [section] ["*"] [CFWS] "=" value
The CFWS is implied by the RFC but not made explicit in the BNF. This
simplified form of the BNF from the RFC is made to conform with the RFC BNF
through some extra checks. We do it this way because it makes both error
recovery and working with the resulting parse tree easier.
"""
# It is possible CFWS would also be implicitly allowed between the section
    # and the 'extended-attribute' marker (the '*'), but we've never seen that
# in the wild and we will therefore ignore the possibility.
param = Parameter()
token, value = get_attribute(value)
param.append(token)
if not value or value[0] == ';':
param.defects.append(errors.InvalidHeaderDefect("Parameter contains "
"name ({}) but no value".format(token)))
return param, value
if value[0] == '*':
try:
token, value = get_section(value)
param.sectioned = True
param.append(token)
except errors.HeaderParseError:
pass
if not value:
raise errors.HeaderParseError("Incomplete parameter")
if value[0] == '*':
param.append(ValueTerminal('*', 'extended-parameter-marker'))
value = value[1:]
param.extended = True
if value[0] != '=':
raise errors.HeaderParseError("Parameter not followed by '='")
param.append(ValueTerminal('=', 'parameter-separator'))
value = value[1:]
leader = None
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
param.append(token)
remainder = None
appendto = param
if param.extended and value and value[0] == '"':
# Now for some serious hackery to handle the common invalid case of
# double quotes around an extended value. We also accept (with defect)
# a value marked as encoded that isn't really.
qstring, remainder = get_quoted_string(value)
inner_value = qstring.stripped_value
semi_valid = False
if param.section_number == 0:
if inner_value and inner_value[0] == "'":
semi_valid = True
else:
token, rest = get_attrtext(inner_value)
if rest and rest[0] == "'":
semi_valid = True
else:
try:
token, rest = get_extended_attrtext(inner_value)
            except errors.HeaderParseError:
pass
else:
if not rest:
semi_valid = True
if semi_valid:
param.defects.append(errors.InvalidHeaderDefect(
"Quoted string value for extended parameter is invalid"))
param.append(qstring)
for t in qstring:
if t.token_type == 'bare-quoted-string':
t[:] = []
appendto = t
break
value = inner_value
else:
remainder = None
param.defects.append(errors.InvalidHeaderDefect(
"Parameter marked as extended but appears to have a "
"quoted string value that is non-encoded"))
if value and value[0] == "'":
token = None
else:
token, value = get_value(value)
if not param.extended or param.section_number > 0:
if not value or value[0] != "'":
appendto.append(token)
if remainder is not None:
assert not value, value
value = remainder
return param, value
param.defects.append(errors.InvalidHeaderDefect(
"Apparent initial-extended-value but attribute "
"was not marked as extended or was not initial section"))
if not value:
# Assume the charset/lang is missing and the token is the value.
param.defects.append(errors.InvalidHeaderDefect(
"Missing required charset/lang delimiters"))
appendto.append(token)
if remainder is None:
return param, value
else:
if token is not None:
for t in token:
if t.token_type == 'extended-attrtext':
break
            t.token_type = 'attrtext'
appendto.append(t)
param.charset = t.value
if value[0] != "'":
raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
"delimiter, but found {!r}".format(value))
appendto.append(ValueTerminal("'", 'RFC2231 delimiter'))
value = value[1:]
if value and value[0] != "'":
token, value = get_attrtext(value)
appendto.append(token)
param.lang = token.value
if not value or value[0] != "'":
raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
"delimiter, but found {}".format(value))
appendto.append(ValueTerminal("'", 'RFC2231 delimiter'))
value = value[1:]
if remainder is not None:
# Treat the rest of value as bare quoted string content.
v = Value()
while value:
if value[0] in WSP:
token, value = get_fws(value)
else:
token, value = get_qcontent(value)
v.append(token)
token = v
else:
token, value = get_value(value)
appendto.append(token)
if remainder is not None:
assert not value, value
value = remainder
return param, value
def parse_mime_parameters(value):
""" parameter *( ";" parameter )
That BNF is meant to indicate this routine should only be called after
finding and handling the leading ';'. There is no corresponding rule in
the formal RFC grammar, but it is more convenient for us for the set of
parameters to be treated as its own TokenList.
    This is a 'parse' routine because it consumes the remaining value, but it
would never be called to parse a full header. Instead it is called to
parse everything after the non-parameter value of a specific MIME header.
"""
mime_parameters = MimeParameters()
while value:
try:
token, value = get_parameter(value)
mime_parameters.append(token)
except errors.HeaderParseError as err:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
mime_parameters.append(leader)
return mime_parameters
if value[0] == ';':
if leader is not None:
mime_parameters.append(leader)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"parameter entry with no content"))
else:
token, value = get_invalid_parameter(value)
if leader:
token[:0] = [leader]
mime_parameters.append(token)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"invalid parameter {!r}".format(token)))
if value and value[0] != ';':
# Junk after the otherwise valid parameter. Mark it as
# invalid, but it will have a value.
param = mime_parameters[-1]
param.token_type = 'invalid-parameter'
token, value = get_invalid_parameter(value)
param.extend(token)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"parameter with invalid trailing text {!r}".format(token)))
if value:
# Must be a ';' at this point.
mime_parameters.append(ValueTerminal(';', 'parameter-separator'))
value = value[1:]
return mime_parameters
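# Hedged example (hypothetical helper): an RFC 2231 extended parameter
# carries charset and language before the percent-encoded value, and the
# params property yields the decoded result.
def _demo_rfc2231_parameter():
    mp = parse_mime_parameters(" title*=us-ascii'en'This%20is%20it")
    assert dict(mp.params) == {'title': 'This is it'}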
def _find_mime_parameters(tokenlist, value):
"""Do our best to find the parameters in an invalid MIME header
"""
while value and value[0] != ';':
if value[0] in PHRASE_ENDS:
tokenlist.append(ValueTerminal(value[0], 'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
tokenlist.append(token)
if not value:
return
tokenlist.append(ValueTerminal(';', 'parameter-separator'))
tokenlist.append(parse_mime_parameters(value[1:]))
def parse_content_type_header(value):
""" maintype "/" subtype *( ";" parameter )
    The maintype and subtype are tokens. Theoretically they could
be checked against the official IANA list + x-token, but we
don't do that.
"""
ctype = ContentType()
recover = False
if not value:
ctype.defects.append(errors.HeaderMissingRequiredValue(
"Missing content type specification"))
return ctype
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content maintype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
    # XXX: If we really want to follow the formal grammar we should make
    # maintype and subtype specialized TokenLists here. Probably not worth it.
if not value or value[0] != '/':
ctype.defects.append(errors.InvalidHeaderDefect(
"Invalid content type"))
if value:
_find_mime_parameters(ctype, value)
return ctype
ctype.maintype = token.value.strip().lower()
ctype.append(ValueTerminal('/', 'content-type-separator'))
value = value[1:]
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content subtype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
ctype.subtype = token.value.strip().lower()
if not value:
return ctype
if value[0] != ';':
ctype.defects.append(errors.InvalidHeaderDefect(
"Only parameters are valid after content type, but "
"found {!r}".format(value)))
# The RFC requires that a syntactically invalid content-type be treated
# as text/plain. Perhaps we should postel this, but we should probably
# only do that if we were checking the subtype value against IANA.
del ctype.maintype, ctype.subtype
_find_mime_parameters(ctype, value)
return ctype
ctype.append(ValueTerminal(';', 'parameter-separator'))
ctype.append(parse_mime_parameters(value[1:]))
return ctype
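# Usage sketch (hypothetical helper): a well-formed value populates
# maintype/subtype and exposes decoded parameters.
def _demo_parse_content_type():
    ctype = parse_content_type_header('text/plain; charset="utf-8"')
    assert ctype.maintype == 'text' and ctype.subtype == 'plain'
    assert dict(ctype.params) == {'charset': 'utf-8'}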
def parse_content_disposition_header(value):
""" disposition-type *( ";" parameter )
"""
disp_header = ContentDisposition()
if not value:
disp_header.defects.append(errors.HeaderMissingRequiredValue(
"Missing content disposition"))
return disp_header
try:
token, value = get_token(value)
except errors.HeaderParseError:
disp_header.defects.append(errors.InvalidHeaderDefect(
"Expected content disposition but found {!r}".format(value)))
_find_mime_parameters(disp_header, value)
return disp_header
disp_header.append(token)
disp_header.content_disposition = token.value.strip().lower()
if not value:
return disp_header
if value[0] != ';':
disp_header.defects.append(errors.InvalidHeaderDefect(
"Only parameters are valid after content disposition, but "
"found {!r}".format(value)))
_find_mime_parameters(disp_header, value)
return disp_header
disp_header.append(ValueTerminal(';', 'parameter-separator'))
disp_header.append(parse_mime_parameters(value[1:]))
return disp_header
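# Companion sketch to the content-type example above (hypothetical helper).
def _demo_parse_content_disposition():
    disp = parse_content_disposition_header('attachment; filename="x.txt"')
    assert disp.content_disposition == 'attachment'
    assert dict(disp.params) == {'filename': 'x.txt'}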
def parse_content_transfer_encoding_header(value):
""" mechanism
"""
# We should probably validate the values, since the list is fixed.
cte_header = ContentTransferEncoding()
if not value:
cte_header.defects.append(errors.HeaderMissingRequiredValue(
"Missing content transfer encoding"))
return cte_header
try:
token, value = get_token(value)
except errors.HeaderParseError:
cte_header.defects.append(errors.InvalidHeaderDefect(
"Expected content transfer encoding but found {!r}".format(value)))
else:
cte_header.append(token)
cte_header.cte = token.value.strip().lower()
if not value:
return cte_header
while value:
cte_header.defects.append(errors.InvalidHeaderDefect(
"Extra text after content transfer encoding"))
if value[0] in PHRASE_ENDS:
cte_header.append(ValueTerminal(value[0], 'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
cte_header.append(token)
return cte_header
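# Minimal sketch (hypothetical helper): the cte attribute is normalized to
# lowercase; trailing junk is recorded as defects rather than raising.
def _demo_parse_cte():
    cte = parse_content_transfer_encoding_header('Base64')
    assert cte.cte == 'base64'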
=======
"""Header value parser implementing various email-related RFC parsing rules.
The parsing methods defined in this module implement various email-related
parsing rules. Principal among them is RFC 5322, which is the follow-on
to RFC 2822 and primarily a clarification of the former. It also implements
RFC 2047 encoded word decoding.
RFC 5322 goes to considerable trouble to maintain backward compatibility with
RFC 822 in the parse phase, while cleaning up the structure on the generation
phase. This parser supports correct RFC 5322 generation by tagging white space
as folding white space only when folding is allowed in the non-obsolete rule
sets. Actually, the parser is even more generous when accepting input than RFC
5322 mandates, following the spirit of Postel's Law, which RFC 5322 encourages.
Where possible deviations from the standard are annotated on the 'defects'
attribute of tokens that deviate.
The general structure of the parser follows RFC 5322, and uses its terminology
where there is a direct correspondence. Where the implementation requires a
somewhat different structure than that used by the formal grammar, new terms
that mimic the closest existing terms are used. Thus, it really helps to have
a copy of RFC 5322 handy when studying this code.
Input to the parser is a string that has already been unfolded according to
RFC 5322 rules. According to the RFC this unfolding is the very first step, and
this parser leaves the unfolding step to a higher level message parser, which
will have already detected the line breaks that need unfolding while
determining the beginning and end of each header.
The output of the parser is a TokenList object, which is a list subclass. A
TokenList is a recursive data structure. The terminal nodes of the structure
are Terminal objects, which are subclasses of str. These do not correspond
directly to terminal objects in the formal grammar, but are instead more
practical higher level combinations of true terminals.
All TokenList and Terminal objects have a 'value' attribute, which produces the
semantically meaningful value of that part of the parse subtree. The value of
all whitespace tokens (no matter how many sub-tokens they may contain) is a
single space, as per the RFC rules. This includes 'CFWS', which is herein
included in the general class of whitespace tokens. There is one exception to
the rule that whitespace tokens are collapsed into single spaces in values: in
the value of a 'bare-quoted-string' (a quoted-string with no leading or
trailing whitespace), any whitespace that appeared between the quotation marks
is preserved in the returned value. Note that in all Terminal strings quoted
pairs are turned into their unquoted values.
All TokenList and Terminal objects also have a string value, which attempts to
be a "canonical" representation of the RFC-compliant form of the substring that
produced the parsed subtree, including minimal use of quoted pair quoting.
Whitespace runs are not collapsed.
Comment tokens also have a 'content' attribute providing the string found
between the parens (including any nested comments) with whitespace preserved.
All TokenList and Terminal objects have a 'defects' attribute which is a
possibly empty list of all the defects found while creating the token. Defects
may appear on any token in the tree, and a composite list of all defects in the
subtree is available through the 'all_defects' attribute of any node. (For
Terminal nodes, x.defects == x.all_defects.)
Each object in a parse tree is called a 'token', and each has a 'token_type'
attribute that gives the name from the RFC 5322 grammar that it represents.
Not all RFC 5322 nodes are produced, and there is one non-RFC 5322 node that
may be produced: 'ptext'. A 'ptext' is a string of printable ascii characters.
It is returned in place of lists of (ctext/quoted-pair) and
(qtext/quoted-pair).
XXX: provide complete list of token types.
"""
import re
import urllib.parse  # for urllib.parse.unquote and unquote_to_bytes
from string import hexdigits
from collections import OrderedDict
from email import _encoded_words as _ew
from email import errors
from email import utils
#
# Useful constants and functions
#
WSP = set(' \t')
CFWS_LEADER = WSP | set('(')
SPECIALS = set(r'()<>@,:;.\"[]')
ATOM_ENDS = SPECIALS | WSP
DOT_ATOM_ENDS = ATOM_ENDS - set('.')
# '.', '"', and '(' do not end phrases in order to support obs-phrase
PHRASE_ENDS = SPECIALS - set('."(')
TSPECIALS = (SPECIALS | set('/?=')) - set('.')
TOKEN_ENDS = TSPECIALS | WSP
ASPECIALS = TSPECIALS | set("*'%")
ATTRIBUTE_ENDS = ASPECIALS | WSP
EXTENDED_ATTRIBUTE_ENDS = ATTRIBUTE_ENDS - set('%')
def quote_string(value):
return '"'+str(value).replace('\\', '\\\\').replace('"', r'\"')+'"'
#
# Accumulator for header folding
#
class _Folded:
def __init__(self, maxlen, policy):
self.maxlen = maxlen
self.policy = policy
self.lastlen = 0
self.stickyspace = None
self.firstline = True
self.done = []
self.current = []
def newline(self):
self.done.extend(self.current)
self.done.append(self.policy.linesep)
self.current.clear()
self.lastlen = 0
def finalize(self):
if self.current:
self.newline()
def __str__(self):
return ''.join(self.done)
def append(self, stoken):
self.current.append(stoken)
def append_if_fits(self, token, stoken=None):
if stoken is None:
stoken = str(token)
l = len(stoken)
if self.stickyspace is not None:
stickyspace_len = len(self.stickyspace)
if self.lastlen + stickyspace_len + l <= self.maxlen:
self.current.append(self.stickyspace)
self.lastlen += stickyspace_len
self.current.append(stoken)
self.lastlen += l
self.stickyspace = None
self.firstline = False
return True
if token.has_fws:
ws = token.pop_leading_fws()
if ws is not None:
self.stickyspace += str(ws)
stickyspace_len += len(ws)
token._fold(self)
return True
if stickyspace_len and l + 1 <= self.maxlen:
margin = self.maxlen - l
if 0 < margin < stickyspace_len:
trim = stickyspace_len - margin
self.current.append(self.stickyspace[:trim])
self.stickyspace = self.stickyspace[trim:]
stickyspace_len = trim
self.newline()
self.current.append(self.stickyspace)
self.current.append(stoken)
self.lastlen = l + stickyspace_len
self.stickyspace = None
self.firstline = False
return True
if not self.firstline:
self.newline()
self.current.append(self.stickyspace)
self.current.append(stoken)
self.stickyspace = None
self.firstline = False
return True
if self.lastlen + l <= self.maxlen:
self.current.append(stoken)
self.lastlen += l
return True
if l < self.maxlen:
self.newline()
self.current.append(stoken)
self.lastlen = l
return True
return False
#
# TokenList and its subclasses
#
class TokenList(list):
token_type = None
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self.defects = []
def __str__(self):
return ''.join(str(x) for x in self)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__,
super().__repr__())
@property
def value(self):
return ''.join(x.value for x in self if x.value)
@property
def all_defects(self):
return sum((x.all_defects for x in self), self.defects)
#
# Folding API
#
# parts():
#
# return a list of objects that constitute the "higher level syntactic
# objects" specified by the RFC as the best places to fold a header line.
# The returned objects must include leading folding white space, even if
# this means mutating the underlying parse tree of the object. Each object
# is only responsible for returning *its* parts, and should not drill down
# to any lower level except as required to meet the leading folding white
# space constraint.
#
# _fold(folded):
#
# folded: the result accumulator. This is an instance of _Folded.
# (XXX: I haven't finished factoring this out yet, the folding code
# pretty much uses this as a state object.) When the folded.current
# contains as much text as will fit, the _fold method should call
# folded.newline.
    # folded.lastlen: the current length of the text stored in folded.current.
# folded.maxlen: The maximum number of characters that may appear on a
# folded line. Differs from the policy setting in that "no limit" is
# represented by +inf, which means it can be used in the trivially
# logical fashion in comparisons.
#
# Currently no subclasses implement parts, and I think this will remain
# true. A subclass only needs to implement _fold when the generic version
# isn't sufficient. _fold will need to be implemented primarily when it is
# possible for encoded words to appear in the specialized token-list, since
# there is no generic algorithm that can know where exactly the encoded
# words are allowed. A _fold implementation is responsible for filling
# lines in the same general way that the top level _fold does. It may, and
# should, call the _fold method of sub-objects in a similar fashion to that
# of the top level _fold.
#
# XXX: I'm hoping it will be possible to factor the existing code further
# to reduce redundancy and make the logic clearer.
@property
def parts(self):
klass = self.__class__
this = []
for token in self:
if token.startswith_fws():
if this:
yield this[0] if len(this)==1 else klass(this)
this.clear()
end_ws = token.pop_trailing_ws()
this.append(token)
if end_ws:
yield klass(this)
this = [end_ws]
if this:
yield this[0] if len(this)==1 else klass(this)
def startswith_fws(self):
return self[0].startswith_fws()
def pop_leading_fws(self):
if self[0].token_type == 'fws':
return self.pop(0)
return self[0].pop_leading_fws()
def pop_trailing_ws(self):
if self[-1].token_type == 'cfws':
return self.pop(-1)
return self[-1].pop_trailing_ws()
@property
def has_fws(self):
for part in self:
if part.has_fws:
return True
return False
def has_leading_comment(self):
return self[0].has_leading_comment()
@property
def comments(self):
comments = []
for token in self:
comments.extend(token.comments)
return comments
def fold(self, *, policy):
# max_line_length 0/None means no limit, ie: infinitely long.
maxlen = policy.max_line_length or float("+inf")
folded = _Folded(maxlen, policy)
self._fold(folded)
folded.finalize()
return str(folded)
def as_encoded_word(self, charset):
# This works only for things returned by 'parts', which include
# the leading fws, if any, that should be used.
res = []
ws = self.pop_leading_fws()
if ws:
res.append(ws)
trailer = self.pop(-1) if self[-1].token_type=='fws' else ''
res.append(_ew.encode(str(self), charset))
res.append(trailer)
return ''.join(res)
def cte_encode(self, charset, policy):
res = []
for part in self:
res.append(part.cte_encode(charset, policy))
return ''.join(res)
def _fold(self, folded):
for part in self.parts:
tstr = str(part)
tlen = len(tstr)
try:
str(part).encode('us-ascii')
except UnicodeEncodeError:
if any(isinstance(x, errors.UndecodableBytesDefect)
for x in part.all_defects):
charset = 'unknown-8bit'
else:
# XXX: this should be a policy setting
charset = 'utf-8'
tstr = part.cte_encode(charset, folded.policy)
tlen = len(tstr)
if folded.append_if_fits(part, tstr):
continue
# Peel off the leading whitespace if any and make it sticky, to
# avoid infinite recursion.
ws = part.pop_leading_fws()
if ws is not None:
                folded.stickyspace = str(ws)
if folded.append_if_fits(part):
continue
if part.has_fws:
part._fold(folded)
continue
# There are no fold points in this one; it is too long for a single
# line and can't be split...we just have to put it on its own line.
folded.append(tstr)
folded.newline()
def pprint(self, indent=''):
print('\n'.join(self._pp(indent='')))
def ppstr(self, indent=''):
return '\n'.join(self._pp(indent=''))
def _pp(self, indent=''):
yield '{}{}/{}('.format(
indent,
self.__class__.__name__,
self.token_type)
for token in self:
if not hasattr(token, '_pp'):
yield (indent + ' !! invalid element in token '
'list: {!r}'.format(token))
else:
yield from token._pp(indent+' ')
if self.defects:
extra = ' Defects: {}'.format(self.defects)
else:
extra = ''
yield '{}){}'.format(indent, extra)
class WhiteSpaceTokenList(TokenList):
@property
def value(self):
return ' '
@property
def comments(self):
return [x.content for x in self if x.token_type=='comment']
class UnstructuredTokenList(TokenList):
token_type = 'unstructured'
def _fold(self, folded):
last_ew = None
for part in self.parts:
tstr = str(part)
is_ew = False
try:
str(part).encode('us-ascii')
except UnicodeEncodeError:
if any(isinstance(x, errors.UndecodableBytesDefect)
for x in part.all_defects):
charset = 'unknown-8bit'
else:
charset = 'utf-8'
if last_ew is not None:
# We've already done an EW, combine this one with it
# if there's room.
chunk = get_unstructured(
''.join(folded.current[last_ew:]+[tstr])).as_encoded_word(charset)
oldlastlen = sum(len(x) for x in folded.current[:last_ew])
schunk = str(chunk)
lchunk = len(schunk)
if oldlastlen + lchunk <= folded.maxlen:
del folded.current[last_ew:]
folded.append(schunk)
folded.lastlen = oldlastlen + lchunk
continue
tstr = part.as_encoded_word(charset)
is_ew = True
if folded.append_if_fits(part, tstr):
if is_ew:
last_ew = len(folded.current) - 1
continue
if is_ew or last_ew:
# It's too big to fit on the line, but since we've
# got encoded words we can use encoded word folding.
part._fold_as_ew(folded)
continue
# Peel off the leading whitespace if any and make it sticky, to
# avoid infinite recursion.
ws = part.pop_leading_fws()
if ws is not None:
folded.stickyspace = str(ws)
if folded.append_if_fits(part):
continue
if part.has_fws:
                part._fold(folded)
continue
# It can't be split...we just have to put it on its own line.
folded.append(tstr)
folded.newline()
last_ew = None
def cte_encode(self, charset, policy):
res = []
last_ew = None
for part in self:
spart = str(part)
try:
spart.encode('us-ascii')
res.append(spart)
except UnicodeEncodeError:
if last_ew is None:
res.append(part.cte_encode(charset, policy))
last_ew = len(res)
else:
tl = get_unstructured(''.join(res[last_ew:] + [spart]))
                    res.append(tl.as_encoded_word(charset))
return ''.join(res)
class Phrase(TokenList):
token_type = 'phrase'
def _fold(self, folded):
# As with Unstructured, we can have pure ASCII with or without
# surrogateescape encoded bytes, or we could have unicode. But this
# case is more complicated, since we have to deal with the various
# sub-token types and how they can be composed in the face of
        # unicode-that-needs-CTE-encoding, and the fact that if a token has
        # a comment, that comment becomes a barrier across which we can't
        # compose encoded words.
last_ew = None
for part in self.parts:
tstr = str(part)
tlen = len(tstr)
has_ew = False
try:
str(part).encode('us-ascii')
except UnicodeEncodeError:
if any(isinstance(x, errors.UndecodableBytesDefect)
for x in part.all_defects):
charset = 'unknown-8bit'
else:
charset = 'utf-8'
if last_ew is not None and not part.has_leading_comment():
# We've already done an EW, let's see if we can combine
# this one with it. The last_ew logic ensures that all we
# have at this point is atoms, no comments or quoted
# strings. So we can treat the text between the last
# encoded word and the content of this token as
# unstructured text, and things will work correctly. But
# we have to strip off any trailing comment on this token
# first, and if it is a quoted string we have to pull out
# the content (we're encoding it, so it no longer needs to
# be quoted).
if part[-1].token_type == 'cfws' and part.comments:
remainder = part.pop(-1)
else:
remainder = ''
for i, token in enumerate(part):
if token.token_type == 'bare-quoted-string':
part[i] = UnstructuredTokenList(token[:])
chunk = get_unstructured(
''.join(folded.current[last_ew:]+[tstr])).as_encoded_word(charset)
schunk = str(chunk)
lchunk = len(schunk)
if last_ew + lchunk <= folded.maxlen:
del folded.current[last_ew:]
folded.append(schunk)
folded.lastlen = sum(len(x) for x in folded.current)
continue
tstr = part.as_encoded_word(charset)
tlen = len(tstr)
has_ew = True
if folded.append_if_fits(part, tstr):
if has_ew and not part.comments:
last_ew = len(folded.current) - 1
elif part.comments or part.token_type == 'quoted-string':
# If a comment is involved we can't combine EWs. And if a
# quoted string is involved, it's not worth the effort to
# try to combine them.
last_ew = None
continue
part._fold(folded)
def cte_encode(self, charset, policy):
res = []
last_ew = None
is_ew = False
for part in self:
spart = str(part)
try:
spart.encode('us-ascii')
res.append(spart)
except UnicodeEncodeError:
is_ew = True
if last_ew is None:
if not part.comments:
last_ew = len(res)
res.append(part.cte_encode(charset, policy))
elif not part.has_leading_comment():
if part[-1].token_type == 'cfws' and part.comments:
remainder = part.pop(-1)
else:
remainder = ''
for i, token in enumerate(part):
if token.token_type == 'bare-quoted-string':
part[i] = UnstructuredTokenList(token[:])
tl = get_unstructured(''.join(res[last_ew:] + [spart]))
res[last_ew:] = [tl.as_encoded_word(charset)]
if part.comments or (not is_ew and part.token_type == 'quoted-string'):
last_ew = None
return ''.join(res)
class Word(TokenList):
token_type = 'word'
class CFWSList(WhiteSpaceTokenList):
token_type = 'cfws'
def has_leading_comment(self):
return bool(self.comments)
class Atom(TokenList):
token_type = 'atom'
class Token(TokenList):
token_type = 'token'
class EncodedWord(TokenList):
token_type = 'encoded-word'
cte = None
charset = None
lang = None
@property
def encoded(self):
if self.cte is not None:
return self.cte
        return _ew.encode(str(self), self.charset)
class QuotedString(TokenList):
token_type = 'quoted-string'
@property
def content(self):
for x in self:
if x.token_type == 'bare-quoted-string':
return x.value
@property
def quoted_value(self):
res = []
for x in self:
if x.token_type == 'bare-quoted-string':
res.append(str(x))
else:
res.append(x.value)
return ''.join(res)
@property
def stripped_value(self):
for token in self:
if token.token_type == 'bare-quoted-string':
return token.value
class BareQuotedString(QuotedString):
token_type = 'bare-quoted-string'
def __str__(self):
return quote_string(''.join(str(x) for x in self))
@property
def value(self):
return ''.join(str(x) for x in self)
class Comment(WhiteSpaceTokenList):
token_type = 'comment'
def __str__(self):
return ''.join(sum([
["("],
[self.quote(x) for x in self],
[")"],
], []))
def quote(self, value):
if value.token_type == 'comment':
return str(value)
        return str(value).replace('\\', '\\\\').replace(
            '(', r'\(').replace(
            ')', r'\)')
@property
def content(self):
return ''.join(str(x) for x in self)
@property
def comments(self):
return [self.content]
class AddressList(TokenList):
token_type = 'address-list'
@property
def addresses(self):
return [x for x in self if x.token_type=='address']
@property
def mailboxes(self):
return sum((x.mailboxes
for x in self if x.token_type=='address'), [])
@property
def all_mailboxes(self):
return sum((x.all_mailboxes
for x in self if x.token_type=='address'), [])
class Address(TokenList):
token_type = 'address'
@property
def display_name(self):
if self[0].token_type == 'group':
return self[0].display_name
@property
def mailboxes(self):
if self[0].token_type == 'mailbox':
return [self[0]]
elif self[0].token_type == 'invalid-mailbox':
return []
return self[0].mailboxes
@property
def all_mailboxes(self):
if self[0].token_type == 'mailbox':
return [self[0]]
elif self[0].token_type == 'invalid-mailbox':
return [self[0]]
return self[0].all_mailboxes
class MailboxList(TokenList):
token_type = 'mailbox-list'
@property
def mailboxes(self):
return [x for x in self if x.token_type=='mailbox']
@property
def all_mailboxes(self):
return [x for x in self
if x.token_type in ('mailbox', 'invalid-mailbox')]
class GroupList(TokenList):
token_type = 'group-list'
@property
def mailboxes(self):
if not self or self[0].token_type != 'mailbox-list':
return []
return self[0].mailboxes
@property
def all_mailboxes(self):
if not self or self[0].token_type != 'mailbox-list':
return []
return self[0].all_mailboxes
class Group(TokenList):
token_type = "group"
@property
def mailboxes(self):
if self[2].token_type != 'group-list':
return []
return self[2].mailboxes
@property
def all_mailboxes(self):
if self[2].token_type != 'group-list':
return []
return self[2].all_mailboxes
@property
def display_name(self):
return self[0].display_name
class NameAddr(TokenList):
token_type = 'name-addr'
@property
def display_name(self):
if len(self) == 1:
return None
return self[0].display_name
@property
def local_part(self):
return self[-1].local_part
@property
def domain(self):
return self[-1].domain
@property
def route(self):
return self[-1].route
@property
def addr_spec(self):
return self[-1].addr_spec
class AngleAddr(TokenList):
token_type = 'angle-addr'
@property
def local_part(self):
for x in self:
if x.token_type == 'addr-spec':
return x.local_part
@property
def domain(self):
for x in self:
if x.token_type == 'addr-spec':
return x.domain
@property
def route(self):
for x in self:
if x.token_type == 'obs-route':
return x.domains
@property
def addr_spec(self):
for x in self:
if x.token_type == 'addr-spec':
return x.addr_spec
else:
return '<>'
class ObsRoute(TokenList):
token_type = 'obs-route'
@property
def domains(self):
return [x.domain for x in self if x.token_type == 'domain']
class Mailbox(TokenList):
token_type = 'mailbox'
@property
def display_name(self):
if self[0].token_type == 'name-addr':
return self[0].display_name
@property
def local_part(self):
return self[0].local_part
@property
def domain(self):
return self[0].domain
@property
def route(self):
if self[0].token_type == 'name-addr':
return self[0].route
@property
def addr_spec(self):
return self[0].addr_spec
class InvalidMailbox(TokenList):
token_type = 'invalid-mailbox'
@property
def display_name(self):
return None
local_part = domain = route = addr_spec = display_name
class Domain(TokenList):
token_type = 'domain'
@property
def domain(self):
return ''.join(super().value.split())
class DotAtom(TokenList):
token_type = 'dot-atom'
class DotAtomText(TokenList):
token_type = 'dot-atom-text'
class AddrSpec(TokenList):
token_type = 'addr-spec'
@property
def local_part(self):
return self[0].local_part
@property
def domain(self):
if len(self) < 3:
return None
return self[-1].domain
@property
def value(self):
if len(self) < 3:
return self[0].value
return self[0].value.rstrip()+self[1].value+self[2].value.lstrip()
@property
def addr_spec(self):
nameset = set(self.local_part)
if len(nameset) > len(nameset-DOT_ATOM_ENDS):
lp = quote_string(self.local_part)
else:
lp = self.local_part
if self.domain is not None:
return lp + '@' + self.domain
return lp
class ObsLocalPart(TokenList):
token_type = 'obs-local-part'
class DisplayName(Phrase):
token_type = 'display-name'
@property
def display_name(self):
res = TokenList(self)
if res[0].token_type == 'cfws':
res.pop(0)
else:
if res[0][0].token_type == 'cfws':
res[0] = TokenList(res[0][1:])
if res[-1].token_type == 'cfws':
res.pop()
else:
if res[-1][-1].token_type == 'cfws':
res[-1] = TokenList(res[-1][:-1])
return res.value
@property
def value(self):
quote = False
if self.defects:
quote = True
else:
for x in self:
if x.token_type == 'quoted-string':
quote = True
if quote:
pre = post = ''
if self[0].token_type=='cfws' or self[0][0].token_type=='cfws':
pre = ' '
if self[-1].token_type=='cfws' or self[-1][-1].token_type=='cfws':
post = ' '
return pre+quote_string(self.display_name)+post
else:
return super().value
class LocalPart(TokenList):
token_type = 'local-part'
@property
def value(self):
if self[0].token_type == "quoted-string":
return self[0].quoted_value
else:
return self[0].value
@property
def local_part(self):
# Strip whitespace from front, back, and around dots.
res = [DOT]
last = DOT
last_is_tl = False
for tok in self[0] + [DOT]:
if tok.token_type == 'cfws':
continue
if (last_is_tl and tok.token_type == 'dot' and
last[-1].token_type == 'cfws'):
res[-1] = TokenList(last[:-1])
is_tl = isinstance(tok, TokenList)
if (is_tl and last.token_type == 'dot' and
tok[0].token_type == 'cfws'):
res.append(TokenList(tok[1:]))
else:
res.append(tok)
last = res[-1]
last_is_tl = is_tl
res = TokenList(res[1:-1])
return res.value
class DomainLiteral(TokenList):
token_type = 'domain-literal'
@property
def domain(self):
return ''.join(super().value.split())
@property
def ip(self):
for x in self:
if x.token_type == 'ptext':
return x.value
class MIMEVersion(TokenList):
token_type = 'mime-version'
major = None
minor = None
class Parameter(TokenList):
token_type = 'parameter'
sectioned = False
extended = False
charset = 'us-ascii'
@property
def section_number(self):
# Because the first token, the attribute (name) eats CFWS, the second
# token is always the section if there is one.
return self[1].number if self.sectioned else 0
@property
def param_value(self):
# This is part of the "handle quoted extended parameters" hack.
for token in self:
if token.token_type == 'value':
return token.stripped_value
if token.token_type == 'quoted-string':
for token in token:
if token.token_type == 'bare-quoted-string':
for token in token:
if token.token_type == 'value':
return token.stripped_value
return ''
class InvalidParameter(Parameter):
token_type = 'invalid-parameter'
class Attribute(TokenList):
token_type = 'attribute'
@property
def stripped_value(self):
for token in self:
if token.token_type.endswith('attrtext'):
return token.value
class Section(TokenList):
token_type = 'section'
number = None
class Value(TokenList):
token_type = 'value'
@property
def stripped_value(self):
token = self[0]
if token.token_type == 'cfws':
token = self[1]
if token.token_type.endswith(
('quoted-string', 'attribute', 'extended-attribute')):
return token.stripped_value
return self.value
class MimeParameters(TokenList):
token_type = 'mime-parameters'
@property
def params(self):
# The RFC specifically states that the ordering of parameters is not
# guaranteed and may be reordered by the transport layer. So we have
# to assume the RFC 2231 pieces can come in any order. However, we
# output them in the order that we first see a given name, which gives
# us a stable __str__.
params = OrderedDict()
for token in self:
if not token.token_type.endswith('parameter'):
continue
if token[0].token_type != 'attribute':
continue
name = token[0].value.strip()
if name not in params:
params[name] = []
params[name].append((token.section_number, token))
for name, parts in params.items():
parts = sorted(parts)
# XXX: there might be more recovery we could do here if, for
# example, this is really a case of a duplicate attribute name.
value_parts = []
charset = parts[0][1].charset
for i, (section_number, param) in enumerate(parts):
if section_number != i:
param.defects.append(errors.InvalidHeaderDefect(
"inconsistent multipart parameter numbering"))
value = param.param_value
if param.extended:
try:
value = urllib.parse.unquote_to_bytes(value)
except UnicodeEncodeError:
# source had surrogate escaped bytes. What we do now
# is a bit of an open question. I'm not sure this is
# the best choice, but it is what the old algorithm did
value = urllib.parse.unquote(value, encoding='latin-1')
else:
try:
value = value.decode(charset, 'surrogateescape')
except LookupError:
# XXX: there should really be a custom defect for
# unknown character set to make it easy to find,
# because otherwise unknown charset is a silent
# failure.
value = value.decode('us-ascii', 'surrogateescape')
if utils._has_surrogates(value):
param.defects.append(errors.UndecodableBytesDefect())
value_parts.append(value)
value = ''.join(value_parts)
yield name, value
def __str__(self):
params = []
for name, value in self.params:
if value:
params.append('{}={}'.format(name, quote_string(value)))
else:
params.append(name)
params = '; '.join(params)
return ' ' + params if params else ''
class ParameterizedHeaderValue(TokenList):
@property
def params(self):
for token in reversed(self):
if token.token_type == 'mime-parameters':
return token.params
return {}
@property
def parts(self):
if self and self[-1].token_type == 'mime-parameters':
# We don't want to start a new line if all of the params don't fit
# after the value, so unwrap the parameter list.
return TokenList(self[:-1] + self[-1])
return TokenList(self).parts
class ContentType(ParameterizedHeaderValue):
token_type = 'content-type'
maintype = 'text'
subtype = 'plain'
class ContentDisposition(ParameterizedHeaderValue):
token_type = 'content-disposition'
content_disposition = None
class ContentTransferEncoding(TokenList):
token_type = 'content-transfer-encoding'
cte = '7bit'
class HeaderLabel(TokenList):
token_type = 'header-label'
class Header(TokenList):
token_type = 'header'
def _fold(self, folded):
folded.append(str(self.pop(0)))
folded.lastlen = len(folded.current[0])
# The first line of the header is different from all others: we don't
# want to start a new object on a new line if it has any fold points in
# it that would allow part of it to be on the first header line.
# Further, if the first fold point would fit on the new line, we want
# to do that, but if it doesn't we want to put it on the first line.
# Folded supports this via the stickyspace attribute. If this
# attribute is not None, it does the special handling.
folded.stickyspace = str(self.pop(0)) if self[0].token_type == 'cfws' else ''
rest = self.pop(0)
if self:
raise ValueError("Malformed Header token list")
rest._fold(folded)
#
# Terminal classes and instances
#
class Terminal(str):
def __new__(cls, value, token_type):
self = super().__new__(cls, value)
self.token_type = token_type
self.defects = []
return self
def __repr__(self):
return "{}({})".format(self.__class__.__name__, super().__repr__())
@property
def all_defects(self):
return list(self.defects)
def _pp(self, indent=''):
return ["{}{}/{}({}){}".format(
indent,
self.__class__.__name__,
self.token_type,
super().__repr__(),
'' if not self.defects else ' {}'.format(self.defects),
)]
def cte_encode(self, charset, policy):
value = str(self)
try:
value.encode('us-ascii')
return value
except UnicodeEncodeError:
return _ew.encode(value, charset)
def pop_trailing_ws(self):
# This terminates the recursion.
return None
def pop_leading_fws(self):
# This terminates the recursion.
return None
@property
def comments(self):
return []
def has_leading_comment(self):
return False
def __getnewargs__(self):
return(str(self), self.token_type)
class WhiteSpaceTerminal(Terminal):
@property
def value(self):
return ' '
def startswith_fws(self):
return True
has_fws = True
class ValueTerminal(Terminal):
@property
def value(self):
return self
def startswith_fws(self):
return False
has_fws = False
def as_encoded_word(self, charset):
return _ew.encode(str(self), charset)
class EWWhiteSpaceTerminal(WhiteSpaceTerminal):
@property
def value(self):
return ''
@property
def encoded(self):
return self[:]
def __str__(self):
return ''
has_fws = True
# XXX these need to become classes and be used as instances so
# that a program can't change them in a parse tree and screw
# up other parse trees. Maybe should have tests for that, too.
DOT = ValueTerminal('.', 'dot')
ListSeparator = ValueTerminal(',', 'list-separator')
RouteComponentMarker = ValueTerminal('@', 'route-component-marker')
#
# Parser
#
# Parse strings according to RFC822/2047/2822/5322 rules.
#
# This is a stateless parser. Each get_XXX function accepts a string and
# returns either a Terminal or a TokenList representing the RFC object named
# by the method and a string containing the remaining unparsed characters
# from the input. Thus a parser method consumes the next syntactic construct
# of a given type and returns a token representing the construct plus the
# unparsed remainder of the input string.
#
# For example, if the first element of a structured header is a 'phrase',
# then:
#
# phrase, value = get_phrase(value)
#
# returns the complete phrase from the start of the string value, plus any
# characters left in the string after the phrase is removed.
_wsp_splitter = re.compile(r'([{}]+)'.format(''.join(WSP))).split
_non_atom_end_matcher = re.compile(r"[^{}]+".format(
    ''.join(ATOM_ENDS).replace('\\','\\\\').replace(']',r'\]'))).match
_non_printable_finder = re.compile(r"[\x00-\x20\x7F]").findall
_non_token_end_matcher = re.compile(r"[^{}]+".format(
    ''.join(TOKEN_ENDS).replace('\\','\\\\').replace(']',r'\]'))).match
_non_attribute_end_matcher = re.compile(r"[^{}]+".format(
    ''.join(ATTRIBUTE_ENDS).replace('\\','\\\\').replace(']',r'\]'))).match
_non_extended_attribute_end_matcher = re.compile(r"[^{}]+".format(
    ''.join(EXTENDED_ATTRIBUTE_ENDS).replace(
        '\\','\\\\').replace(']',r'\]'))).match
def _validate_xtext(xtext):
"""If input token contains ASCII non-printables, register a defect."""
non_printables = _non_printable_finder(xtext)
if non_printables:
xtext.defects.append(errors.NonPrintableDefect(non_printables))
if utils._has_surrogates(xtext):
xtext.defects.append(errors.UndecodableBytesDefect(
"Non-ASCII characters found in header token"))
def _get_ptext_to_endchars(value, endchars):
"""Scan printables/quoted-pairs until endchars and return unquoted ptext.
This function turns a run of qcontent, ccontent-without-comments, or
dtext-with-quoted-printables into a single string by unquoting any
quoted printables. It returns the string, the remaining value, and
a flag that is True iff there were any quoted printables decoded.
"""
fragment, *remainder = _wsp_splitter(value, 1)
vchars = []
escape = False
had_qp = False
for pos in range(len(fragment)):
if fragment[pos] == '\\':
if escape:
escape = False
had_qp = True
else:
escape = True
continue
if escape:
escape = False
elif fragment[pos] in endchars:
break
vchars.append(fragment[pos])
else:
pos = pos + 1
return ''.join(vchars), ''.join([fragment[pos:]] + remainder), had_qp
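# Usage sketch (hypothetical helper): quoted pairs are decoded and scanning
# stops at the first unquoted endchar. Note that had_qp, as written, is set
# only for a doubled backslash.
def _demo_ptext_to_endchars():
    ptext, rest, had_qp = _get_ptext_to_endchars(r'foo\(bar) baz', '()')
    assert ptext == 'foo(bar' and rest == ') baz' and not had_qp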
def get_fws(value):
"""FWS = 1*WSP
This isn't the RFC definition. We're using fws to represent tokens where
folding can be done, but when we are parsing the *un*folding has already
been done so we don't need to watch out for CRLF.
"""
newvalue = value.lstrip()
fws = WhiteSpaceTerminal(value[:len(value)-len(newvalue)], 'fws')
return fws, newvalue
def get_encoded_word(value):
""" encoded-word = "=?" charset "?" encoding "?" encoded-text "?="
"""
ew = EncodedWord()
if not value.startswith('=?'):
raise errors.HeaderParseError(
"expected encoded word but found {}".format(value))
tok, *remainder = value[2:].split('?=', 1)
if tok == value[2:]:
raise errors.HeaderParseError(
"expected encoded word but found {}".format(value))
remstr = ''.join(remainder)
if len(remstr) > 1 and remstr[0] in hexdigits and remstr[1] in hexdigits:
# The ? after the CTE was followed by an encoded word escape (=XX).
rest, *remainder = remstr.split('?=', 1)
tok = tok + '?=' + rest
if len(tok.split()) > 1:
ew.defects.append(errors.InvalidHeaderDefect(
"whitespace inside encoded word"))
ew.cte = value
value = ''.join(remainder)
try:
text, charset, lang, defects = _ew.decode('=?' + tok + '?=')
except ValueError:
raise errors.HeaderParseError(
"encoded word format invalid: '{}'".format(ew.cte))
ew.charset = charset
ew.lang = lang
ew.defects.extend(defects)
while text:
if text[0] in WSP:
token, text = get_fws(text)
ew.append(token)
continue
chars, *remainder = _wsp_splitter(text, 1)
vtext = ValueTerminal(chars, 'vtext')
_validate_xtext(vtext)
ew.append(vtext)
text = ''.join(remainder)
return ew, value
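# Minimal sketch (hypothetical helper): in Q encoding an underscore decodes
# to a space, and the charset is recorded on the EncodedWord token.
def _demo_get_encoded_word():
    ew, rest = get_encoded_word('=?us-ascii?q?some_text?= tail')
    assert ew.value == 'some text' and ew.charset == 'us-ascii'
    assert rest == ' tail'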
def get_unstructured(value):
"""unstructured = (*([FWS] vchar) *WSP) / obs-unstruct
    obs-unstruct = *((*LF *CR *(obs-utext *LF *CR)) / FWS)
obs-utext = %d0 / obs-NO-WS-CTL / LF / CR
obs-NO-WS-CTL is control characters except WSP/CR/LF.
So, basically, we have printable runs, plus control characters or nulls in
the obsolete syntax, separated by whitespace. Since RFC 2047 uses the
obsolete syntax in its specification, but requires whitespace on either
side of the encoded words, I can see no reason to need to separate the
non-printable-non-whitespace from the printable runs if they occur, so we
parse this into xtext tokens separated by WSP tokens.
Because an 'unstructured' value must by definition constitute the entire
value, this 'get' routine does not return a remaining value, only the
parsed TokenList.
"""
# XXX: but what about bare CR and LF? They might signal the start or
# end of an encoded word. YAGNI for now, since our current parsers
# will never send us strings with bare CR or LF.
unstructured = UnstructuredTokenList()
while value:
if value[0] in WSP:
token, value = get_fws(value)
unstructured.append(token)
continue
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: Need to figure out how to register defects when
# appropriate here.
pass
else:
have_ws = True
if len(unstructured) > 0:
if unstructured[-1].token_type != 'fws':
unstructured.defects.append(errors.InvalidHeaderDefect(
"missing whitespace before encoded word"))
have_ws = False
if have_ws and len(unstructured) > 1:
if unstructured[-2].token_type == 'encoded-word':
unstructured[-1] = EWWhiteSpaceTerminal(
unstructured[-1], 'fws')
unstructured.append(token)
continue
tok, *remainder = _wsp_splitter(value, 1)
vtext = ValueTerminal(tok, 'vtext')
_validate_xtext(vtext)
unstructured.append(vtext)
value = ''.join(remainder)
return unstructured
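# Usage sketch (hypothetical helper): encoded words are decoded in place and
# whitespace tokens collapse to single spaces in the token's value.
def _demo_get_unstructured():
    tl = get_unstructured('=?utf-8?q?caf=C3=A9?= time')
    assert tl.value == 'café time'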
def get_qp_ctext(value):
"""ctext = <printable ascii except \ ( )>
This is not the RFC ctext, since we are handling nested comments in comment
and unquoting quoted-pairs here. We allow anything except the '()'
characters, but if we find any ASCII other than the RFC defined printable
    ASCII, a NonPrintableDefect is added to the token's defects list. Since
quoted pairs are converted to their unquoted values, what is returned is
    a 'ptext' token. In this case it is a WhiteSpaceTerminal, so its value
    is ' '.
"""
ptext, value, _ = _get_ptext_to_endchars(value, '()')
ptext = WhiteSpaceTerminal(ptext, 'ptext')
_validate_xtext(ptext)
return ptext, value
def get_qcontent(value):
"""qcontent = qtext / quoted-pair
We allow anything except the DQUOTE character, but if we find any ASCII
    other than the RFC defined printable ASCII, a NonPrintableDefect is
added to the token's defects list. Any quoted pairs are converted to their
unquoted values, so what is returned is a 'ptext' token. In this case it
is a ValueTerminal.
"""
ptext, value, _ = _get_ptext_to_endchars(value, '"')
ptext = ValueTerminal(ptext, 'ptext')
_validate_xtext(ptext)
return ptext, value
def get_atext(value):
"""atext = <matches _atext_matcher>
We allow any non-ATOM_ENDS in atext, but add an InvalidATextDefect to
the token's defects list if we find non-atext characters.
"""
m = _non_atom_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected atext but found '{}'".format(value))
atext = m.group()
value = value[len(atext):]
atext = ValueTerminal(atext, 'atext')
_validate_xtext(atext)
return atext, value
def get_bare_quoted_string(value):
"""bare-quoted-string = DQUOTE *([FWS] qcontent) [FWS] DQUOTE
A quoted-string without the leading or trailing white space. Its
value is the text between the quote marks, with whitespace
preserved and quoted pairs decoded.
"""
if value[0] != '"':
raise errors.HeaderParseError(
"expected '\"' but found '{}'".format(value))
bare_quoted_string = BareQuotedString()
value = value[1:]
while value and value[0] != '"':
if value[0] in WSP:
token, value = get_fws(value)
elif value[:2] == '=?':
try:
token, value = get_encoded_word(value)
bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
"encoded word inside quoted string"))
except errors.HeaderParseError:
token, value = get_qcontent(value)
else:
token, value = get_qcontent(value)
bare_quoted_string.append(token)
if not value:
bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
"end of header inside quoted string"))
return bare_quoted_string, value
return bare_quoted_string, value[1:]
def get_comment(value):
"""comment = "(" *([FWS] ccontent) [FWS] ")"
ccontent = ctext / quoted-pair / comment
We handle nested comments here, and quoted-pair in our qp-ctext routine.
"""
if value and value[0] != '(':
raise errors.HeaderParseError(
"expected '(' but found '{}'".format(value))
comment = Comment()
value = value[1:]
while value and value[0] != ")":
if value[0] in WSP:
token, value = get_fws(value)
elif value[0] == '(':
token, value = get_comment(value)
else:
token, value = get_qp_ctext(value)
comment.append(token)
if not value:
comment.defects.append(errors.InvalidHeaderDefect(
"end of header inside comment"))
return comment, value
return comment, value[1:]
def get_cfws(value):
"""CFWS = (1*([FWS] comment) [FWS]) / FWS
"""
cfws = CFWSList()
while value and value[0] in CFWS_LEADER:
if value[0] in WSP:
token, value = get_fws(value)
else:
token, value = get_comment(value)
cfws.append(token)
return cfws, value
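# Hedged example (hypothetical helper): CFWS collapses to a single space in
# value, while the comment text is preserved via the comments property.
def _demo_get_cfws():
    cfws, rest = get_cfws(' (a comment) x')
    assert cfws.value == ' ' and cfws.comments == ['a comment']
    assert rest == 'x'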
def get_quoted_string(value):
"""quoted-string = [CFWS] <bare-quoted-string> [CFWS]
'bare-quoted-string' is an intermediate class defined by this
parser and not by the RFC grammar. It is the quoted string
without any attached CFWS.
"""
quoted_string = QuotedString()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
quoted_string.append(token)
token, value = get_bare_quoted_string(value)
quoted_string.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
quoted_string.append(token)
return quoted_string, value
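# Usage sketch (hypothetical helper): quoted pairs inside the string are
# decoded, and content exposes the text between the quote marks.
def _demo_get_quoted_string():
    qs, rest = get_quoted_string(' "say \\"hi\\"" rest')
    assert qs.content == 'say "hi"' and rest == 'rest'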
def get_atom(value):
"""atom = [CFWS] 1*atext [CFWS]
An atom could be an rfc2047 encoded word.
"""
atom = Atom()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
if value and value[0] in ATOM_ENDS:
raise errors.HeaderParseError(
"expected atom but found '{}'".format(value))
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: need to figure out how to register defects when
# appropriate here.
token, value = get_atext(value)
else:
token, value = get_atext(value)
atom.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
return atom, value
def get_dot_atom_text(value):
""" dot-text = 1*atext *("." 1*atext)
"""
dot_atom_text = DotAtomText()
if not value or value[0] in ATOM_ENDS:
raise errors.HeaderParseError("expected atom at a start of "
"dot-atom-text but found '{}'".format(value))
while value and value[0] not in ATOM_ENDS:
token, value = get_atext(value)
dot_atom_text.append(token)
if value and value[0] == '.':
dot_atom_text.append(DOT)
value = value[1:]
if dot_atom_text[-1] is DOT:
raise errors.HeaderParseError("expected atom at end of dot-atom-text "
"but found '{}'".format('.'+value))
return dot_atom_text, value
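# Minimal sketch (hypothetical helper): dots join runs of atext, and the
# terminating special is left in the remainder.
def _demo_get_dot_atom_text():
    dat, rest = get_dot_atom_text('example.com>')
    assert dat.value == 'example.com' and rest == '>'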
def get_dot_atom(value):
""" dot-atom = [CFWS] dot-atom-text [CFWS]
Any place we can have a dot atom, we could instead have an rfc2047 encoded
word.
"""
dot_atom = DotAtom()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
dot_atom.append(token)
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: need to figure out how to register defects when
# appropriate here.
token, value = get_dot_atom_text(value)
else:
token, value = get_dot_atom_text(value)
dot_atom.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
dot_atom.append(token)
return dot_atom, value
def get_word(value):
"""word = atom / quoted-string
Either atom or quoted-string may start with CFWS. We have to peel off this
CFWS first to determine which type of word to parse. Afterward we splice
the leading CFWS, if any, into the parsed sub-token.
    If neither an atom nor a quoted-string is found before the next special, a
    HeaderParseError is raised.
The token returned is either an Atom or a QuotedString, as appropriate.
This means the 'word' level of the formal grammar is not represented in the
parse tree; this is because having that extra layer when manipulating the
parse tree is more confusing than it is helpful.
"""
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
else:
leader = None
if value[0]=='"':
token, value = get_quoted_string(value)
elif value[0] in SPECIALS:
raise errors.HeaderParseError("Expected 'atom' or 'quoted-string' "
"but found '{}'".format(value))
else:
token, value = get_atom(value)
if leader is not None:
token[:0] = [leader]
return token, value
def get_phrase(value):
""" phrase = 1*word / obs-phrase
obs-phrase = word *(word / "." / CFWS)
This means a phrase can be a sequence of words, periods, and CFWS in any
order as long as it starts with at least one word. If anything other than
words is detected, an ObsoleteHeaderDefect is added to the token's defect
list. We also accept a phrase that starts with CFWS followed by a dot;
this is registered as an InvalidHeaderDefect, since it is not supported by
even the obsolete grammar.
"""
phrase = Phrase()
try:
token, value = get_word(value)
phrase.append(token)
except errors.HeaderParseError:
phrase.defects.append(errors.InvalidHeaderDefect(
"phrase does not start with word"))
while value and value[0] not in PHRASE_ENDS:
if value[0]=='.':
phrase.append(DOT)
phrase.defects.append(errors.ObsoleteHeaderDefect(
"period in 'phrase'"))
value = value[1:]
else:
try:
token, value = get_word(value)
except errors.HeaderParseError:
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
phrase.defects.append(errors.ObsoleteHeaderDefect(
"comment found without atom"))
else:
raise
phrase.append(token)
return phrase, value
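# Illustrative usage (editor's sketch, not part of the original module): the
# period is accepted but registers an ObsoleteHeaderDefect.
#
#   phrase, rest = get_phrase('John Q. Public <jqp@example.com>')
#   str(phrase) -> 'John Q. Public '
#   rest        -> '<jqp@example.com>'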
def get_local_part(value):
""" local-part = dot-atom / quoted-string / obs-local-part
"""
local_part = LocalPart()
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected local-part but found '{}'".format(value))
try:
token, value = get_dot_atom(value)
except errors.HeaderParseError:
try:
token, value = get_word(value)
except errors.HeaderParseError:
if value[0] != '\\' and value[0] in PHRASE_ENDS:
raise
token = TokenList()
if leader is not None:
token[:0] = [leader]
local_part.append(token)
if value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
obs_local_part, value = get_obs_local_part(str(local_part) + value)
if obs_local_part.token_type == 'invalid-obs-local-part':
local_part.defects.append(errors.InvalidHeaderDefect(
"local-part is not dot-atom, quoted-string, or obs-local-part"))
else:
local_part.defects.append(errors.ObsoleteHeaderDefect(
"local-part is not a dot-atom (contains CFWS)"))
local_part[0] = obs_local_part
try:
local_part.value.encode('ascii')
except UnicodeEncodeError:
        local_part.defects.append(errors.NonASCIILocalPartDefect(
            "local-part contains non-ASCII characters"))
return local_part, value
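# Illustrative usage (editor's sketch, not part of the original module):
#
#   local_part, rest = get_local_part('john.q.public@example.com')
#   str(local_part) -> 'john.q.public'
#   rest            -> '@example.com'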
def get_obs_local_part(value):
""" obs-local-part = word *("." word)
"""
obs_local_part = ObsLocalPart()
last_non_ws_was_dot = False
while value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
if value[0] == '.':
if last_non_ws_was_dot:
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"invalid repeated '.'"))
obs_local_part.append(DOT)
last_non_ws_was_dot = True
value = value[1:]
continue
elif value[0]=='\\':
obs_local_part.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"'\\' character outside of quoted-string/ccontent"))
last_non_ws_was_dot = False
continue
if obs_local_part and obs_local_part[-1].token_type != 'dot':
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"missing '.' between words"))
try:
token, value = get_word(value)
last_non_ws_was_dot = False
except errors.HeaderParseError:
if value[0] not in CFWS_LEADER:
raise
token, value = get_cfws(value)
obs_local_part.append(token)
if (obs_local_part[0].token_type == 'dot' or
obs_local_part[0].token_type=='cfws' and
obs_local_part[1].token_type=='dot'):
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"Invalid leading '.' in local part"))
if (obs_local_part[-1].token_type == 'dot' or
obs_local_part[-1].token_type=='cfws' and
obs_local_part[-2].token_type=='dot'):
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"Invalid trailing '.' in local part"))
if obs_local_part.defects:
obs_local_part.token_type = 'invalid-obs-local-part'
return obs_local_part, value
def get_dtext(value):
""" dtext = <printable ascii except \ [ ]> / obs-dtext
obs-dtext = obs-NO-WS-CTL / quoted-pair
We allow anything except the excluded characters, but if we find any
    ASCII other than the RFC-defined printable ASCII, a NonPrintableDefect is
added to the token's defects list. Quoted pairs are converted to their
unquoted values, so what is returned is a ptext token, in this case a
ValueTerminal. If there were quoted-printables, an ObsoleteHeaderDefect is
added to the returned token's defect list.
"""
ptext, value, had_qp = _get_ptext_to_endchars(value, '[]')
ptext = ValueTerminal(ptext, 'ptext')
if had_qp:
ptext.defects.append(errors.ObsoleteHeaderDefect(
"quoted printable found in domain-literal"))
_validate_xtext(ptext)
return ptext, value
def _check_for_early_dl_end(value, domain_literal):
if value:
return False
    domain_literal.defects.append(errors.InvalidHeaderDefect(
        "end of input inside domain-literal"))
domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
return True
def get_domain_literal(value):
""" domain-literal = [CFWS] "[" *([FWS] dtext) [FWS] "]" [CFWS]
"""
domain_literal = DomainLiteral()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
domain_literal.append(token)
if not value:
raise errors.HeaderParseError("expected domain-literal")
if value[0] != '[':
raise errors.HeaderParseError("expected '[' at start of domain-literal "
"but found '{}'".format(value))
value = value[1:]
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
domain_literal.append(ValueTerminal('[', 'domain-literal-start'))
if value[0] in WSP:
token, value = get_fws(value)
domain_literal.append(token)
token, value = get_dtext(value)
domain_literal.append(token)
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
if value[0] in WSP:
token, value = get_fws(value)
domain_literal.append(token)
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
if value[0] != ']':
raise errors.HeaderParseError("expected ']' at end of domain-literal "
"but found '{}'".format(value))
domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
domain_literal.append(token)
return domain_literal, value
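# Illustrative usage (editor's sketch, not part of the original module):
#
#   dl, rest = get_domain_literal('[192.168.0.1] tail')
#   str(dl) -> '[192.168.0.1] '
#   rest    -> 'tail'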
def get_domain(value):
""" domain = dot-atom / domain-literal / obs-domain
obs-domain = atom *("." atom))
"""
domain = Domain()
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected domain but found '{}'".format(value))
if value[0] == '[':
token, value = get_domain_literal(value)
if leader is not None:
token[:0] = [leader]
domain.append(token)
return domain, value
try:
token, value = get_dot_atom(value)
except errors.HeaderParseError:
token, value = get_atom(value)
if leader is not None:
token[:0] = [leader]
domain.append(token)
if value and value[0] == '.':
domain.defects.append(errors.ObsoleteHeaderDefect(
"domain is not a dot-atom (contains CFWS)"))
if domain[0].token_type == 'dot-atom':
domain[:] = domain[0]
while value and value[0] == '.':
domain.append(DOT)
token, value = get_atom(value[1:])
domain.append(token)
return domain, value
def get_addr_spec(value):
""" addr-spec = local-part "@" domain
"""
addr_spec = AddrSpec()
token, value = get_local_part(value)
addr_spec.append(token)
if not value or value[0] != '@':
        addr_spec.defects.append(errors.InvalidHeaderDefect(
            "addr-spec local part with no domain"))
return addr_spec, value
addr_spec.append(ValueTerminal('@', 'address-at-symbol'))
token, value = get_domain(value[1:])
addr_spec.append(token)
return addr_spec, value
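# Illustrative usage (editor's sketch, not part of the original module):
#
#   addr, rest = get_addr_spec('fred@example.com, next')
#   str(addr) -> 'fred@example.com'
#   rest      -> ', next'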
def get_obs_route(value):
""" obs-route = obs-domain-list ":"
obs-domain-list = *(CFWS / ",") "@" domain *("," [CFWS] ["@" domain])
Returns an obs-route token with the appropriate sub-tokens (that is,
there is no obs-domain-list in the parse tree).
"""
obs_route = ObsRoute()
while value and (value[0]==',' or value[0] in CFWS_LEADER):
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
obs_route.append(token)
elif value[0] == ',':
obs_route.append(ListSeparator)
value = value[1:]
if not value or value[0] != '@':
raise errors.HeaderParseError(
"expected obs-route domain but found '{}'".format(value))
obs_route.append(RouteComponentMarker)
token, value = get_domain(value[1:])
obs_route.append(token)
while value and value[0]==',':
obs_route.append(ListSeparator)
value = value[1:]
if not value:
break
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
obs_route.append(token)
if value[0] == '@':
obs_route.append(RouteComponentMarker)
token, value = get_domain(value[1:])
obs_route.append(token)
if not value:
raise errors.HeaderParseError("end of header while parsing obs-route")
if value[0] != ':':
raise errors.HeaderParseError( "expected ':' marking end of "
"obs-route but found '{}'".format(value))
obs_route.append(ValueTerminal(':', 'end-of-obs-route-marker'))
return obs_route, value[1:]
def get_angle_addr(value):
""" angle-addr = [CFWS] "<" addr-spec ">" [CFWS] / obs-angle-addr
obs-angle-addr = [CFWS] "<" obs-route addr-spec ">" [CFWS]
"""
angle_addr = AngleAddr()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
angle_addr.append(token)
if not value or value[0] != '<':
raise errors.HeaderParseError(
"expected angle-addr but found '{}'".format(value))
angle_addr.append(ValueTerminal('<', 'angle-addr-start'))
value = value[1:]
# Although it is not legal per RFC5322, SMTP uses '<>' in certain
# circumstances.
if value[0] == '>':
angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
angle_addr.defects.append(errors.InvalidHeaderDefect(
"null addr-spec in angle-addr"))
value = value[1:]
return angle_addr, value
try:
token, value = get_addr_spec(value)
except errors.HeaderParseError:
try:
token, value = get_obs_route(value)
angle_addr.defects.append(errors.ObsoleteHeaderDefect(
"obsolete route specification in angle-addr"))
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected addr-spec or obs-route but found '{}'".format(value))
angle_addr.append(token)
token, value = get_addr_spec(value)
angle_addr.append(token)
if value and value[0] == '>':
value = value[1:]
else:
angle_addr.defects.append(errors.InvalidHeaderDefect(
"missing trailing '>' on angle-addr"))
angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
angle_addr.append(token)
return angle_addr, value
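# Illustrative usage (editor's sketch, not part of the original module):
#
#   aa, rest = get_angle_addr('<fred@example.com> tail')
#   str(aa) -> '<fred@example.com> '
#   rest    -> 'tail'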
def get_display_name(value):
""" display-name = phrase
Because this is simply a name-rule, we don't return a display-name
token containing a phrase, but rather a display-name token with
the content of the phrase.
"""
display_name = DisplayName()
token, value = get_phrase(value)
display_name.extend(token[:])
display_name.defects = token.defects[:]
return display_name, value
def get_name_addr(value):
""" name-addr = [display-name] angle-addr
"""
name_addr = NameAddr()
# Both the optional display name and the angle-addr can start with cfws.
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(leader))
if value[0] != '<':
if value[0] in PHRASE_ENDS:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(value))
token, value = get_display_name(value)
if not value:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(token))
if leader is not None:
token[0][:0] = [leader]
leader = None
name_addr.append(token)
token, value = get_angle_addr(value)
if leader is not None:
token[:0] = [leader]
name_addr.append(token)
return name_addr, value
def get_mailbox(value):
""" mailbox = name-addr / addr-spec
"""
# The only way to figure out if we are dealing with a name-addr or an
# addr-spec is to try parsing each one.
mailbox = Mailbox()
try:
token, value = get_name_addr(value)
except errors.HeaderParseError:
try:
token, value = get_addr_spec(value)
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected mailbox but found '{}'".format(value))
if any(isinstance(x, errors.InvalidHeaderDefect)
for x in token.all_defects):
mailbox.token_type = 'invalid-mailbox'
mailbox.append(token)
return mailbox, value
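# Illustrative usage (editor's sketch, not part of the original module):
#
#   mbox, rest = get_mailbox('Fred Bloggs <fred@example.com>')
#   mbox.token_type -> 'mailbox'
#   rest            -> ''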
def get_invalid_mailbox(value, endchars):
""" Read everything up to one of the chars in endchars.
This is outside the formal grammar. The InvalidMailbox TokenList that is
returned acts like a Mailbox, but the data attributes are None.
"""
invalid_mailbox = InvalidMailbox()
while value and value[0] not in endchars:
if value[0] in PHRASE_ENDS:
invalid_mailbox.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
invalid_mailbox.append(token)
return invalid_mailbox, value
def get_mailbox_list(value):
""" mailbox-list = (mailbox *("," mailbox)) / obs-mbox-list
obs-mbox-list = *([CFWS] ",") mailbox *("," [mailbox / CFWS])
For this routine we go outside the formal grammar in order to improve error
handling. We recognize the end of the mailbox list only at the end of the
value or at a ';' (the group terminator). This is so that we can turn
invalid mailboxes into InvalidMailbox tokens and continue parsing any
remaining valid mailboxes. We also allow all mailbox entries to be null,
and this condition is handled appropriately at a higher level.
"""
mailbox_list = MailboxList()
while value and value[0] != ';':
try:
token, value = get_mailbox(value)
mailbox_list.append(token)
except errors.HeaderParseError:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value or value[0] in ',;':
mailbox_list.append(leader)
mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in mailbox-list"))
else:
token, value = get_invalid_mailbox(value, ',;')
if leader is not None:
token[:0] = [leader]
mailbox_list.append(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
elif value[0] == ',':
mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in mailbox-list"))
else:
token, value = get_invalid_mailbox(value, ',;')
if leader is not None:
token[:0] = [leader]
mailbox_list.append(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
if value and value[0] not in ',;':
# Crap after mailbox; treat it as an invalid mailbox.
# The mailbox info will still be available.
mailbox = mailbox_list[-1]
mailbox.token_type = 'invalid-mailbox'
token, value = get_invalid_mailbox(value, ',;')
mailbox.extend(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
if value and value[0] == ',':
mailbox_list.append(ListSeparator)
value = value[1:]
return mailbox_list, value
def get_group_list(value):
""" group-list = mailbox-list / CFWS / obs-group-list
obs-group-list = 1*([CFWS] ",") [CFWS]
"""
group_list = GroupList()
if not value:
group_list.defects.append(errors.InvalidHeaderDefect(
"end of header before group-list"))
return group_list, value
leader = None
if value and value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
# This should never happen in email parsing, since CFWS-only is a
# legal alternative to group-list in a group, which is the only
# place group-list appears.
group_list.defects.append(errors.InvalidHeaderDefect(
"end of header in group-list"))
group_list.append(leader)
return group_list, value
if value[0] == ';':
group_list.append(leader)
return group_list, value
token, value = get_mailbox_list(value)
if len(token.all_mailboxes)==0:
if leader is not None:
group_list.append(leader)
group_list.extend(token)
group_list.defects.append(errors.ObsoleteHeaderDefect(
"group-list with empty entries"))
return group_list, value
if leader is not None:
token[:0] = [leader]
group_list.append(token)
return group_list, value
def get_group(value):
""" group = display-name ":" [group-list] ";" [CFWS]
"""
group = Group()
token, value = get_display_name(value)
if not value or value[0] != ':':
raise errors.HeaderParseError("expected ':' at end of group "
"display name but found '{}'".format(value))
group.append(token)
group.append(ValueTerminal(':', 'group-display-name-terminator'))
value = value[1:]
if value and value[0] == ';':
group.append(ValueTerminal(';', 'group-terminator'))
return group, value[1:]
token, value = get_group_list(value)
group.append(token)
if not value:
group.defects.append(errors.InvalidHeaderDefect(
"end of header in group"))
if value[0] != ';':
raise errors.HeaderParseError(
"expected ';' at end of group but found {}".format(value))
group.append(ValueTerminal(';', 'group-terminator'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
group.append(token)
return group, value
def get_address(value):
""" address = mailbox / group
Note that counter-intuitively, an address can be either a single address or
a list of addresses (a group). This is why the returned Address object has
a 'mailboxes' attribute which treats a single address as a list of length
    one. When you need to differentiate between the two cases, extract the single
element, which is either a mailbox or a group token.
"""
# The formal grammar isn't very helpful when parsing an address. mailbox
# and group, especially when allowing for obsolete forms, start off very
# similarly. It is only when you reach one of @, <, or : that you know
# what you've got. So, we try each one in turn, starting with the more
# likely of the two. We could perhaps make this more efficient by looking
# for a phrase and then branching based on the next character, but that
# would be a premature optimization.
address = Address()
try:
token, value = get_group(value)
except errors.HeaderParseError:
try:
token, value = get_mailbox(value)
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected address but found '{}'".format(value))
address.append(token)
return address, value
def get_address_list(value):
""" address_list = (address *("," address)) / obs-addr-list
obs-addr-list = *([CFWS] ",") address *("," [address / CFWS])
We depart from the formal grammar here by continuing to parse until the end
of the input, assuming the input to be entirely composed of an
address-list. This is always true in email parsing, and allows us
to skip invalid addresses to parse additional valid ones.
"""
address_list = AddressList()
while value:
try:
token, value = get_address(value)
address_list.append(token)
except errors.HeaderParseError as err:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value or value[0] == ',':
address_list.append(leader)
address_list.defects.append(errors.ObsoleteHeaderDefect(
"address-list entry with no content"))
else:
token, value = get_invalid_mailbox(value, ',')
if leader is not None:
token[:0] = [leader]
address_list.append(Address([token]))
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
elif value[0] == ',':
address_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in address-list"))
else:
token, value = get_invalid_mailbox(value, ',')
if leader is not None:
token[:0] = [leader]
address_list.append(Address([token]))
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
if value and value[0] != ',':
# Crap after address; treat it as an invalid mailbox.
# The mailbox info will still be available.
mailbox = address_list[-1][0]
mailbox.token_type = 'invalid-mailbox'
token, value = get_invalid_mailbox(value, ',')
mailbox.extend(token)
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
if value: # Must be a , at this point.
address_list.append(ValueTerminal(',', 'list-separator'))
value = value[1:]
return address_list, value
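# Illustrative usage (editor's sketch, not part of the original module):
#
#   al, rest = get_address_list('fred@example.com, Ann <ann@example.com>')
#   len(al.mailboxes) -> 2
#   rest              -> ''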
#
# XXX: As I begin to add additional header parsers, I'm realizing we probably
# have two levels of parser routines: the get_XXX methods that get a token in
# the grammar, and parse_XXX methods that parse an entire field value. So
# get_address_list above should really be a parse_ method, as probably should
# be get_unstructured.
#
def parse_mime_version(value):
""" mime-version = [CFWS] 1*digit [CFWS] "." [CFWS] 1*digit [CFWS]
"""
# The [CFWS] is implicit in the RFC 2045 BNF.
# XXX: This routine is a bit verbose, should factor out a get_int method.
mime_version = MIMEVersion()
if not value:
mime_version.defects.append(errors.HeaderMissingRequiredValue(
"Missing MIME version number (eg: 1.0)"))
return mime_version
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value:
mime_version.defects.append(errors.HeaderMissingRequiredValue(
"Expected MIME version number but found only CFWS"))
digits = ''
while value and value[0] != '.' and value[0] not in CFWS_LEADER:
digits += value[0]
value = value[1:]
if not digits.isdigit():
mime_version.defects.append(errors.InvalidHeaderDefect(
"Expected MIME major version number but found {!r}".format(digits)))
mime_version.append(ValueTerminal(digits, 'xtext'))
else:
mime_version.major = int(digits)
mime_version.append(ValueTerminal(digits, 'digits'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value or value[0] != '.':
if mime_version.major is not None:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Incomplete MIME version; found only major number"))
if value:
mime_version.append(ValueTerminal(value, 'xtext'))
return mime_version
mime_version.append(ValueTerminal('.', 'version-separator'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value:
if mime_version.major is not None:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Incomplete MIME version; found only major number"))
return mime_version
digits = ''
while value and value[0] not in CFWS_LEADER:
digits += value[0]
value = value[1:]
if not digits.isdigit():
mime_version.defects.append(errors.InvalidHeaderDefect(
"Expected MIME minor version number but found {!r}".format(digits)))
mime_version.append(ValueTerminal(digits, 'xtext'))
else:
mime_version.minor = int(digits)
mime_version.append(ValueTerminal(digits, 'digits'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if value:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Excess non-CFWS text after MIME version"))
mime_version.append(ValueTerminal(value, 'xtext'))
return mime_version
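# Illustrative usage (editor's sketch, not part of the original module):
#
#   mv = parse_mime_version('1.0 (produced by MegaMailer)')
#   (mv.major, mv.minor) -> (1, 0)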
def get_invalid_parameter(value):
""" Read everything up to the next ';'.
This is outside the formal grammar. The InvalidParameter TokenList that is
returned acts like a Parameter, but the data attributes are None.
"""
invalid_parameter = InvalidParameter()
while value and value[0] != ';':
if value[0] in PHRASE_ENDS:
invalid_parameter.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
invalid_parameter.append(token)
return invalid_parameter, value
def get_ttext(value):
"""ttext = <matches _ttext_matcher>
We allow any non-TOKEN_ENDS in ttext, but add defects to the token's
defects list if we find non-ttext characters. We also register defects for
*any* non-printables even though the RFC doesn't exclude all of them,
because we follow the spirit of RFC 5322.
"""
m = _non_token_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected ttext but found '{}'".format(value))
ttext = m.group()
value = value[len(ttext):]
ttext = ValueTerminal(ttext, 'ttext')
_validate_xtext(ttext)
return ttext, value
def get_token(value):
"""token = [CFWS] 1*ttext [CFWS]
The RFC equivalent of ttext is any US-ASCII chars except space, ctls, or
tspecials. We also exclude tabs even though the RFC doesn't.
The RFC implies the CFWS but is not explicit about it in the BNF.
"""
mtoken = Token()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mtoken.append(token)
if value and value[0] in TOKEN_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_ttext(value)
mtoken.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mtoken.append(token)
return mtoken, value
def get_attrtext(value):
"""attrtext = 1*(any non-ATTRIBUTE_ENDS character)
We allow any non-ATTRIBUTE_ENDS in attrtext, but add defects to the
token's defects list if we find non-attrtext characters. We also register
defects for *any* non-printables even though the RFC doesn't exclude all of
them, because we follow the spirit of RFC 5322.
"""
m = _non_attribute_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected attrtext but found {!r}".format(value))
attrtext = m.group()
value = value[len(attrtext):]
attrtext = ValueTerminal(attrtext, 'attrtext')
_validate_xtext(attrtext)
return attrtext, value
def get_attribute(value):
""" [CFWS] 1*attrtext [CFWS]
This version of the BNF makes the CFWS explicit, and as usual we use a
value terminal for the actual run of characters. The RFC equivalent of
attrtext is the token characters, with the subtraction of '*', "'", and '%'.
We include tab in the excluded set just as we do for token.
"""
attribute = Attribute()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
if value and value[0] in ATTRIBUTE_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_attrtext(value)
attribute.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
return attribute, value
def get_extended_attrtext(value):
"""attrtext = 1*(any non-ATTRIBUTE_ENDS character plus '%')
This is a special parsing routine so that we get a value that
includes % escapes as a single string (which we decode as a single
string later).
"""
m = _non_extended_attribute_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected extended attrtext but found {!r}".format(value))
attrtext = m.group()
value = value[len(attrtext):]
attrtext = ValueTerminal(attrtext, 'extended-attrtext')
_validate_xtext(attrtext)
return attrtext, value
def get_extended_attribute(value):
""" [CFWS] 1*extended_attrtext [CFWS]
This is like the non-extended version except we allow % characters, so that
we can pick up an encoded value as a single string.
"""
# XXX: should we have an ExtendedAttribute TokenList?
attribute = Attribute()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
if value and value[0] in EXTENDED_ATTRIBUTE_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_extended_attrtext(value)
attribute.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
return attribute, value
def get_section(value):
""" '*' digits
The formal BNF is more complicated because leading 0s are not allowed. We
check for that and add a defect. We also assume no CFWS is allowed between
the '*' and the digits, though the RFC is not crystal clear on that.
The caller should already have dealt with leading CFWS.
"""
section = Section()
if not value or value[0] != '*':
raise errors.HeaderParseError("Expected section but found {}".format(
value))
section.append(ValueTerminal('*', 'section-marker'))
value = value[1:]
if not value or not value[0].isdigit():
raise errors.HeaderParseError("Expected section number but "
"found {}".format(value))
digits = ''
while value and value[0].isdigit():
digits += value[0]
value = value[1:]
    if digits[0] == '0' and digits != '0':
        section.defects.append(errors.InvalidHeaderDefect(
            "section number has an invalid leading 0"))
section.number = int(digits)
section.append(ValueTerminal(digits, 'digits'))
return section, value
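# Illustrative usage (editor's sketch, not part of the original module):
#
#   section, rest = get_section('*2*')
#   section.number -> 2
#   rest           -> '*'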
def get_value(value):
""" quoted-string / attribute
"""
v = Value()
if not value:
raise errors.HeaderParseError("Expected value but found end of string")
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError("Expected value but found "
"only {}".format(leader))
if value[0] == '"':
token, value = get_quoted_string(value)
else:
token, value = get_extended_attribute(value)
if leader is not None:
token[:0] = [leader]
v.append(token)
return v, value
def get_parameter(value):
""" attribute [section] ["*"] [CFWS] "=" value
The CFWS is implied by the RFC but not made explicit in the BNF. This
simplified form of the BNF from the RFC is made to conform with the RFC BNF
through some extra checks. We do it this way because it makes both error
recovery and working with the resulting parse tree easier.
"""
# It is possible CFWS would also be implicitly allowed between the section
    # and the 'extended-attribute' marker (the '*'), but we've never seen that
# in the wild and we will therefore ignore the possibility.
param = Parameter()
token, value = get_attribute(value)
param.append(token)
if not value or value[0] == ';':
param.defects.append(errors.InvalidHeaderDefect("Parameter contains "
"name ({}) but no value".format(token)))
return param, value
if value[0] == '*':
try:
token, value = get_section(value)
param.sectioned = True
param.append(token)
except errors.HeaderParseError:
pass
if not value:
raise errors.HeaderParseError("Incomplete parameter")
if value[0] == '*':
param.append(ValueTerminal('*', 'extended-parameter-marker'))
value = value[1:]
param.extended = True
if value[0] != '=':
raise errors.HeaderParseError("Parameter not followed by '='")
param.append(ValueTerminal('=', 'parameter-separator'))
value = value[1:]
leader = None
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
param.append(token)
remainder = None
appendto = param
if param.extended and value and value[0] == '"':
# Now for some serious hackery to handle the common invalid case of
# double quotes around an extended value. We also accept (with defect)
# a value marked as encoded that isn't really.
qstring, remainder = get_quoted_string(value)
inner_value = qstring.stripped_value
semi_valid = False
if param.section_number == 0:
if inner_value and inner_value[0] == "'":
semi_valid = True
else:
token, rest = get_attrtext(inner_value)
if rest and rest[0] == "'":
semi_valid = True
else:
try:
token, rest = get_extended_attrtext(inner_value)
            except errors.HeaderParseError:
pass
else:
if not rest:
semi_valid = True
if semi_valid:
param.defects.append(errors.InvalidHeaderDefect(
"Quoted string value for extended parameter is invalid"))
param.append(qstring)
for t in qstring:
if t.token_type == 'bare-quoted-string':
t[:] = []
appendto = t
break
value = inner_value
else:
remainder = None
param.defects.append(errors.InvalidHeaderDefect(
"Parameter marked as extended but appears to have a "
"quoted string value that is non-encoded"))
if value and value[0] == "'":
token = None
else:
token, value = get_value(value)
if not param.extended or param.section_number > 0:
if not value or value[0] != "'":
appendto.append(token)
if remainder is not None:
assert not value, value
value = remainder
return param, value
param.defects.append(errors.InvalidHeaderDefect(
"Apparent initial-extended-value but attribute "
"was not marked as extended or was not initial section"))
if not value:
# Assume the charset/lang is missing and the token is the value.
param.defects.append(errors.InvalidHeaderDefect(
"Missing required charset/lang delimiters"))
appendto.append(token)
if remainder is None:
return param, value
else:
if token is not None:
for t in token:
if t.token_type == 'extended-attrtext':
break
            t.token_type = 'attrtext'
appendto.append(t)
param.charset = t.value
if value[0] != "'":
raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
"delimiter, but found {!r}".format(value))
appendto.append(ValueTerminal("'", 'RFC2231 delimiter'))
value = value[1:]
if value and value[0] != "'":
token, value = get_attrtext(value)
appendto.append(token)
param.lang = token.value
if not value or value[0] != "'":
raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
"delimiter, but found {}".format(value))
appendto.append(ValueTerminal("'", 'RFC2231 delimiter'))
value = value[1:]
if remainder is not None:
# Treat the rest of value as bare quoted string content.
v = Value()
while value:
if value[0] in WSP:
token, value = get_fws(value)
else:
token, value = get_qcontent(value)
v.append(token)
token = v
else:
token, value = get_value(value)
appendto.append(token)
if remainder is not None:
assert not value, value
value = remainder
return param, value
def parse_mime_parameters(value):
""" parameter *( ";" parameter )
That BNF is meant to indicate this routine should only be called after
finding and handling the leading ';'. There is no corresponding rule in
the formal RFC grammar, but it is more convenient for us for the set of
parameters to be treated as its own TokenList.
    This is a 'parse' routine because it consumes the remaining value, but it
would never be called to parse a full header. Instead it is called to
parse everything after the non-parameter value of a specific MIME header.
"""
mime_parameters = MimeParameters()
while value:
try:
token, value = get_parameter(value)
mime_parameters.append(token)
except errors.HeaderParseError as err:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
mime_parameters.append(leader)
return mime_parameters
if value[0] == ';':
if leader is not None:
mime_parameters.append(leader)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"parameter entry with no content"))
else:
token, value = get_invalid_parameter(value)
if leader:
token[:0] = [leader]
mime_parameters.append(token)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"invalid parameter {!r}".format(token)))
if value and value[0] != ';':
# Junk after the otherwise valid parameter. Mark it as
# invalid, but it will have a value.
param = mime_parameters[-1]
param.token_type = 'invalid-parameter'
token, value = get_invalid_parameter(value)
param.extend(token)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"parameter with invalid trailing text {!r}".format(token)))
if value:
# Must be a ';' at this point.
mime_parameters.append(ValueTerminal(';', 'parameter-separator'))
value = value[1:]
return mime_parameters
def _find_mime_parameters(tokenlist, value):
"""Do our best to find the parameters in an invalid MIME header
"""
while value and value[0] != ';':
if value[0] in PHRASE_ENDS:
tokenlist.append(ValueTerminal(value[0], 'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
tokenlist.append(token)
if not value:
return
tokenlist.append(ValueTerminal(';', 'parameter-separator'))
tokenlist.append(parse_mime_parameters(value[1:]))
def parse_content_type_header(value):
""" maintype "/" subtype *( ";" parameter )
The maintype and substype are tokens. Theoretically they could
be checked against the official IANA list + x-token, but we
don't do that.
"""
ctype = ContentType()
recover = False
if not value:
ctype.defects.append(errors.HeaderMissingRequiredValue(
"Missing content type specification"))
return ctype
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content maintype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
    # XXX: If we really want to follow the formal grammar we should make
    # maintype and subtype specialized TokenLists here. Probably not worth it.
if not value or value[0] != '/':
ctype.defects.append(errors.InvalidHeaderDefect(
"Invalid content type"))
if value:
_find_mime_parameters(ctype, value)
return ctype
ctype.maintype = token.value.strip().lower()
ctype.append(ValueTerminal('/', 'content-type-separator'))
value = value[1:]
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content subtype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
ctype.subtype = token.value.strip().lower()
if not value:
return ctype
if value[0] != ';':
ctype.defects.append(errors.InvalidHeaderDefect(
"Only parameters are valid after content type, but "
"found {!r}".format(value)))
# The RFC requires that a syntactically invalid content-type be treated
# as text/plain. Perhaps we should postel this, but we should probably
# only do that if we were checking the subtype value against IANA.
del ctype.maintype, ctype.subtype
_find_mime_parameters(ctype, value)
return ctype
ctype.append(ValueTerminal(';', 'parameter-separator'))
ctype.append(parse_mime_parameters(value[1:]))
return ctype
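# Illustrative usage (editor's sketch, not part of the original module):
#
#   ctype = parse_content_type_header('text/plain; charset="utf-8"')
#   ctype.maintype -> 'text'
#   ctype.subtype  -> 'plain'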
def parse_content_disposition_header(value):
""" disposition-type *( ";" parameter )
"""
disp_header = ContentDisposition()
if not value:
disp_header.defects.append(errors.HeaderMissingRequiredValue(
"Missing content disposition"))
return disp_header
try:
token, value = get_token(value)
except errors.HeaderParseError:
disp_header.defects.append(errors.InvalidHeaderDefect(
"Expected content disposition but found {!r}".format(value)))
_find_mime_parameters(disp_header, value)
return disp_header
disp_header.append(token)
disp_header.content_disposition = token.value.strip().lower()
if not value:
return disp_header
if value[0] != ';':
disp_header.defects.append(errors.InvalidHeaderDefect(
"Only parameters are valid after content disposition, but "
"found {!r}".format(value)))
_find_mime_parameters(disp_header, value)
return disp_header
disp_header.append(ValueTerminal(';', 'parameter-separator'))
disp_header.append(parse_mime_parameters(value[1:]))
return disp_header
def parse_content_transfer_encoding_header(value):
""" mechanism
"""
# We should probably validate the values, since the list is fixed.
cte_header = ContentTransferEncoding()
if not value:
cte_header.defects.append(errors.HeaderMissingRequiredValue(
"Missing content transfer encoding"))
return cte_header
try:
token, value = get_token(value)
except errors.HeaderParseError:
cte_header.defects.append(errors.InvalidHeaderDefect(
"Expected content transfer encoding but found {!r}".format(value)))
else:
cte_header.append(token)
cte_header.cte = token.value.strip().lower()
if not value:
return cte_header
while value:
cte_header.defects.append(errors.InvalidHeaderDefect(
"Extra text after content transfer encoding"))
if value[0] in PHRASE_ENDS:
cte_header.append(ValueTerminal(value[0], 'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
cte_header.append(token)
return cte_header
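# Illustrative usage (editor's sketch, not part of the original module):
#
#   cte = parse_content_transfer_encoding_header('Base64')
#   cte.cte -> 'base64'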
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
"""Header value parser implementing various email-related RFC parsing rules.
The parsing methods defined in this module implement various email related
parsing rules. Principal among them is RFC 5322, which is the follow-on
to RFC 2822 and primarily a clarification of the former. It also implements
RFC 2047 encoded word decoding.
RFC 5322 goes to considerable trouble to maintain backward compatibility with
RFC 822 in the parse phase, while cleaning up the structure on the generation
phase. This parser supports correct RFC 5322 generation by tagging white space
as folding white space only when folding is allowed in the non-obsolete rule
sets. Actually, the parser is even more generous when accepting input than RFC
5322 mandates, following the spirit of Postel's Law, which RFC 5322 encourages.
Where possible deviations from the standard are annotated on the 'defects'
attribute of tokens that deviate.
The general structure of the parser follows RFC 5322, and uses its terminology
where there is a direct correspondence. Where the implementation requires a
somewhat different structure than that used by the formal grammar, new terms
that mimic the closest existing terms are used. Thus, it really helps to have
a copy of RFC 5322 handy when studying this code.
Input to the parser is a string that has already been unfolded according to
RFC 5322 rules. According to the RFC this unfolding is the very first step, and
this parser leaves the unfolding step to a higher level message parser, which
will have already detected the line breaks that need unfolding while
determining the beginning and end of each header.
The output of the parser is a TokenList object, which is a list subclass. A
TokenList is a recursive data structure. The terminal nodes of the structure
are Terminal objects, which are subclasses of str. These do not correspond
directly to terminal objects in the formal grammar, but are instead more
practical higher level combinations of true terminals.
All TokenList and Terminal objects have a 'value' attribute, which produces the
semantically meaningful value of that part of the parse subtree. The value of
all whitespace tokens (no matter how many sub-tokens they may contain) is a
single space, as per the RFC rules. This includes 'CFWS', which is herein
included in the general class of whitespace tokens. There is one exception to
the rule that whitespace tokens are collapsed into single spaces in values: in
the value of a 'bare-quoted-string' (a quoted-string with no leading or
trailing whitespace), any whitespace that appeared between the quotation marks
is preserved in the returned value. Note that in all Terminal strings quoted
pairs are turned into their unquoted values.
All TokenList and Terminal objects also have a string value, which attempts to
be a "canonical" representation of the RFC-compliant form of the substring that
produced the parsed subtree, including minimal use of quoted pair quoting.
Whitespace runs are not collapsed.
Comment tokens also have a 'content' attribute providing the string found
between the parens (including any nested comments) with whitespace preserved.
All TokenList and Terminal objects have a 'defects' attribute which is a
possibly empty list of all the defects found while creating the token. Defects
may appear on any token in the tree, and a composite list of all defects in the
subtree is available through the 'all_defects' attribute of any node. (For
Terminal nodes x.defects == x.all_defects.)
Each object in a parse tree is called a 'token', and each has a 'token_type'
attribute that gives the name from the RFC 5322 grammar that it represents.
Not all RFC 5322 nodes are produced, and there is one non-RFC 5322 node that
may be produced: 'ptext'. A 'ptext' is a string of printable ascii characters.
It is returned in place of lists of (ctext/quoted-pair) and
(qtext/quoted-pair).
XXX: provide complete list of token types.
"""
import re
import urllib.parse   # For urllib.parse.unquote
from string import hexdigits
from collections import OrderedDict
from email import _encoded_words as _ew
from email import errors
from email import utils
#
# Useful constants and functions
#
WSP = set(' \t')
CFWS_LEADER = WSP | set('(')
SPECIALS = set(r'()<>@,:;.\"[]')
ATOM_ENDS = SPECIALS | WSP
DOT_ATOM_ENDS = ATOM_ENDS - set('.')
# '.', '"', and '(' do not end phrases in order to support obs-phrase
PHRASE_ENDS = SPECIALS - set('."(')
TSPECIALS = (SPECIALS | set('/?=')) - set('.')
TOKEN_ENDS = TSPECIALS | WSP
ASPECIALS = TSPECIALS | set("*'%")
ATTRIBUTE_ENDS = ASPECIALS | WSP
EXTENDED_ATTRIBUTE_ENDS = ATTRIBUTE_ENDS - set('%')
def quote_string(value):
return '"'+str(value).replace('\\', '\\\\').replace('"', r'\"')+'"'
#
# Accumulator for header folding
#
class _Folded:
def __init__(self, maxlen, policy):
self.maxlen = maxlen
self.policy = policy
self.lastlen = 0
self.stickyspace = None
self.firstline = True
self.done = []
self.current = []
def newline(self):
self.done.extend(self.current)
self.done.append(self.policy.linesep)
self.current.clear()
self.lastlen = 0
def finalize(self):
if self.current:
self.newline()
def __str__(self):
return ''.join(self.done)
def append(self, stoken):
self.current.append(stoken)
def append_if_fits(self, token, stoken=None):
if stoken is None:
stoken = str(token)
l = len(stoken)
if self.stickyspace is not None:
stickyspace_len = len(self.stickyspace)
if self.lastlen + stickyspace_len + l <= self.maxlen:
self.current.append(self.stickyspace)
self.lastlen += stickyspace_len
self.current.append(stoken)
self.lastlen += l
self.stickyspace = None
self.firstline = False
return True
if token.has_fws:
ws = token.pop_leading_fws()
if ws is not None:
self.stickyspace += str(ws)
stickyspace_len += len(ws)
token._fold(self)
return True
if stickyspace_len and l + 1 <= self.maxlen:
margin = self.maxlen - l
if 0 < margin < stickyspace_len:
trim = stickyspace_len - margin
self.current.append(self.stickyspace[:trim])
self.stickyspace = self.stickyspace[trim:]
stickyspace_len = trim
self.newline()
self.current.append(self.stickyspace)
self.current.append(stoken)
self.lastlen = l + stickyspace_len
self.stickyspace = None
self.firstline = False
return True
if not self.firstline:
self.newline()
self.current.append(self.stickyspace)
self.current.append(stoken)
self.stickyspace = None
self.firstline = False
return True
if self.lastlen + l <= self.maxlen:
self.current.append(stoken)
self.lastlen += l
return True
if l < self.maxlen:
self.newline()
self.current.append(stoken)
self.lastlen = l
return True
return False
#
# TokenList and its subclasses
#
class TokenList(list):
token_type = None
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self.defects = []
def __str__(self):
return ''.join(str(x) for x in self)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__,
super().__repr__())
@property
def value(self):
return ''.join(x.value for x in self if x.value)
@property
def all_defects(self):
return sum((x.all_defects for x in self), self.defects)
#
# Folding API
#
# parts():
#
# return a list of objects that constitute the "higher level syntactic
# objects" specified by the RFC as the best places to fold a header line.
# The returned objects must include leading folding white space, even if
# this means mutating the underlying parse tree of the object. Each object
# is only responsible for returning *its* parts, and should not drill down
# to any lower level except as required to meet the leading folding white
# space constraint.
#
# _fold(folded):
#
# folded: the result accumulator. This is an instance of _Folded.
# (XXX: I haven't finished factoring this out yet, the folding code
# pretty much uses this as a state object.) When the folded.current
# contains as much text as will fit, the _fold method should call
# folded.newline.
    # folded.lastlen: the current length of the text stored in folded.current.
# folded.maxlen: The maximum number of characters that may appear on a
# folded line. Differs from the policy setting in that "no limit" is
# represented by +inf, which means it can be used in the trivially
# logical fashion in comparisons.
#
# Currently no subclasses implement parts, and I think this will remain
# true. A subclass only needs to implement _fold when the generic version
# isn't sufficient. _fold will need to be implemented primarily when it is
# possible for encoded words to appear in the specialized token-list, since
# there is no generic algorithm that can know where exactly the encoded
# words are allowed. A _fold implementation is responsible for filling
# lines in the same general way that the top level _fold does. It may, and
# should, call the _fold method of sub-objects in a similar fashion to that
# of the top level _fold.
#
# XXX: I'm hoping it will be possible to factor the existing code further
# to reduce redundancy and make the logic clearer.
@property
def parts(self):
klass = self.__class__
this = []
for token in self:
if token.startswith_fws():
if this:
yield this[0] if len(this)==1 else klass(this)
this.clear()
end_ws = token.pop_trailing_ws()
this.append(token)
if end_ws:
yield klass(this)
this = [end_ws]
if this:
yield this[0] if len(this)==1 else klass(this)
def startswith_fws(self):
return self[0].startswith_fws()
def pop_leading_fws(self):
if self[0].token_type == 'fws':
return self.pop(0)
return self[0].pop_leading_fws()
def pop_trailing_ws(self):
if self[-1].token_type == 'cfws':
return self.pop(-1)
return self[-1].pop_trailing_ws()
@property
def has_fws(self):
for part in self:
if part.has_fws:
return True
return False
def has_leading_comment(self):
return self[0].has_leading_comment()
@property
def comments(self):
comments = []
for token in self:
comments.extend(token.comments)
return comments
def fold(self, *, policy):
# max_line_length 0/None means no limit, ie: infinitely long.
maxlen = policy.max_line_length or float("+inf")
folded = _Folded(maxlen, policy)
self._fold(folded)
folded.finalize()
return str(folded)
def as_encoded_word(self, charset):
# This works only for things returned by 'parts', which include
# the leading fws, if any, that should be used.
res = []
ws = self.pop_leading_fws()
if ws:
res.append(ws)
trailer = self.pop(-1) if self[-1].token_type=='fws' else ''
res.append(_ew.encode(str(self), charset))
res.append(trailer)
return ''.join(res)
def cte_encode(self, charset, policy):
res = []
for part in self:
res.append(part.cte_encode(charset, policy))
return ''.join(res)
def _fold(self, folded):
for part in self.parts:
tstr = str(part)
tlen = len(tstr)
try:
str(part).encode('us-ascii')
except UnicodeEncodeError:
if any(isinstance(x, errors.UndecodableBytesDefect)
for x in part.all_defects):
charset = 'unknown-8bit'
else:
# XXX: this should be a policy setting
charset = 'utf-8'
tstr = part.cte_encode(charset, folded.policy)
tlen = len(tstr)
if folded.append_if_fits(part, tstr):
continue
# Peel off the leading whitespace if any and make it sticky, to
# avoid infinite recursion.
ws = part.pop_leading_fws()
if ws is not None:
# Peel off the leading whitespace and make it sticky, to
# avoid infinite recursion.
folded.stickyspace = str(part.pop(0))
if folded.append_if_fits(part):
continue
if part.has_fws:
part._fold(folded)
continue
# There are no fold points in this one; it is too long for a single
# line and can't be split...we just have to put it on its own line.
folded.append(tstr)
folded.newline()
def pprint(self, indent=''):
print('\n'.join(self._pp(indent='')))
def ppstr(self, indent=''):
return '\n'.join(self._pp(indent=''))
def _pp(self, indent=''):
yield '{}{}/{}('.format(
indent,
self.__class__.__name__,
self.token_type)
for token in self:
if not hasattr(token, '_pp'):
yield (indent + ' !! invalid element in token '
'list: {!r}'.format(token))
else:
yield from token._pp(indent+' ')
if self.defects:
extra = ' Defects: {}'.format(self.defects)
else:
extra = ''
yield '{}){}'.format(indent, extra)
class WhiteSpaceTokenList(TokenList):
@property
def value(self):
return ' '
@property
def comments(self):
return [x.content for x in self if x.token_type=='comment']
class UnstructuredTokenList(TokenList):
token_type = 'unstructured'
def _fold(self, folded):
last_ew = None
for part in self.parts:
tstr = str(part)
is_ew = False
try:
str(part).encode('us-ascii')
except UnicodeEncodeError:
if any(isinstance(x, errors.UndecodableBytesDefect)
for x in part.all_defects):
charset = 'unknown-8bit'
else:
charset = 'utf-8'
if last_ew is not None:
# We've already done an EW, combine this one with it
# if there's room.
chunk = get_unstructured(
''.join(folded.current[last_ew:]+[tstr])).as_encoded_word(charset)
oldlastlen = sum(len(x) for x in folded.current[:last_ew])
schunk = str(chunk)
lchunk = len(schunk)
if oldlastlen + lchunk <= folded.maxlen:
del folded.current[last_ew:]
folded.append(schunk)
folded.lastlen = oldlastlen + lchunk
continue
tstr = part.as_encoded_word(charset)
is_ew = True
if folded.append_if_fits(part, tstr):
if is_ew:
last_ew = len(folded.current) - 1
continue
if is_ew or last_ew:
# It's too big to fit on the line, but since we've
# got encoded words we can use encoded word folding.
part._fold_as_ew(folded)
continue
# Peel off the leading whitespace if any and make it sticky, to
# avoid infinite recursion.
ws = part.pop_leading_fws()
if ws is not None:
folded.stickyspace = str(ws)
if folded.append_if_fits(part):
continue
if part.has_fws:
                part._fold(folded)
continue
# It can't be split...we just have to put it on its own line.
folded.append(tstr)
folded.newline()
last_ew = None
def cte_encode(self, charset, policy):
res = []
last_ew = None
for part in self:
spart = str(part)
try:
spart.encode('us-ascii')
res.append(spart)
except UnicodeEncodeError:
if last_ew is None:
res.append(part.cte_encode(charset, policy))
last_ew = len(res)
else:
tl = get_unstructured(''.join(res[last_ew:] + [spart]))
                    res.append(tl.as_encoded_word(charset))
return ''.join(res)
class Phrase(TokenList):
token_type = 'phrase'
def _fold(self, folded):
# As with Unstructured, we can have pure ASCII with or without
# surrogateescape encoded bytes, or we could have unicode. But this
# case is more complicated, since we have to deal with the various
# sub-token types and how they can be composed in the face of
        # unicode-that-needs-CTE-encoding, and the fact that if a token is a
        # comment it becomes a barrier across which we can't compose encoded
        # words.
last_ew = None
for part in self.parts:
tstr = str(part)
tlen = len(tstr)
has_ew = False
try:
str(part).encode('us-ascii')
except UnicodeEncodeError:
if any(isinstance(x, errors.UndecodableBytesDefect)
for x in part.all_defects):
charset = 'unknown-8bit'
else:
charset = 'utf-8'
if last_ew is not None and not part.has_leading_comment():
# We've already done an EW, let's see if we can combine
# this one with it. The last_ew logic ensures that all we
# have at this point is atoms, no comments or quoted
# strings. So we can treat the text between the last
# encoded word and the content of this token as
# unstructured text, and things will work correctly. But
# we have to strip off any trailing comment on this token
# first, and if it is a quoted string we have to pull out
# the content (we're encoding it, so it no longer needs to
# be quoted).
if part[-1].token_type == 'cfws' and part.comments:
remainder = part.pop(-1)
else:
remainder = ''
for i, token in enumerate(part):
if token.token_type == 'bare-quoted-string':
part[i] = UnstructuredTokenList(token[:])
chunk = get_unstructured(
''.join(folded.current[last_ew:]+[tstr])).as_encoded_word(charset)
schunk = str(chunk)
lchunk = len(schunk)
if last_ew + lchunk <= folded.maxlen:
del folded.current[last_ew:]
folded.append(schunk)
folded.lastlen = sum(len(x) for x in folded.current)
continue
tstr = part.as_encoded_word(charset)
tlen = len(tstr)
has_ew = True
if folded.append_if_fits(part, tstr):
if has_ew and not part.comments:
last_ew = len(folded.current) - 1
elif part.comments or part.token_type == 'quoted-string':
# If a comment is involved we can't combine EWs. And if a
# quoted string is involved, it's not worth the effort to
# try to combine them.
last_ew = None
continue
part._fold(folded)
def cte_encode(self, charset, policy):
res = []
last_ew = None
is_ew = False
for part in self:
spart = str(part)
try:
spart.encode('us-ascii')
res.append(spart)
except UnicodeEncodeError:
is_ew = True
if last_ew is None:
if not part.comments:
last_ew = len(res)
res.append(part.cte_encode(charset, policy))
elif not part.has_leading_comment():
if part[-1].token_type == 'cfws' and part.comments:
remainder = part.pop(-1)
else:
remainder = ''
for i, token in enumerate(part):
if token.token_type == 'bare-quoted-string':
part[i] = UnstructuredTokenList(token[:])
tl = get_unstructured(''.join(res[last_ew:] + [spart]))
res[last_ew:] = [tl.as_encoded_word(charset)]
if part.comments or (not is_ew and part.token_type == 'quoted-string'):
last_ew = None
return ''.join(res)
class Word(TokenList):
token_type = 'word'
class CFWSList(WhiteSpaceTokenList):
token_type = 'cfws'
def has_leading_comment(self):
return bool(self.comments)
class Atom(TokenList):
token_type = 'atom'
class Token(TokenList):
token_type = 'token'
class EncodedWord(TokenList):
token_type = 'encoded-word'
cte = None
charset = None
lang = None
@property
def encoded(self):
if self.cte is not None:
return self.cte
            return _ew.encode(str(self), self.charset)
class QuotedString(TokenList):
token_type = 'quoted-string'
@property
def content(self):
for x in self:
if x.token_type == 'bare-quoted-string':
return x.value
@property
def quoted_value(self):
res = []
for x in self:
if x.token_type == 'bare-quoted-string':
res.append(str(x))
else:
res.append(x.value)
return ''.join(res)
@property
def stripped_value(self):
for token in self:
if token.token_type == 'bare-quoted-string':
return token.value
class BareQuotedString(QuotedString):
token_type = 'bare-quoted-string'
def __str__(self):
return quote_string(''.join(str(x) for x in self))
@property
def value(self):
return ''.join(str(x) for x in self)
class Comment(WhiteSpaceTokenList):
token_type = 'comment'
def __str__(self):
return ''.join(sum([
["("],
[self.quote(x) for x in self],
[")"],
], []))
def quote(self, value):
if value.token_type == 'comment':
return str(value)
        return str(value).replace('\\', '\\\\').replace(
            '(', '\\(').replace(
            ')', '\\)')
@property
def content(self):
return ''.join(str(x) for x in self)
@property
def comments(self):
return [self.content]
class AddressList(TokenList):
token_type = 'address-list'
@property
def addresses(self):
return [x for x in self if x.token_type=='address']
@property
def mailboxes(self):
return sum((x.mailboxes
for x in self if x.token_type=='address'), [])
@property
def all_mailboxes(self):
return sum((x.all_mailboxes
for x in self if x.token_type=='address'), [])
class Address(TokenList):
token_type = 'address'
@property
def display_name(self):
if self[0].token_type == 'group':
return self[0].display_name
@property
def mailboxes(self):
if self[0].token_type == 'mailbox':
return [self[0]]
elif self[0].token_type == 'invalid-mailbox':
return []
return self[0].mailboxes
@property
def all_mailboxes(self):
if self[0].token_type == 'mailbox':
return [self[0]]
elif self[0].token_type == 'invalid-mailbox':
return [self[0]]
return self[0].all_mailboxes
class MailboxList(TokenList):
token_type = 'mailbox-list'
@property
def mailboxes(self):
return [x for x in self if x.token_type=='mailbox']
@property
def all_mailboxes(self):
return [x for x in self
if x.token_type in ('mailbox', 'invalid-mailbox')]
class GroupList(TokenList):
token_type = 'group-list'
@property
def mailboxes(self):
if not self or self[0].token_type != 'mailbox-list':
return []
return self[0].mailboxes
@property
def all_mailboxes(self):
if not self or self[0].token_type != 'mailbox-list':
return []
return self[0].all_mailboxes
class Group(TokenList):
token_type = "group"
@property
def mailboxes(self):
if self[2].token_type != 'group-list':
return []
return self[2].mailboxes
@property
def all_mailboxes(self):
if self[2].token_type != 'group-list':
return []
return self[2].all_mailboxes
@property
def display_name(self):
return self[0].display_name
class NameAddr(TokenList):
token_type = 'name-addr'
@property
def display_name(self):
if len(self) == 1:
return None
return self[0].display_name
@property
def local_part(self):
return self[-1].local_part
@property
def domain(self):
return self[-1].domain
@property
def route(self):
return self[-1].route
@property
def addr_spec(self):
return self[-1].addr_spec
class AngleAddr(TokenList):
token_type = 'angle-addr'
@property
def local_part(self):
for x in self:
if x.token_type == 'addr-spec':
return x.local_part
@property
def domain(self):
for x in self:
if x.token_type == 'addr-spec':
return x.domain
@property
def route(self):
for x in self:
if x.token_type == 'obs-route':
return x.domains
@property
def addr_spec(self):
for x in self:
if x.token_type == 'addr-spec':
return x.addr_spec
else:
return '<>'
class ObsRoute(TokenList):
token_type = 'obs-route'
@property
def domains(self):
return [x.domain for x in self if x.token_type == 'domain']
class Mailbox(TokenList):
token_type = 'mailbox'
@property
def display_name(self):
if self[0].token_type == 'name-addr':
return self[0].display_name
@property
def local_part(self):
return self[0].local_part
@property
def domain(self):
return self[0].domain
@property
def route(self):
if self[0].token_type == 'name-addr':
return self[0].route
@property
def addr_spec(self):
return self[0].addr_spec
class InvalidMailbox(TokenList):
token_type = 'invalid-mailbox'
@property
def display_name(self):
return None
local_part = domain = route = addr_spec = display_name
class Domain(TokenList):
token_type = 'domain'
@property
def domain(self):
return ''.join(super().value.split())
class DotAtom(TokenList):
token_type = 'dot-atom'
class DotAtomText(TokenList):
token_type = 'dot-atom-text'
class AddrSpec(TokenList):
token_type = 'addr-spec'
@property
def local_part(self):
return self[0].local_part
@property
def domain(self):
if len(self) < 3:
return None
return self[-1].domain
@property
def value(self):
if len(self) < 3:
return self[0].value
return self[0].value.rstrip()+self[1].value+self[2].value.lstrip()
@property
def addr_spec(self):
nameset = set(self.local_part)
if len(nameset) > len(nameset-DOT_ATOM_ENDS):
lp = quote_string(self.local_part)
else:
lp = self.local_part
if self.domain is not None:
return lp + '@' + self.domain
return lp
class ObsLocalPart(TokenList):
token_type = 'obs-local-part'
class DisplayName(Phrase):
token_type = 'display-name'
@property
def display_name(self):
res = TokenList(self)
if res[0].token_type == 'cfws':
res.pop(0)
else:
if res[0][0].token_type == 'cfws':
res[0] = TokenList(res[0][1:])
if res[-1].token_type == 'cfws':
res.pop()
else:
if res[-1][-1].token_type == 'cfws':
res[-1] = TokenList(res[-1][:-1])
return res.value
@property
def value(self):
quote = False
if self.defects:
quote = True
else:
for x in self:
if x.token_type == 'quoted-string':
quote = True
if quote:
pre = post = ''
if self[0].token_type=='cfws' or self[0][0].token_type=='cfws':
pre = ' '
if self[-1].token_type=='cfws' or self[-1][-1].token_type=='cfws':
post = ' '
return pre+quote_string(self.display_name)+post
else:
return super().value
class LocalPart(TokenList):
token_type = 'local-part'
@property
def value(self):
if self[0].token_type == "quoted-string":
return self[0].quoted_value
else:
return self[0].value
@property
def local_part(self):
# Strip whitespace from front, back, and around dots.
res = [DOT]
last = DOT
last_is_tl = False
for tok in self[0] + [DOT]:
if tok.token_type == 'cfws':
continue
if (last_is_tl and tok.token_type == 'dot' and
last[-1].token_type == 'cfws'):
res[-1] = TokenList(last[:-1])
is_tl = isinstance(tok, TokenList)
if (is_tl and last.token_type == 'dot' and
tok[0].token_type == 'cfws'):
res.append(TokenList(tok[1:]))
else:
res.append(tok)
last = res[-1]
last_is_tl = is_tl
res = TokenList(res[1:-1])
return res.value
class DomainLiteral(TokenList):
token_type = 'domain-literal'
@property
def domain(self):
return ''.join(super().value.split())
@property
def ip(self):
for x in self:
if x.token_type == 'ptext':
return x.value
class MIMEVersion(TokenList):
token_type = 'mime-version'
major = None
minor = None
class Parameter(TokenList):
token_type = 'parameter'
sectioned = False
extended = False
charset = 'us-ascii'
@property
def section_number(self):
        # Because the first token, the attribute (name), eats CFWS, the second
# token is always the section if there is one.
return self[1].number if self.sectioned else 0
@property
def param_value(self):
# This is part of the "handle quoted extended parameters" hack.
for token in self:
if token.token_type == 'value':
return token.stripped_value
if token.token_type == 'quoted-string':
for token in token:
if token.token_type == 'bare-quoted-string':
for token in token:
if token.token_type == 'value':
return token.stripped_value
return ''
class InvalidParameter(Parameter):
token_type = 'invalid-parameter'
class Attribute(TokenList):
token_type = 'attribute'
@property
def stripped_value(self):
for token in self:
if token.token_type.endswith('attrtext'):
return token.value
class Section(TokenList):
token_type = 'section'
number = None
class Value(TokenList):
token_type = 'value'
@property
def stripped_value(self):
token = self[0]
if token.token_type == 'cfws':
token = self[1]
if token.token_type.endswith(
('quoted-string', 'attribute', 'extended-attribute')):
return token.stripped_value
return self.value
class MimeParameters(TokenList):
token_type = 'mime-parameters'
@property
def params(self):
# The RFC specifically states that the ordering of parameters is not
# guaranteed and may be reordered by the transport layer. So we have
# to assume the RFC 2231 pieces can come in any order. However, we
# output them in the order that we first see a given name, which gives
# us a stable __str__.
params = OrderedDict()
for token in self:
if not token.token_type.endswith('parameter'):
continue
if token[0].token_type != 'attribute':
continue
name = token[0].value.strip()
if name not in params:
params[name] = []
params[name].append((token.section_number, token))
for name, parts in params.items():
parts = sorted(parts)
# XXX: there might be more recovery we could do here if, for
# example, this is really a case of a duplicate attribute name.
value_parts = []
charset = parts[0][1].charset
for i, (section_number, param) in enumerate(parts):
if section_number != i:
param.defects.append(errors.InvalidHeaderDefect(
"inconsistent multipart parameter numbering"))
value = param.param_value
if param.extended:
try:
value = urllib.parse.unquote_to_bytes(value)
except UnicodeEncodeError:
# source had surrogate escaped bytes. What we do now
# is a bit of an open question. I'm not sure this is
# the best choice, but it is what the old algorithm did
value = urllib.parse.unquote(value, encoding='latin-1')
else:
try:
value = value.decode(charset, 'surrogateescape')
except LookupError:
# XXX: there should really be a custom defect for
# unknown character set to make it easy to find,
# because otherwise unknown charset is a silent
# failure.
value = value.decode('us-ascii', 'surrogateescape')
if utils._has_surrogates(value):
param.defects.append(errors.UndecodableBytesDefect())
value_parts.append(value)
value = ''.join(value_parts)
yield name, value
def __str__(self):
params = []
for name, value in self.params:
if value:
params.append('{}={}'.format(name, quote_string(value)))
else:
params.append(name)
params = '; '.join(params)
return ' ' + params if params else ''
class ParameterizedHeaderValue(TokenList):
@property
def params(self):
for token in reversed(self):
if token.token_type == 'mime-parameters':
return token.params
return {}
@property
def parts(self):
if self and self[-1].token_type == 'mime-parameters':
# We don't want to start a new line if all of the params don't fit
# after the value, so unwrap the parameter list.
return TokenList(self[:-1] + self[-1])
return TokenList(self).parts
class ContentType(ParameterizedHeaderValue):
token_type = 'content-type'
maintype = 'text'
subtype = 'plain'
class ContentDisposition(ParameterizedHeaderValue):
token_type = 'content-disposition'
content_disposition = None
class ContentTransferEncoding(TokenList):
token_type = 'content-transfer-encoding'
cte = '7bit'
class HeaderLabel(TokenList):
token_type = 'header-label'
class Header(TokenList):
token_type = 'header'
def _fold(self, folded):
folded.append(str(self.pop(0)))
folded.lastlen = len(folded.current[0])
# The first line of the header is different from all others: we don't
# want to start a new object on a new line if it has any fold points in
# it that would allow part of it to be on the first header line.
# Further, if the first fold point would fit on the new line, we want
# to do that, but if it doesn't we want to put it on the first line.
# Folded supports this via the stickyspace attribute. If this
# attribute is not None, it does the special handling.
folded.stickyspace = str(self.pop(0)) if self[0].token_type == 'cfws' else ''
rest = self.pop(0)
if self:
raise ValueError("Malformed Header token list")
rest._fold(folded)
#
# Terminal classes and instances
#
class Terminal(str):
def __new__(cls, value, token_type):
self = super().__new__(cls, value)
self.token_type = token_type
self.defects = []
return self
def __repr__(self):
return "{}({})".format(self.__class__.__name__, super().__repr__())
@property
def all_defects(self):
return list(self.defects)
def _pp(self, indent=''):
return ["{}{}/{}({}){}".format(
indent,
self.__class__.__name__,
self.token_type,
super().__repr__(),
'' if not self.defects else ' {}'.format(self.defects),
)]
def cte_encode(self, charset, policy):
value = str(self)
try:
value.encode('us-ascii')
return value
except UnicodeEncodeError:
return _ew.encode(value, charset)
def pop_trailing_ws(self):
# This terminates the recursion.
return None
def pop_leading_fws(self):
# This terminates the recursion.
return None
@property
def comments(self):
return []
def has_leading_comment(self):
return False
def __getnewargs__(self):
        return (str(self), self.token_type)
class WhiteSpaceTerminal(Terminal):
@property
def value(self):
return ' '
def startswith_fws(self):
return True
has_fws = True
class ValueTerminal(Terminal):
@property
def value(self):
return self
def startswith_fws(self):
return False
has_fws = False
def as_encoded_word(self, charset):
return _ew.encode(str(self), charset)
class EWWhiteSpaceTerminal(WhiteSpaceTerminal):
@property
def value(self):
return ''
@property
def encoded(self):
return self[:]
def __str__(self):
return ''
has_fws = True
# XXX these need to become classes and used as instances so
# that a program can't change them in a parse tree and screw
# up other parse trees. Maybe should have tests for that, too.
DOT = ValueTerminal('.', 'dot')
ListSeparator = ValueTerminal(',', 'list-separator')
RouteComponentMarker = ValueTerminal('@', 'route-component-marker')
#
# Parser
#
# Parse strings according to RFC822/2047/2822/5322 rules.
#
# This is a stateless parser. Each get_XXX function accepts a string and
# returns either a Terminal or a TokenList representing the RFC object named
# by the method and a string containing the remaining unparsed characters
# from the input. Thus a parser method consumes the next syntactic construct
# of a given type and returns a token representing the construct plus the
# unparsed remainder of the input string.
#
# For example, if the first element of a structured header is a 'phrase',
# then:
#
# phrase, value = get_phrase(value)
#
# returns the complete phrase from the start of the string value, plus any
# characters left in the string after the phrase is removed.
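#
# A minimal sketch of that contract (doctest-style; the expected values are
# an illustrative reading of the code below, not verified interpreter output):
#
#   >>> phrase, value = get_phrase('Fred Bloggs <person@dom.example>')
#   >>> phrase.token_type, value
#   ('phrase', '<person@dom.example>')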
_wsp_splitter = re.compile(r'([{}]+)'.format(''.join(WSP))).split
_non_atom_end_matcher = re.compile(r"[^{}]+".format(
    ''.join(ATOM_ENDS).replace('\\','\\\\').replace(']',r'\]'))).match
_non_printable_finder = re.compile(r"[\x00-\x20\x7F]").findall
_non_token_end_matcher = re.compile(r"[^{}]+".format(
    ''.join(TOKEN_ENDS).replace('\\','\\\\').replace(']',r'\]'))).match
_non_attribute_end_matcher = re.compile(r"[^{}]+".format(
    ''.join(ATTRIBUTE_ENDS).replace('\\','\\\\').replace(']',r'\]'))).match
_non_extended_attribute_end_matcher = re.compile(r"[^{}]+".format(
''.join(EXTENDED_ATTRIBUTE_ENDS).replace(
        '\\','\\\\').replace(']',r'\]'))).match
def _validate_xtext(xtext):
"""If input token contains ASCII non-printables, register a defect."""
non_printables = _non_printable_finder(xtext)
if non_printables:
xtext.defects.append(errors.NonPrintableDefect(non_printables))
if utils._has_surrogates(xtext):
xtext.defects.append(errors.UndecodableBytesDefect(
"Non-ASCII characters found in header token"))
def _get_ptext_to_endchars(value, endchars):
"""Scan printables/quoted-pairs until endchars and return unquoted ptext.
This function turns a run of qcontent, ccontent-without-comments, or
dtext-with-quoted-printables into a single string by unquoting any
quoted printables. It returns the string, the remaining value, and
a flag that is True iff there were any quoted printables decoded.
"""
fragment, *remainder = _wsp_splitter(value, 1)
vchars = []
escape = False
had_qp = False
for pos in range(len(fragment)):
if fragment[pos] == '\\':
if escape:
escape = False
had_qp = True
else:
escape = True
continue
if escape:
escape = False
elif fragment[pos] in endchars:
break
vchars.append(fragment[pos])
else:
pos = pos + 1
return ''.join(vchars), ''.join([fragment[pos:]] + remainder), had_qp
def get_fws(value):
"""FWS = 1*WSP
This isn't the RFC definition. We're using fws to represent tokens where
folding can be done, but when we are parsing the *un*folding has already
been done so we don't need to watch out for CRLF.
"""
newvalue = value.lstrip()
fws = WhiteSpaceTerminal(value[:len(value)-len(newvalue)], 'fws')
return fws, newvalue
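# A minimal sketch of get_fws (illustrative expected values):
#   >>> fws, rest = get_fws('   To: x')
#   >>> str(fws), fws.token_type, rest
#   ('   ', 'fws', 'To: x')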
def get_encoded_word(value):
""" encoded-word = "=?" charset "?" encoding "?" encoded-text "?="
"""
ew = EncodedWord()
if not value.startswith('=?'):
raise errors.HeaderParseError(
"expected encoded word but found {}".format(value))
tok, *remainder = value[2:].split('?=', 1)
if tok == value[2:]:
raise errors.HeaderParseError(
"expected encoded word but found {}".format(value))
remstr = ''.join(remainder)
if len(remstr) > 1 and remstr[0] in hexdigits and remstr[1] in hexdigits:
# The ? after the CTE was followed by an encoded word escape (=XX).
rest, *remainder = remstr.split('?=', 1)
tok = tok + '?=' + rest
if len(tok.split()) > 1:
ew.defects.append(errors.InvalidHeaderDefect(
"whitespace inside encoded word"))
ew.cte = value
value = ''.join(remainder)
try:
text, charset, lang, defects = _ew.decode('=?' + tok + '?=')
except ValueError:
raise errors.HeaderParseError(
"encoded word format invalid: '{}'".format(ew.cte))
ew.charset = charset
ew.lang = lang
ew.defects.extend(defects)
while text:
if text[0] in WSP:
token, text = get_fws(text)
ew.append(token)
continue
chars, *remainder = _wsp_splitter(text, 1)
vtext = ValueTerminal(chars, 'vtext')
_validate_xtext(vtext)
ew.append(vtext)
text = ''.join(remainder)
return ew, value
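# A minimal sketch of get_encoded_word (illustrative; assumes _ew.decode
# handles q-encoding per RFC 2047):
#   >>> ew, rest = get_encoded_word('=?utf-8?q?caf=C3=A9?= tail')
#   >>> str(ew), ew.charset, rest
#   ('café', 'utf-8', ' tail')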
def get_unstructured(value):
"""unstructured = (*([FWS] vchar) *WSP) / obs-unstruct
       obs-unstruct = *((*LF *CR *(obs-utext *LF *CR)) / FWS)
       obs-utext = %d0 / obs-NO-WS-CTL / VCHAR
obs-NO-WS-CTL is control characters except WSP/CR/LF.
So, basically, we have printable runs, plus control characters or nulls in
the obsolete syntax, separated by whitespace. Since RFC 2047 uses the
obsolete syntax in its specification, but requires whitespace on either
side of the encoded words, I can see no reason to need to separate the
non-printable-non-whitespace from the printable runs if they occur, so we
parse this into xtext tokens separated by WSP tokens.
Because an 'unstructured' value must by definition constitute the entire
value, this 'get' routine does not return a remaining value, only the
parsed TokenList.
"""
# XXX: but what about bare CR and LF? They might signal the start or
# end of an encoded word. YAGNI for now, since our current parsers
# will never send us strings with bare CR or LF.
unstructured = UnstructuredTokenList()
while value:
if value[0] in WSP:
token, value = get_fws(value)
unstructured.append(token)
continue
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: Need to figure out how to register defects when
# appropriate here.
pass
else:
have_ws = True
if len(unstructured) > 0:
if unstructured[-1].token_type != 'fws':
unstructured.defects.append(errors.InvalidHeaderDefect(
"missing whitespace before encoded word"))
have_ws = False
if have_ws and len(unstructured) > 1:
if unstructured[-2].token_type == 'encoded-word':
unstructured[-1] = EWWhiteSpaceTerminal(
unstructured[-1], 'fws')
unstructured.append(token)
continue
tok, *remainder = _wsp_splitter(value, 1)
vtext = ValueTerminal(tok, 'vtext')
_validate_xtext(vtext)
unstructured.append(vtext)
value = ''.join(remainder)
return unstructured
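# A minimal sketch of get_unstructured decoding an embedded encoded word
# (illustrative expected value):
#   >>> u = get_unstructured('Hello =?utf-8?q?W=C3=B6rld?=')
#   >>> str(u)
#   'Hello Wörld'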
def get_qp_ctext(value):
"""ctext = <printable ascii except \ ( )>
This is not the RFC ctext, since we are handling nested comments in comment
and unquoting quoted-pairs here. We allow anything except the '()'
characters, but if we find any ASCII other than the RFC defined printable
ASCII an NonPrintableDefect is added to the token's defects list. Since
quoted pairs are converted to their unquoted values, what is returned is
a 'ptext' token. In this case it is a WhiteSpaceTerminal, so it's value
is ' '.
"""
ptext, value, _ = _get_ptext_to_endchars(value, '()')
ptext = WhiteSpaceTerminal(ptext, 'ptext')
_validate_xtext(ptext)
return ptext, value
def get_qcontent(value):
"""qcontent = qtext / quoted-pair
We allow anything except the DQUOTE character, but if we find any ASCII
    other than the RFC defined printable ASCII a NonPrintableDefect is
added to the token's defects list. Any quoted pairs are converted to their
unquoted values, so what is returned is a 'ptext' token. In this case it
is a ValueTerminal.
"""
ptext, value, _ = _get_ptext_to_endchars(value, '"')
ptext = ValueTerminal(ptext, 'ptext')
_validate_xtext(ptext)
return ptext, value
def get_atext(value):
"""atext = <matches _atext_matcher>
We allow any non-ATOM_ENDS in atext, but add an InvalidATextDefect to
the token's defects list if we find non-atext characters.
"""
m = _non_atom_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected atext but found '{}'".format(value))
atext = m.group()
value = value[len(atext):]
atext = ValueTerminal(atext, 'atext')
_validate_xtext(atext)
return atext, value
def get_bare_quoted_string(value):
"""bare-quoted-string = DQUOTE *([FWS] qcontent) [FWS] DQUOTE
A quoted-string without the leading or trailing white space. Its
value is the text between the quote marks, with whitespace
preserved and quoted pairs decoded.
"""
if value[0] != '"':
raise errors.HeaderParseError(
"expected '\"' but found '{}'".format(value))
bare_quoted_string = BareQuotedString()
value = value[1:]
while value and value[0] != '"':
if value[0] in WSP:
token, value = get_fws(value)
elif value[:2] == '=?':
try:
token, value = get_encoded_word(value)
bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
"encoded word inside quoted string"))
except errors.HeaderParseError:
token, value = get_qcontent(value)
else:
token, value = get_qcontent(value)
bare_quoted_string.append(token)
if not value:
bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
"end of header inside quoted string"))
return bare_quoted_string, value
return bare_quoted_string, value[1:]
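# A minimal sketch of get_bare_quoted_string (illustrative expected values):
#   >>> bqs, rest = get_bare_quoted_string('"hi there" x')
#   >>> bqs.value, str(bqs), rest
#   ('hi there', '"hi there"', ' x')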
def get_comment(value):
"""comment = "(" *([FWS] ccontent) [FWS] ")"
ccontent = ctext / quoted-pair / comment
We handle nested comments here, and quoted-pair in our qp-ctext routine.
"""
if value and value[0] != '(':
raise errors.HeaderParseError(
"expected '(' but found '{}'".format(value))
comment = Comment()
value = value[1:]
while value and value[0] != ")":
if value[0] in WSP:
token, value = get_fws(value)
elif value[0] == '(':
token, value = get_comment(value)
else:
token, value = get_qp_ctext(value)
comment.append(token)
if not value:
comment.defects.append(errors.InvalidHeaderDefect(
"end of header inside comment"))
return comment, value
return comment, value[1:]
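# A minimal sketch of get_comment, including a nested comment (illustrative):
#   >>> comment, rest = get_comment('(a (nested) note) tail')
#   >>> comment.content, rest
#   ('a (nested) note', ' tail')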
def get_cfws(value):
"""CFWS = (1*([FWS] comment) [FWS]) / FWS
"""
cfws = CFWSList()
while value and value[0] in CFWS_LEADER:
if value[0] in WSP:
token, value = get_fws(value)
else:
token, value = get_comment(value)
cfws.append(token)
return cfws, value
def get_quoted_string(value):
"""quoted-string = [CFWS] <bare-quoted-string> [CFWS]
'bare-quoted-string' is an intermediate class defined by this
parser and not by the RFC grammar. It is the quoted string
without any attached CFWS.
"""
quoted_string = QuotedString()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
quoted_string.append(token)
token, value = get_bare_quoted_string(value)
quoted_string.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
quoted_string.append(token)
return quoted_string, value
def get_atom(value):
"""atom = [CFWS] 1*atext [CFWS]
An atom could be an rfc2047 encoded word.
"""
atom = Atom()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
if value and value[0] in ATOM_ENDS:
raise errors.HeaderParseError(
"expected atom but found '{}'".format(value))
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: need to figure out how to register defects when
# appropriate here.
token, value = get_atext(value)
else:
token, value = get_atext(value)
atom.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
return atom, value
def get_dot_atom_text(value):
""" dot-text = 1*atext *("." 1*atext)
"""
dot_atom_text = DotAtomText()
if not value or value[0] in ATOM_ENDS:
raise errors.HeaderParseError("expected atom at a start of "
"dot-atom-text but found '{}'".format(value))
while value and value[0] not in ATOM_ENDS:
token, value = get_atext(value)
dot_atom_text.append(token)
if value and value[0] == '.':
dot_atom_text.append(DOT)
value = value[1:]
if dot_atom_text[-1] is DOT:
raise errors.HeaderParseError("expected atom at end of dot-atom-text "
"but found '{}'".format('.'+value))
return dot_atom_text, value
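# A minimal sketch of get_dot_atom_text (illustrative expected values):
#   >>> dat, rest = get_dot_atom_text('foo.bar@example.com')
#   >>> str(dat), rest
#   ('foo.bar', '@example.com')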
def get_dot_atom(value):
""" dot-atom = [CFWS] dot-atom-text [CFWS]
Any place we can have a dot atom, we could instead have an rfc2047 encoded
word.
"""
dot_atom = DotAtom()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
dot_atom.append(token)
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: need to figure out how to register defects when
# appropriate here.
token, value = get_dot_atom_text(value)
else:
token, value = get_dot_atom_text(value)
dot_atom.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
dot_atom.append(token)
return dot_atom, value
def get_word(value):
"""word = atom / quoted-string
Either atom or quoted-string may start with CFWS. We have to peel off this
CFWS first to determine which type of word to parse. Afterward we splice
the leading CFWS, if any, into the parsed sub-token.
    If neither an atom nor a quoted-string is found before the next special, a
HeaderParseError is raised.
The token returned is either an Atom or a QuotedString, as appropriate.
This means the 'word' level of the formal grammar is not represented in the
parse tree; this is because having that extra layer when manipulating the
parse tree is more confusing than it is helpful.
"""
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
else:
leader = None
if value[0]=='"':
token, value = get_quoted_string(value)
elif value[0] in SPECIALS:
raise errors.HeaderParseError("Expected 'atom' or 'quoted-string' "
"but found '{}'".format(value))
else:
token, value = get_atom(value)
if leader is not None:
token[:0] = [leader]
return token, value
def get_phrase(value):
""" phrase = 1*word / obs-phrase
obs-phrase = word *(word / "." / CFWS)
This means a phrase can be a sequence of words, periods, and CFWS in any
order as long as it starts with at least one word. If anything other than
words is detected, an ObsoleteHeaderDefect is added to the token's defect
list. We also accept a phrase that starts with CFWS followed by a dot;
this is registered as an InvalidHeaderDefect, since it is not supported by
even the obsolete grammar.
"""
phrase = Phrase()
try:
token, value = get_word(value)
phrase.append(token)
except errors.HeaderParseError:
phrase.defects.append(errors.InvalidHeaderDefect(
"phrase does not start with word"))
while value and value[0] not in PHRASE_ENDS:
if value[0]=='.':
phrase.append(DOT)
phrase.defects.append(errors.ObsoleteHeaderDefect(
"period in 'phrase'"))
value = value[1:]
else:
try:
token, value = get_word(value)
except errors.HeaderParseError:
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
phrase.defects.append(errors.ObsoleteHeaderDefect(
"comment found without atom"))
else:
raise
phrase.append(token)
return phrase, value
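# A minimal sketch of the obs-phrase handling above: a period inside a phrase
# parses but registers a defect (illustrative expected values):
#   >>> phrase, rest = get_phrase('A. Nonymous <')
#   >>> [d.__class__.__name__ for d in phrase.defects]
#   ['ObsoleteHeaderDefect']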
def get_local_part(value):
""" local-part = dot-atom / quoted-string / obs-local-part
"""
local_part = LocalPart()
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected local-part but found '{}'".format(value))
try:
token, value = get_dot_atom(value)
except errors.HeaderParseError:
try:
token, value = get_word(value)
except errors.HeaderParseError:
if value[0] != '\\' and value[0] in PHRASE_ENDS:
raise
token = TokenList()
if leader is not None:
token[:0] = [leader]
local_part.append(token)
if value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
obs_local_part, value = get_obs_local_part(str(local_part) + value)
if obs_local_part.token_type == 'invalid-obs-local-part':
local_part.defects.append(errors.InvalidHeaderDefect(
"local-part is not dot-atom, quoted-string, or obs-local-part"))
else:
local_part.defects.append(errors.ObsoleteHeaderDefect(
"local-part is not a dot-atom (contains CFWS)"))
local_part[0] = obs_local_part
try:
local_part.value.encode('ascii')
except UnicodeEncodeError:
local_part.defects.append(errors.NonASCIILocalPartDefect(
"local-part contains non-ASCII characters)"))
return local_part, value
def get_obs_local_part(value):
""" obs-local-part = word *("." word)
"""
obs_local_part = ObsLocalPart()
last_non_ws_was_dot = False
while value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
if value[0] == '.':
if last_non_ws_was_dot:
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"invalid repeated '.'"))
obs_local_part.append(DOT)
last_non_ws_was_dot = True
value = value[1:]
continue
elif value[0]=='\\':
obs_local_part.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"'\\' character outside of quoted-string/ccontent"))
last_non_ws_was_dot = False
continue
if obs_local_part and obs_local_part[-1].token_type != 'dot':
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"missing '.' between words"))
try:
token, value = get_word(value)
last_non_ws_was_dot = False
except errors.HeaderParseError:
if value[0] not in CFWS_LEADER:
raise
token, value = get_cfws(value)
obs_local_part.append(token)
if (obs_local_part[0].token_type == 'dot' or
obs_local_part[0].token_type=='cfws' and
obs_local_part[1].token_type=='dot'):
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"Invalid leading '.' in local part"))
if (obs_local_part[-1].token_type == 'dot' or
obs_local_part[-1].token_type=='cfws' and
obs_local_part[-2].token_type=='dot'):
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"Invalid trailing '.' in local part"))
if obs_local_part.defects:
obs_local_part.token_type = 'invalid-obs-local-part'
return obs_local_part, value
def get_dtext(value):
""" dtext = <printable ascii except \ [ ]> / obs-dtext
obs-dtext = obs-NO-WS-CTL / quoted-pair
We allow anything except the excluded characters, but if we find any
ASCII other than the RFC defined printable ASCII an NonPrintableDefect is
added to the token's defects list. Quoted pairs are converted to their
unquoted values, so what is returned is a ptext token, in this case a
ValueTerminal. If there were quoted-printables, an ObsoleteHeaderDefect is
added to the returned token's defect list.
"""
ptext, value, had_qp = _get_ptext_to_endchars(value, '[]')
ptext = ValueTerminal(ptext, 'ptext')
if had_qp:
ptext.defects.append(errors.ObsoleteHeaderDefect(
"quoted printable found in domain-literal"))
_validate_xtext(ptext)
return ptext, value
def _check_for_early_dl_end(value, domain_literal):
if value:
return False
domain_literal.append(errors.InvalidHeaderDefect(
"end of input inside domain-literal"))
domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
return True
def get_domain_literal(value):
""" domain-literal = [CFWS] "[" *([FWS] dtext) [FWS] "]" [CFWS]
"""
domain_literal = DomainLiteral()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
domain_literal.append(token)
if not value:
raise errors.HeaderParseError("expected domain-literal")
if value[0] != '[':
raise errors.HeaderParseError("expected '[' at start of domain-literal "
"but found '{}'".format(value))
value = value[1:]
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
domain_literal.append(ValueTerminal('[', 'domain-literal-start'))
if value[0] in WSP:
token, value = get_fws(value)
domain_literal.append(token)
token, value = get_dtext(value)
domain_literal.append(token)
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
if value[0] in WSP:
token, value = get_fws(value)
domain_literal.append(token)
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
if value[0] != ']':
raise errors.HeaderParseError("expected ']' at end of domain-literal "
"but found '{}'".format(value))
domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
domain_literal.append(token)
return domain_literal, value
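# A minimal sketch of get_domain_literal (illustrative expected values):
#   >>> dl, rest = get_domain_literal('[127.0.0.1]')
#   >>> dl.ip, dl.domain, rest
#   ('127.0.0.1', '[127.0.0.1]', '')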
def get_domain(value):
""" domain = dot-atom / domain-literal / obs-domain
        obs-domain = atom *("." atom)
"""
domain = Domain()
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected domain but found '{}'".format(value))
if value[0] == '[':
token, value = get_domain_literal(value)
if leader is not None:
token[:0] = [leader]
domain.append(token)
return domain, value
try:
token, value = get_dot_atom(value)
except errors.HeaderParseError:
token, value = get_atom(value)
if leader is not None:
token[:0] = [leader]
domain.append(token)
if value and value[0] == '.':
domain.defects.append(errors.ObsoleteHeaderDefect(
"domain is not a dot-atom (contains CFWS)"))
if domain[0].token_type == 'dot-atom':
domain[:] = domain[0]
while value and value[0] == '.':
domain.append(DOT)
token, value = get_atom(value[1:])
domain.append(token)
return domain, value
def get_addr_spec(value):
""" addr-spec = local-part "@" domain
"""
addr_spec = AddrSpec()
token, value = get_local_part(value)
addr_spec.append(token)
if not value or value[0] != '@':
addr_spec.defects.append(errors.InvalidHeaderDefect(
"add-spec local part with no domain"))
return addr_spec, value
addr_spec.append(ValueTerminal('@', 'address-at-symbol'))
token, value = get_domain(value[1:])
addr_spec.append(token)
return addr_spec, value
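# A minimal sketch of get_addr_spec (illustrative expected values):
#   >>> aspec, rest = get_addr_spec('dinsdale@example.com')
#   >>> aspec.local_part, aspec.domain, aspec.addr_spec
#   ('dinsdale', 'example.com', 'dinsdale@example.com')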
def get_obs_route(value):
""" obs-route = obs-domain-list ":"
obs-domain-list = *(CFWS / ",") "@" domain *("," [CFWS] ["@" domain])
Returns an obs-route token with the appropriate sub-tokens (that is,
there is no obs-domain-list in the parse tree).
"""
obs_route = ObsRoute()
while value and (value[0]==',' or value[0] in CFWS_LEADER):
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
obs_route.append(token)
elif value[0] == ',':
obs_route.append(ListSeparator)
value = value[1:]
if not value or value[0] != '@':
raise errors.HeaderParseError(
"expected obs-route domain but found '{}'".format(value))
obs_route.append(RouteComponentMarker)
token, value = get_domain(value[1:])
obs_route.append(token)
while value and value[0]==',':
obs_route.append(ListSeparator)
value = value[1:]
if not value:
break
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
obs_route.append(token)
if value[0] == '@':
obs_route.append(RouteComponentMarker)
token, value = get_domain(value[1:])
obs_route.append(token)
if not value:
raise errors.HeaderParseError("end of header while parsing obs-route")
if value[0] != ':':
raise errors.HeaderParseError( "expected ':' marking end of "
"obs-route but found '{}'".format(value))
obs_route.append(ValueTerminal(':', 'end-of-obs-route-marker'))
return obs_route, value[1:]
def get_angle_addr(value):
""" angle-addr = [CFWS] "<" addr-spec ">" [CFWS] / obs-angle-addr
obs-angle-addr = [CFWS] "<" obs-route addr-spec ">" [CFWS]
"""
angle_addr = AngleAddr()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
angle_addr.append(token)
if not value or value[0] != '<':
raise errors.HeaderParseError(
"expected angle-addr but found '{}'".format(value))
angle_addr.append(ValueTerminal('<', 'angle-addr-start'))
value = value[1:]
# Although it is not legal per RFC5322, SMTP uses '<>' in certain
# circumstances.
if value[0] == '>':
angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
angle_addr.defects.append(errors.InvalidHeaderDefect(
"null addr-spec in angle-addr"))
value = value[1:]
return angle_addr, value
try:
token, value = get_addr_spec(value)
except errors.HeaderParseError:
try:
token, value = get_obs_route(value)
angle_addr.defects.append(errors.ObsoleteHeaderDefect(
"obsolete route specification in angle-addr"))
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected addr-spec or obs-route but found '{}'".format(value))
angle_addr.append(token)
token, value = get_addr_spec(value)
angle_addr.append(token)
if value and value[0] == '>':
value = value[1:]
else:
angle_addr.defects.append(errors.InvalidHeaderDefect(
"missing trailing '>' on angle-addr"))
angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
angle_addr.append(token)
return angle_addr, value
def get_display_name(value):
""" display-name = phrase
Because this is simply a name-rule, we don't return a display-name
token containing a phrase, but rather a display-name token with
the content of the phrase.
"""
display_name = DisplayName()
token, value = get_phrase(value)
display_name.extend(token[:])
display_name.defects = token.defects[:]
return display_name, value
def get_name_addr(value):
""" name-addr = [display-name] angle-addr
"""
name_addr = NameAddr()
# Both the optional display name and the angle-addr can start with cfws.
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(leader))
if value[0] != '<':
if value[0] in PHRASE_ENDS:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(value))
token, value = get_display_name(value)
if not value:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(token))
if leader is not None:
token[0][:0] = [leader]
leader = None
name_addr.append(token)
token, value = get_angle_addr(value)
if leader is not None:
token[:0] = [leader]
name_addr.append(token)
return name_addr, value
def get_mailbox(value):
""" mailbox = name-addr / addr-spec
"""
# The only way to figure out if we are dealing with a name-addr or an
# addr-spec is to try parsing each one.
mailbox = Mailbox()
try:
token, value = get_name_addr(value)
except errors.HeaderParseError:
try:
token, value = get_addr_spec(value)
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected mailbox but found '{}'".format(value))
if any(isinstance(x, errors.InvalidHeaderDefect)
for x in token.all_defects):
mailbox.token_type = 'invalid-mailbox'
mailbox.append(token)
return mailbox, value
def get_invalid_mailbox(value, endchars):
""" Read everything up to one of the chars in endchars.
This is outside the formal grammar. The InvalidMailbox TokenList that is
returned acts like a Mailbox, but the data attributes are None.
"""
invalid_mailbox = InvalidMailbox()
while value and value[0] not in endchars:
if value[0] in PHRASE_ENDS:
invalid_mailbox.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
invalid_mailbox.append(token)
return invalid_mailbox, value
def get_mailbox_list(value):
""" mailbox-list = (mailbox *("," mailbox)) / obs-mbox-list
obs-mbox-list = *([CFWS] ",") mailbox *("," [mailbox / CFWS])
For this routine we go outside the formal grammar in order to improve error
handling. We recognize the end of the mailbox list only at the end of the
value or at a ';' (the group terminator). This is so that we can turn
invalid mailboxes into InvalidMailbox tokens and continue parsing any
remaining valid mailboxes. We also allow all mailbox entries to be null,
and this condition is handled appropriately at a higher level.
"""
mailbox_list = MailboxList()
while value and value[0] != ';':
try:
token, value = get_mailbox(value)
mailbox_list.append(token)
except errors.HeaderParseError:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value or value[0] in ',;':
mailbox_list.append(leader)
mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in mailbox-list"))
else:
token, value = get_invalid_mailbox(value, ',;')
if leader is not None:
token[:0] = [leader]
mailbox_list.append(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
elif value[0] == ',':
mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in mailbox-list"))
else:
token, value = get_invalid_mailbox(value, ',;')
if leader is not None:
token[:0] = [leader]
mailbox_list.append(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
if value and value[0] not in ',;':
# Crap after mailbox; treat it as an invalid mailbox.
# The mailbox info will still be available.
mailbox = mailbox_list[-1]
mailbox.token_type = 'invalid-mailbox'
token, value = get_invalid_mailbox(value, ',;')
mailbox.extend(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
if value and value[0] == ',':
mailbox_list.append(ListSeparator)
value = value[1:]
return mailbox_list, value
def get_group_list(value):
""" group-list = mailbox-list / CFWS / obs-group-list
obs-group-list = 1*([CFWS] ",") [CFWS]
"""
group_list = GroupList()
if not value:
group_list.defects.append(errors.InvalidHeaderDefect(
"end of header before group-list"))
return group_list, value
leader = None
if value and value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
# This should never happen in email parsing, since CFWS-only is a
# legal alternative to group-list in a group, which is the only
# place group-list appears.
group_list.defects.append(errors.InvalidHeaderDefect(
"end of header in group-list"))
group_list.append(leader)
return group_list, value
if value[0] == ';':
group_list.append(leader)
return group_list, value
token, value = get_mailbox_list(value)
if len(token.all_mailboxes)==0:
if leader is not None:
group_list.append(leader)
group_list.extend(token)
group_list.defects.append(errors.ObsoleteHeaderDefect(
"group-list with empty entries"))
return group_list, value
if leader is not None:
token[:0] = [leader]
group_list.append(token)
return group_list, value
def get_group(value):
""" group = display-name ":" [group-list] ";" [CFWS]
"""
group = Group()
token, value = get_display_name(value)
if not value or value[0] != ':':
raise errors.HeaderParseError("expected ':' at end of group "
"display name but found '{}'".format(value))
group.append(token)
group.append(ValueTerminal(':', 'group-display-name-terminator'))
value = value[1:]
if value and value[0] == ';':
group.append(ValueTerminal(';', 'group-terminator'))
return group, value[1:]
token, value = get_group_list(value)
group.append(token)
if not value:
group.defects.append(errors.InvalidHeaderDefect(
"end of header in group"))
if value[0] != ';':
raise errors.HeaderParseError(
"expected ';' at end of group but found {}".format(value))
group.append(ValueTerminal(';', 'group-terminator'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
group.append(token)
return group, value
def get_address(value):
""" address = mailbox / group
Note that counter-intuitively, an address can be either a single address or
a list of addresses (a group). This is why the returned Address object has
a 'mailboxes' attribute which treats a single address as a list of length
    one. When you need to differentiate between the two cases, extract the single
element, which is either a mailbox or a group token.
"""
# The formal grammar isn't very helpful when parsing an address. mailbox
# and group, especially when allowing for obsolete forms, start off very
# similarly. It is only when you reach one of @, <, or : that you know
# what you've got. So, we try each one in turn, starting with the more
# likely of the two. We could perhaps make this more efficient by looking
# for a phrase and then branching based on the next character, but that
# would be a premature optimization.
address = Address()
try:
token, value = get_group(value)
except errors.HeaderParseError:
try:
token, value = get_mailbox(value)
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected address but found '{}'".format(value))
address.append(token)
return address, value
def get_address_list(value):
""" address_list = (address *("," address)) / obs-addr-list
obs-addr-list = *([CFWS] ",") address *("," [address / CFWS])
We depart from the formal grammar here by continuing to parse until the end
of the input, assuming the input to be entirely composed of an
address-list. This is always true in email parsing, and allows us
to skip invalid addresses to parse additional valid ones.
"""
address_list = AddressList()
while value:
try:
token, value = get_address(value)
address_list.append(token)
except errors.HeaderParseError as err:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value or value[0] == ',':
address_list.append(leader)
address_list.defects.append(errors.ObsoleteHeaderDefect(
"address-list entry with no content"))
else:
token, value = get_invalid_mailbox(value, ',')
if leader is not None:
token[:0] = [leader]
address_list.append(Address([token]))
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
elif value[0] == ',':
address_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in address-list"))
else:
token, value = get_invalid_mailbox(value, ',')
if leader is not None:
token[:0] = [leader]
address_list.append(Address([token]))
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
if value and value[0] != ',':
# Crap after address; treat it as an invalid mailbox.
# The mailbox info will still be available.
mailbox = address_list[-1][0]
mailbox.token_type = 'invalid-mailbox'
token, value = get_invalid_mailbox(value, ',')
mailbox.extend(token)
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
if value: # Must be a , at this point.
address_list.append(ValueTerminal(',', 'list-separator'))
value = value[1:]
return address_list, value
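# A minimal sketch of get_address_list over a mixed bare/name-addr list
# (illustrative expected values):
#   >>> al, rest = get_address_list('foo@example.com, Fred <bar@example.com>')
#   >>> [mb.addr_spec for mb in al.mailboxes]
#   ['foo@example.com', 'bar@example.com']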
#
# XXX: As I begin to add additional header parsers, I'm realizing we probably
# have two levels of parser routines: the get_XXX methods that get a token in
# the grammar, and parse_XXX methods that parse an entire field value. So
# get_address_list above should really be a parse_ method, as probably should
# be get_unstructured.
#
def parse_mime_version(value):
""" mime-version = [CFWS] 1*digit [CFWS] "." [CFWS] 1*digit [CFWS]
"""
# The [CFWS] is implicit in the RFC 2045 BNF.
# XXX: This routine is a bit verbose, should factor out a get_int method.
mime_version = MIMEVersion()
if not value:
mime_version.defects.append(errors.HeaderMissingRequiredValue(
"Missing MIME version number (eg: 1.0)"))
return mime_version
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value:
mime_version.defects.append(errors.HeaderMissingRequiredValue(
"Expected MIME version number but found only CFWS"))
digits = ''
while value and value[0] != '.' and value[0] not in CFWS_LEADER:
digits += value[0]
value = value[1:]
if not digits.isdigit():
mime_version.defects.append(errors.InvalidHeaderDefect(
"Expected MIME major version number but found {!r}".format(digits)))
mime_version.append(ValueTerminal(digits, 'xtext'))
else:
mime_version.major = int(digits)
mime_version.append(ValueTerminal(digits, 'digits'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value or value[0] != '.':
if mime_version.major is not None:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Incomplete MIME version; found only major number"))
if value:
mime_version.append(ValueTerminal(value, 'xtext'))
return mime_version
mime_version.append(ValueTerminal('.', 'version-separator'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value:
if mime_version.major is not None:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Incomplete MIME version; found only major number"))
return mime_version
digits = ''
while value and value[0] not in CFWS_LEADER:
digits += value[0]
value = value[1:]
if not digits.isdigit():
mime_version.defects.append(errors.InvalidHeaderDefect(
"Expected MIME minor version number but found {!r}".format(digits)))
mime_version.append(ValueTerminal(digits, 'xtext'))
else:
mime_version.minor = int(digits)
mime_version.append(ValueTerminal(digits, 'digits'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if value:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Excess non-CFWS text after MIME version"))
mime_version.append(ValueTerminal(value, 'xtext'))
return mime_version
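# A minimal sketch of parse_mime_version (illustrative expected values):
#   >>> mv = parse_mime_version('1.0')
#   >>> mv.major, mv.minor
#   (1, 0)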
def get_invalid_parameter(value):
""" Read everything up to the next ';'.
This is outside the formal grammar. The InvalidParameter TokenList that is
returned acts like a Parameter, but the data attributes are None.
"""
invalid_parameter = InvalidParameter()
while value and value[0] != ';':
if value[0] in PHRASE_ENDS:
invalid_parameter.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
invalid_parameter.append(token)
return invalid_parameter, value
def get_ttext(value):
"""ttext = <matches _ttext_matcher>
We allow any non-TOKEN_ENDS in ttext, but add defects to the token's
defects list if we find non-ttext characters. We also register defects for
*any* non-printables even though the RFC doesn't exclude all of them,
because we follow the spirit of RFC 5322.
"""
m = _non_token_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected ttext but found '{}'".format(value))
ttext = m.group()
value = value[len(ttext):]
ttext = ValueTerminal(ttext, 'ttext')
_validate_xtext(ttext)
return ttext, value
def get_token(value):
"""token = [CFWS] 1*ttext [CFWS]
The RFC equivalent of ttext is any US-ASCII chars except space, ctls, or
tspecials. We also exclude tabs even though the RFC doesn't.
The RFC implies the CFWS but is not explicit about it in the BNF.
"""
mtoken = Token()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mtoken.append(token)
if value and value[0] in TOKEN_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_ttext(value)
mtoken.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mtoken.append(token)
return mtoken, value
def get_attrtext(value):
"""attrtext = 1*(any non-ATTRIBUTE_ENDS character)
We allow any non-ATTRIBUTE_ENDS in attrtext, but add defects to the
token's defects list if we find non-attrtext characters. We also register
defects for *any* non-printables even though the RFC doesn't exclude all of
them, because we follow the spirit of RFC 5322.
"""
m = _non_attribute_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected attrtext but found {!r}".format(value))
attrtext = m.group()
value = value[len(attrtext):]
attrtext = ValueTerminal(attrtext, 'attrtext')
_validate_xtext(attrtext)
return attrtext, value
def get_attribute(value):
""" [CFWS] 1*attrtext [CFWS]
This version of the BNF makes the CFWS explicit, and as usual we use a
value terminal for the actual run of characters. The RFC equivalent of
attrtext is the token characters, with the subtraction of '*', "'", and '%'.
We include tab in the excluded set just as we do for token.
"""
attribute = Attribute()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
if value and value[0] in ATTRIBUTE_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_attrtext(value)
attribute.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
return attribute, value
def get_extended_attrtext(value):
"""attrtext = 1*(any non-ATTRIBUTE_ENDS character plus '%')
This is a special parsing routine so that we get a value that
includes % escapes as a single string (which we decode as a single
string later).
"""
m = _non_extended_attribute_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected extended attrtext but found {!r}".format(value))
attrtext = m.group()
value = value[len(attrtext):]
attrtext = ValueTerminal(attrtext, 'extended-attrtext')
_validate_xtext(attrtext)
return attrtext, value
def get_extended_attribute(value):
""" [CFWS] 1*extended_attrtext [CFWS]
This is like the non-extended version except we allow % characters, so that
we can pick up an encoded value as a single string.
"""
# XXX: should we have an ExtendedAttribute TokenList?
attribute = Attribute()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
if value and value[0] in EXTENDED_ATTRIBUTE_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_extended_attrtext(value)
attribute.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
return attribute, value
def get_section(value):
""" '*' digits
The formal BNF is more complicated because leading 0s are not allowed. We
check for that and add a defect. We also assume no CFWS is allowed between
the '*' and the digits, though the RFC is not crystal clear on that.
The caller should already have dealt with leading CFWS.
"""
section = Section()
if not value or value[0] != '*':
raise errors.HeaderParseError("Expected section but found {}".format(
value))
section.append(ValueTerminal('*', 'section-marker'))
value = value[1:]
if not value or not value[0].isdigit():
raise errors.HeaderParseError("Expected section number but "
"found {}".format(value))
digits = ''
while value and value[0].isdigit():
digits += value[0]
value = value[1:]
if digits[0] == '0' and digits != '0':
        section.defects.append(errors.InvalidHeaderDefect("section number "
            "has an invalid leading 0"))
section.number = int(digits)
section.append(ValueTerminal(digits, 'digits'))
return section, value
def get_value(value):
""" quoted-string / attribute
"""
v = Value()
if not value:
raise errors.HeaderParseError("Expected value but found end of string")
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError("Expected value but found "
"only {}".format(leader))
if value[0] == '"':
token, value = get_quoted_string(value)
else:
token, value = get_extended_attribute(value)
if leader is not None:
token[:0] = [leader]
v.append(token)
return v, value
def get_parameter(value):
""" attribute [section] ["*"] [CFWS] "=" value
The CFWS is implied by the RFC but not made explicit in the BNF. This
simplified form of the BNF from the RFC is made to conform with the RFC BNF
through some extra checks. We do it this way because it makes both error
recovery and working with the resulting parse tree easier.
"""
# It is possible CFWS would also be implicitly allowed between the section
    # and the 'extended-attribute' marker (the '*'), but we've never seen that
# in the wild and we will therefore ignore the possibility.
param = Parameter()
token, value = get_attribute(value)
param.append(token)
if not value or value[0] == ';':
param.defects.append(errors.InvalidHeaderDefect("Parameter contains "
"name ({}) but no value".format(token)))
return param, value
if value[0] == '*':
try:
token, value = get_section(value)
param.sectioned = True
param.append(token)
except errors.HeaderParseError:
pass
if not value:
raise errors.HeaderParseError("Incomplete parameter")
if value[0] == '*':
param.append(ValueTerminal('*', 'extended-parameter-marker'))
value = value[1:]
param.extended = True
if value[0] != '=':
raise errors.HeaderParseError("Parameter not followed by '='")
param.append(ValueTerminal('=', 'parameter-separator'))
value = value[1:]
leader = None
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
param.append(token)
remainder = None
appendto = param
if param.extended and value and value[0] == '"':
# Now for some serious hackery to handle the common invalid case of
# double quotes around an extended value. We also accept (with defect)
# a value marked as encoded that isn't really.
qstring, remainder = get_quoted_string(value)
inner_value = qstring.stripped_value
semi_valid = False
if param.section_number == 0:
if inner_value and inner_value[0] == "'":
semi_valid = True
else:
token, rest = get_attrtext(inner_value)
if rest and rest[0] == "'":
semi_valid = True
else:
try:
token, rest = get_extended_attrtext(inner_value)
            except errors.HeaderParseError:
pass
else:
if not rest:
semi_valid = True
if semi_valid:
param.defects.append(errors.InvalidHeaderDefect(
"Quoted string value for extended parameter is invalid"))
param.append(qstring)
for t in qstring:
if t.token_type == 'bare-quoted-string':
t[:] = []
appendto = t
break
value = inner_value
else:
remainder = None
param.defects.append(errors.InvalidHeaderDefect(
"Parameter marked as extended but appears to have a "
"quoted string value that is non-encoded"))
if value and value[0] == "'":
token = None
else:
token, value = get_value(value)
if not param.extended or param.section_number > 0:
if not value or value[0] != "'":
appendto.append(token)
if remainder is not None:
assert not value, value
value = remainder
return param, value
param.defects.append(errors.InvalidHeaderDefect(
"Apparent initial-extended-value but attribute "
"was not marked as extended or was not initial section"))
if not value:
# Assume the charset/lang is missing and the token is the value.
param.defects.append(errors.InvalidHeaderDefect(
"Missing required charset/lang delimiters"))
appendto.append(token)
if remainder is None:
return param, value
else:
if token is not None:
for t in token:
if t.token_type == 'extended-attrtext':
break
            t.token_type = 'attrtext'
appendto.append(t)
param.charset = t.value
if value[0] != "'":
raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
"delimiter, but found {!r}".format(value))
appendto.append(ValueTerminal("'", 'RFC2231 delimiter'))
value = value[1:]
if value and value[0] != "'":
token, value = get_attrtext(value)
appendto.append(token)
param.lang = token.value
if not value or value[0] != "'":
raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
"delimiter, but found {}".format(value))
appendto.append(ValueTerminal("'", 'RFC2231 delimiter'))
value = value[1:]
if remainder is not None:
# Treat the rest of value as bare quoted string content.
v = Value()
while value:
if value[0] in WSP:
token, value = get_fws(value)
else:
token, value = get_qcontent(value)
v.append(token)
token = v
else:
token, value = get_value(value)
appendto.append(token)
if remainder is not None:
assert not value, value
value = remainder
return param, value
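# Illustrative sketch (not part of the original module): assuming the
# surrounding module context, get_parameter handles the three RFC 2231
# parameter shapes roughly as follows:
#
#   get_parameter("filename=foo.txt")           plain attribute/value
#   get_parameter("filename*=utf-8''f%C3%B6o")  extended value; .charset and
#                                               .lang are picked off before
#                                               the percent-encoded payload
#   get_parameter("filename*0*=utf-8''f%C3%B6") first section of a sectioned
#                                               extended value (.sectioned
#                                               and .extended both set)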
def parse_mime_parameters(value):
""" parameter *( ";" parameter )
That BNF is meant to indicate this routine should only be called after
finding and handling the leading ';'. There is no corresponding rule in
the formal RFC grammar, but it is more convenient for us for the set of
parameters to be treated as its own TokenList.
    This is a 'parse' routine because it consumes the remaining value, but it
would never be called to parse a full header. Instead it is called to
parse everything after the non-parameter value of a specific MIME header.
"""
mime_parameters = MimeParameters()
while value:
try:
token, value = get_parameter(value)
mime_parameters.append(token)
except errors.HeaderParseError as err:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
mime_parameters.append(leader)
return mime_parameters
if value[0] == ';':
if leader is not None:
mime_parameters.append(leader)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"parameter entry with no content"))
else:
token, value = get_invalid_parameter(value)
if leader:
token[:0] = [leader]
mime_parameters.append(token)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"invalid parameter {!r}".format(token)))
if value and value[0] != ';':
# Junk after the otherwise valid parameter. Mark it as
# invalid, but it will have a value.
param = mime_parameters[-1]
param.token_type = 'invalid-parameter'
token, value = get_invalid_parameter(value)
param.extend(token)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"parameter with invalid trailing text {!r}".format(token)))
if value:
# Must be a ';' at this point.
mime_parameters.append(ValueTerminal(';', 'parameter-separator'))
value = value[1:]
return mime_parameters
def _find_mime_parameters(tokenlist, value):
"""Do our best to find the parameters in an invalid MIME header
"""
while value and value[0] != ';':
if value[0] in PHRASE_ENDS:
tokenlist.append(ValueTerminal(value[0], 'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
tokenlist.append(token)
if not value:
return
tokenlist.append(ValueTerminal(';', 'parameter-separator'))
tokenlist.append(parse_mime_parameters(value[1:]))
def parse_content_type_header(value):
""" maintype "/" subtype *( ";" parameter )
    The maintype and subtype are tokens. Theoretically they could
be checked against the official IANA list + x-token, but we
don't do that.
"""
ctype = ContentType()
recover = False
if not value:
ctype.defects.append(errors.HeaderMissingRequiredValue(
"Missing content type specification"))
return ctype
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content maintype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
    # XXX: If we really want to follow the formal grammar we should make
    # maintype and subtype specialized TokenLists here. Probably not worth it.
if not value or value[0] != '/':
ctype.defects.append(errors.InvalidHeaderDefect(
"Invalid content type"))
if value:
_find_mime_parameters(ctype, value)
return ctype
ctype.maintype = token.value.strip().lower()
ctype.append(ValueTerminal('/', 'content-type-separator'))
value = value[1:]
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content subtype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
ctype.subtype = token.value.strip().lower()
if not value:
return ctype
if value[0] != ';':
ctype.defects.append(errors.InvalidHeaderDefect(
"Only parameters are valid after content type, but "
"found {!r}".format(value)))
# The RFC requires that a syntactically invalid content-type be treated
# as text/plain. Perhaps we should postel this, but we should probably
# only do that if we were checking the subtype value against IANA.
del ctype.maintype, ctype.subtype
_find_mime_parameters(ctype, value)
return ctype
ctype.append(ValueTerminal(';', 'parameter-separator'))
ctype.append(parse_mime_parameters(value[1:]))
return ctype
def parse_content_disposition_header(value):
""" disposition-type *( ";" parameter )
"""
disp_header = ContentDisposition()
if not value:
disp_header.defects.append(errors.HeaderMissingRequiredValue(
"Missing content disposition"))
return disp_header
try:
token, value = get_token(value)
except errors.HeaderParseError:
disp_header.defects.append(errors.InvalidHeaderDefect(
"Expected content disposition but found {!r}".format(value)))
_find_mime_parameters(disp_header, value)
return disp_header
disp_header.append(token)
disp_header.content_disposition = token.value.strip().lower()
if not value:
return disp_header
if value[0] != ';':
disp_header.defects.append(errors.InvalidHeaderDefect(
"Only parameters are valid after content disposition, but "
"found {!r}".format(value)))
_find_mime_parameters(disp_header, value)
return disp_header
disp_header.append(ValueTerminal(';', 'parameter-separator'))
disp_header.append(parse_mime_parameters(value[1:]))
return disp_header
def parse_content_transfer_encoding_header(value):
""" mechanism
"""
# We should probably validate the values, since the list is fixed.
cte_header = ContentTransferEncoding()
if not value:
cte_header.defects.append(errors.HeaderMissingRequiredValue(
"Missing content transfer encoding"))
return cte_header
try:
token, value = get_token(value)
except errors.HeaderParseError:
cte_header.defects.append(errors.InvalidHeaderDefect(
"Expected content transfer encoding but found {!r}".format(value)))
else:
cte_header.append(token)
cte_header.cte = token.value.strip().lower()
if not value:
return cte_header
while value:
cte_header.defects.append(errors.InvalidHeaderDefect(
"Extra text after content transfer encoding"))
if value[0] in PHRASE_ENDS:
cte_header.append(ValueTerminal(value[0], 'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
cte_header.append(token)
return cte_header
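# Hypothetical usage sketch (not in the original module): exercising the
# structured-header parsers defined above. The attribute names come from the
# code above; exact token reprs vary across Python versions.
if __name__ == '__main__':
    _ctype = parse_content_type_header('text/plain; charset="utf-8"')
    print(_ctype.maintype, _ctype.subtype)    # expected: text plain
    _disp = parse_content_disposition_header('attachment; filename=r.pdf')
    print(_disp.content_disposition)          # expected: attachment
    _cte = parse_content_transfer_encoding_header('Base64')
    print(_cte.cte)                           # expected: base64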
|
ArcherSys/ArcherSys
|
Lib/email/_header_value_parser.py
|
Python
|
mit
| 311,765
|
[
"CRYSTAL"
] |
6da25e83ae6d7d83c74598f2310b7416e4931c1388f02b022349fbc0dad9a988
|
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Turn an mmCIF file into a dictionary."""
from __future__ import print_function
from Bio._py3k import input as _input
import shlex
class MMCIF2Dict(dict):
def __init__(self, filename):
with open(filename) as handle:
loop_flag = False
key = None
tokens = self._tokenize(handle)
token = next(tokens)
self[token[0:5]]=token[5:]
for token in tokens:
if token=="loop_":
loop_flag = True
keys = []
i = 0
n = 0
continue
elif loop_flag:
if token.startswith("_"):
if i > 0:
loop_flag = False
else:
self[token] = []
keys.append(token)
n += 1
continue
else:
self[keys[i%n]].append(token)
i+=1
continue
if key is None:
key = token
else:
self[key] = token
key = None
def _tokenize(self, handle):
for line in handle:
if line.startswith("#"):
continue
elif line.startswith(";"):
token = line[1:].strip()
for line in handle:
line = line.strip()
if line==';':
break
token += line
yield token
else:
tokens = shlex.split(line)
for token in tokens:
yield token
if __name__=="__main__":
import sys
    if len(sys.argv) != 2:
        print("Usage: python MMCIF2Dict filename.")
        sys.exit()
    filename = sys.argv[1]
mmcif_dict = MMCIF2Dict(filename)
entry = ""
print("Now type a key ('q' to end, 'k' for a list of all keys):")
while(entry != "q"):
entry = _input("MMCIF dictionary key ==> ")
if entry == "q":
sys.exit()
if entry == "k":
for key in mmcif_dict:
print(key)
continue
try:
value=mmcif_dict[entry]
if isinstance(value, list):
for item in value:
print(item)
else:
print(value)
except KeyError:
print("No such key found.")
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/PDB/MMCIF2Dict.py
|
Python
|
gpl-2.0
| 2,809
|
[
"Biopython"
] |
c0cf9b0bdbf3b11abcd0709ecab0419f0f4bec55785d10ff5b610d99832bb65d
|
from aces.materials import Material
from aces.modify import get_unique_atoms
from ase import Atoms,Atom
from math import pi,sqrt,atan
import numpy as np
class structure(Material):
def set_parameters(self):
pass
def setup(self):
pass
def lmp_structure(self):
col=self.unitcell(self.laty,self.latx)
col.set_pbc([self.xp,self.yp,self.zp])
atoms=get_unique_atoms(col)
cell=atoms.cell*self.bond
atoms.set_cell(cell,scale_atoms=True)
atoms.center()
return atoms
def unitcell(self,latx,laty):
pos2=np.array([1.02325,3.54463,0
,0.34109,1.18154,0
,5.7984,4.72618,0
,5.11624,2.36309,0
,7.50382,2.95387,0
,3.41082,4.13541,0
,2.72866,1.77231,0
,8.18599,5.31696,0
,6.82166,0.59078,0
,2.04649,14.76931,0
,8.52706,14.17854,0
,5.45733,11.22468,0
,3.75191,12.997,0
,1.36433,12.40622,0
,0.68216,10.04313,0
,6.13949,13.58776,0
,3.06975,10.6339,0
,7.8449,11.81546,0
,6.48057,7.08926,0
,4.77515,8.86159,0
,7.16273,9.45235,0
,1.70542,5.90772,0
,4.09299,6.4985,0
,2.38758,8.27081,0
,0,7.68004,0
,4.43408,0,0
,7.16273,4.33233,0
,4.77515,3.74156,0
,2.38758,3.15078,0
,5.45733,6.10465,0
,3.06975,5.51387,0
,0.68216,4.92311,0
,7.8449,6.69542,0
,4.09299,1.37846,0
,1.70542,0.7877,0
,8.18599,0.19692,0
,6.48057,1.96924,0
,3.41082,14.37546,0
,1.02325,13.7847,0
,0.34109,11.42161,0
,2.72866,12.01237,0
,5.7984,14.96624,0
,5.11624,12.60315,0
,7.50382,13.19392,0
,8.52706,9.0585,0
,6.82166,10.83083,0
,3.75191,7.87696,0
,6.13949,8.46774,0
,1.36433,7.2862,0
,2.04649,9.64928,0
,0,2.56002,0
,4.43408,10.24006,0]).reshape(-1,3)/1.42
pos1=[6.928400,13.000369,0.000000
,7.794450,16.500469,0.000000]
phi=pi/2-atan((pos1[4]-pos1[1])/(pos1[3]-pos1[0]))
cbond=np.linalg.norm((pos1[4]-pos1[1],pos1[3]-pos1[0],0))
dx=sqrt(3)*cbond;
dy=3*cbond;
atoms=Atoms()
for i,coord in enumerate(pos2):
ele=['C','N'][i<26]
atom=Atom(ele,coord)
atoms.append(atom)
#atoms.rotate('z',phi)
atoms.set_cell([dx,dy,10.0])
atoms=atoms.repeat((2,2,1))
col=atoms.repeat((latx,laty,1))
return col
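# Hypothetical usage sketch (not part of the original file): aces normally
# instantiates this Material subclass itself, supplying options such as latx,
# laty, bond and the xp/yp/zp periodicity flags; standalone, the geometry
# could be inspected roughly like
#   s = structure(...)         # options supplied by the aces framework
#   atoms = s.lmp_structure()  # ASE Atoms: C/N lattice repeated latx x laty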
|
vanceeasleaf/aces
|
aces/materials/CN-rotate.py
|
Python
|
gpl-2.0
| 2,009
|
[
"ASE"
] |
e1e3502fbca6d0ac1c192ceb044a42119e66fd6b9cd80c330476cc2a3e7fa2bb
|
"""
Miscellaneous tools for the pipeline. Some may eventually be refactored into their own modules.
"""
import re
import itertools
import argparse
import pysam
import pandas as pd
import os
import procOps
from pipeline import ProcException, Procline
from cat.exceptions import ToolMissingException
from distutils.version import StrictVersion
class HashableNamespace(argparse.Namespace):
"""
Adds a __hash__ function to argparse's Namespace. Follows best practices for implementation of __hash__.
"""
def __hash__(self):
def xor(x, y):
return x ^ hash(y)
val_iter = self.__dict__.itervalues()
first = hash(val_iter.next())
return reduce(xor, val_iter, first) ^ hash(tuple(self.__dict__.values()))
class PipelineNamespace(object):
"""
A Hashable namespace that maintains knowledge of whether a member is significant and thus should be hashed.
Used to maintain information on the pipeline state but allow users to change insignificant features without forcing
the pipeline to rerun expensive modules.
"""
def __init__(self):
self.significant = {}
def set(self, name, val, significant=True):
setattr(self, name, val)
self.significant[name] = significant
def __hash__(self):
def xor(x, y):
return x ^ hash(y)
vals = tuple(name for name in self.__dict__ if name != 'significant' and self.significant[name])
val_iter = iter(vals)
first = hash(val_iter.next())
return reduce(xor, val_iter, first) ^ hash(vals)
def convert_gtf_gp(gp_target, gtf_target):
"""converts a GTF to genePred"""
cmd = ['gtfToGenePred', '-genePredExt', gtf_target.path, '/dev/stdout']
with gp_target.open('w') as outf:
procOps.run_proc(cmd, stdout=outf)
def convert_gp_gtf(gtf_target, gp_target, source='CAT'):
"""Converts a genePred to GTF"""
cmd = ['genePredToGtf', 'file', gp_target.path, '-utr', '-honorCdsStat', '-source={}'.format(source), '/dev/stdout']
with gtf_target.open('w') as outf:
procOps.run_proc(cmd, stdout=outf)
def is_exec(program):
"""checks if a program is in the global path and executable"""
if running_in_container():
# We assume containerized versions don't need to check if the
# tools are installed--they definitely are, and calling docker
# just to run "which" can be surprisingly expensive. But we do
# check for the presence of Docker, since that should take
# only a few ms.
cmd = ['which', 'docker']
pl = Procline(cmd, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null')
try:
pl.wait()
except ProcException:
raise ToolMissingException("Docker not found. Either install Docker, or install CAT's dependencies and use --binary-mode local.")
cmd = ['which', program]
try:
return procOps.call_proc_lines(cmd)[0].endswith(program)
except ProcException:
return False
def samtools_version():
"""checks the version of samtools installed"""
try:
r = procOps.call_proc_lines(['samtools', '--version'])
if StrictVersion(r[0].split()[1].split('-')[0]) < '1.3':
raise ToolMissingException('samtools version is not >= 1.3.0')
except ProcException:
raise ToolMissingException('samtools is not installed')
def is_bam(path):
"""Checks if a path is a BAMfile"""
try:
pysam.Samfile(path)
except IOError:
raise RuntimeError('Path {} does not exist'.format(path))
except ValueError:
return False
return True
def pairwise(iterable):
"""s -> (s0, s1), (s2, s3), (s4, s5), ..."""
a = iter(iterable)
return itertools.izip(a, a)
def sort_gff(input_file, output_file):
"""Sorts a GFF format file by column 1 (chromosome) then column 4(start integer)"""
cmd = [['sort', '-n', '-k4,4', input_file], ['sort', '-s', '-n', '-k5,5'], ['sort', '-s', '-k1,1']]
procOps.run_proc(cmd, stdout=output_file)
def parse_gtf_attr_line(attr_line):
"""parse a GTF attributes line"""
attr_line = [x.split(' ') for x in re.split('; +', attr_line.replace('"', ''))]
attr_line[-1][-1] = attr_line[-1][-1].rstrip().replace(';', '')
return dict(attr_line)
def parse_gff_attr_line(attr_line):
"""parse a GFF attributes line"""
attr_line = [x.split('=') for x in re.split('; *', attr_line.replace('"', ''))]
attr_line[-1][-1] = attr_line[-1][-1].rstrip().replace(';', '')
return dict(attr_line)
def slice_df(df, ix):
"""
Slices a DataFrame by an index, handling the case where the index is missing. Handles the case where a single row
is returned, thus making it a series.
"""
try:
r = df.xs(ix)
if isinstance(r, pd.core.series.Series):
return pd.DataFrame([r])
else:
return r
except KeyError:
return pd.DataFrame()
def running_in_container():
"""
Is CAT trying to run tools inside containers?
"""
return os.environ.get("CAT_BINARY_MODE") != "local"
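# Minimal sketch (assumption, not in the original module) of how
# PipelineNamespace hashing is meant to behave: members marked insignificant
# never affect the hash, so cached pipeline stages are not invalidated when
# they change.
#
#   ns = PipelineNamespace()
#   ns.set('genome', 'mm10')                   # significant -> hashed
#   ns.set('num_cores', 8, significant=False)  # excluded from __hash__
#   h = hash(ns)
#   ns.set('num_cores', 16, significant=False)
#   assert hash(ns) == h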
|
ucsc-mus-strain-cactus/Comparative-Annotation-Toolkit
|
tools/misc.py
|
Python
|
apache-2.0
| 5,114
|
[
"pysam"
] |
04fdbd58ebfa7cc1ebea4cd85a421f2950078e7b09f0aa2549d9be3999dd0010
|
import pytest
from boltons.dictutils import OMD
from boltons.iterutils import (first,
remap,
default_enter,
default_exit,
get_path)
from boltons.namedutils import namedtuple
isbool = lambda x: isinstance(x, bool)
isint = lambda x: isinstance(x, int)
odd = lambda x: isint(x) and x % 2 != 0
even = lambda x: isint(x) and x % 2 == 0
is_meaning_of_life = lambda x: x == 42
class TestFirst(object):
def test_empty_iterables(self):
"""
Empty iterables return None.
"""
s = set()
l = []
assert first(s) is None
assert first(l) is None
def test_default_value(self):
"""
Empty iterables + a default value return the default value.
"""
s = set()
l = []
assert first(s, default=42) == 42
assert first(l, default=3.14) == 3.14
l = [0, False, []]
assert first(l, default=3.14) == 3.14
def test_selection(self):
"""
Success cases with and without a key function.
"""
l = [(), 0, False, 3, []]
assert first(l, default=42) == 3
assert first(l, key=isint) == 0
assert first(l, key=isbool) is False
assert first(l, key=odd) == 3
assert first(l, key=even) == 0
assert first(l, key=is_meaning_of_life) is None
class TestRemap(object):
# TODO: test namedtuples and other immutable containers
def test_basic_clone(self):
orig = {"a": "b", "c": [1, 2]}
assert orig == remap(orig)
orig2 = [{1: 2}, {"a": "b", "c": [1, 2, {"cat": "dog"}]}]
assert orig2 == remap(orig2)
def test_empty(self):
assert [] == remap([])
assert {} == remap({})
assert set() == remap(set())
def test_unremappable(self):
obj = object()
with pytest.raises(TypeError):
remap(obj)
def test_basic_upper(self):
orig = {'a': 1, 'b': object(), 'c': {'d': set()}}
remapped = remap(orig, lambda p, k, v: (k.upper(), v))
assert orig['a'] == remapped['A']
assert orig['b'] == remapped['B']
assert orig['c']['d'] == remapped['C']['D']
def test_item_drop(self):
orig = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
even_items = remap(orig, lambda p, k, v: not (v % 2))
assert even_items == [0, 2, 4, 6, 8]
def test_noncallables(self):
with pytest.raises(TypeError):
remap([], visit='test')
with pytest.raises(TypeError):
remap([], enter='test')
with pytest.raises(TypeError):
remap([], exit='test')
def test_sub_selfref(self):
coll = [0, 1, 2, 3]
sub = []
sub.append(sub)
coll.append(sub)
with pytest.raises(RuntimeError):
# if equal, should recurse infinitely
assert coll == remap(coll)
def test_root_selfref(self):
selfref = [0, 1, 2, 3]
selfref.append(selfref)
with pytest.raises(RuntimeError):
assert selfref == remap(selfref)
selfref2 = {}
selfref2['self'] = selfref2
with pytest.raises(RuntimeError):
assert selfref2 == remap(selfref2)
def test_duperef(self):
val = ['hello']
duperef = [val, val]
remapped = remap(duperef)
assert remapped[0] is remapped[1]
assert remapped[0] is not duperef[0]
def test_namedtuple(self):
"""TODO: this fails right now because namedtuples' __new__ is
overridden to accept arguments. remap's default_enter tries
to create an empty namedtuple and gets a TypeError.
Could make it so that immutable types actually don't create a
blank new parent and instead use the old_parent as a
placeholder, creating a new one at exit-time from the value's
__class__ (how default_exit works now). But even then it would
have to *args in the values, as namedtuple constructors don't
take an iterable.
"""
Point = namedtuple('Point', 'x y')
point_map = {'origin': [Point(0, 0)]}
with pytest.raises(TypeError):
remapped = remap(point_map)
assert isinstance(remapped['origin'][0], Point)
def test_path(self):
path_map = {}
# test visit's path
target_str = 'test'
orig = [[[target_str]]]
ref_path = (0, 0, 0)
def visit(path, key, value):
if value is target_str:
path_map['target_str'] = path + (key,)
return key, value
remapped = remap(orig, visit=visit)
assert remapped == orig
assert path_map['target_str'] == ref_path
# test enter's path
target_obj = object()
orig = {'a': {'b': {'c': {'d': ['e', target_obj, 'f']}}}}
ref_path = ('a', 'b', 'c', 'd', 1)
def enter(path, key, value):
if value is target_obj:
path_map['target_obj'] = path + (key,)
return default_enter(path, key, value)
remapped = remap(orig, enter=enter)
assert remapped == orig
assert path_map['target_obj'] == ref_path
# test exit's path
target_set = frozenset([1, 7, 3, 8])
orig = [0, 1, 2, [3, 4, [5, target_set]]]
ref_path = (3, 2, 1)
def exit(path, key, old_parent, new_parent, new_items):
if old_parent is target_set:
path_map['target_set'] = path + (key,)
return default_exit(path, key, old_parent, new_parent, new_items)
remapped = remap(orig, exit=exit)
assert remapped == orig
assert path_map['target_set'] == ref_path
def test_reraise_visit(self):
root = {'A': 'b', 1: 2}
key_to_lower = lambda p, k, v: (k.lower(), v)
with pytest.raises(AttributeError):
remap(root, key_to_lower)
remapped = remap(root, key_to_lower, reraise_visit=False)
assert remapped['a'] == 'b'
assert remapped[1] == 2
def test_drop_nones(self):
orig = {'a': 1, 'b': None, 'c': [3, None, 4, None]}
ref = {'a': 1, 'c': [3, 4]}
drop_none = lambda p, k, v: v is not None
remapped = remap(orig, visit=drop_none)
assert remapped == ref
orig = [None] * 100
remapped = remap(orig, drop_none)
assert not remapped
def test_dict_to_omd(self):
def enter(path, key, value):
if isinstance(value, dict):
return OMD(), sorted(value.items())
return default_enter(path, key, value)
orig = [{'title': 'Wild Palms',
'ratings': {1: 1, 2: 3, 3: 5, 4: 6, 5: 3}},
{'title': 'Twin Peaks',
'ratings': {1: 3, 2: 2, 3: 8, 4: 12, 5: 15}}]
remapped = remap(orig, enter=enter)
assert remapped == orig
assert isinstance(remapped[0], OMD)
assert isinstance(remapped[0]['ratings'], OMD)
assert isinstance(remapped[1], OMD)
assert isinstance(remapped[1]['ratings'], OMD)
def test_sort_all_lists(self):
def exit(path, key, old_parent, new_parent, new_items):
# NB: in this case, I'd normally use *a, **kw
ret = default_exit(path, key, old_parent, new_parent, new_items)
if isinstance(ret, list):
ret.sort()
return ret
# NB: Airplane model numbers (Boeing and Airbus)
orig = [[[7, 0, 7],
[7, 2, 7],
[7, 7, 7],
[7, 3, 7]],
[[3, 8, 0],
[3, 2, 0],
[3, 1, 9],
[3, 5, 0]]]
ref = [[[0, 2, 3],
[0, 3, 5],
[0, 3, 8],
[1, 3, 9]],
[[0, 7, 7],
[2, 7, 7],
[3, 7, 7],
[7, 7, 7]]]
remapped = remap(orig, exit=exit)
assert remapped == ref
def test_collector_pattern(self):
all_interests = set()
def enter(path, key, value):
try:
all_interests.update(value['interests'])
except:
pass
return default_enter(path, key, value)
orig = [{'name': 'Kate',
'interests': ['theater', 'manga'],
'dads': [{'name': 'Chris',
'interests': ['biking', 'python']}]},
{'name': 'Avery',
'interests': ['museums', 'pears'],
'dads': [{'name': 'Kurt',
'interests': ['python', 'recursion']}]}]
ref = set(['python', 'recursion', 'biking', 'museums',
'pears', 'theater', 'manga'])
remap(orig, enter=enter)
assert all_interests == ref
def test_add_length(self):
def exit(path, key, old_parent, new_parent, new_items):
ret = default_exit(path, key, old_parent, new_parent, new_items)
try:
ret['review_length'] = len(ret['review'])
except:
pass
return ret
orig = {'Star Trek':
{'TNG': {'stars': 10,
'review': "Episodic AND deep. <3 Data."},
'DS9': {'stars': 8.5,
'review': "Like TNG, but with a story and no Data."},
'ENT': {'stars': None,
'review': "Can't review what you can't watch."}},
'Babylon 5': {'stars': 6,
'review': "Sophomoric, like a bitter laugh."},
'Dr. Who': {'stars': None,
'review': "800 episodes is too many to review."}}
remapped = remap(orig, exit=exit)
assert (remapped['Star Trek']['TNG']['review_length']
< remapped['Star Trek']['DS9']['review_length'])
def test_prepop(self):
"""Demonstrating normalization and ID addition through prepopulating
the objects wth an enter callback.
"""
base_obj = {'name': None,
'rank': None,
'id': 1}
def enter(path, key, value):
new_parent, new_items = default_enter(path, key, value)
try:
new_parent.update(base_obj)
base_obj['id'] += 1
except:
pass
return new_parent, new_items
orig = [{'name': 'Firefox', 'rank': 1},
{'name': 'Chrome', 'rank': 2},
{'name': 'IE'}]
ref = [{'name': 'Firefox', 'rank': 1, 'id': 1},
{'name': 'Chrome', 'rank': 2, 'id': 2},
{'name': 'IE', 'rank': None, 'id': 3}]
remapped = remap(orig, enter=enter)
assert remapped == ref
class TestGetPath(object):
def test_depth_one(self):
root = ['test']
assert get_path(root, (0,)) == 'test'
assert get_path(root, '0') == 'test'
root = {'key': 'value'}
assert get_path(root, ('key',)) == 'value'
assert get_path(root, 'key') == 'value'
def test_depth_two(self):
root = {'key': ['test']}
assert get_path(root, ('key', 0)) == 'test'
assert get_path(root, 'key.0') == 'test'
|
suranap/boltons
|
tests/test_iterutils.py
|
Python
|
bsd-3-clause
| 11,404
|
[
"VisIt"
] |
9d3d2b92d46df0df77bd3c91cbd858c0c44e4e81fdb92342e57fe8ca8360bea4
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import pickle
from . import dependency_check
from psi4.driver.molutil import *
from psi4.driver.inputparser import process_input
from psi4.driver.p4util.util import *
from psi4.driver.p4util.text import *
from psi4.driver.qmmm import QMMM
from psi4.driver.plugin import *
from psi4.driver import gaussian_n
from psi4.driver import aliases
from psi4.driver import diatomic
from psi4.driver import wrapper_database
from psi4.driver import wrapper_autofrag
from psi4.driver import json_wrapper
from psi4.driver.driver import *
# Single functions
from psi4.driver.driver_cbs import cbs
from psi4.driver.p4util.python_helpers import set_options, set_module_options, pcm_helper
|
mhlechner/psi4
|
psi4/driver/__init__.py
|
Python
|
gpl-2.0
| 1,624
|
[
"Psi4"
] |
19928c40539796d32a5230a6db75ebde02016870a079f29d5dabe65fb73afe1d
|
from .util import make_enum, get_value_by_version, OS_NAME
OUTPUT_FILES_LAYOUTS = make_enum(
"eplusout", # {simulation_dir_path}/eplusout.{extension}
"simu", # {simulation_dir_path}/{simulation_base_name}.{extension}
"output_simu", # {simulation_dir_path}/Output/{simulation_base_name}.{extension}
"simu_table", # {simulation_dir_path}/{simulation_base_name}Table.csv
"output_simu_table", # {simulation_dir_path}/Output/{simulation_base_name}Table.csv
"eplustbl", # {simulation_dir_path}/eplusout.csv
)
_layouts_matrix = {
"windows": {
"inputs": {
(0, 0): "simu",
(8, 2): "eplusout"
},
"table": {
(0, 0): "simu_table",
(8, 2): "eplustbl",
},
"other": {
(0, 0): "simu",
(8, 2): "eplusout"
}
},
"osx": {
"inputs": {
(0, 0): "output_simu",
(8, 2): "simu"
},
"table": {
(0, 0): "output_simu_table",
(8, 2): "eplustbl",
},
"other": {
(0, 0): "output_simu",
(8, 2): "eplusout"
}
},
"linux": {
"inputs": {
(0, 0): "output_simu",
(8, 5): "eplusout"
},
"table": {
(0, 0): "output_simu_table",
(8, 5): "eplustbl",
},
"other": {
(0, 0): "output_simu",
(8, 5): "eplusout"
}
}
}
def get_output_files_layout(output_category):
"""
Parameters
----------
output_category: str
inputs: epw, idf
table: summary table
other: other
"""
# check category
if output_category not in ("inputs", "table", "other"):
raise RuntimeError(f"unknown {output_category}")
# get version dict
layouts = _layouts_matrix[OS_NAME][output_category]
# get version
return get_value_by_version(layouts)
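# Illustrative sketch (assumption: OS_NAME == "linux" and get_value_by_version
# resolves to an EnergyPlus version >= 8.5):
#   get_output_files_layout("table")   # -> "eplustbl"
#   get_output_files_layout("inputs")  # -> "eplusout"
#   get_output_files_layout("foo")     # -> RuntimeError: unknown foo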
|
Openergy/oplus
|
oplus/compatibility/outputs.py
|
Python
|
mpl-2.0
| 1,958
|
[
"EPW"
] |
9a2c260d498c05368f3d197639bee72d0f0b45b2b6a16e0ebd1561858ea33dc0
|
# Author : Kevin Murphy(@murphyk), Aleyna Kara(@karalleyna)
# import superimport
import jax
import jax.numpy as jnp
from jax import jit, value_and_grad, tree_leaves, tree_map
from jax import random
from jax.random import split, permutation
from jax.nn import one_hot
from jax.lax import scan
import optax
from functools import partial
def generate_random_basis(key, d, D):
projection_matrix = random.normal(key, shape=(d, D))
projection_matrix = projection_matrix / jnp.linalg.norm(projection_matrix, axis=-1, keepdims=True)
return projection_matrix
@jit
def convert_params_from_subspace_to_full(params_subspace, projection_matrix, params_full_init):
return jnp.matmul(params_subspace, projection_matrix)[0] + params_full_init
def data_stream(key, X, y, batch_size):
n_data = len(X)
while True:
perm_key, key = split(key)
perm = permutation(perm_key, n_data)
num_batches, mod = divmod(n_data, batch_size)
num_batches += 1 if mod else 0
for i in range(num_batches):
batch_idx = perm[i * batch_size: min((i + 1) * batch_size, n_data)]
yield X[batch_idx], y[batch_idx]
def make_potential(key, predict_fn, dataset, batch_size, l2_regularizer):
# Return function to compute negative log joint for each minibatch
dataloader = data_stream(key, dataset["X"], dataset["y"], batch_size)
n_data = dataset["X"].shape[0]
@jit
def loglikelihood(params, x, y):
logits = predict_fn(params, x)
num_classes = logits.shape[-1]
labels = one_hot(y, num_classes)
ll = jnp.sum(labels * logits, axis=-1)
return ll
@jit
def logprior(params):
# Spherical Gaussian prior
leaves_of_params = tree_leaves(params)
return sum(tree_map(lambda p: jnp.sum(jax.scipy.stats.norm.logpdf(p, scale=l2_regularizer)), leaves_of_params))
@jit
def potential(params, data):
ll = n_data * jnp.mean(loglikelihood(params, *data))
logp = logprior(params)
return -(ll + logp)
@jit
def objective(params):
return potential(params, next(dataloader))
return objective
def make_potential_subspace(key, anchor_params_tree, predict_fn, dataset, batch_size, l2_regularizer, subspace_dim,
projection_matrix=None):
# Return function to compute negative log joint in subspace for each minibatch
anchor_params_full, flat_to_pytree_fn = jax.flatten_util.ravel_pytree(anchor_params_tree)
full_dim = len(anchor_params_full)
dataloader = data_stream(key, dataset["X"], dataset["y"], batch_size)
n_data = dataset["X"].shape[0]
if projection_matrix is None:
subspace_key, key = split(key)
projection_matrix = generate_random_basis(key, subspace_dim, full_dim)
@jit
def subspace_to_pytree_fn(params_subspace):
params_full = convert_params_from_subspace_to_full(params_subspace, projection_matrix, anchor_params_full)
params_pytree = flat_to_pytree_fn(params_full)
return params_pytree
@jit
def loglikelihood(params, x, y):
logits = predict_fn(params, x)
num_classes = logits.shape[-1]
labels = one_hot(y, num_classes)
ll = jnp.sum(labels * logits, axis=-1)
return ll
@jit
def logprior(params):
# Spherical Gaussian prior
return jnp.sum(jax.scipy.stats.norm.logpdf(params, scale=l2_regularizer))
@jit
def potential(params_sub, data):
params_pytree = subspace_to_pytree_fn(params_sub)
ll = n_data * jnp.mean(loglikelihood(params_pytree, *data))
logp = logprior(params_sub)
return -(ll + logp)
@jit
def objective(params_sub):
return potential(params_sub, next(dataloader))
return objective, subspace_to_pytree_fn
def optimize_loop(objective, initial_params, optimizer, n_steps, callback=None):
opt_state = optimizer.init(initial_params)
def train_step(carry, step):
params, opt_state = carry
loss, grads = value_and_grad(objective)(params)
updates, opt_state = optimizer.update(grads, opt_state)
params = optax.apply_updates(params, updates)
callback_result = callback(params, step) if callback is not None else None
return (params, opt_state), (loss, callback_result)
steps = jnp.arange(n_steps)
(params, _), (loss, callback_hist) = scan(train_step, (initial_params, opt_state), steps)
return params, loss, callback_hist
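# Minimal usage sketch (assumption, not in the original file): optimizing a
# toy linear model in a d=2 random subspace. predict_fn, the dataset and all
# hyperparameters below are made up for illustration.
#
#   key = random.PRNGKey(0)
#   dataset = {"X": jnp.ones((8, 3)), "y": jnp.zeros(8, dtype=jnp.int32)}
#   predict_fn = lambda params, x: x @ params["w"]   # returns logits
#   anchor = {"w": jnp.zeros((3, 2))}
#   objective, to_pytree = make_potential_subspace(
#       key, anchor, predict_fn, dataset, batch_size=4,
#       l2_regularizer=1.0, subspace_dim=2)
#   params0 = jnp.zeros((1, 2))                      # row vector in subspace
#   params, loss, _ = optimize_loop(objective, params0, optax.adam(1e-2),
#                                   n_steps=100)
#   trained_pytree = to_pytree(params)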
|
probml/pyprobml
|
old/subspace_opt_lib.py
|
Python
|
mit
| 4,515
|
[
"Gaussian"
] |
5c71fa832091c61fe787b243ad34979de251d9a710d5c3c81b073f9d77528415
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import subprocess
from TestHarnessTestCase import TestHarnessTestCase
class TestHarnessTester(TestHarnessTestCase):
def testShouldExecute(self):
"""
Test should_execute logic
"""
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.runTests('-i', 'should_execute')
e = cm.exception
self.assertRegex(e.output.decode('utf-8'), r'test_harness\.should_execute_true_ok.*?OK')
self.assertRegex(e.output.decode('utf-8'), r'test_harness\.should_execute_false_ok.*?OK')
self.assertRegex(e.output.decode('utf-8'), r'test_harness\.should_execute_true_fail.*?FAILED \(EXODIFF\)')
|
harterj/moose
|
python/TestHarness/tests/test_ShouldExecute.py
|
Python
|
lgpl-2.1
| 973
|
[
"MOOSE"
] |
11a18c252f0a88fd8e6cdc792a6c66d2b56e5223bc1f35833b2a2f0527053d94
|
# -*- coding: utf-8 -*-
"""
edacc.ranking
-------------
This module implements some possible ranking schemes that can be used
by the ranking view in the analysis module.
:copyright: (c) 2010 by Daniel Diepold.
:license: MIT, see LICENSE for details.
"""
import numpy, math
from scipy.stats.mstats import mquantiles
from itertools import izip
from sqlalchemy.sql import select, and_, functions, not_, expression, literal
from edacc import statistics
def avg_point_biserial_correlation_ranking(db, experiment, instances):
""" Ranking through comparison of the RTDs of the solvers on the instances.
    This ranking only makes sense if there were multiple runs of each
solver on each instance.
See the paper "Statistical Methodology for Comparison of SAT Solvers"
by M. Nikolić for details.
"""
instance_ids = [i.idInstance for i in instances]
table = db.metadata.tables['ExperimentResults']
c_solver_config_id = table.c['SolverConfig_idSolverConfig']
c_result_time = table.c['resultTime']
c_experiment_id = table.c['Experiment_idExperiment']
c_result_code = table.c['resultCode']
c_status = table.c['status']
c_instance_id = table.c['Instances_idInstance']
s = select([c_solver_config_id, c_instance_id, c_result_time], \
and_(c_experiment_id == experiment.idExperiment, c_instance_id.in_(instance_ids),
c_result_code.in_([1, -21, -22]),
c_status.in_([1, 21, 22]),
)) \
.select_from(table)
query_results = db.session.connection().execute(s)
solver_config_results = dict(
[(s.idSolverConfig, dict([(i, list()) for i in instance_ids])) for s in experiment.solver_configurations])
for row in query_results:
solver_config_results[row[0]][row[1]].append(row[2])
def rank_simple(vector):
return sorted(range(len(vector)), key=vector.__getitem__)
def rankdata(a):
n = len(a)
ivec = rank_simple(a)
svec = [a[rank] for rank in ivec]
sumranks = 0
dupcount = 0
newarray = [0] * n
for i in xrange(n):
sumranks += i
dupcount += 1
if i == n - 1 or svec[i] != svec[i + 1]:
averank = sumranks / float(dupcount) + 1
for j in xrange(i - dupcount + 1, i + 1):
newarray[ivec[j]] = averank
sumranks = 0
dupcount = 0
return newarray
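    # Illustrative note (not in the original module): rankdata assigns tied
    # values their average rank, e.g.
    #   rankdata([0.5, 1.2, 0.5]) == [1.5, 3.0, 1.5]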
def pointbiserialcorr(s1, s2):
""" Calculate the mean point biserial correlation of the RTDs of
the two given solvers on all instances of the experiment.
Only consider values where the statistical significance is large
enough (p-value < alpha = 0.05)
"""
from scipy import stats
        alpha = 0.05  # significance level for a statistically significant difference
d = 0.0
num = 0
for i in instance_ids:
res1 = solver_config_results[s1.idSolverConfig][i]
res2 = solver_config_results[s2.idSolverConfig][i]
ranked_data = list(rankdata(res1 + res2))
r, p = stats.pointbiserialr([1] * len(res1) + [0] * len(res2), ranked_data)
# only take instances with significant differences into account
if p < alpha:
#print str(s1), str(s2), str(i), r, p
d += r
num += 1
if num > 0:
return d / num # return mean difference
else:
return 0 # s1 == s2
def comp(s1, s2):
""" Comparator function for point biserial correlation based ranking."""
r = pointbiserialcorr(s1, s2)
if r < 0:
return 1
elif r > 0:
return -1
else:
return 0
# List of solvers sorted by their rank. Best solver first.
return list(sorted(experiment.solver_configurations, cmp=comp))
def number_of_solved_instances_ranking(db, experiment, instances, solver_configs, cost='resultTime', fixed_limit=None):
""" Ranking by the number of instances correctly solved.
    This is determined by a resultCode that starts with '1' and a 'finished' status
of a job.
"""
instance_ids = [i.idInstance for i in instances]
solver_config_ids = [i.idSolverConfig for i in solver_configs]
if not solver_config_ids: return []
table = db.metadata.tables['ExperimentResults']
table_has_prop = db.metadata.tables['ExperimentResult_has_Property']
table_has_prop_value = db.metadata.tables['ExperimentResult_has_PropertyValue']
c_solver_config_id = table.c['SolverConfig_idSolverConfig']
c_result_time = table.c['resultTime']
c_experiment_id = table.c['Experiment_idExperiment']
c_result_code = table.c['resultCode']
c_status = table.c['status']
c_instance_id = table.c['Instances_idInstance']
c_solver_config_id = table.c['SolverConfig_idSolverConfig']
if cost == 'resultTime':
cost_column = table.c['resultTime']
cost_limit_column = table.c['CPUTimeLimit']
if fixed_limit:
cost_column = expression.case([(table.c['resultTime'] > fixed_limit, fixed_limit)],
else_=table.c['resultTime'])
cost_limit_column = literal(fixed_limit)
c_result_code = expression.case([(table.c['resultTime'] > fixed_limit, literal(-21))],
else_=table.c['resultCode'])
c_status = expression.case([(table.c['resultTime'] > fixed_limit, literal(21))],
else_=table.c['status'])
elif cost == 'wallTime':
cost_column = table.c['wallTime']
cost_limit_column = table.c['wallClockTimeLimit']
if fixed_limit:
cost_column = expression.case([(table.c['wallTime'] > fixed_limit, fixed_limit)],
else_=table.c['wallTime'])
cost_limit_column = literal(fixed_limit)
c_result_code = expression.case([(table.c['wallTime'] > fixed_limit, literal(-22))],
else_=table.c['resultCode'])
c_status = expression.case([(table.c['wallTime'] > fixed_limit, literal(22))],
else_=table.c['status'])
elif cost == 'cost':
cost_column = table.c['cost']
inf = float('inf')
cost_limit_column = table.c['CPUTimeLimit']
else:
cost_column = table_has_prop_value.c['value']
inf = float('inf')
cost_limit_column = table.c['CPUTimeLimit']
results = {}
if cost in ('resultTime', 'wallTime', 'cost'):
s = select([c_solver_config_id, functions.sum(cost_column), functions.count()],
and_(c_experiment_id == experiment.idExperiment, c_result_code.like(u'1%'), c_status == 1,
c_instance_id.in_(instance_ids), c_solver_config_id.in_(solver_config_ids))) \
.select_from(table) \
.group_by(c_solver_config_id)
query_results = db.session.connection().execute(s)
for row in query_results:
results[row[0]] = (row[1], row[2])
else:
table = table.join(table_has_prop, and_(table_has_prop.c['idProperty'] == int(cost),
table_has_prop.c['idExperimentResults'] == table.c['idJob'])).join(
table_has_prop_value)
s = select([c_solver_config_id, cost_column],
and_(c_experiment_id == experiment.idExperiment, c_result_code.like(u'1%'), c_status == 1,
c_instance_id.in_(instance_ids), c_solver_config_id.in_(solver_config_ids))) \
.select_from(table)
sum_by_sc_id = dict((i, 0) for i in solver_config_ids)
count_by_sc_id = dict((i, 0) for i in solver_config_ids)
query_results = db.session.connection().execute(s)
for row in query_results:
sum_by_sc_id[row[0]] += float(row[1])
count_by_sc_id[row[0]] += 1
for i in solver_config_ids:
results[i] = (sum_by_sc_id[i], count_by_sc_id[i])
def sgn(x):
if x > 0:
return 1
elif x < 0:
return -1
else:
return 0
def comp(s1, s2):
num_solved_s1, num_solved_s2 = 0, 0
if results.has_key(s1.idSolverConfig):
num_solved_s1 = results[s1.idSolverConfig][1]
if results.has_key(s2.idSolverConfig):
num_solved_s2 = results[s2.idSolverConfig][1]
if num_solved_s1 > num_solved_s2:
return 1
elif num_solved_s1 < num_solved_s2:
return -1
else:
# break ties by cumulative cost over all solved instances
if results.has_key(s1.idSolverConfig) and results.has_key(s2.idSolverConfig):
return sgn((results[s2.idSolverConfig][0] or 0.0) - (results[s1.idSolverConfig][0] or 0.0))
else:
return 0
return list(sorted(solver_configs, cmp=comp, reverse=True))
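# Illustrative note (not in the original module): with this comparator a
# solver that solved more instances always ranks first, and ties are broken
# by the smaller cumulative cost over the solved instances, e.g.
#   solver A: 40 solved, 3500s total  -> ranks above B (same count, cheaper)
#   solver B: 40 solved, 4200s total
#   solver C: 38 solved,  900s total  -> last despite the lowest total cost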
def get_ranking_data(db, experiment, ranked_solvers, instances, calculate_par10, calculate_avg_stddev, cost,
par_factor=1, fixed_limit=None):
instance_ids = [i.idInstance for i in instances]
solver_config_ids = [s.idSolverConfig for s in ranked_solvers]
if not solver_config_ids: return [], None
max_num_runs = experiment.get_max_num_runs(db)
max_num_runs_per_solver = max_num_runs * len(instance_ids)
table = db.metadata.tables['ExperimentResults']
from_table = table
table_has_prop = db.metadata.tables['ExperimentResult_has_Property']
table_has_prop_value = db.metadata.tables['ExperimentResult_has_PropertyValue']
status_column = table.c['status']
result_code_column = table.c['resultCode']
if cost == 'resultTime':
cost_column = table.c['resultTime']
cost_property = db.ExperimentResult.resultTime
cost_limit_column = table.c['CPUTimeLimit']
if fixed_limit:
cost_column = expression.case([(table.c['resultTime'] > fixed_limit, fixed_limit)],
else_=table.c['resultTime'])
cost_limit_column = literal(fixed_limit)
status_column = expression.case([(table.c['resultTime'] > fixed_limit, literal(21))],
else_=table.c['status'])
result_code_column = expression.case([(table.c['resultTime'] > fixed_limit, literal(-21))],
else_=table.c['resultCode'])
elif cost == 'wallTime':
cost_column = table.c['wallTime']
cost_property = db.ExperimentResult.wallTime
cost_limit_column = table.c['wallClockTimeLimit']
if fixed_limit:
cost_column = expression.case([(table.c['wallTime'] > fixed_limit, fixed_limit)],
else_=table.c['wallTime'])
cost_limit_column = literal(fixed_limit)
status_column = expression.case([(table.c['wallTime'] > fixed_limit, literal(22))],
else_=table.c['status'])
result_code_column = expression.case([(table.c['wallTime'] > fixed_limit, literal(-22))],
else_=table.c['resultCode'])
elif cost == 'cost':
cost_column = table.c['cost']
cost_property = db.ExperimentResult.cost
inf = float('inf')
        cost_limit_column = table.c['CPUTimeLimit']  # doesn't matter here
else:
cost_column = table_has_prop_value.c['value']
cost_property = db.ResultPropertyValue.value
inf = float('inf')
cost_limit_column = table.c['CPUTimeLimit']
from_table = table.join(table_has_prop, and_(table_has_prop.c['idProperty'] == int(cost),
table_has_prop.c['idExperimentResults'] == table.c['idJob'])).join(
table_has_prop_value)
vbs_num_solved = 0
vbs_cumulated_cpu = 0
from sqlalchemy import func, or_, not_
property_limit = 0
if cost in ('resultTime', 'wallTime', 'cost'):
best_instance_runtimes = db.session.query(func.min(cost_property), db.ExperimentResult.Instances_idInstance) \
.filter(db.ExperimentResult.Experiment_idExperiment == experiment.idExperiment) \
.filter(result_code_column.like(u'1%')) \
.filter(db.ExperimentResult.Instances_idInstance.in_(instance_ids)) \
.filter(db.ExperimentResult.SolverConfig_idSolverConfig.in_(solver_config_ids)) \
.group_by(db.ExperimentResult.Instances_idInstance).all()
else:
s = select([cost_property, table.c['Instances_idInstance']], and_(
table.c['Experiment_idExperiment'] == experiment.idExperiment,
table.c['resultCode'].like(u'1%'),
table.c['Instances_idInstance'].in_(instance_ids),
table.c['SolverConfig_idSolverConfig'].in_(solver_config_ids)
)).select_from(from_table)
min_by_instance = dict((i, float("inf")) for i in instance_ids)
for row in db.session.connection().execute(s):
property_limit = max(property_limit, float(row[0]))
min_by_instance[row[1]] = min(min_by_instance[row[1]], float(row[0]))
best_instance_runtimes = []
for i in instance_ids:
best_instance_runtimes.append((min_by_instance[i], i))
vbs_num_solved = len(best_instance_runtimes) * max_num_runs
vbs_cumulated_cpu = sum(r[0] for r in best_instance_runtimes if r[0] is not None) * max_num_runs
vbs_median = numpy.median([r[0] for r in best_instance_runtimes if r[0] is not None])
vbs_average = numpy.average([r[0] for r in best_instance_runtimes if r[0] is not None])
best_runtime_by_instance = dict()
for bir in best_instance_runtimes:
best_runtime_by_instance[bir[1]] = float(bir[0]) if bir[0] is not None else None
#num_unsolved_instances = len(instances) - len(best_instance_runtimes)
vbs_parX = 0.0
# Virtual best solver data
data = [('Virtual Best Solver (VBS)', # name of the solver
vbs_num_solved, # number of successful runs
0.0 if max_num_runs_per_solver == 0 else
vbs_num_solved / float(max_num_runs_per_solver), # % of all runs
1.0, # % of vbs runs
vbs_cumulated_cpu, # cumulated CPU time
(0.0 if vbs_num_solved == 0 else
vbs_average),
(0.0 if vbs_num_solved == 0 else
vbs_median),
0.0, # avg stddev
0.0,
0.0,
vbs_parX
)]
# single query fetch of all/most required data
s = select([expression.label('cost', cost_column),
table.c['SolverConfig_idSolverConfig'],
table.c['Instances_idInstance']],
and_(result_code_column.like(u'1%'),
table.c['Instances_idInstance'].in_(instance_ids),
table.c['SolverConfig_idSolverConfig'].in_(solver_config_ids),
table.c['Experiment_idExperiment'] == experiment.idExperiment,
status_column == 1)).select_from(from_table)
successful_runs = db.session.connection().execute(s)
vbs_uses_solver_count = dict((id, 0) for id in solver_config_ids)
runs_by_solver_and_instance = {}
for run in successful_runs:
if not runs_by_solver_and_instance.has_key(run.SolverConfig_idSolverConfig):
runs_by_solver_and_instance[run.SolverConfig_idSolverConfig] = {}
if not runs_by_solver_and_instance[run.SolverConfig_idSolverConfig].has_key(run.Instances_idInstance):
runs_by_solver_and_instance[run.SolverConfig_idSolverConfig][run.Instances_idInstance] = []
runs_by_solver_and_instance[run.SolverConfig_idSolverConfig][run.Instances_idInstance].append(run)
if (float(run.cost) if run.cost is not None else None) == best_runtime_by_instance[run.Instances_idInstance]:
vbs_uses_solver_count[run.SolverConfig_idSolverConfig] += 1
if calculate_avg_stddev:
finished_runs_by_solver_and_instance = {}
s = select([expression.label('cost', cost_column),
table.c['SolverConfig_idSolverConfig'],
table.c['Instances_idInstance']],
and_(table.c['Instances_idInstance'].in_(instance_ids),
table.c['SolverConfig_idSolverConfig'].in_(solver_config_ids),
table.c['Experiment_idExperiment'] == experiment.idExperiment,
not_(status_column.in_((-1, 0))))).select_from(from_table)
finished_runs = db.session.connection().execute(s)
for run in finished_runs:
if not finished_runs_by_solver_and_instance.has_key(run.SolverConfig_idSolverConfig):
finished_runs_by_solver_and_instance[run.SolverConfig_idSolverConfig] = {}
if not finished_runs_by_solver_and_instance[run.SolverConfig_idSolverConfig].has_key(
run.Instances_idInstance):
finished_runs_by_solver_and_instance[run.SolverConfig_idSolverConfig][run.Instances_idInstance] = []
finished_runs_by_solver_and_instance[run.SolverConfig_idSolverConfig][run.Instances_idInstance].append(run)
failed_runs_by_solver = dict((sc.idSolverConfig, list()) for sc in ranked_solvers)
s = select([expression.label('cost', cost_column),
expression.label('cost_limit', cost_limit_column), table.c['SolverConfig_idSolverConfig']],
and_(table.c['Experiment_idExperiment'] == experiment.idExperiment,
table.c['Instances_idInstance'].in_(instance_ids),
table.c['SolverConfig_idSolverConfig'].in_(solver_config_ids),
and_(
or_(
status_column != 1,
not_(result_code_column.like(u'1%'))
),
not_(status_column.in_([-1, 0]))
)
)).select_from(from_table)
failed_runs = db.session.connection().execute(s)
for run in failed_runs:
failed_runs_by_solver[run.SolverConfig_idSolverConfig].append(run)
for solver in ranked_solvers:
if runs_by_solver_and_instance.has_key(solver.idSolverConfig):
successful_runs = [run for ilist in runs_by_solver_and_instance[solver.idSolverConfig].values() \
for run in ilist]
else:
successful_runs = []
successful_runs_sum = sum(float(j.cost) for j in successful_runs)
penalized_average_runtime = 0.0
if calculate_par10:
if len(successful_runs) + len(failed_runs_by_solver[solver.idSolverConfig]) == 0:
# this should mean there are no jobs of this solver yet
penalized_average_runtime = 0.0
else:
penalized_average_runtime = (sum([j.cost_limit * par_factor if cost in ('resultTime',
'wallTime') else experiment.costPenalty * par_factor if cost == 'cost' else property_limit * par_factor
for j in
failed_runs_by_solver[solver.idSolverConfig]]) + successful_runs_sum) \
/ (len(successful_runs) + len(failed_runs_by_solver[solver.idSolverConfig]))
par1_median_runtime = numpy.median([j.cost_limit if cost in ('resultTime', 'wallTime') else \
experiment.costPenalty if cost == 'cost' else property_limit for j in
failed_runs_by_solver[solver.idSolverConfig]] + [float(j.cost) for j in
successful_runs])
#average_runtime = numpy.average([float(j.cost) for j in successful_runs])
cumulated_par1 = sum([j.cost_limit if cost in ('resultTime', 'wallTime') else \
experiment.costPenalty if cost == 'cost' else \
property_limit for j in
failed_runs_by_solver[solver.idSolverConfig]]) + successful_runs_sum
if len(successful_runs) + len(failed_runs_by_solver[solver.idSolverConfig]) == 0:
par1 = 0.0
else:
par1 = cumulated_par1 / float((len(successful_runs) + len(failed_runs_by_solver[solver.idSolverConfig])))
avg_stddev_runtime = 0.0
avg_cv = 0.0
avg_qcd = 0.0
if calculate_avg_stddev:
count = 0
for instance in instance_ids:
if solver.idSolverConfig in finished_runs_by_solver_and_instance and \
finished_runs_by_solver_and_instance[solver.idSolverConfig].has_key(instance):
instance_runtimes = finished_runs_by_solver_and_instance[solver.idSolverConfig][instance]
runtimes = [j[0] or 0.0 for j in instance_runtimes]
stddev = numpy.std(runtimes)
avg_stddev_runtime += stddev
avg_cv += stddev / numpy.average(runtimes)
quantiles = mquantiles(runtimes, [0.25, 0.5, 0.75])
avg_qcd += (quantiles[2] - quantiles[0]) / quantiles[1]
count += 1
if count > 0:
avg_stddev_runtime /= float(count)
avg_cv /= float(count)
avg_qcd /= float(count)
data.append((
solver,
len(successful_runs),
0 if len(successful_runs) == 0 else len(successful_runs) / float(max_num_runs_per_solver),
0 if vbs_num_solved == 0 else len(successful_runs) / float(vbs_num_solved),
cumulated_par1,
par1,
par1_median_runtime,
avg_stddev_runtime,
avg_cv,
avg_qcd,
penalized_average_runtime,
))
#if calculate_par10: data.sort(key=lambda x: x[7])
return data, vbs_uses_solver_count
def ranking_from_graph(M, edges, vertices, solver_config_ids):
"""Determine the ranking of the solvers with IDs given in solver_config_ids and vertices
and the graph described by the adjacency matrix M and list of edges. Returns a list of
lists of solver config IDs. Each list holds the solvers that are ranked equally.
"""
outedges_by_node = dict((v, list()) for v in vertices)
for e in edges:
outedges_by_node[e[0]].append(e)
    indices = dict((v, -1) for v in vertices)
    lowlinks = indices.copy()
    index_counter = [0]  # shared DFS counter; a plain int would not survive the recursion
    stack = []
    connected_components = []
    def strongly_connected(v):
        indices[v] = index_counter[0]
        lowlinks[v] = index_counter[0]
        index_counter[0] += 1
        stack.append(v)
        for _, w in outedges_by_node[v]:
            if indices[w] < 0:
                strongly_connected(w)
                lowlinks[v] = min(lowlinks[v], lowlinks[w])
            elif w in stack:
                lowlinks[v] = min(lowlinks[v], indices[w])
        if indices[v] == lowlinks[v]:
            connected_components.append([])
            while stack[-1] != v:
                connected_components[-1].append(stack.pop())
            connected_components[-1].append(stack.pop())
    for v in vertices:
        if indices[v] < 0:
            strongly_connected(v)
scc_edges = set()
for comp in connected_components:
for s1 in comp:
for s2 in solver_config_ids:
if s1 == s2: continue
if M[s1][s2] == 1 and s2 not in comp:
scc_edges.add((frozenset(comp), frozenset([c for c in connected_components if s2 in c][0])))
def topological_sort():
l = []
visited = set()
s = set()
for comp in connected_components:
outgoingEdges = False
for edge in scc_edges:
if frozenset(edge[0]) == frozenset(comp): outgoingEdges = True
if not outgoingEdges:
s.add(frozenset(comp))
def visit(n):
if n not in visited:
visited.add(n)
for edge in scc_edges:
if frozenset(edge[1]) == frozenset(n):
visit(frozenset(edge[0]))
l.append(list(n))
for n in s:
visit(n)
return l
l = topological_sort()
return l
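# Illustrative sketch (assumption, not in the original module): for vertices
# {1, 2, 3} with M[1][2] = M[2][1] = 1 (no significant difference between 1
# and 2) and M[1][3] = M[2][3] = 1 (both beat 3), all other entries 0,
# ranking_from_graph collapses {1, 2} into one strongly connected component
# and sorts it above {3}: the result is the 1-2 group first, then [3]
# (order inside a group is arbitrary).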
def survival_ranking(db, experiment, instances, solver_configs, results, cost="resultTime", a=0.00, alpha=0.05):
instance_ids = [i.idInstance for i in instances]
solver_config_ids = [s.idSolverConfig for s in solver_configs]
sc_by_id = dict()
for sc in solver_configs:
sc_by_id[sc.idSolverConfig] = sc
def values_tied(v1, v2, a=a):
# Test if two values are tied, i.e. if the intervals [v1 - a*v1, v1 + a*v1]
# and [v2 - a*v2, v2 + a*v2] overlap.
if v1 > v2:
if v2 + a * v2 > v1 - a * v1:
return True
else:
if v1 + a * v1 > v2 - a * v2:
return True
return False
# build the matrix of pairwise comparisons:
    # survival_winner[(solver1, solver2)] = 0 if no significant difference
# survival_winner[(solver1, solver2)] = 1 if solver1 signif. better than solver2
# and -1 otherwise
survival_winner = dict()
p_values = dict()
tests_performed = dict()
count_values_tied = dict()
better_solver = dict()
for s1 in solver_config_ids:
for s2 in solver_config_ids:
if (s1, s2) in survival_winner: continue
p_values[(s1, s2)] = 1
p_values[(s2, s1)] = 1
tests_performed[(s1, s2)] = "-"
tests_performed[(s2, s1)] = "-"
survival_winner[(s1, s2)] = 0
survival_winner[(s2, s1)] = 0
count_values_tied[(s1, s2)] = 0
count_values_tied[(s2, s1)] = 0
better_solver[(s1, s2)] = False
better_solver[(s2, s1)] = False
if s1 == s2: continue
runs_s1 = list()
runs_s2 = list()
runs_s1_censored = list()
runs_s2_censored = list()
# list of results of s1 and s2, tied pairs are replaced by their mean
for idInstance in instance_ids:
for run1, run2 in izip(results[idInstance][s1], results[idInstance][s2]):
if values_tied(run1.penalized_time1, run2.penalized_time1):
runs_s1.append((run1.penalized_time1 + run2.penalized_time1) / 2.0)
runs_s2.append((run1.penalized_time1 + run2.penalized_time1) / 2.0)
runs_s1_censored.append(run1.censored and run2.censored)
runs_s2_censored.append(run1.censored and run2.censored)
if not run1.censored or not run2.censored:
count_values_tied[(s1, s2)] += 1
count_values_tied[(s2, s1)] += 1
else:
runs_s1.append(run1.penalized_time1)
runs_s2.append(run2.penalized_time1)
runs_s1_censored.append(run1.censored)
runs_s2_censored.append(run2.censored)
# calculate p-value of the survival-analysis hypothesis test
p_value, test_performed = statistics.surv_test(runs_s1, runs_s2, runs_s1_censored, runs_s2_censored, alpha)
p_values[(s1, s2)] = p_values[(s2, s1)] = p_value
tests_performed[(s1, s2)] = tests_performed[(s2, s1)] = test_performed
if numpy.median(runs_s1) > numpy.median(runs_s2):
better_solver[(s1, s2)] = False
better_solver[(s2, s1)] = True
else:
better_solver[(s1, s2)] = True
better_solver[(s2, s1)] = False
if p_value <= alpha:
if better_solver[(s2, s1)]:
# s2 better
survival_winner[(s1, s2)] = -1
survival_winner[(s2, s1)] = 1
else:
# s1 better
survival_winner[(s1, s2)] = 1
survival_winner[(s2, s1)] = -1
# calculate adjacency matrix and list of edges (v1, v2) of the graph
edges_surv = set()
vertices = set(solver_config_ids)
M_surv = dict((i, dict()) for i in solver_config_ids)
for s1 in solver_config_ids:
for s2 in solver_config_ids:
M_surv[s1][s2] = 0
if s1 == s2: continue
if survival_winner[(s1, s2)] == 1:
M_surv[s1][s2] = 1
edges_surv.add((s1, s2))
elif survival_winner[(s1, s2)] == -1:
M_surv[s2][s1] = 1
edges_surv.add((s2, s1))
elif survival_winner[(s1, s2)] == 0:
M_surv[s1][s2] = 1
M_surv[s2][s1] = 1
edges_surv.add((s1, s2))
edges_surv.add((s2, s1))
#M_surv[s1][s2] = 1 if survival_winner[(s1, s2)] >= 0 else 0
#if M_surv[s1][s2] == 1:
# edges_surv.add((s1, s2))
#elif M_surv[s1][s2] == 0.5:
# edges_surv.add((s1, s2))
# edges_surv.add((s2, s1))
dot_code = "digraph ranking {\n"
for sc in solver_configs:
dot_code += " " + str(sc.idSolverConfig) + ' [label="' + sc.name + '"];\n'
for e in edges_surv:
dot_code += " " + str(e[0]) + " -> " + str(e[1]) + ";\n"
dot_code += "}\n"
# find strongly connected components and sort topologically
l_surv = ranking_from_graph(M_surv, edges_surv, vertices, solver_config_ids)
for comp in l_surv:
comp_p_values = set([1]) # always include alpha 1
for sc1 in comp:
for sc2 in comp:
comp_p_values.add(p_values[(sc1, sc2)])
#print "SC:", [sc_by_id[sc].name for sc in comp], "p-values:", comp_p_values
for alpha in sorted(comp_p_values):
edges_comp = set()
M_comp = dict()
for sc1 in comp:
M_comp[sc1] = dict()
for sc2 in comp:
if sc1 == sc2:
                        M_comp[sc1][sc2] = 0
continue
M_comp[sc1][sc2] = 0
if p_values[(sc1, sc2)] <= alpha:
if better_solver[(sc1, sc2)]:
M_comp[sc1][sc2] = 1
else:
M_comp[sc1][sc2] = 0
else:
M_comp[sc1][sc2] = 1
if M_comp[sc1][sc2] == 1:
edges_comp.add((sc1, sc2))
l_comp = ranking_from_graph(M_comp, edges_comp, set(comp), comp)
if len(l_comp) == len(comp):
flattened = [i[0] for i in l_comp]
comp.sort(key=lambda x: flattened.index(x))
#print "alpha", alpha, "SCs: ", [[sc_by_id[sc].name for sc in c] for c in l_comp]
break
return [[sc_by_id[sc] for sc in comp_surv] for comp_surv in
l_surv], survival_winner, M_surv, p_values, tests_performed, dot_code, count_values_tied
def careful_ranking(db, experiment, instances, solver_configs, results, cost="resultTime", noise=1.0, break_ties=False):
instance_ids = [i.idInstance for i in instances]
solver_config_ids = [s.idSolverConfig for s in solver_configs]
sc_by_id = dict()
for sc in solver_configs:
sc_by_id[sc.idSolverConfig] = sc
alpha = math.sqrt(noise / 2.0)
raw = dict()
for s1 in solver_config_ids:
for s2 in solver_config_ids:
if (s1, s2) in raw: continue
raw[(s1, s2)] = 0
raw[(s2, s1)] = 0
if s1 == s2: continue
#print "Comparing ", sc_by_id[s1].name, " and ", sc_by_id[s2].name
for idInstance in instance_ids:
for r1, r2 in izip(results[idInstance][s1], results[idInstance][s2]):
#print (str(r1.penalized_time1) + ("+" if r1.censored else "")), "vs.", (str(r2.penalized_time1) + ("+" if r2.censored else "")),
if r1.censored and r2.censored:
#print " .. tied"
continue
elif r1.censored and not r2.censored:
#print " uncensored wins"
raw[(s2, s1)] += 1
raw[(s1, s2)] -= 1
continue
elif not r1.censored and r2.censored:
#print " uncensored wins"
raw[(s1, s2)] += 1
raw[(s2, s1)] -= 1
continue
e1 = (r1.penalized_time1 + r2.penalized_time1) / 2.0
delta = alpha * math.sqrt(e1)
#print "Tiezone: [", e1 - delta, ",", e1 + delta, "]",
if r1.penalized_time1 < e1 - delta:
#print r1.penalized_time1, "<", e1 - delta
raw[(s1, s2)] += 1
raw[(s2, s1)] -= 1
elif r2.penalized_time1 < e1 - delta:
#print r2.penalized_time1, "<", e1 - delta
raw[(s2, s1)] += 1
raw[(s1, s2)] -= 1
#else:
#print " .. tied"
edges = set()
vertices = set(solver_config_ids)
M = dict()
for s1 in solver_config_ids:
M[s1] = dict()
for s2 in solver_config_ids:
if s1 == s2:
M[s1][s2] = 0
continue
M[s1][s2] = 1 if raw[(s1, s2)] >= 0 else 0
if M[s1][s2] == 1:
edges.add((s1, s2))
#elif M[s1][s2] == 0.5:
# edges.add((s1, s2))
# edges.add((s2, s1))
l = ranking_from_graph(M, edges, vertices, solver_config_ids)
if break_ties:
tie_break = dict()
for comp in l:
for solver in comp:
tie_break[solver] = sum(raw[(solver, s_j)] for s_j in comp)
comp.sort(key=lambda sc: tie_break[sc], reverse=True)
return [[sc_by_id[sc] for sc in comp] for comp in l], raw, M
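# Illustrative usage sketch (an assumption about the calling convention; db,
# experiment, instances, solver_configs and results are provided by the EDACC
# web application's data layer):
#
#   ranking, raw, M = careful_ranking(db, experiment, instances,
#                                     solver_configs, results,
#                                     noise=1.0, break_ties=True)
#   for rank, tied_group in enumerate(ranking, 1):
#       print rank, [sc.name for sc in tied_group]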
|
EDACC/edacc_web
|
edacc/ranking.py
|
Python
|
mit
| 34,854
|
[
"VisIt"
] |
df4503802b526327ed90b576f2f1835cf10c90969fd8f6d1cfd1d12cfbe39f45
|
# encoding: utf-8
__author__ = 'Olivier Mangin <olivier.mangin@inria.fr>'
__date__ = '06/2012'
"""Module to convert frame representation as output by kinect recording
to angle and/or angle velocity histogram representations.
"""
import numpy as np
from scipy.cluster.vq import kmeans, whiten
from ..lib.transformations import (quaternion_multiply, quaternion_inverse,
euler_from_quaternion)
from ..lib.utils import delayed_velocities, meta_map
from ..lib.vector_quantization import get_histos
from ..lib.kde2d import gaussian_kde_2d
# Note: frame names from the ROS kinect seem to denote left / right from
# the observer's point of view.
ANGLES = [
('left_shoulder', 'left_elbow'),
('left_elbow', 'left_hand'),
('torso', 'left_hip'),
('left_hip', 'left_knee'),
('left_knee', 'left_foot'),
('right_shoulder', 'right_elbow'),
('right_elbow', 'right_hand'),
('torso', 'right_hip'),
('right_hip', 'right_knee'),
('right_knee', 'right_foot'),
]
def angles_indices(marker_names):
return [(marker_names.index(source), marker_names.index(dest))
for source, dest in ANGLES]
def get_angles(sample, source_frame, dest_frame):
"""Compute rotation along three basis axis between two frames
in the given sample.
    :param sample: array of translations and rotations (shape: (nb_frames, 7))
:param source_frame, dest_frame: indices of source and dest frames
"""
    # All transformations are from the base frame; to get the transformation
    # from one frame to the other, the first one needs to be inverted.
    # q = q1^{-1} * q2
# q = q1^{-1} * q2
q = quaternion_multiply(quaternion_inverse(sample[source_frame, 3:]),
sample[dest_frame, 3:])
return euler_from_quaternion(q)
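# Illustrative note (not from the original module): if the source frame holds
# the identity quaternion, q1^{-1} is also the identity, so q reduces to the
# dest frame's own rotation and get_angles(sample, s, d) is then simply
# euler_from_quaternion(sample[d, 3:]).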
def get_angle_array(sample, angles_idx):
angles = [get_angles(sample, s, d) for s, d in angles_idx]
return np.hstack(angles)
def record_to_angle_array(record, angles_idx):
return np.vstack([get_angle_array(sample, angles_idx)
for sample in record])
def db_to_list_of_angle_arrays(db):
angle_idx = angles_indices(db.marker_names)
return [record_to_angle_array(r[0], angle_idx) for r in db.records]
def db_to_angles_and_vels(db, vel_delay=1, vel_padding='zeros'):
angles = db_to_list_of_angle_arrays(db)
vels = [delayed_velocities(vel_delay, angle, padding=vel_padding)
for angle in angles]
return angles, vels
def get_bounds(vels):
min_vel = np.min(np.vstack(vels))
max_vel = np.max(np.vstack(vels))
return min_vel, max_vel
def filter_values(data, bounds):
"""Filter big values in data, according to given bounds.
:param data: numpy array
:param bounds: (min, max)
"""
cut = lambda x: np.maximum(np.minimum(x, bounds[1]), bounds[0])
return map(cut, data)
def db_to_binned_hist_matrix(db, vel_delay=1, vel_padding='zeros',
nb_bins=16, bounds=None, vel_bounds=None, rel_h=.3, fft=True):
"""Compute the histogram matrix from the database, using binned histograms
smoothed by a Gaussian kernel.
:param db:
the Database
:param vel_delay, vel_padding:
delayed velocity parameters
:param nb_bins: int,
number of bins (output dimension of histograms for a joint)
:param bounds, vel_bounds: (min, max), couples of floats
        bounds on angles and velocities; if given, the data is cut to fit
        in the bounds, else they are computed from the data.
:param rel_h: float,
relative width of the Gaussian smoother
:param fft: bool,
whether to use fft convolution (default)
:return: (nb ex, nb features) matrix
"""
    angles, vels = db_to_angles_and_vels(db, vel_delay=vel_delay,
                                         vel_padding=vel_padding)
# Angle bounds
if bounds is None:
bounds = get_bounds(angles)
else:
angles = filter_values(angles, bounds)
# Velocity bounds
if vel_bounds is None:
vel_bounds = get_bounds(vels)
else:
vels = filter_values(vels, vel_bounds)
    # Histograms are specific to each angle and corresponding velocity
# Compute Gaussian width from relative width for angles
h = rel_h * (bounds[1] - bounds[0])
# Compute gaussian width for velocities
h_vel = rel_h * (vel_bounds[1] - vel_bounds[0])
# For fair comparison with 1D hist and VQ
nb_bins_sqrt = int(np.sqrt(nb_bins))
to_gaussKDEs2 = lambda x: [ # x = (angles, vels)
gaussian_kde_2d(
np.hstack([x[0][:, dim][:, np.newaxis],
x[1][:, dim][:, np.newaxis]]),
h, h_vel, nb_bins=nb_bins_sqrt,
bounds=(np.array([bounds[0], vel_bounds[0]]),
np.array([bounds[1], vel_bounds[1]])),
fft=fft)
for dim in range(x[0].shape[1])]
kdes = map(to_gaussKDEs2, zip(angles, vels))
# Each kde is a triplet (x_grid, y_grid, bins)
# Get and flatten histograms (second element of the couple)
hist = meta_map(2, lambda x: x[2].flatten())(kdes)
data_matrix = np.vstack(map(np.hstack, hist))
return data_matrix
def compact_examples(x):
"""Vertically stack list of array and returns stacked
array and indices to un_compact it.
"""
idx = [y.shape[0] for y in x]
return np.vstack(x), list(np.cumsum(idx))
def un_compact_examples(v, idx):
return [v[i:j, :]
for i, j in zip([0] + idx[:-1], idx)]
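# Round-trip sketch (illustrative, not from the original module): for
# x = [A, B] with A.shape == (2, d) and B.shape == (3, d), compact_examples(x)
# returns (np.vstack([A, B]), [2, 5]) and un_compact_examples(v, [2, 5])
# recovers [v[0:2, :], v[2:5, :]] == [A, B].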
def db_to_VQ_hist_matrix(db, vel_delay=1, vel_padding='zeros',
nb_bins=16, bounds=None, vel_bounds=None, soft_vq=None):
"""Compute the histogram matrix from the database, using binned histograms
smoothed by a Gaussian kernel.
:param db:
the Database
:param vel_delay, vel_padding:
delayed velocity parameters
:param nb_bins: int,
number of bins (output dimension of histograms for a joint)
:param bounds, vel_bounds: (min, max), couples of floats
        bounds on angles and velocities; if given, the data is cut to fit
        in the bounds.
:param soft_vq:
if not None (default) soft vector quantization parameter.
"""
    angles, vels = db_to_angles_and_vels(db, vel_delay=vel_delay,
                                         vel_padding=vel_padding)
# Angle bounds
if bounds is not None:
angles = filter_values(angles, bounds)
# Velocity bounds
if vel_bounds is not None:
vels = filter_values(vels, vel_bounds)
# For each DOF and each example compute 2D angle-vel vects
# angles / vels => [(time, dof) for each example]
nb_dofs = angles[0].shape[1]
nb_ex = len(angles)
data = [[np.hstack([a[:, dof][:, np.newaxis],
v[:, dof][:, np.newaxis]])
for a, v in zip(angles, vels)]
for dof in range(nb_dofs)]
compacted = map(compact_examples, data)
# Whiten data for each dof
all_data = [whiten(d) for d, _ in compacted]
# Compute centroids for each DOF
centro = [kmeans(d, nb_bins, iter=20)[0] for d in all_data]
    # Compute histograms for each sample
histos = [get_histos(d, c, soft=soft_vq)
for d, c in zip(all_data, centro)]
# Group and sum by example
histos_by_ex = [un_compact_examples(h, c[1])
for h, c in zip(histos, compacted)]
ex_histos = np.array([[h.sum(axis=0) for h in hs] for hs in histos_by_ex])
# ex_histo is now (nb_dofs, nb_ex, nb_bins)
Xdata = np.swapaxes(ex_histos, 0, 1).reshape((nb_ex, nb_bins * nb_dofs))
Xdata /= Xdata.sum(axis=1)[:, np.newaxis]
return Xdata
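# Illustrative usage sketch (an assumption; `db` is a kinect recording
# database exposing `marker_names` and `records` as used above):
#
#   X_binned = db_to_binned_hist_matrix(db, nb_bins=16, rel_h=.3)
#   X_vq = db_to_VQ_hist_matrix(db, nb_bins=16)
#
# Both return a (nb examples, nb features) matrix suitable for learning.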
|
omangin/multimodal
|
multimodal/features/angle_histograms.py
|
Python
|
bsd-3-clause
| 7,555
|
[
"Gaussian"
] |
80443951a1413b5634152180f5850c3ac6c0e10071774271f7551e527f0ba199
|
## This file is part of Invenio.
## Copyright (C) 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Invenio Authorlist Data Conversion Engine. """
import time
try:
import json
except ImportError:
import simplejson as json
from xml.dom import minidom
try:
from xml.etree import ElementTree as ET
except ImportError:
import elementtree.ElementTree as ET
from invenio.legacy.webuser import page_not_authorized
from invenio.modules.access.engine import acc_authorize_action
import invenio.authorlist_config as cfg
from invenio.legacy.search_engine import perform_request_search, record_exists
from invenio.legacy.bibrecord import get_fieldvalues
from invenio.legacy.bibedit.utils import get_record
# from lxml import etree
from invenio.authorlist_dblayer import get_owner
from invenio.utils.text import escape_latex
# default name that will be used, when affiliation name is missing
UNKNOWN_AFFILIATION = 'Unknown Affiliation'
# Namespaces used in the xml file
NAMESPACES = {'cal': 'http://www.slac.stanford.edu/spires/hepnames/authors_xml/',
'foaf': 'http://xmlns.com/foaf/0.1/',
}
def retrieve_data_from_record(recid):
"""
Extract data from a record id in order to import it to the Author list
interface
"""
if not record_exists(recid):
return
output = {}
DEFAULT_AFFILIATION_TYPE = cfg.OPTIONS.AUTHOR_AFFILIATION_TYPE[0]
DEFAULT_IDENTIFIER = cfg.OPTIONS.IDENTIFIERS_LIST[0]
IDENTIFIERS_MAPPING = cfg.OPTIONS.IDENTIFIERS_MAPPING
bibrecord = get_record(recid)
try:
paper_title = get_fieldvalues(recid, '245__a')[0]
except IndexError:
paper_title = ""
try:
collaboration_name = get_fieldvalues(recid, '710__g')
except IndexError:
collaboration_name = ""
try:
experiment_number = get_fieldvalues(recid, '693__e')
except IndexError:
experiment_number = ""
record_authors = bibrecord.get('100', [])
record_authors.extend(bibrecord.get('700', []))
author_list = []
unique_affiliations = []
for i, field_instance in enumerate(record_authors, 1):
family_name = ""
given_name = ""
name_on_paper = ""
status = ""
affiliations = []
identifiers = []
field = field_instance[0]
for subfield_code, subfield_value in field:
if subfield_code == "a":
try:
family_name = subfield_value.split(',')[0]
given_name = subfield_value.split(',')[1].lstrip()
except:
pass
name_on_paper = subfield_value
elif subfield_code == "u":
affiliations.append([subfield_value, DEFAULT_AFFILIATION_TYPE])
unique_affiliations.append(subfield_value)
elif subfield_code == "i":
# FIXME This will currently work only with INSPIRE IDs
id_prefix = subfield_value.split("-")[0]
if id_prefix in IDENTIFIERS_MAPPING:
identifiers.append([subfield_value, IDENTIFIERS_MAPPING[id_prefix]])
if not identifiers:
identifiers.append(['', DEFAULT_IDENTIFIER])
if not affiliations:
affiliations.append([UNKNOWN_AFFILIATION, DEFAULT_AFFILIATION_TYPE])
unique_affiliations.append(UNKNOWN_AFFILIATION)
author_list.append([
i, # Row number
'', # Place holder for the web interface
family_name,
given_name,
name_on_paper,
status,
affiliations,
identifiers
])
unique_affiliations = list(set(unique_affiliations))
output.update({'authors': author_list})
# Generate all the affiliation related information
affiliation_list = []
for i, affiliation in enumerate(unique_affiliations, 1):
institution = perform_request_search(c="Institutions", p='110__u:"' + affiliation + '"')
full_name = affiliation
if len(institution) == 1:
full_name_110_a = get_fieldvalues(institution[0], '110__a')
if full_name_110_a:
full_name = str(full_name_110_a[0])
full_name_110_b = get_fieldvalues(institution[0], '110__b')
if full_name_110_b:
full_name += ', ' + str(full_name_110_b[0])
affiliation = [i,
'',
affiliation,
'',
full_name,
'',
True,
'']
affiliation_list.append(affiliation)
output.update({'affiliations': affiliation_list})
output.update({'paper_title': paper_title,
'collaboration': collaboration_name,
'experiment_number': experiment_number,
'last_modified': int(time.time()),
'reference_ids': [],
'paper_id': '1'})
return output
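# Illustrative output sketch (shapes derived from the function above):
#   {'authors': [[1, '', family_name, given_name, name_on_paper, status,
#                 affiliations, identifiers], ...],
#    'affiliations': [[1, '', acronym, '', full_name, '', True, ''], ...],
#    'paper_title': ..., 'collaboration': ..., 'experiment_number': ...,
#    'last_modified': <unix time>, 'reference_ids': [], 'paper_id': '1'}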
def retrieve_data_from_xml(xml):
"""
Extract data from an XML file to import it to the Author list
interface
"""
def get_element_value_helper(element, tag):
"""
Helper that takes an element and returns text from the first node
of that element
"""
text = ''
elements_list = element.getElementsByTagName(tag)
if elements_list:
child = elements_list[0].firstChild
if child:
text = child.nodeValue
return text
output = {}
    # Save the affiliations variable; the default value for the "Affiliation"
    # column will always be the first value from the type_of_affiliation table
type_of_affiliation = cfg.OPTIONS.AUTHOR_AFFILIATION_TYPE
# Save the default identifier - first element from the list of identifiers
default_identifier = cfg.OPTIONS.IDENTIFIERS_LIST[0]
# Save identifiers mapping
identifiers_mapping = cfg.OPTIONS.IDENTIFIERS_MAPPING
parsed_xml = minidom.parseString(xml)
# Extract collaboration name and experiment number
collaboration_name = ''
experiment_number = ''
collaborations = parsed_xml.getElementsByTagName('cal:collaborations')
if len(collaborations) == 1:
collaboration_name = get_element_value_helper(collaborations[0], 'foaf:name')
experiment_number = get_element_value_helper(collaborations[0], 'cal:experimentNumber')
# Extract affiliations
affiliation_list = []
affiliation_id_name = {}
affiliations = parsed_xml.getElementsByTagName('foaf:Organization')
for i, affiliation in enumerate(affiliations):
affiliation_id = affiliation.getAttribute('id') or ''
affiliation_name = get_element_value_helper(affiliation, 'foaf:name')
affiliation_acronym = get_element_value_helper(affiliation, 'cal:orgName')
if not affiliation_acronym:
# No acronym ? Use the name instead
affiliation_acronym = affiliation_name
affiliation_address = get_element_value_helper(affiliation, 'cal:orgAddress')
if not affiliation_address:
affiliation_address = affiliation_name
affiliation_domain = get_element_value_helper(affiliation, 'cal:orgDomain')
# saving {id:name}, it will be needed for authors affiliations
if affiliation_id:
# According to
# http://stackoverflow.com/questions/8214932/how-to-check-if-a-value-exists-in-a-dictionary-python
# itervalues is faster than values() and viewvalues()
if affiliation_acronym in affiliation_id_name.itervalues():
# in case we have a duplicate of acronym, make it unique by
# appending the iteration number
affiliation_acronym += str(i+1)
affiliation_id_name[affiliation_id] = affiliation_acronym
affiliation_info = [long(i+1),
'',
affiliation_acronym,
'',
affiliation_address,
affiliation_domain,
True,
'']
affiliation_list.append(affiliation_info)
# Extract authors
author_list = []
authors = parsed_xml.getElementsByTagName('foaf:Person')
for i, author in enumerate(authors):
first_name = get_element_value_helper(author, 'foaf:givenName')
# In case there was no given name under previous field, we search for initials in cal:authorNamePaperGiven
if not first_name:
first_name = get_element_value_helper(author, 'cal:authorNamePaperGiven')
last_name = get_element_value_helper(author, 'foaf:familyName')
full_name = get_element_value_helper(author, 'cal:authorNamePaper')
status = get_element_value_helper(author, 'cal:authorStatus')
# Extract author affiliations
author_affiliations = []
if author.getElementsByTagName('cal:authorAffiliations'):
for afil in author.getElementsByTagName('cal:authorAffiliations')[0].getElementsByTagName('cal:authorAffiliation'):
a_id = afil.getAttribute('organizationid')
if afil.getAttribute('connection') in type_of_affiliation:
affiliation_type = afil.getAttribute('connection')
else:
affiliation_type = type_of_affiliation[0]
author_affiliations.append([affiliation_id_name.get(a_id, UNKNOWN_AFFILIATION), affiliation_type])
else:
author_affiliations = [UNKNOWN_AFFILIATION, type_of_affiliation[0]]
identifiers = []
if author.getElementsByTagName('cal:authorids'):
for author_id in author.getElementsByTagName('cal:authorids')[0].getElementsByTagName('cal:authorid'):
if author_id.getAttribute('source') in identifiers_mapping and author_id.firstChild:
identifiers.append([
author_id.firstChild.nodeValue,
identifiers_mapping[author_id.getAttribute('source')]])
if not identifiers:
identifiers.append(['', default_identifier])
author_info = [long(i+1),
'',
last_name,
first_name,
full_name,
status,
author_affiliations,
identifiers]
author_list.append(author_info)
output.update({'authors': author_list})
output.update({'affiliations': affiliation_list})
# Add generic information about the paper
output.update({'collaboration': collaboration_name,
'experiment_number': experiment_number,
'last_modified': int(time.time()),
'reference_ids': [],
'paper_id': '1',
'paper_title': ''})
return output
def user_authorization(req, ln):
""" Check user authorization to visit page """
auth_code, auth_message = acc_authorize_action(req, 'runauthorlist')
if auth_code != 0:
referer = '/authorlist/'
return page_not_authorized(req=req, referer=referer,
text=auth_message, navmenuid="authorlist")
else:
return None
def check_user_rights(user_id, paper_id):
"""Check if user can modify this paper"""
    # If the paper_id is empty, the user is trying to create a new record;
    # we allow that, because everyone can create records.
if not paper_id or (user_id == get_owner(paper_id)):
return True
return False
class Converter(object):
CONTENT_TYPE = 'text/plain'
FILE_NAME = 'converted.txt'
def __init__(self):
raise NotImplementedError
def dump(self, data):
raise NotImplementedError
def dumps(self, data):
raise NotImplementedError
class NA62Latex(Converter):
FILE_NAME = 'la.tex'
def __init__(self):
pass
def dump(self, data):
pass
def dumps(self, data):
pass
class ElsevierArticle(Converter):
CONTENT_TYPE = 'text/plain'
FILE_NAME = 'elsarticle.tex'
cal = '{http://www.slac.stanford.edu/spires/hepnames/authors_xml/}'
foaf = '{http://xmlns.com/foaf/0.1/}'
def __init__(self):
pass
def dictionary_to_list(self, node):
res = {}
res[node.tag] = []
self.xmltodict(node, res[node.tag])
reply = {}
reply[node.tag] = {'value': res[node.tag], 'attribs': node.attrib, 'tail': node.tail}
return reply
def xmltodict(self, node, res):
rep = {}
if len(node):
for n in list(node):
rep[node.tag] = []
value = self.xmltodict(n, rep[node.tag])
if len(n):
value = {'value': rep[node.tag], 'attributes': n.attrib, 'tail': n.tail}
res.append({n.tag: value})
else:
res.append(rep[node.tag][0])
else:
            value = {'value': node.text, 'attributes': node.attrib, 'tail': node.tail}
res.append({node.tag: value})
return
def get_organizations(self, organizations):
organization_dict = dict()
for orgs_element in organizations:
key = orgs_element.keys()[0]
if key == self.foaf + 'Organization':
for name_element in orgs_element[key]['value']:
value_key = name_element.keys()[0]
if value_key == self.cal + 'orgAddress':
if name_element[value_key]['value']:
organization_dict[orgs_element[key]['attributes']['id']] = name_element[value_key]['value'].encode('utf-8')
else:
organization_dict[orgs_element[key]['attributes']['id']] = ''
break
return organization_dict
def get_authors(self, authors):
author_list = []
for auth_element in authors:
key = auth_element.keys()[0]
if key == self.foaf + 'Person':
affiliation_list = []
given_name = ''
family_name = ''
for name_element in auth_element[key]['value']:
value_key = name_element.keys()[0]
if value_key == self.foaf + 'familyName' and name_element[value_key]['value']:
family_name = name_element[value_key]['value'].encode('utf-8')
elif value_key == self.foaf + 'givenName' and name_element[value_key]['value']:
given_name = name_element[value_key]['value'].encode('utf-8')
elif value_key == self.cal + 'authorAffiliations':
for aff_element in name_element[value_key]['value']:
aff_key = aff_element.keys()[0]
if aff_key == self.cal + 'authorAffiliation':
if aff_element[aff_key]['attributes']['connection'] == 'Affiliated with':
affiliation_list.append(aff_element[aff_key]['attributes']['organizationid'])
author_list.append([(given_name, family_name), tuple(affiliation_list)])
return author_list
def dump(self, data):
AuthorsXMLConverter = Converters.get('authorsxml')
AuthorsXML = dumps(data, AuthorsXMLConverter)
root = ET.fromstring(AuthorsXML)
tree = ET.ElementTree(root)
res = self.dictionary_to_list(tree.getroot())
collaboration_author_list_values = res['collaborationauthorlist']['value']
organization_dict = dict()
author_list = []
for element in collaboration_author_list_values:
key = element.keys()[0]
# if the value of the key is empty, start next loop cycle
if element[key]['value'] is None:
continue
if key == self.cal + 'organizations':
organization_dict = self.get_organizations(element[key]['value'])
elif key == self.cal + 'authors':
author_list = self.get_authors(element[key]['value'])
clusters = []
organization_codes = []
for element in author_list:
if len(element[1]) >= 1:
organization_code = element[1][0]
other_affiliations = element[1][1:]
author = [element[0]]
if other_affiliations:
author.extend(other_affiliations)
# if this organization already exists in the cluster
if organization_code in organization_codes:
for cluster in clusters:
if cluster[0] == organization_code:
cluster.append(author)
break
else:
organization_codes.append(organization_code)
clusters.append([organization_code, author])
myout = ""
myout += "\\documentclass[a4paper,12pt]{article}\r\n"
myout += "\\usepackage[utf8]{inputenc}\r\n"
myout += "\\begin{document}\r\n"
myout += "\\begin{center}\r\n"
myout += "{\\Large Collaboration}\\\\\r\n"
myout += "\\vspace{2mm}\r\n%\r\n"
primary_output_string = ""
secondary_affiliation_count = 1
secondary_affiliations = ""
secondary_affiliations_pos = {}
for data in clusters:
primary_output = []
organization_code = data[0]
for author in data[1:]:
name = " " + str(escape_latex(author[0][0])) + '~' + str(escape_latex(author[0][1]))
if len(author) > 1:
for sec_affiliation in author[1:]:
if sec_affiliation in organization_dict.keys():
if organization_dict[sec_affiliation] in secondary_affiliations_pos.keys():
name += "$\\,$\\footnotemark[" + str(secondary_affiliations_pos[organization_dict[sec_affiliation]]) + "]"
else:
name += "$\\,$\\footnotemark[" + str(secondary_affiliation_count) + "]"
secondary_affiliations += "%\r\n\\footnotetext[" + str(secondary_affiliation_count) + "]{" + str(escape_latex(organization_dict[sec_affiliation])) + "}\r\n"
secondary_affiliations_pos[organization_dict[sec_affiliation]] = secondary_affiliation_count
secondary_affiliation_count += 1
primary_output.append(name)
if organization_dict.get(data[0]):
organization = organization_dict.get(data[0])
else:
organization = UNKNOWN_AFFILIATION
primary_output_string += ',\r\n'.join(primary_output) + " \\\\\r\n{\\em \\small " + str(escape_latex(organization)) + "} \\\\[0.2cm]\r\n%\r\n"
myout += primary_output_string
myout += "\\end{center}\r\n"
myout += "\\setcounter{footnote}{0}\r\n"
myout += secondary_affiliations
myout += "\\end{document}\r\n"
return myout
def dumps(self, data):
return self.dump(data)
class APSpaper(Converter):
CONTENT_TYPE = 'text/plain'
FILE_NAME = 'APSpaper.tex'
def __init__(self):
pass
def dump(self, data):
AuthorsXMLConverter = Converters.get('authorsxml')
AuthorsXML = dumps(data, AuthorsXMLConverter)
organizations_list = []
authors_list = []
root = ET.fromstring(AuthorsXML)
# save affiliations
for organization in root.findall('{%s}organizations/{%s}Organization' % (NAMESPACES['cal'], NAMESPACES['foaf'])):
org_id = organization.attrib['id']
org_name = ''
if organization.find('{%s}name' % NAMESPACES['foaf']) is not None:
org_name = organization.find('{%s}name' % NAMESPACES['foaf']).text or ''
organizations_list.append([org_id, org_name.encode('utf-8')])
# save authors
for author in root.findall('{%s}authors/{%s}Person' % (NAMESPACES['cal'], NAMESPACES['foaf'])):
author_name = ''
author_affiliations = []
if author.find('{%s}authorNamePaper' % NAMESPACES['cal']) is not None:
author_name = author.find('{%s}authorNamePaper' % NAMESPACES['cal']).text or ''
for affil in author.findall('{%(cal)s}authorAffiliations/{%(cal)s}authorAffiliation' % {'cal': NAMESPACES['cal']}):
author_affiliations.append(affil.attrib['organizationid'])
authors_list.append([author_name.encode('utf-8'), author_affiliations])
myout = ''
for author in authors_list:
myout += '\\author{' + str(escape_latex(author[0])) + '$^{' + ','.join(author[1]) + '}$}\r\n'
for org in organizations_list:
myout += '\\affiliation{$^{' + str(org[0]) + '}$ ' + str(escape_latex(org[1])) + '}\r\n'
return myout
def dumps(self, data):
return self.dump(data)
class AuthorsXML(Converter):
CONTENT_TYPE = 'text/xml'
FILE_NAME = 'authors.xml'
def __init__(self):
pass
def create_affiliation(self, document, parsed, organization_ids):
affiliation = document.createElement('cal:authorAffiliation')
affiliation_acronym = parsed[cfg.JSON.AFFILIATION_ACRONYM]
affiliation_status = parsed[cfg.JSON.AFFILIATION_STATUS]
if affiliation_acronym not in organization_ids:
affiliation.setAttribute('organizationid',
'Error - there is no organization called ' +
affiliation_acronym)
else:
affiliation.setAttribute('organizationid',
organization_ids[affiliation_acronym])
affiliation.setAttribute('connection', affiliation_status)
return affiliation
def create_identifier(self, document, parsed):
identifier = document.createElement('cal:authorid')
identifier_number = parsed[cfg.JSON.IDENTIFIER_NUMBER]
identifier_name = parsed[cfg.JSON.IDENTIFIER_NAME]
identifier.setAttribute('source', identifier_name)
identifier_text = document.createTextNode(identifier_number)
identifier.appendChild(identifier_text)
return identifier
def create_authors(self, document, root, parsed, organization_ids):
parsed_authors = parsed[cfg.JSON.AUTHORS_KEY]
authors = document.createElement('cal:authors')
root.appendChild(authors)
for parsed_author in parsed_authors:
author = self.create_author(document, parsed_author, organization_ids)
authors.appendChild(author)
def create_author(self, document, parsed, organization_ids):
author = document.createElement('foaf:Person')
# paper name
paper_name = document.createElement('cal:authorNamePaper')
paper_name_info = parsed[cfg.JSON.PAPER_NAME]
paper_name_text = document.createTextNode(paper_name_info)
paper_name.appendChild(paper_name_text)
author.appendChild(paper_name)
# given name
given_name_info = parsed[cfg.JSON.GIVEN_NAME]
if (cfg.EMPTY.match(given_name_info) is None):
given_name = document.createElement('foaf:givenName')
given_name_text = document.createTextNode(given_name_info)
given_name.appendChild(given_name_text)
author.appendChild(given_name)
# family name
family_name_info = parsed[cfg.JSON.FAMILY_NAME]
if (cfg.EMPTY.match(family_name_info) is None):
family_name = document.createElement('foaf:familyName')
family_name_text = document.createTextNode(family_name_info)
family_name.appendChild(family_name_text)
author.appendChild(family_name)
# status
author_status_info = parsed[cfg.JSON.STATUS]
if (author_status_info):
author_status = document.createElement('cal:authorStatus')
author_status_text = document.createTextNode(author_status_info)
author_status.appendChild(author_status_text)
author.appendChild(author_status)
# collaboration
collaboration = document.createElement('cal:authorCollaboration')
collaboration.setAttribute('collaborationid', cfg.AuthorsXML.COLLABORATION_ID)
author.appendChild(collaboration)
# affiliations
affiliations = document.createElement('cal:authorAffiliations')
author.appendChild(affiliations)
for parsed_affiliation in parsed[cfg.JSON.AFFILIATIONS]:
affiliation = self.create_affiliation(document, parsed_affiliation, organization_ids)
affiliations.appendChild(affiliation)
# identifiers
identifiers = document.createElement('cal:authorids')
author.appendChild(identifiers)
for parsed_identifier in parsed[cfg.JSON.IDENTIFIERS]:
identifier = self.create_identifier(document, parsed_identifier)
identifiers.appendChild(identifier)
return author
def create_collaboration(self, document, root, parsed):
# collaborations
collaborations = document.createElement('cal:collaborations')
collaboration = document.createElement('cal:collaboration')
collaboration.setAttribute('id', cfg.AuthorsXML.COLLABORATION_ID)
collaborations.appendChild(collaboration)
# name
name = document.createElement('foaf:name')
name_info = parsed[cfg.JSON.COLLABORATION]
name_text = document.createTextNode(name_info)
name.appendChild(name_text)
collaboration.appendChild(name)
# experiment number
experiment_number_info = parsed[cfg.JSON.EXPERIMENT_NUMBER]
if (cfg.EMPTY.match(experiment_number_info) is None):
experiment_number = document.createElement('cal:experimentNumber')
experiment_number_text = document.createTextNode(experiment_number_info)
experiment_number.appendChild(experiment_number_text)
collaboration.appendChild(experiment_number)
root.appendChild(collaborations)
def create_document(self):
dom = minidom.getDOMImplementation()
document = dom.createDocument(None, 'collaborationauthorlist', None)
root = document.documentElement
root.setAttribute('xmlns:foaf', 'http://xmlns.com/foaf/0.1/')
root.setAttribute('xmlns:cal', 'http://www.slac.stanford.edu/spires/hepnames/authors_xml/')
return document, root
def create_header(self, document, root, parsed):
# creation date
creation_date = document.createElement('cal:creationDate')
creation_date_info = time.strftime(cfg.AuthorsXML.TIME_FORMAT)
creation_date_text = document.createTextNode(creation_date_info)
creation_date.appendChild(creation_date_text)
root.appendChild(creation_date)
# publication reference
for reference_info in parsed[cfg.JSON.REFERENCE_IDS]:
reference = document.createElement('cal:publicationReference')
reference_text = document.createTextNode(reference_info)
reference.appendChild(reference_text)
root.appendChild(reference)
def create_organizations(self, document, root, parsed, ids):
parsed_organizations = parsed[cfg.JSON.AFFILIATIONS_KEY]
# organizations container
organizations = document.createElement('cal:organizations')
root.appendChild(organizations)
# create individual organizations and append them
for parsed_organization in parsed_organizations:
organization = self.create_organization(document, parsed_organization, ids)
organizations.appendChild(organization)
def create_organization(self, document, parsed, ids):
acronym = parsed[cfg.JSON.ACRONYM]
organization = document.createElement('foaf:Organization')
organization.setAttribute('id', ids[acronym])
# create the domain node if field is set
domain_info = parsed[cfg.JSON.DOMAIN]
if (cfg.EMPTY.match(domain_info) is None):
domain = document.createElement('cal:orgDomain')
domain_text = document.createTextNode(domain_info)
domain.appendChild(domain_text)
organization.appendChild(domain)
# organization name, no presence check, already done on the client side
name = document.createElement('foaf:name')
name_info = parsed[cfg.JSON.NAME]
name_text = document.createTextNode(name_info)
name.appendChild(name_text)
organization.appendChild(name)
# organization acronym
org_acronym = document.createElement('cal:orgName')
org_acronym_text = document.createTextNode(acronym)
org_acronym.appendChild(org_acronym_text)
organization.appendChild(org_acronym)
# organization identifier
org_name_info = parsed[cfg.JSON.SPIRES_ID]
if (cfg.EMPTY.match(org_name_info) is None):
org_name = document.createElement('cal:orgName')
org_name.setAttribute('source', cfg.AuthorsXML.SPIRES)
org_name_text = document.createTextNode(org_name_info)
org_name.appendChild(org_name_text)
organization.appendChild(org_name)
else:
org_name_info = parsed[cfg.JSON.NAME]
org_address = document.createElement('cal:orgAddress')
org_address_text = document.createTextNode(org_name_info)
org_address.appendChild(org_address_text)
organization.appendChild(org_address)
# membership
org_status_info = parsed[cfg.JSON.MEMBER]
if (not org_status_info):
org_status_info = cfg.AuthorsXML.NONMEMBER
else:
org_status_info = cfg.AuthorsXML.MEMBER
org_status = document.createElement('cal:orgStatus')
org_status_text = document.createTextNode(org_status_info)
org_status.appendChild(org_status_text)
organization.appendChild(org_status)
# umbrella organization/group
group_info = parsed[cfg.JSON.UMBRELLA]
if (cfg.EMPTY.match(group_info) is None):
if group_info in ids.keys():
group = document.createElement('cal:group')
group.setAttribute('with', ids[group_info])
organization.appendChild(group)
return organization
def dump(self, data):
parsed = json.loads(data)
document, root = self.create_document()
affiliations = parsed[cfg.JSON.AFFILIATIONS_KEY]
organization_ids = self.generate_organization_ids(affiliations)
self.create_header(document, root, parsed)
self.create_collaboration(document, root, parsed)
self.create_organizations(document, root, parsed, organization_ids)
self.create_authors(document, root, parsed, organization_ids)
return document
def dumps(self, data):
# FIX for toprettyxml function from website:
# http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-and-silly-whitespace/
def fixed_writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = attrs.keys()
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
minidom._write_data(writer, attrs[a_name].value)
writer.write("\"")
if self.childNodes:
if len(self.childNodes) == 1 and self.childNodes[0].nodeType == minidom.Node.TEXT_NODE:
writer.write(">")
self.childNodes[0].writexml(writer, "", "", "")
writer.write("</%s>%s" % (self.tagName, newl))
return
writer.write(">%s" % (newl))
for node in self.childNodes:
node.writexml(writer, indent + addindent, addindent, newl)
writer.write("%s</%s>%s" % (indent, self.tagName, newl))
else:
writer.write("/>%s" % (newl))
# replace minidom's function with ours
minidom.Element.writexml = fixed_writexml
# End of FIX
return self.dump(data).toprettyxml(indent=' ', newl='\r\n', encoding='utf-8')
def generate_organization_ids(self, organizations):
ids = {}
# Map each organization acronym to an id of the kind 'o[index]'
for index, organization in enumerate(organizations):
acronym = organization[cfg.JSON.ACRONYM]
ids[acronym] = cfg.AuthorsXML.ORGANIZATION_ID + str(index)
return ids
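    # Illustrative sketch (an assumption, taking cfg.AuthorsXML.ORGANIZATION_ID
    # to be 'o'): two organizations with acronyms 'CERN' and 'DESY' map to
    # {'CERN': 'o0', 'DESY': 'o1'}.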
class Converters:
__converters__ = {'authorsxml': AuthorsXML, 'elsevier': ElsevierArticle, 'aps': APSpaper}
@classmethod
def get(cls, format):
return cls.__converters__.get(format)
def dump(data, converter):
return converter().dump(data)
def dumps(data, converter):
return converter().dumps(data)
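# Illustrative usage sketch (an assumption; `record_data` as produced by
# retrieve_data_from_record above, serialized to the JSON the converters
# expect):
#
#   converter = Converters.get('authorsxml')
#   xml_string = dumps(json.dumps(record_data), converter)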
|
MSusik/invenio
|
modules/webauthorlist/lib/authorlist_engine.py
|
Python
|
gpl-2.0
| 34,357
|
[
"VisIt"
] |
3df11137e44db416d4d8207823cd208aa6ec4498d01f3394495c48da450d8074
|
# Copyright (C) 2015 Ivan Pechenezhskiy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module contains simplified interface to some specific
experiment resources. The classes defined here are intended to be used
with class Experiment from the experiment module.
"""
import os
if __file__ in [f for f in os.listdir('.') if os.path.isfile(f)]:
# This is executed when the script is loaded by the labradnode.
SCRIPT_PATH = os.path.dirname(os.getcwd())
else:
# This is executed if the script is started by clicking or
# from a command line.
SCRIPT_PATH = os.path.dirname(__file__)
LABRAD_PATH = os.path.join(SCRIPT_PATH.rsplit('LabRAD', 1)[0])
import sys
if LABRAD_PATH not in sys.path:
sys.path.append(LABRAD_PATH)
import numpy as np
import itertools
import time
from twisted.internet.error import TimeoutError
import labrad.units as units
from labrad.types import Error
import LabRAD.Servers.Instruments.GHzBoards.mem_sequences as ms
import LabRAD.Servers.Instruments.GHzBoards.auto_ghz_fpga_bringup as br
class ResourceDefinitionError(Exception):
"""Resource definition syntax error."""
pass
class ResourceInitializationError(Exception):
"""Resource initialization error."""
pass
class GHzFPGABoards(object):
"""GHz FPGA boards simplified interface."""
def __init__(self, cxn, resource):
"""
Initialize GHz FPGA boards.
Inputs:
cxn: LabRAD connection object.
resource: resource dictionary.
Output:
None.
"""
if 'Server' in resource:
self.server_name = resource['Server']
else:
self.server_name = 'GHz FPGAs'
self.server = cxn[self.server_name]
if 'Node' in resource:
self.labradnode_name = resource['Node']
else:
self.labradnode_name = ('node ' +
os.environ['COMPUTERNAME'].lower())
self.labradnode = cxn[self.labradnode_name]
# Check the boards specification.
if 'Boards' in resource:
boards = resource['Boards']
if isinstance(boards, str):
self.boards = [boards]
elif (isinstance(boards, list) and
all([isinstance(board, str) for board in boards])):
self.boards = boards
else:
raise ResourceDefinitionError("GHz FPGA boards " +
"in the resource dictionary should be " +
"specified as a string or a list of strings.")
else:
raise ResourceDefinitionError("'Boards' field is not found " +
" in the experiment resource: " + str(resource) + ".")
# Get the board settings from the resource specifications.
self.consts = {}
self.dacs = []
self.adcs = []
self.dac_settings = []
self.adc_settings = []
self._results = []
self._dual_block_warning = True
if not boards:
return
self._data_dacs = []
self._data_adcs = []
ECL_name_list = ['ECL0', 'ECL1', 'ECL2', 'ECL3']
for board in boards:
if board not in resource:
raise ResourceDefinitionError("Settings for board '" +
board + "' are not present in the experiment " +
"resource: " + str(resource) + ".")
settings = resource[board]
if board.find('DAC') != -1:
self.dacs.append(board)
for ch in ['DAC A', 'DAC B']:
if ch not in settings or settings[ch] is None:
settings[ch] = 'None'
elif not isinstance(settings[ch], str):
raise ResourceDefinitionError("Board '" +
board + "' '" + ch + "' setting" +
" should be specified either as " +
" a string or be of a None type.")
settings['Trigger'] = []
for ecl in ECL_name_list:
if ecl not in settings or settings[ecl] is None:
settings[ecl] = 'None'
elif not isinstance(settings[ecl], str):
raise ResourceDefinitionError("Board '" +
board + "' '" + ch + "' setting" +
" should be specified either as " +
" a string or be of a None type.")
if settings[ecl] == 'Trigger':
settings['Trigger'].append(ecl)
self.dac_settings.append(settings)
if 'Data' in settings and settings['Data']:
self._data_dacs.append(board)
elif board.find('ADC') != -1:
self.adcs.append(board)
self.adc_settings.append(settings)
if 'Data' in settings and settings['Data']:
self._data_adcs.append(board)
else:
raise ResourceDefinitionError("Neither 'DAC' nor 'ADC'" +
"string is found in board name '" +
board + "'.")
if self._data_dacs and self._data_adcs:
raise ResourceDefinitionError("Either DAC or ADC boards " +
"must return the data, not both.")
# Check that all DAC and ADC boards are unique.
if len(self.dacs) != len(set(self.dacs)):
raise ExperimentDefinitionError("All DAC boards must have" +
" unique names in the resource dictionary. The following" +
" DAC boards are given: " + str(dacs) + ".")
if len(self.adcs) != len(set(self.adcs)):
raise ExperimentDefinitionError("All ADC boards must have" +
" unique names in the resource dictionary. The following" +
" ADC boards are given: " + str(self.adcs) + ".")
# Check that the boards are listed on the GHz FPGA server.
p = self.server.packet()
listed_boards = (p.list_devices().send())['list_devices']
listed_boards = [board for idx, board in listed_boards]
for board in boards:
if board not in listed_boards:
raise ResourceDefinitionError("Board '" + board +
"' is not found on server '" +
self.server_name + "'.")
# Get the board build constants from the LabRAD Registry.
cxn.registry.cd(['', 'Servers', self.server_name])
if self.dacs:
consts = cxn.registry.get('dacBuild8')
for name, value in consts:
self.consts[name] = value
if self.adcs:
consts = cxn.registry.get('adcBuild1')
for name, value in consts:
self.consts[name] = value
board_groups = cxn.registry.get('boardGroups')
for board_group in board_groups:
board, server, port, group = board_group
for board_delay in group:
name_addr, delay = board_delay
name = board + ' ' + name_addr
if name in self.dacs:
k = [idx for idx, dac in enumerate(self.dacs)
if name == dac][0]
self.dac_settings[k]['CalibDelay'] = delay
if name in self.adcs:
k = [idx for idx, adc in enumerate(self.adcs)
if name == adc][0]
self.adc_settings[k]['CalibDelay'] = delay
if self._data_dacs:
try:
timeouts = cxn.registry.get('PREAMP_TIMEOUTS')
self.consts['PREAMP_TIMEOUTS'] = {time: timeout
for time, timeout in timeouts}
except:
print("'PREAMP_TIMEOUTS' key is not found in the " +
"LabRAD Registry.")
try:
dac_zero_pad_len = cxn.registry.get('DAC_ZERO_PAD_LEN')
except:
print("'DAC_ZERO_PAD_LEN' key is not found in the " +
"LabRAD Registry. It will be set to 10 ns.")
dac_zero_pad_len = 10
self.consts['DAC_ZERO_PAD_LEN'] = dac_zero_pad_len * units.ns
# Create a list that contains all requested/used waveforms.
self.requested_waveforms = [settings[ch] for settings in
self.dac_settings for ch in ['DAC A', 'DAC B']]
self._data_flag = bool(self._data_dacs) or bool(self._data_adcs)
def restart(self):
"""Restart the GHz FPGA server with the LabRAD Node."""
restarted = False
print('Recovering from a timeout...')
while True:
running_srvs = self.labradnode.running_servers()
running_srvs = [srv for prs in running_srvs for srv in prs]
if self.server_name in running_srvs:
if not restarted:
self.labradnode.restart(self.server_name)
else:
break
else:
self.labradnode.start(self.server_name)
restarted = True
time.sleep(15)
def bringup(self):
"""Bring up the GHz FPGA boards.
Input:
None.
Output:
status (boolean): true if the bring-up succeeded, false
otherwise.
"""
k = 0
failures = True
while k < 3 and failures:
k += 1
try:
successes, failures, tries = br.auto_bringup(self.server)
except:
pass
if not failures:
return True
else:
return False
def auto_recovery(self):
"""
Restart the GHz FPGA server with the LabRAD Node and
bring it up.
"""
success = False
while not success:
self.restart()
success = self.bringup()
def check_plls(self):
"""
Check the status of the FPGA internal GHz serializer PLLs.
Input:
None.
Output:
dacs2reset: list of DACs that have the PLL lost lock
since the last reset.
adcs2init: list of ADCs that have the PLL lost lock.
"""
p = self.server.packet()
for board in self.dacs + self.adcs:
p.select_device(board)
p.pll_query(key=board)
status = p.send()
dacs2reset = [dac for dac in self.dacs if status[dac]]
adcs2init = [adc for adc in self.adcs if status[adc]]
return dacs2reset, adcs2init
def reset_or_init_plls(self, dacs2reset=[], adcs2init=[]):
"""
Reset (for DACs) or initialize (for ADCs) the PLLs.
Input:
dacs2reset: list of DACs to reset.
adcs2init: list of ADCs to initialize.
Output:
None.
"""
p = self.server.packet()
# Reset the DAC FPGA internal GHz serializer PLLs.
for dac in dacs2reset:
p.select_device(dac)
p.pll_reset()
# Send the initialization sequence to the ADC PLLs.
for adc in adcs2init:
p.select_device(adc)
p.pll_init()
p.send()
def process_waveforms(self, waveforms):
"""
        Check that the specified waveforms match the waveforms defined
        in the run_once method, and get SRAMs from the waveforms.
Input:
waveforms: dictionary with the waveforms.
Output:
dac_srams: list of DAC SRAMs.
sram_length: SRAM length.
"""
ECLdata = [[],[],[],[]]
for idx, settings in enumerate(self.dac_settings):
for channel in ['DAC A', 'DAC B']:
if self.dac_settings[idx][channel] not in waveforms:
raise ResourceDefinitionError("'" +
str(self.dacs[idx]) +
"' '" + str(channel) + "' waveform setting: '" +
self.dac_settings[idx][channel] +
"' is not recognized.")
ecld = {}
trig = None
for ecl in ['ECL0', 'ECL1', 'ECL2', 'ECL3']:
if ecl in self.dac_settings[idx] and ecl not in self.dac_settings[idx]['Trigger']:
if self.dac_settings[idx][ecl] not in waveforms:
raise ResourceDefinitionError("'" +
str(self.dacs[idx]) +
"' '" + str(ecl) + "' waveform setting: '" +
self.dac_settings[idx][ecl] +
"' is not recognized.")
ecld[ecl] = waveforms[self.dac_settings[idx][ecl]]
else:
ecld[ecl] = []
ECLdata[idx] = ms.waves2ECL(ecld, trigs=self.dac_settings[idx]['Trigger'])
dac_srams = [ms.waves2sram(waveforms[self.dac_settings[k]['DAC A']],
waveforms[self.dac_settings[k]['DAC B']],
ECLdata[k])
for k, dac in enumerate(self.dacs)]
return dac_srams, waveforms[self.dac_settings[0]['DAC A']].size
def init_mem_lists(self):
"""
Initialize memory command sequences. The output is a list
with the length that is equal to the number of the DAC boards.
Each list item is a MemSequence object. The MemSequence methods
are described in Servers.Instruments.GHzBoards.mem_sequences.
Input:
None.
Output:
mem_seqs: list of memory command lists.
"""
mem_seqs = [ms.MemSequence() for dac in self.dacs]
for idx, settings in enumerate(self.dac_settings):
if 'FO1 FastBias Firmware Version' in settings:
mem_seqs[idx].firmware(channel=1,
version=settings['FO1 FastBias Firmware Version'])
if 'FO2 FastBias Firmware Version' in settings:
mem_seqs[idx].firmware(channel=2,
version=settings['FO2 FastBias Firmware Version'])
return mem_seqs
def get_adc(self, adc=None):
"""
If only a single ADC board is present, return its name. If more
than one board is present, check that the board with the
specified name exists, otherwise raise an error.
Return the board index as a second parameter.
Input:
adc (optional): ADC board name (default: None).
Output:
adc: ADC board name.
"""
if len(self.adcs) == 1:
return self.adcs[0], 0
elif adc is None:
raise Exception("The ADC board name should be explicitly " +
"specified since more than one ADC board is present.")
elif adc not in self.adcs:
raise Exception("ADC board '" + str(adc) + "' is not found.")
return adc, self.adcs.index(adc)
def set_adc_setting(self, setting, value, adc=None):
"""
Change an ADC setting.
Inputs:
setting: name of setting you want to change.
value: value to change the setting to.
adc: ADC board name. If None and only one board is
detected, the board name will be automatically
recognized.
Output:
None.
"""
adc, idx = self.get_adc(adc)
if setting in self.adc_settings[idx]:
self.adc_settings[idx][setting] = value
else:
raise Exception("'" + str(setting) +
"' is not a valid ADC setting.")
def get_adc_setting(self, setting, adc=None):
"""
Get an ADC setting.
Inputs:
            setting: name of the setting to read.
            adc: ADC board name. If None and only one board is
                present, the board name will be automatically recognized.
Output:
value: value of the ADC setting.
"""
adc, idx = self.get_adc(adc)
if setting in self.adc_settings[idx]:
return self.adc_settings[idx][setting]
else:
raise Exception("'" + str(setting) +
"' is not a valid ADC setting.")
def load_dacs(self, sram, memory):
"""Load DACs with Memory commands and SRAM."""
for k, dac in enumerate(self.dacs):
p = self.server.packet()
p.select_device(dac)
p.memory(memory[k])
p.start_delay(self.dac_settings[k]['CalibDelay'])
# Handle dual block calls here, in a different way than Sank
# did. This should be compatible.
if len(sram[k]) > self.consts['SRAM_LEN']:
if self._dual_block_warning:
print('DACs are set to the dual block mode.')
self._dual_block_warning = False
# Shove last chunk of SRAM into BLOCK1, be sure this can
# contain what you need it to contain.
sram1 = sram[k][-self.consts['SRAM_BLOCK1_LEN']:]
# Amount of SRAM that's extra.
sram_diff = len(sram[k]) - self.consts['SRAM_LEN']
# Calculate the number of the delay blocks:
# sram_diff = x * 'SRAM_DELAY_LEN' + y.
x, y = divmod(sram_diff, self.consts['SRAM_DELAY_LEN'])
if y == 0:
delay_blocks = x
else:
delay_blocks = x + 1 # Overshoot.
sram0 = sram[k][:(self.consts['SRAM_BLOCK0_LEN'] + sram_diff -
delay_blocks * self.consts['SRAM_DELAY_LEN'])]
if len(set(sram[k][(self.consts['SRAM_BLOCK0_LEN'] + sram_diff -
delay_blocks * self.consts['SRAM_DELAY_LEN']) -
4:len(sram[k]) - self.consts['SRAM_BLOCK1_LEN']])) != 1:
# Ensure that the delay block is constant.
raise Exception('Dual block mode will not work for ' +
'the requested pulse sequence.')
p.sram_dual_block(sram0, sram1, delay_blocks *
self.consts['SRAM_DELAY_LEN'])
else:
self._dual_block_warning = True
p.sram(sram[k])
self._results.append(p.send(wait=False))
def load_adcs(self):
"""Load ADCs with correct variables."""
for idx, adc in enumerate(self.adcs):
p = self.server.packet()
p.select_device(adc)
p.start_delay(int((self.adc_settings[idx]['ADCDelay']['ns']) / 4) +
self.adc_settings[idx]['CalibDelay'])
p.adc_run_mode(self.adc_settings[idx]['RunMode'])
if 'FilterStretchLen' in self.adc_settings[idx]:
stretch_len = int(self.adc_settings[idx]['FilterStretchLen']['ns'])
else:
stretch_len = 0
if 'FilterStretchAt' in self.adc_settings[idx]:
stretch_at = int(self.adc_settings[idx]['FilterStretchAt']['ns'])
else:
stretch_at = 0
p.adc_filter_func(self.filter_bytes(self.adc_settings[idx]),
stretch_len, stretch_at)
dPhi = int(self.adc_settings[idx]['DemodFreq']['Hz'] / 7629)
phi0 = int(self.adc_settings[idx]['DemodPhase']['rad'] * (2**16))
for k in range(self.consts['DEMOD_CHANNELS']):
p.adc_demod_phase(k, dPhi, phi0)
p.adc_trig_magnitude(k, self.adc_settings[idx]['DemodSinAmp'],
self.adc_settings[idx]['DemodCosAmp'])
self._results.append(p.send(wait=False))
def filter_bytes(self, settings):
"""Set the ADC filter for a specific experiment."""
# ADC collects at a 2 ns acquisition rate, but the
# filter function must have a 4 ns resolution.
filter_func = settings['FilterType'].lower()
filter_len = int(settings['FilterLength']['ns'])
window_len = filter_len / 4
if 'FilterStartAt' in settings:
start_len = int(settings['FilterStartAt']['ns'] / 4)
else:
start_len = 0
if filter_func == 'square':
env = np.full(window_len, 128.)
elif filter_func == 'gaussian':
env = np.linspace(-.5 * filter_len, .5 * filter_len, window_len)
env = np.floor(128 *
np.exp(-(env / (2 * settings['FilterWidth']['ns']))**2))
elif filter_func == 'hann':
env = np.linspace(0, window_len - 1, window_len)
env = np.floor(128 * np.sin(np.pi * env / (window_len - 1))**2)
elif filter_func == 'exp':
env = np.linspace(0, 4 * (window_len - 1), window_len)
env = np.floor(128 * np.exp(-env / settings['FilterWidth']['ns']))
else:
raise Exception('Filter function %s not recognized.'
%filter_func)
filt = np.hstack([np.zeros(start_len), env,
np.zeros(self.consts['FILTER_LEN'] - len(env) - start_len)])
return np.array(filt, dtype='<u1').tostring()
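        # Illustrative sketch (an assumption, not from the original module):
        # with FilterType 'square', FilterLength 400 ns and FilterStartAt
        # 40 ns, the envelope is 100 samples of value 128 starting at filter
        # index 10 (one filter sample per 4 ns), zero-padded up to the board
        # constant FILTER_LEN.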
def load(self, dac_srams, dac_mems):
"""
Load FPGA boards with the required memory and settings. Input
arguments, dac_srams and dac_mems should be lists
which correspond to the SRAM and Memory in the order in which
the DACs are defined in the experiment resource dictionary.
The first listed DAC is always assumed to be the master.
Inputs:
sram: list of DAC SRAM waves. Use
ghz_self.boards_control.waves2sram method to get the right
format.
memory: list of memory commands. Use memory tools in
DAC_control to build a memory sequence.
Output:
run_data: returns the result of the self.boards.run_sequence
command.
"""
if len(dac_mems) != len(dac_srams):
raise Exception('Not enough memory commands to ' +
'populate the boards!')
# Save the command sequences in case the recovery will be
# attempted.
self._dac_srams = dac_srams
self._dac_mems = dac_mems
self.load_dacs(dac_srams, dac_mems)
if self._data_adcs:
p = self.server.packet()
# Determine which set of boards to run, not the order.
p.daisy_chain(list(itertools.chain(*[self.dacs, self.adcs])))
timing_order_list = []
for adc in self._data_adcs:
if (self.get_adc_setting('RunMode', adc).lower() ==
'average'):
timing_order_list.append(adc)
elif (self.get_adc_setting('RunMode', adc).lower() ==
'demodulate'):
# Record channel 0.
timing_order_list.append(adc + '::0')
else:
raise ResourceDefinitionError("ADC board '" +
adc + "' 'RunMode' setting " +
"should be either 'average'" +
" or 'demodulate'.")
for dac in self._data_dacs:
timing_order_list.append(dac)
p.timing_order(timing_order_list)
self._results.append(p.send(wait=False))
self.load_adcs()
elif self._data_dacs:
p = self.server.packet()
p.daisy_chain(list(itertools.chain(*[self.dacs])))
p.timing_order(self._data_dacs)
self._results.append(p.send(wait=False))
# Ensure that all packets are sent.
for result in self._results:
result.wait()
self._results = []
def run(self, reps=1020):
"""
Execute the run sequence a set number of times.
Input:
reps: number of repetitions in the sequence (default: 1020).
Output:
run_data: returns the result of the self.boards.run_sequence
command.
"""
while True:
try:
result = self.server.run_sequence(int(reps), self._data_flag)
dacs2reset, adcs2init = self.check_plls()
                # Apparently, the ADC PLL query always returns True...
if dacs2reset:
self.reset_or_init_plls(dacs2reset, adcs2init)
# Reload the boards, just in case.
self.load(self._dac_srams, self._dac_mems)
else:
return result
except (Error, TimeoutError):
# self.auto_recovery()
# Restart the GHz FPGA Server and reload the boards.
self.restart()
self.load(self._dac_srams, self._dac_mems)
except:
import sys
print(sys.exc_info()[0])
raise
def load_and_run(self, dac_srams, dac_mems, reps=1020):
"""
Load FPGA boards with the required memory and settings, and
execute the run sequence a set number of times. This method
should be called at the end of each run_once. Input arguments,
dac_srams and dac_mems should be lists which correspond to the
SRAM and Memory in the order in which the DACs are defined in
the experiment resource dictionary. The first listed DAC is
always assumed to be the master.
Inputs:
sram: list of DAC SRAM waves. Use
ghz_self.boards_control.waves2sram method to get the
right format.
memory: list of memory commands. Use memory tools in
DAC_control to build a memory sequence.
reps: number of repetitions in the sequence (default: 1020).
Output:
run_data: returns the result of the self.boards.run_sequence
command.
"""
self.load(dac_srams, dac_mems)
return self.run(reps)
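# A hedged usage sketch (not part of the original module), assuming an
# instance `boards` of the class above and SRAM/memory sequences that
# were already built with the GHz boards control and DAC_control
# tools. The names `boards`, `srams`, and `mems` are placeholders.
def _example_load_and_run(boards, srams, mems):
    """Load the DAC boards and execute 1020 repetitions."""
    # load_and_run saves the sequences internally so that run() can
    # reload the boards after a PLL reset or a server restart.
    return boards.load_and_run(srams, mems, reps=1020)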
class BasicInterface(object):
"""
Basic interface class.
"""
def __init__(self, cxn, res, var):
"""
Initialize a resource.
Input:
cxn: LabRAD connection object.
res: resource dictionary.
var: name of the variable.
Output:
None.
"""
self._res = res
self._var = var
self._setting = None
self._request_sent = False
self.server = self._get_server(cxn)
try:
self._init_resource()
except:
            raise ResourceInitializationError('Resource ' +
                    str(self._res) + ' could not be initialized.')
def __exit__(self, type, value, traceback):
"""Properly exit the resource."""
pass
def _get_server(self, cxn):
"""Get a server connection object."""
if 'Server' in self._res:
try:
return cxn[self._res['Server']]
except:
raise ResourceDefinitionError("Could not connect to " +
"server '" + str(self._res['Server']) + "'.")
else:
raise ResourceDefinitionError("Key 'Server' is not found" +
" in resource: " + str(self._res) + ".")
def _init_resource(self):
"""Device specific initialization."""
if ('Variables' in self._res and
self._var in self._res['Variables'] and
isinstance(self._res['Variables'], dict) and
'Setting' in self._res['Variables'][self._var]):
self._setting = self._res['Variables'][self._var]['Setting']
def send_request(self, value=None):
"""Send a request."""
p = self.server.packet()
if self._setting is not None:
p[self._setting](value)
self._result = p.send(wait=False)
self._request_sent = True
def acknowledge_request(self):
"""Wait for the result of a non-blocking request."""
if self._request_sent:
self._request_sent = False
if self._setting is not None:
return self._result.wait()[self._setting]
else:
return self._result.wait()
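# A minimal sketch of the non-blocking request pattern used throughout
# this module (not part of the original code). The resource dictionary
# is hypothetical; a real resource must name an existing LabRAD server
# and setting.
def _example_basic_interface(cxn):
    """Send a request and collect its result asynchronously."""
    res = {'Server': 'Example Server',                # hypothetical
           'Variables': {'Example Variable':
                   {'Setting': 'Example Setting'}}}   # hypothetical
    iface = BasicInterface(cxn, res, 'Example Variable')
    iface.send_request()        # fire the request without blocking
    return iface.acknowledge_request()  # wait for the result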
class GPIBInterface(BasicInterface):
"""
Simplified GPIB interface class.
"""
def __exit__(self, type, value, traceback):
"""Deselect a device."""
if hasattr(self, 'address'):
p = self.server.packet()
p.deselect_device().send()
def _init_resource(self):
"""Initialize a GPIB resource."""
p = self.server.packet()
devices = (p.list_devices().send())['list_devices']
        devices = [dev for _, dev in devices]
if 'Address' in self._res:
if self._res['Address'] in devices:
self.address = self._res['Address']
else:
raise ResourceDefinitionError("Device with address '" +
str(self._res['Address']) + "' is not found.")
elif len(devices) == 1:
self.address = devices[0]
else:
raise ResourceDefinitionError("'Address' field is absent " +
" in the experiment resource: " +
str(self._res) + ".")
if len(devices) == 1:
self._single_device = True
p = self.server.packet()
p.select_device(self.address).send()
else:
self._single_device = False
self._init_gpib_resource()
def _init_gpib_resource(self):
"""Variable specific initialization."""
pass
def send_request(self, value=None):
"""Send a request to set a setting."""
p = self.server.packet()
if not self._single_device:
p.select_device(self.address)
if self._setting is not None:
p[self._setting](value)
self._result = p.send(wait=False)
self._request_sent = True
class RFGenerator(GPIBInterface):
"""
GPIB RF generator simplified interface.
"""
def __exit__(self, type, value, traceback):
"""Turn the RF generator off and deselect it."""
if hasattr(self, 'address'):
p = self.server.packet()
p.select_device(self.address).output(False).deselect_device().send()
def _get_server(self, cxn):
"""Get server connection object."""
if 'Server' in self._res:
server_name = self._res['Server']
else:
server_name = 'GPIB RF Generators'
try:
return cxn[server_name]
except:
raise ResourceDefinitionError("Could not connect to " +
"server '" + server_name + "'.")
def _init_gpib_resource(self):
"""Initialize an RF generator."""
if ('Variables' in self._res and
self._var in self._res['Variables'] and
isinstance(self._res['Variables'], dict) and
'Setting' in self._res['Variables'][self._var]):
self._setting = self._res['Variables'][self._var]['Setting']
elif self._var.lower().find('freq') != -1:
self._setting = 'Frequency'
elif self._var.lower().find('power') != -1:
self._setting = 'Power'
else:
raise ResourceDefinitionError("Setting responsible for " +
"variable '" + self._var + "' is not specified " +
"in the experiment resource: " +
str(self._res) + ".")
p = self.server.packet()
if not self._single_device:
p.select_device(self.address)
p.reset().send()
self._output_set = False
def send_request(self, value=None):
"""Send a setting request."""
p = self.server.packet()
if not self._single_device:
p.select_device(self.address)
p[self._setting](value)
if value is not None and not self._output_set:
p.output(True)
self._output_set = True
self._result = p.send(wait=False)
self._request_sent = True
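# A hedged usage sketch for the RF generator interface (not part of
# the original module). The GPIB address, variable name, and 6 GHz
# value are hypothetical; note that send_request also switches the RF
# output on the first time a value is set.
def _example_rf_generator(cxn):
    """Set an RF generator frequency through the interface above."""
    res = {'Server': 'GPIB RF Generators',
           'Address': 'GPIB0::19::INSTR',             # hypothetical
           'Variables': {'RF Frequency': {}}}
    rf = RFGenerator(cxn, res, 'RF Frequency')  # name implies 'Frequency'
    rf.send_request(6 * units.GHz)
    rf.acknowledge_request()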
class SIM928VoltageSource(GPIBInterface):
"""
SRS SIM928 voltage source simplified interface.
"""
def __exit__(self, type, value, traceback):
"""Turn the voltage source off and deselect it."""
if hasattr(self, 'address'):
p = self.server.packet()
p.select_device(self.address).voltage(0 * units.V).deselect_device().send()
def _get_server(self, cxn):
"""Get server connection object."""
if 'Server' in self._res:
server_name = self._res['Server']
else:
server_name = 'SIM928'
try:
return cxn[server_name]
except:
raise ResourceDefinitionError("Could not connect to " +
"server '" + server_name + "'.")
def _init_gpib_resource(self):
"""Initialize a voltage source."""
self._output_set = False
def send_request(self, voltage=None):
"""Send a request to set/get the output voltage."""
p = self.server.packet()
if not self._single_device:
p.select_device(self.address)
p.voltage(voltage)
if voltage is not None and not self._output_set:
p.output(True)
self._output_set = True
self._result = p.send(wait=False)
self._request_sent = True
class NetworkAnalyzer(GPIBInterface):
"""
Network analyzer simplified interface.
"""
def _get_server(self, cxn):
"""Get server connection object."""
if 'Server' in self._res:
server_name = self._res['Server']
else:
server_name = 'Agilent N5230A Network Analyzer'
try:
return cxn[server_name]
except:
try:
return cxn['Agilent 8720ET Network Analyzer']
except:
raise ResourceDefinitionError("Could not find a " +
"network analyzer server.")
def _init_gpib_resource(self):
"""Initialize a network analyzer generator."""
if ('Variables' in self._res and
self._var in self._res['Variables'] and
isinstance(self._res['Variables'], dict) and
'Setting' in self._res['Variables'][self._var]):
self._setting = self._res['Variables'][self._var]['Setting']
elif self._var.lower().find('center') != -1:
self._setting = 'Center Frequency'
elif self._var.lower().find('span') != -1:
self._setting = 'Frequency Span'
elif self._var.lower().find('start') != -1:
self._setting = 'Start Frequency'
elif self._var.lower().find('stop') != -1:
self._setting = 'Stop Frequency'
elif self._var.lower().find('power') != -1:
self._setting = 'Source Power'
elif self._var.lower().find('frequency points') != -1:
self._setting = 'Frequency Points'
elif self._var.lower().find('average points') != -1:
self._setting = 'Average Points'
elif self._var.lower().find('trace') != -1:
self._setting = 'Get Trace'
elif self._var.lower().find('s2p') != -1:
self._setting = 'Get S2P'
else:
raise ResourceDefinitionError("Setting responsible for " +
"variable '" + self._var + "' is not specified " +
"in the experiment resource: " +
str(self._res) + ".")
if self._setting == 'Get S2P':
if 'Ports' in self._res['Variables'][self._var]:
self._ports = self._res['Variables'][self._var]['Ports']
else:
self._ports = (3, 4)
def send_request(self, value=None):
"""Send a setting request to set a setting."""
p = self.server.packet()
if not self._single_device:
p.select_device(self.address)
if self._setting is not None:
if self._setting == 'Get S2P':
p[self._setting](self._ports)
else:
p[self._setting](value)
if self._setting == 'Average Points' and value is not None:
if value > 1:
p['Average Mode'](True)
else:
p['Average Mode'](False)
self._result = p.send(wait=False)
self._request_sent = True
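# A hedged sketch of the network analyzer interface (not part of the
# original module), illustrating how the setting is inferred from the
# variable name: 'NA Source Power' contains 'power', so it maps to the
# 'Source Power' setting. The address and the -30 dBm value are
# hypothetical.
def _example_network_analyzer(cxn):
    """Set the network analyzer source power."""
    res = {'Address': 'GPIB0::16::INSTR',             # hypothetical
           'Variables': {'NA Source Power': {}}}
    na = NetworkAnalyzer(cxn, res, 'NA Source Power')
    na.send_request(-30 * units.dBm)
    na.acknowledge_request()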
class LabBrickAttenuator(BasicInterface):
"""
Lab Brick attenuator simplified interface.
"""
def __exit__(self, type, value, traceback):
"""Deselect the attenuator."""
if hasattr(self, 'server'):
p = self.server.packet()
p.deselect_attenuator().send()
def _get_server(self, cxn):
"""Get server connection object."""
if 'Server' in self._res:
server_name = self._res['Server']
else:
server_name = (os.environ['COMPUTERNAME'].lower() +
' Lab Brick Attenuators')
try:
return cxn[server_name]
except:
raise ResourceDefinitionError("Could not connect to " +
"server '" + server_name + "'.")
def _init_resource(self):
"""Initialize a Lab Brick attenuator."""
p = self.server.packet()
devices = (p.list_devices().send())['list_devices']
if 'Serial Number' in self._res:
if self._res['Serial Number'] in devices:
self.address = self._res['Serial Number']
else:
raise ResourceDefinitionError("Device with serial " +
"number " + str(self._res['Serial Number']) +
" is not found.")
elif len(devices) == 1:
self.address = devices[0]
else:
raise ResourceDefinitionError("'Serial Number' field is " +
"absent in the experiment resource: " +
str(self._res) + ".")
if len(devices) == 1:
self._single_device = True
p = self.server.packet()
p.select_attenuator(self.address).send()
else:
self._single_device = False
def send_request(self, value=None):
"""Set the attenuation."""
p = self.server.packet()
if not self._single_device:
p.select_attenuator(self.address)
p.attenuation(value)
self._result = p.send(wait=False)
self._request_sent = True
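# A hedged sketch for the Lab Brick attenuator interface (not part of
# the original module). The serial number and the 10 dB value are
# hypothetical placeholders.
def _example_lab_brick_attenuator(cxn):
    """Set a Lab Brick attenuator to a fixed attenuation."""
    res = {'Serial Number': 7032}                     # hypothetical
    atten = LabBrickAttenuator(cxn, res, 'Attenuation')
    atten.send_request(10 * units.dB)
    atten.acknowledge_request()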
class LabBrickRFGenerator(BasicInterface):
"""
Lab Brick RF generator simplified interface.
"""
def __exit__(self, type, value, traceback):
"""Deselect the RF generator."""
if hasattr(self, 'server'):
p = self.server.packet()
p.select_rf_generator(self.address).rf_output_state(False)
p.deselect_rf_generator().send()
def _get_server(self, cxn):
"""Get server connection object."""
if 'Server' in self._res:
server_name = self._res['Server']
else:
server_name = (os.environ['COMPUTERNAME'].lower() +
' Lab Brick RF Generators')
try:
return cxn[server_name]
except:
raise ResourceDefinitionError("Could not connect to " +
"server '" + server_name + "'.")
def _init_resource(self):
"""Initialize a Lab Brick RF generator."""
p = self.server.packet()
devices = (p.list_devices().send())['list_devices']
if 'Serial Number' in self._res:
if self._res['Serial Number'] in devices:
self.address = self._res['Serial Number']
else:
raise ResourceDefinitionError("Device with serial " +
"number " + str(self._res['Serial Number']) +
" is not found.")
        elif len(devices) == 1:
            self.address = devices[0]
else:
raise ResourceDefinitionError("'Serial Number' field is " +
"absent in the experiment resource: " +
str(self._res) + ".")
if len(devices) == 1:
self._single_device = True
p = self.server.packet()
p.select_rf_generator(self.address).send()
else:
self._single_device = False
if ('Variables' in self._res and
self._var in self._res['Variables'] and
isinstance(self._res['Variables'], dict) and
'Setting' in self._res['Variables'][self._var]):
self._setting = self._res['Variables'][self._var]['Setting']
elif self._var.lower().find('freq') != -1:
self._setting = 'Frequency'
        elif self._var.lower().find('power') != -1:
            self._setting = 'Power'
        else:
            raise ResourceDefinitionError("Setting responsible for " +
                    "variable '" + self._var + "' is not specified " +
                    "in the experiment resource: " +
                    str(self._res) + ".")
        self._output_set = False
def send_request(self, value=None):
"""Send a request to the RF generator."""
p = self.server.packet()
if not self._single_device:
p.select_rf_generator(self.address)
p[self._setting](value)
if value is not None and not self._output_set:
p.rf_output_state(True)
self._output_set = True
        # Unlike the other interfaces in this module, this request is
        # sent blocking, so there is no pending result to acknowledge.
        self._result = p.send(wait=True)
        self._request_sent = False
class ADR3(BasicInterface):
"""
Simplified interface for ADR3 temperature monitoring.
"""
def _get_server(self, cxn):
"""Get server connection object."""
if 'Server' in self._res:
server_name = self._res['Server']
else:
server_name = 'ADR3'
        try:
            server = cxn[server_name]
            self._connected = True
            return server
        except:
            print("Could not connect to server '" + server_name + "'.")
            self._connected = False
def _init_resource(self):
"""Initialize the temperature variable."""
if self._connected:
if ('Variables' in self._res and
self._var in self._res['Variables'] and
isinstance(self._res['Variables'], dict)):
var_dict = True
else:
var_dict = False
if var_dict and 'Setting' in self._res['Variables'][self._var]:
self._setting = self._res['Variables'][self._var]['Setting']
else:
self._setting = 'Temperatures'
            if var_dict and 'Stage' in self._res['Variables'][self._var]:
                stage = self._res['Variables'][self._var]['Stage'].lower()
                if stage.find('50k') != -1:
                    self._temp_idx = 0
                elif stage.find('3k') != -1:
                    self._temp_idx = 1
                elif stage.find('ggg') != -1:
                    self._temp_idx = 2
                elif stage.find('faa') != -1:
                    self._temp_idx = 3
                else:
                    # Default to the FAA stage for unrecognized names.
                    self._temp_idx = 3
            else:
                self._temp_idx = 3
def send_request(self, value=None):
"""Send a request."""
if self._connected:
p = self.server.packet()
if self._setting is not None:
p[self._setting](value)
self._result = p.send(wait=False)
self._request_sent = True
else:
self._request_sent = False
def acknowledge_request(self):
"""Wait for the result of a non-blocking request."""
if self._connected and self._request_sent:
self._request_sent = False
temperatures = self._result.wait()[self._setting]
return temperatures[self._temp_idx]
else:
return np.nan * units.K
class Leiden(BasicInterface):
"""
Simplified interface for Leiden fridge temperature monitoring.
"""
def _get_server(self, cxn):
"""Get server connection object."""
if 'Server' in self._res:
server_name = self._res['Server']
else:
server_name = 'Leiden DR Temperature'
try:
return cxn[server_name]
except:
raise ResourceDefinitionError("Could not connect to " +
"server '" + server_name + "'.")
def _init_resource(self):
"""Initialize the temperature variable."""
if ('Variables' in self._res and
self._var in self._res['Variables'] and
isinstance(self._res['Variables'], dict)):
var_dict = True
else:
var_dict = False
self._setting = None
if var_dict and 'Setting' in self._res['Variables'][self._var]:
self._setting = self._res['Variables'][self._var]['Setting']
elif 'Stage' in self._res:
if self._res['Stage'].lower().find('exch') != -1:
self._setting = 'Exhange Temperature'
elif self._res['Stage'].lower().find('still') != -1:
self._setting = 'Still Temperature'
if self._setting is None:
self._setting = 'Mix Temperature'
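# A hedged sketch of reading a fridge temperature through the Leiden
# interface above (not part of the original module). With no
# 'Variables' entry and no 'Stage' key in the resource, the interface
# falls back to the 'Mix Temperature' setting.
def _example_leiden_temperature(cxn):
    """Read the mixing chamber temperature."""
    res = {'Server': 'Leiden DR Temperature'}
    fridge = Leiden(cxn, res, 'Temperature')
    fridge.send_request()
    return fridge.acknowledge_request()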
|
McDermott-Group/LabRAD
|
LabRAD/Measurements/General/server_interfaces.py
|
Python
|
gpl-2.0
| 46,740
|
[
"Gaussian"
] |
79ac2e0265cfcc9e40a485b822ff9df2aaaefd352e0c1b932d77e7bbe05d0065
|