"""
Acceptance tests for Studio related to the course outline page.
"""
import json
from datetime import datetime, timedelta
import itertools
from pytz import UTC
from bok_choy.promise import EmptyPromise
from nose.plugins.attrib import attr
from ...pages.studio.settings_advanced import AdvancedSettingsPage
from ...pages.studio.overview import CourseOutlinePage, ContainerPage, ExpandCollapseLinkState
from ...pages.studio.utils import add_discussion, drag, verify_ordering
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.course_nav import CourseNavPage
from ...pages.lms.staff_view import StaffPage
from ...fixtures.course import XBlockFixtureDesc
from base_studio_test import StudioCourseTest
from ..helpers import load_data_str
from ...pages.lms.progress import ProgressPage
SECTION_NAME = 'Test Section'
SUBSECTION_NAME = 'Test Subsection'
UNIT_NAME = 'Test Unit'
class CourseOutlineTest(StudioCourseTest):
"""
Base class for all course outline tests
"""
def setUp(self):
"""
Install a course with no content using a fixture.
"""
super(CourseOutlineTest, self).setUp()
self.course_outline_page = CourseOutlinePage(
self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
)
self.advanced_settings = AdvancedSettingsPage(
self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
)
def populate_course_fixture(self, course_fixture):
""" Install a course with sections/problems, tabs, updates, and handouts """
course_fixture.add_children(
XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
XBlockFixtureDesc('vertical', UNIT_NAME).add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
XBlockFixtureDesc('html', 'Test HTML Component'),
XBlockFixtureDesc('discussion', 'Test Discussion Component')
)
)
)
)
def do_action_and_verify(self, outline_page, action, expected_ordering):
"""
Perform the supplied action and then verify the resulting ordering.
"""
if outline_page is None:
outline_page = self.course_outline_page.visit()
action(outline_page)
verify_ordering(self, outline_page, expected_ordering)
# Reload the page and expand all subsections to see that the change was persisted.
outline_page = self.course_outline_page.visit()
outline_page.q(css='.outline-item.outline-subsection.is-collapsed .ui-toggle-expansion').click()
verify_ordering(self, outline_page, expected_ordering)
@attr('shard_3')
class CourseOutlineDragAndDropTest(CourseOutlineTest):
"""
Tests of drag and drop within the outline page.
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
"""
Create a course with one section, two subsections, and four units
"""
        # Index of each drag handle on the outline page, in page order.
        # With the outline collapsed, the chapter and first sequential handles come first:
        self.chap_1_handle = 0
        self.chap_1_seq_1_handle = 1
        # With the first sequential expanded, its verticals precede the second sequential:
        self.seq_1_vert_1_handle = 2
        self.seq_1_vert_2_handle = 3
        self.chap_1_seq_2_handle = 4
course_fixture.add_children(
XBlockFixtureDesc('chapter', "1").add_children(
XBlockFixtureDesc('sequential', '1.1').add_children(
XBlockFixtureDesc('vertical', '1.1.1'),
XBlockFixtureDesc('vertical', '1.1.2')
),
XBlockFixtureDesc('sequential', '1.2').add_children(
XBlockFixtureDesc('vertical', '1.2.1'),
XBlockFixtureDesc('vertical', '1.2.2')
)
)
)
def drag_and_verify(self, source, target, expected_ordering, outline_page=None):
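        """ Performs a drag from the source handle to the target handle and verifies the resulting ordering. """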
self.do_action_and_verify(
outline_page,
            lambda outline: drag(outline, source, target),
expected_ordering
)
def test_drop_unit_in_collapsed_subsection(self):
"""
        Drag vertical "1.1.2" from subsection "1.1" into collapsed subsection "1.2", which already
        has its own verticals.
"""
course_outline_page = self.course_outline_page.visit()
# expand first subsection
course_outline_page.q(css='.outline-item.outline-subsection.is-collapsed .ui-toggle-expansion').first.click()
expected_ordering = [{"1": ["1.1", "1.2"]},
{"1.1": ["1.1.1"]},
{"1.2": ["1.1.2", "1.2.1", "1.2.2"]}]
self.drag_and_verify(self.seq_1_vert_2_handle, self.chap_1_seq_2_handle, expected_ordering, course_outline_page)
@attr('shard_3')
class WarningMessagesTest(CourseOutlineTest):
"""
Feature: Warning messages on sections, subsections, and units
"""
__test__ = True
STAFF_ONLY_WARNING = 'Contains staff only content'
LIVE_UNPUBLISHED_WARNING = 'Unpublished changes to live content'
FUTURE_UNPUBLISHED_WARNING = 'Unpublished changes to content that will release in the future'
NEVER_PUBLISHED_WARNING = 'Unpublished units will not be released'
class PublishState:
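        """ Enumerates the possible publish states of a unit """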
NEVER_PUBLISHED = 1
UNPUBLISHED_CHANGES = 2
PUBLISHED = 3
VALUES = [NEVER_PUBLISHED, UNPUBLISHED_CHANGES, PUBLISHED]
class UnitState:
""" Represents the state of a unit """
def __init__(self, is_released, publish_state, is_locked):
""" Creates a new UnitState with the given properties """
self.is_released = is_released
self.publish_state = publish_state
self.is_locked = is_locked
@property
def name(self):
""" Returns an appropriate name based on the properties of the unit """
result = "Released " if self.is_released else "Unreleased "
if self.publish_state == WarningMessagesTest.PublishState.NEVER_PUBLISHED:
result += "Never Published "
elif self.publish_state == WarningMessagesTest.PublishState.UNPUBLISHED_CHANGES:
result += "Unpublished Changes "
else:
result += "Published "
result += "Locked" if self.is_locked else "Unlocked"
return result
def populate_course_fixture(self, course_fixture):
""" Install a course with various configurations that could produce warning messages """
# Define the dimensions that map to the UnitState constructor
features = [
[True, False], # Possible values for is_released
self.PublishState.VALUES, # Possible values for publish_state
[True, False] # Possible values for is_locked
]
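        # 2 release states x 3 publish states x 2 lock states = 12 configurations in total.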
# Add a fixture for every state in the product of features
course_fixture.add_children(*[
self._build_fixture(self.UnitState(*state)) for state in itertools.product(*features)
])
def _build_fixture(self, unit_state):
""" Returns an XBlockFixtureDesc with a section, subsection, and possibly unit that has the given state. """
name = unit_state.name
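        # Released units get a start date in the past; unreleased units start tomorrow.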
start = (datetime(1984, 3, 4) if unit_state.is_released else datetime.now(UTC) + timedelta(1)).isoformat()
subsection = XBlockFixtureDesc('sequential', name, metadata={'start': start})
# Children of never published subsections will be added on demand via _ensure_unit_present
return XBlockFixtureDesc('chapter', name).add_children(
subsection if unit_state.publish_state == self.PublishState.NEVER_PUBLISHED
else subsection.add_children(
XBlockFixtureDesc('vertical', name, metadata={
'visible_to_staff_only': True if unit_state.is_locked else None
})
)
)
def test_released_never_published_locked(self):
""" Tests that released never published locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_released_never_published_unlocked(self):
""" Tests that released never published unlocked units display 'Unpublished units will not be released' """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=False),
self.NEVER_PUBLISHED_WARNING
)
def test_released_unpublished_changes_locked(self):
""" Tests that released unpublished changes locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_released_unpublished_changes_unlocked(self):
""" Tests that released unpublished changes unlocked units display 'Unpublished changes to live content' """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=False),
self.LIVE_UNPUBLISHED_WARNING
)
def test_released_published_locked(self):
""" Tests that released published locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.PUBLISHED, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_released_published_unlocked(self):
""" Tests that released published unlocked units display no warnings """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.PUBLISHED, is_locked=False),
None
)
def test_unreleased_never_published_locked(self):
""" Tests that unreleased never published locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_unreleased_never_published_unlocked(self):
""" Tests that unreleased never published unlocked units display 'Unpublished units will not be released' """
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=False),
self.NEVER_PUBLISHED_WARNING
)
def test_unreleased_unpublished_changes_locked(self):
""" Tests that unreleased unpublished changes locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_unreleased_unpublished_changes_unlocked(self):
"""
Tests that unreleased unpublished changes unlocked units display 'Unpublished changes to content that will
release in the future'
"""
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=False),
self.FUTURE_UNPUBLISHED_WARNING
)
def test_unreleased_published_locked(self):
""" Tests that unreleased published locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.PUBLISHED, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_unreleased_published_unlocked(self):
""" Tests that unreleased published unlocked units display no warnings """
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.PUBLISHED, is_locked=False),
None
)
def _verify_unit_warning(self, unit_state, expected_status_message):
"""
Verifies that the given unit's messages match the expected messages.
If expected_status_message is None, then the unit status message is expected to not be present.
"""
self._ensure_unit_present(unit_state)
self.course_outline_page.visit()
section = self.course_outline_page.section(unit_state.name)
subsection = section.subsection_at(0)
subsection.expand_subsection()
unit = subsection.unit_at(0)
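        # Staff-only warnings propagate up to the subsection and section, while
        # publish-state warnings are shown only on the unit itself.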
if expected_status_message == self.STAFF_ONLY_WARNING:
self.assertEqual(section.status_message, self.STAFF_ONLY_WARNING)
self.assertEqual(subsection.status_message, self.STAFF_ONLY_WARNING)
self.assertEqual(unit.status_message, self.STAFF_ONLY_WARNING)
else:
self.assertFalse(section.has_status_message)
self.assertFalse(subsection.has_status_message)
if expected_status_message:
self.assertEqual(unit.status_message, expected_status_message)
else:
self.assertFalse(unit.has_status_message)
def _ensure_unit_present(self, unit_state):
""" Ensures that a unit with the given state is present on the course outline """
if unit_state.publish_state == self.PublishState.PUBLISHED:
return
name = unit_state.name
self.course_outline_page.visit()
subsection = self.course_outline_page.section(name).subsection(name)
subsection.expand_subsection()
if unit_state.publish_state == self.PublishState.UNPUBLISHED_CHANGES:
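            # Adding a component to the published unit creates a draft with unpublished changes.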
unit = subsection.unit(name).go_to()
add_discussion(unit)
elif unit_state.publish_state == self.PublishState.NEVER_PUBLISHED:
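            # The fixture omitted this unit (see _build_fixture); creating it now leaves it never published.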
subsection.add_unit()
unit = ContainerPage(self.browser, None)
unit.wait_for_page()
if unit.is_staff_locked != unit_state.is_locked:
unit.toggle_staff_lock()
@attr('shard_3')
class EditingSectionsTest(CourseOutlineTest):
"""
Feature: Editing Release date, Due date and grading type.
"""
__test__ = True
def test_can_edit_subsection(self):
"""
Scenario: I can edit settings of subsection.
Given that I have created a subsection
Then I see release date, due date and grading policy of subsection in course outline
When I click on the configuration icon
Then edit modal window is shown
And release date, due date and grading policy fields present
And they have correct initial values
Then I set new values for these fields
And I click save button on the modal
Then I see release date, due date and grading policy of subsection in course outline
"""
self.course_outline_page.visit()
subsection = self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME)
        # Verify that the release date is visible by default
self.assertTrue(subsection.release_date)
        # Verify that the due date and policy are hidden by default
self.assertFalse(subsection.due_date)
self.assertFalse(subsection.policy)
modal = subsection.edit()
# Verify fields
self.assertTrue(modal.has_release_date())
self.assertTrue(modal.has_due_date())
self.assertTrue(modal.has_policy())
# Verify initial values
self.assertEqual(modal.release_date, u'1/1/1970')
self.assertEqual(modal.due_date, u'')
self.assertEqual(modal.policy, u'Not Graded')
# Set new values
modal.release_date = '3/12/1972'
modal.due_date = '7/21/2014'
modal.policy = 'Lab'
modal.save()
self.assertIn(u'Released: Mar 12, 1972', subsection.release_date)
self.assertIn(u'Due: Jul 21, 2014', subsection.due_date)
self.assertIn(u'Lab', subsection.policy)
def test_can_edit_section(self):
"""
Scenario: I can edit settings of section.
Given that I have created a section
Then I see release date of section in course outline
When I click on the configuration icon
Then edit modal window is shown
And release date field present
And it has correct initial value
Then I set new value for this field
And I click save button on the modal
Then I see release date of section in course outline
"""
self.course_outline_page.visit()
section = self.course_outline_page.section(SECTION_NAME)
        # Verify that the release date is visible by default
self.assertTrue(section.release_date)
# Verify that Due date and Policy are not present
self.assertFalse(section.due_date)
self.assertFalse(section.policy)
modal = section.edit()
# Verify fields
self.assertTrue(modal.has_release_date())
self.assertFalse(modal.has_due_date())
self.assertFalse(modal.has_policy())
# Verify initial value
self.assertEqual(modal.release_date, u'1/1/1970')
# Set new value
modal.release_date = '5/14/1969'
modal.save()
self.assertIn(u'Released: May 14, 1969', section.release_date)
# Verify that Due date and Policy are not present
self.assertFalse(section.due_date)
self.assertFalse(section.policy)
def test_subsection_is_graded_in_lms(self):
"""
        Scenario: I can set the grading type of a subsection from the course outline page.
            Given I visit the progress page
            And I see that the problem in the subsection has grading type "Practice"
            When I visit the course outline page
            And I click on the subsection's configuration icon
            And I set the grading policy to "Lab"
            And I click the save button on the modal
            Then I visit the progress page
            And I see that the problem in the subsection has grading type "Problem"
"""
progress_page = ProgressPage(self.browser, self.course_id)
progress_page.visit()
progress_page.wait_for_page()
self.assertEqual(u'Practice', progress_page.grading_formats[0])
self.course_outline_page.visit()
subsection = self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME)
modal = subsection.edit()
# Set new values
modal.policy = 'Lab'
modal.save()
progress_page.visit()
self.assertEqual(u'Problem', progress_page.grading_formats[0])
def test_unchanged_release_date_is_not_saved(self):
"""
Scenario: Saving a subsection without changing the release date will not override the release date
Given that I have created a section with a subsection
When I open the settings modal for the subsection
And I pressed save
And I open the settings modal for the section
And I change the release date to 07/20/1969
And I press save
Then the subsection and the section have the release date 07/20/1969
"""
self.course_outline_page.visit()
modal = self.course_outline_page.section_at(0).subsection_at(0).edit()
modal.save()
modal = self.course_outline_page.section_at(0).edit()
modal.release_date = '7/20/1969'
modal.save()
release_text = 'Released: Jul 20, 1969'
self.assertIn(release_text, self.course_outline_page.section_at(0).release_date)
self.assertIn(release_text, self.course_outline_page.section_at(0).subsection_at(0).release_date)
@attr('shard_3')
class StaffLockTest(CourseOutlineTest):
"""
Feature: Sections, subsections, and units can be locked and unlocked from the course outline.
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
""" Create a course with one section, two subsections, and four units """
course_fixture.add_children(
XBlockFixtureDesc('chapter', '1').add_children(
XBlockFixtureDesc('sequential', '1.1').add_children(
XBlockFixtureDesc('vertical', '1.1.1'),
XBlockFixtureDesc('vertical', '1.1.2')
),
XBlockFixtureDesc('sequential', '1.2').add_children(
XBlockFixtureDesc('vertical', '1.2.1'),
XBlockFixtureDesc('vertical', '1.2.2')
)
)
)
def _verify_descendants_are_staff_only(self, item):
"""Verifies that all the descendants of item are staff only"""
self.assertTrue(item.is_staff_only)
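        # Units are leaf nodes without a children() method; recurse only where one exists.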
if hasattr(item, 'children'):
for child in item.children():
self._verify_descendants_are_staff_only(child)
def _remove_staff_lock_and_verify_warning(self, outline_item, expect_warning):
"""Removes staff lock from a course outline item and checks whether or not a warning appears."""
modal = outline_item.edit()
modal.is_explicitly_locked = False
if expect_warning:
self.assertTrue(modal.shows_staff_lock_warning())
else:
self.assertFalse(modal.shows_staff_lock_warning())
modal.save()
def _toggle_lock_on_unlocked_item(self, outline_item):
"""Toggles outline_item's staff lock on and then off, verifying the staff lock warning"""
self.assertFalse(outline_item.has_staff_lock_warning)
outline_item.set_staff_lock(True)
self.assertTrue(outline_item.has_staff_lock_warning)
self._verify_descendants_are_staff_only(outline_item)
outline_item.set_staff_lock(False)
self.assertFalse(outline_item.has_staff_lock_warning)
def _verify_explicit_staff_lock_remains_after_unlocking_parent(self, child_item, parent_item):
"""Verifies that child_item's explicit staff lock remains after removing parent_item's staff lock"""
child_item.set_staff_lock(True)
parent_item.set_staff_lock(True)
self.assertTrue(parent_item.has_staff_lock_warning)
self.assertTrue(child_item.has_staff_lock_warning)
parent_item.set_staff_lock(False)
self.assertFalse(parent_item.has_staff_lock_warning)
self.assertTrue(child_item.has_staff_lock_warning)
def test_units_can_be_locked(self):
"""
Scenario: Units can be locked and unlocked from the course outline page
Given I have a course with a unit
When I click on the configuration icon
And I enable explicit staff locking
And I click save
Then the unit shows a staff lock warning
And when I click on the configuration icon
And I disable explicit staff locking
And I click save
Then the unit does not show a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0)
self._toggle_lock_on_unlocked_item(unit)
def test_subsections_can_be_locked(self):
"""
Scenario: Subsections can be locked and unlocked from the course outline page
Given I have a course with a subsection
When I click on the subsection's configuration icon
And I enable explicit staff locking
And I click save
Then the subsection shows a staff lock warning
And all its descendants are staff locked
And when I click on the subsection's configuration icon
And I disable explicit staff locking
And I click save
            Then the subsection does not show a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
subsection = self.course_outline_page.section_at(0).subsection_at(0)
self._toggle_lock_on_unlocked_item(subsection)
def test_sections_can_be_locked(self):
"""
Scenario: Sections can be locked and unlocked from the course outline page
Given I have a course with a section
When I click on the section's configuration icon
And I enable explicit staff locking
And I click save
Then the section shows a staff lock warning
And all its descendants are staff locked
And when I click on the section's configuration icon
And I disable explicit staff locking
And I click save
Then the section does not show a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
section = self.course_outline_page.section_at(0)
self._toggle_lock_on_unlocked_item(section)
def test_explicit_staff_lock_remains_after_unlocking_section(self):
"""
Scenario: An explicitly locked unit is still locked after removing an inherited lock from a section
Given I have a course with sections, subsections, and units
And I have enabled explicit staff lock on a section and one of its units
When I click on the section's configuration icon
And I disable explicit staff locking
And I click save
Then the unit still shows a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
section = self.course_outline_page.section_at(0)
unit = section.subsection_at(0).unit_at(0)
self._verify_explicit_staff_lock_remains_after_unlocking_parent(unit, section)
def test_explicit_staff_lock_remains_after_unlocking_subsection(self):
"""
Scenario: An explicitly locked unit is still locked after removing an inherited lock from a subsection
Given I have a course with sections, subsections, and units
And I have enabled explicit staff lock on a subsection and one of its units
When I click on the subsection's configuration icon
And I disable explicit staff locking
And I click save
Then the unit still shows a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
subsection = self.course_outline_page.section_at(0).subsection_at(0)
unit = subsection.unit_at(0)
self._verify_explicit_staff_lock_remains_after_unlocking_parent(unit, subsection)
def test_section_displays_lock_when_all_subsections_locked(self):
"""
Scenario: All subsections in section are explicitly locked, section should display staff only warning
            Given I have a course with one section and two subsections
When I enable explicit staff lock on all the subsections
Then the section shows a staff lock warning
"""
self.course_outline_page.visit()
section = self.course_outline_page.section_at(0)
section.subsection_at(0).set_staff_lock(True)
section.subsection_at(1).set_staff_lock(True)
self.assertTrue(section.has_staff_lock_warning)
def test_section_displays_lock_when_all_units_locked(self):
"""
Scenario: All units in a section are explicitly locked, section should display staff only warning
Given I have a course with one section, two subsections, and four units
When I enable explicit staff lock on all the units
Then the section shows a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
section = self.course_outline_page.section_at(0)
section.subsection_at(0).unit_at(0).set_staff_lock(True)
section.subsection_at(0).unit_at(1).set_staff_lock(True)
section.subsection_at(1).unit_at(0).set_staff_lock(True)
section.subsection_at(1).unit_at(1).set_staff_lock(True)
self.assertTrue(section.has_staff_lock_warning)
def test_subsection_displays_lock_when_all_units_locked(self):
"""
Scenario: All units in subsection are explicitly locked, subsection should display staff only warning
Given I have a course with one subsection and two units
When I enable explicit staff lock on all the units
Then the subsection shows a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
subsection = self.course_outline_page.section_at(0).subsection_at(0)
subsection.unit_at(0).set_staff_lock(True)
subsection.unit_at(1).set_staff_lock(True)
self.assertTrue(subsection.has_staff_lock_warning)
def test_section_does_not_display_lock_when_some_subsections_locked(self):
"""
Scenario: Only some subsections in section are explicitly locked, section should NOT display staff only warning
Given I have a course with one section and two subsections
When I enable explicit staff lock on one subsection
Then the section does not show a staff lock warning
"""
self.course_outline_page.visit()
section = self.course_outline_page.section_at(0)
section.subsection_at(0).set_staff_lock(True)
self.assertFalse(section.has_staff_lock_warning)
def test_section_does_not_display_lock_when_some_units_locked(self):
"""
Scenario: Only some units in section are explicitly locked, section should NOT display staff only warning
Given I have a course with one section, two subsections, and four units
When I enable explicit staff lock on three units
Then the section does not show a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
section = self.course_outline_page.section_at(0)
section.subsection_at(0).unit_at(0).set_staff_lock(True)
section.subsection_at(0).unit_at(1).set_staff_lock(True)
section.subsection_at(1).unit_at(1).set_staff_lock(True)
self.assertFalse(section.has_staff_lock_warning)
def test_subsection_does_not_display_lock_when_some_units_locked(self):
"""
Scenario: Only some units in subsection are explicitly locked, subsection should NOT display staff only warning
Given I have a course with one subsection and two units
When I enable explicit staff lock on one unit
Then the subsection does not show a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
subsection = self.course_outline_page.section_at(0).subsection_at(0)
subsection.unit_at(0).set_staff_lock(True)
self.assertFalse(subsection.has_staff_lock_warning)
def test_locked_sections_do_not_appear_in_lms(self):
"""
Scenario: A locked section is not visible to students in the LMS
Given I have a course with two sections
When I enable explicit staff lock on one section
And I click the View Live button to switch to staff view
Then I see two sections in the sidebar
And when I switch the view mode to student view
Then I see one section in the sidebar
"""
self.course_outline_page.visit()
self.course_outline_page.add_section_from_top_button()
self.course_outline_page.section_at(1).set_staff_lock(True)
self.course_outline_page.view_live()
courseware = CoursewarePage(self.browser, self.course_id)
courseware.wait_for_page()
self.assertEqual(courseware.num_sections, 2)
StaffPage(self.browser, self.course_id).set_staff_view_mode('Student')
self.assertEqual(courseware.num_sections, 1)
def test_locked_subsections_do_not_appear_in_lms(self):
"""
Scenario: A locked subsection is not visible to students in the LMS
Given I have a course with two subsections
When I enable explicit staff lock on one subsection
And I click the View Live button to switch to staff view
Then I see two subsections in the sidebar
And when I switch the view mode to student view
            Then I see one subsection in the sidebar
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).subsection_at(1).set_staff_lock(True)
self.course_outline_page.view_live()
courseware = CoursewarePage(self.browser, self.course_id)
courseware.wait_for_page()
self.assertEqual(courseware.num_subsections, 2)
StaffPage(self.browser, self.course_id).set_staff_view_mode('Student')
self.assertEqual(courseware.num_subsections, 1)
def test_toggling_staff_lock_on_section_does_not_publish_draft_units(self):
"""
Scenario: Locking and unlocking a section will not publish its draft units
Given I have a course with a section and unit
And the unit has a draft and published version
When I enable explicit staff lock on the section
And I disable explicit staff lock on the section
And I click the View Live button to switch to staff view
Then I see the published version of the unit
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).go_to()
add_discussion(unit)
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
section = self.course_outline_page.section_at(0)
section.set_staff_lock(True)
section.set_staff_lock(False)
unit = section.subsection_at(0).unit_at(0).go_to()
unit.view_published_version()
courseware = CoursewarePage(self.browser, self.course_id)
courseware.wait_for_page()
self.assertEqual(courseware.num_xblock_components, 0)
def test_toggling_staff_lock_on_subsection_does_not_publish_draft_units(self):
"""
Scenario: Locking and unlocking a subsection will not publish its draft units
Given I have a course with a subsection and unit
And the unit has a draft and published version
When I enable explicit staff lock on the subsection
And I disable explicit staff lock on the subsection
And I click the View Live button to switch to staff view
Then I see the published version of the unit
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).go_to()
add_discussion(unit)
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
subsection = self.course_outline_page.section_at(0).subsection_at(0)
subsection.set_staff_lock(True)
subsection.set_staff_lock(False)
unit = subsection.unit_at(0).go_to()
unit.view_published_version()
courseware = CoursewarePage(self.browser, self.course_id)
courseware.wait_for_page()
self.assertEqual(courseware.num_xblock_components, 0)
def test_removing_staff_lock_from_unit_without_inherited_lock_shows_warning(self):
"""
Scenario: Removing explicit staff lock from a unit which does not inherit staff lock displays a warning.
Given I have a course with a subsection and unit
When I enable explicit staff lock on the unit
And I disable explicit staff lock on the unit
Then I see a modal warning.
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0)
unit.set_staff_lock(True)
self._remove_staff_lock_and_verify_warning(unit, True)
def test_removing_staff_lock_from_subsection_without_inherited_lock_shows_warning(self):
"""
Scenario: Removing explicit staff lock from a subsection which does not inherit staff lock displays a warning.
Given I have a course with a section and subsection
When I enable explicit staff lock on the subsection
And I disable explicit staff lock on the subsection
Then I see a modal warning.
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
subsection = self.course_outline_page.section_at(0).subsection_at(0)
subsection.set_staff_lock(True)
self._remove_staff_lock_and_verify_warning(subsection, True)
def test_removing_staff_lock_from_unit_with_inherited_lock_shows_no_warning(self):
"""
Scenario: Removing explicit staff lock from a unit which also inherits staff lock displays no warning.
Given I have a course with a subsection and unit
When I enable explicit staff lock on the subsection
And I enable explicit staff lock on the unit
When I disable explicit staff lock on the unit
Then I do not see a modal warning.
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
subsection = self.course_outline_page.section_at(0).subsection_at(0)
unit = subsection.unit_at(0)
subsection.set_staff_lock(True)
unit.set_staff_lock(True)
self._remove_staff_lock_and_verify_warning(unit, False)
def test_removing_staff_lock_from_subsection_with_inherited_lock_shows_no_warning(self):
"""
Scenario: Removing explicit staff lock from a subsection which also inherits staff lock displays no warning.
Given I have a course with a section and subsection
When I enable explicit staff lock on the section
And I enable explicit staff lock on the subsection
When I disable explicit staff lock on the subsection
Then I do not see a modal warning.
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
section = self.course_outline_page.section_at(0)
subsection = section.subsection_at(0)
section.set_staff_lock(True)
subsection.set_staff_lock(True)
self._remove_staff_lock_and_verify_warning(subsection, False)
@attr('shard_3')
class EditNamesTest(CourseOutlineTest):
"""
Feature: Click-to-edit section/subsection names
"""
__test__ = True
def set_name_and_verify(self, item, old_name, new_name, expected_name):
"""
Changes the display name of item from old_name to new_name, then verifies that its value is expected_name.
"""
self.assertEqual(item.name, old_name)
item.change_name(new_name)
self.assertFalse(item.in_editable_form())
self.assertEqual(item.name, expected_name)
def test_edit_section_name(self):
"""
Scenario: Click-to-edit section name
Given that I have created a section
When I click on the name of section
Then the section name becomes editable
And given that I have edited the section name
When I click outside of the edited section name
Then the section name saves
And becomes non-editable
"""
self.course_outline_page.visit()
self.set_name_and_verify(
self.course_outline_page.section_at(0),
'Test Section',
'Changed',
'Changed'
)
def test_edit_subsection_name(self):
"""
Scenario: Click-to-edit subsection name
Given that I have created a subsection
When I click on the name of subsection
Then the subsection name becomes editable
And given that I have edited the subsection name
When I click outside of the edited subsection name
Then the subsection name saves
And becomes non-editable
"""
self.course_outline_page.visit()
self.set_name_and_verify(
self.course_outline_page.section_at(0).subsection_at(0),
'Test Subsection',
'Changed',
'Changed'
)
def test_edit_empty_section_name(self):
"""
Scenario: Click-to-edit section name, enter empty name
Given that I have created a section
And I have clicked to edit the name of the section
And I have entered an empty section name
When I click outside of the edited section name
Then the section name does not change
And becomes non-editable
"""
self.course_outline_page.visit()
self.set_name_and_verify(
self.course_outline_page.section_at(0),
'Test Section',
'',
'Test Section'
)
def test_edit_empty_subsection_name(self):
"""
Scenario: Click-to-edit subsection name, enter empty name
Given that I have created a subsection
And I have clicked to edit the name of the subsection
And I have entered an empty subsection name
When I click outside of the edited subsection name
Then the subsection name does not change
And becomes non-editable
"""
self.course_outline_page.visit()
self.set_name_and_verify(
self.course_outline_page.section_at(0).subsection_at(0),
'Test Subsection',
'',
'Test Subsection'
)
def test_editing_names_does_not_expand_collapse(self):
"""
Scenario: A section stays in the same expand/collapse state while its name is edited
Given that I have created a section
And the section is collapsed
When I click on the name of the section
Then the section is collapsed
And given that I have entered a new name
Then the section is collapsed
And given that I press ENTER to finalize the name
Then the section is collapsed
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).expand_subsection()
self.assertFalse(self.course_outline_page.section_at(0).in_editable_form())
self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
self.course_outline_page.section_at(0).edit_name()
self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())
self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
self.course_outline_page.section_at(0).enter_name('Changed')
self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
self.course_outline_page.section_at(0).finalize_name()
self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
@attr('shard_3')
class CreateSectionsTest(CourseOutlineTest):
"""
Feature: Create new sections/subsections/units
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
""" Start with a completely empty course to easily test adding things to it """
pass
def test_create_new_section_from_top_button(self):
"""
Scenario: Create new section from button at top of page
Given that I am on the course outline
When I click the "+ Add section" button at the top of the page
Then I see a new section added to the bottom of the page
And the display name is in its editable form.
"""
self.course_outline_page.visit()
self.course_outline_page.add_section_from_top_button()
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())
def test_create_new_section_from_bottom_button(self):
"""
Scenario: Create new section from button at bottom of page
Given that I am on the course outline
When I click the "+ Add section" button at the bottom of the page
Then I see a new section added to the bottom of the page
And the display name is in its editable form.
"""
self.course_outline_page.visit()
self.course_outline_page.add_section_from_bottom_button()
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())
def test_create_new_section_from_bottom_button_plus_icon(self):
"""
Scenario: Create new section from button plus icon at bottom of page
Given that I am on the course outline
            When I click the plus icon in the "+ Add section" button at the bottom of the page
Then I see a new section added to the bottom of the page
And the display name is in its editable form.
"""
self.course_outline_page.visit()
self.course_outline_page.add_section_from_bottom_button(click_child_icon=True)
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())
def test_create_new_subsection(self):
"""
Scenario: Create new subsection
Given that I have created a section
When I click the "+ Add subsection" button in that section
Then I see a new subsection added to the bottom of the section
And the display name is in its editable form.
"""
self.course_outline_page.visit()
self.course_outline_page.add_section_from_top_button()
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.course_outline_page.section_at(0).add_subsection()
subsections = self.course_outline_page.section_at(0).subsections()
self.assertEqual(len(subsections), 1)
self.assertTrue(subsections[0].in_editable_form())
def test_create_new_unit(self):
"""
Scenario: Create new unit
Given that I have created a section
And that I have created a subsection within that section
When I click the "+ Add unit" button in that subsection
Then I am redirected to a New Unit page
And the display name is in its editable form.
"""
self.course_outline_page.visit()
self.course_outline_page.add_section_from_top_button()
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.course_outline_page.section_at(0).add_subsection()
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
self.course_outline_page.section_at(0).subsection_at(0).add_unit()
unit_page = ContainerPage(self.browser, None)
EmptyPromise(unit_page.is_browser_on_page, 'Browser is on the unit page').fulfill()
self.assertTrue(unit_page.is_inline_editing_display_name())
@attr('shard_3')
class DeleteContentTest(CourseOutlineTest):
"""
Feature: Deleting sections/subsections/units
"""
__test__ = True
def test_delete_section(self):
"""
Scenario: Delete section
Given that I am on the course outline
When I click the delete button for a section on the course outline
Then I should receive a confirmation message, asking me if I really want to delete the section
When I click "Yes, I want to delete this component"
Then the confirmation message should close
And the section should immediately be deleted from the course outline
"""
self.course_outline_page.visit()
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.course_outline_page.section_at(0).delete()
self.assertEqual(len(self.course_outline_page.sections()), 0)
def test_cancel_delete_section(self):
"""
Scenario: Cancel delete of section
            Given that I clicked the delete button for a section on the course outline
And I received a confirmation message, asking me if I really want to delete the component
When I click "Cancel"
Then the confirmation message should close
And the section should remain in the course outline
"""
self.course_outline_page.visit()
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.course_outline_page.section_at(0).delete(cancel=True)
self.assertEqual(len(self.course_outline_page.sections()), 1)
def test_delete_subsection(self):
"""
Scenario: Delete subsection
Given that I am on the course outline
When I click the delete button for a subsection on the course outline
Then I should receive a confirmation message, asking me if I really want to delete the subsection
When I click "Yes, I want to delete this component"
            Then the confirmation message should close
And the subsection should immediately be deleted from the course outline
"""
self.course_outline_page.visit()
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
self.course_outline_page.section_at(0).subsection_at(0).delete()
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 0)
def test_cancel_delete_subsection(self):
"""
Scenario: Cancel delete of subsection
Given that I clicked the delete button for a subsection on the course outline
And I received a confirmation message, asking me if I really want to delete the subsection
When I click "cancel"
Then the confirmation message should close
And the subsection should remain in the course outline
"""
self.course_outline_page.visit()
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
self.course_outline_page.section_at(0).subsection_at(0).delete(cancel=True)
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
def test_delete_unit(self):
"""
Scenario: Delete unit
Given that I am on the course outline
When I click the delete button for a unit on the course outline
Then I should receive a confirmation message, asking me if I really want to delete the unit
When I click "Yes, I want to delete this unit"
Then the confirmation message should close
And the unit should immediately be deleted from the course outline
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).subsection_at(0).expand_subsection()
self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1)
self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).delete()
self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 0)
def test_cancel_delete_unit(self):
"""
Scenario: Cancel delete of unit
Given that I clicked the delete button for a unit on the course outline
And I received a confirmation message, asking me if I really want to delete the unit
When I click "Cancel"
Then the confirmation message should close
And the unit should remain in the course outline
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).subsection_at(0).expand_subsection()
self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1)
self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).delete(cancel=True)
self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1)
def test_delete_all_no_content_message(self):
"""
Scenario: Delete all sections/subsections/units in a course, "no content" message should appear
Given that I delete all sections, subsections, and units in a course
When I visit the course outline
Then I will see a message that says, "You haven't added any content to this course yet"
            And see a + Add Section button
"""
self.course_outline_page.visit()
self.assertFalse(self.course_outline_page.has_no_content_message)
self.course_outline_page.section_at(0).delete()
self.assertEqual(len(self.course_outline_page.sections()), 0)
self.assertTrue(self.course_outline_page.has_no_content_message)
@attr('shard_3')
class ExpandCollapseMultipleSectionsTest(CourseOutlineTest):
"""
Feature: Courses with multiple sections can expand and collapse all sections.
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
""" Start with a course with two sections """
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit')
)
),
XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
XBlockFixtureDesc('vertical', 'Test Unit 2')
)
)
)
def verify_all_sections(self, collapsed):
"""
        Verifies that all sections are collapsed if collapsed is True, otherwise that all sections are expanded.
"""
for section in self.course_outline_page.sections():
self.assertEqual(collapsed, section.is_collapsed)
def toggle_all_sections(self):
"""
        Toggles the expand/collapse state of all sections.
"""
for section in self.course_outline_page.sections():
section.expand_subsection()
def test_expanded_by_default(self):
"""
Scenario: The default layout for the outline page is to show sections in expanded view
Given I have a course with sections
When I navigate to the course outline page
Then I see the "Collapse All Sections" link
And all sections are expanded
"""
self.course_outline_page.visit()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
self.verify_all_sections(collapsed=False)
def test_no_expand_link_for_empty_course(self):
"""
Scenario: Collapse link is removed after last section of a course is deleted
Given I have a course with multiple sections
And I navigate to the course outline page
            When I confirm all alerts
And I press the "section" delete icon
Then I do not see the "Collapse All Sections" link
And I will see a message that says "You haven't added any content to this course yet"
"""
self.course_outline_page.visit()
for section in self.course_outline_page.sections():
section.delete()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
self.assertTrue(self.course_outline_page.has_no_content_message)
def test_collapse_all_when_all_expanded(self):
"""
Scenario: Collapse all sections when all sections are expanded
Given I navigate to the outline page of a course with sections
And all sections are expanded
When I click the "Collapse All Sections" link
Then I see the "Expand All Sections" link
And all sections are collapsed
"""
self.course_outline_page.visit()
self.verify_all_sections(collapsed=False)
self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
self.verify_all_sections(collapsed=True)
def test_collapse_all_when_some_expanded(self):
"""
Scenario: Collapsing all sections when 1 or more sections are already collapsed
Given I navigate to the outline page of a course with sections
And all sections are expanded
When I collapse the first section
And I click the "Collapse All Sections" link
Then I see the "Expand All Sections" link
And all sections are collapsed
"""
self.course_outline_page.visit()
self.verify_all_sections(collapsed=False)
self.course_outline_page.section_at(0).expand_subsection()
self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
self.verify_all_sections(collapsed=True)
def test_expand_all_when_all_collapsed(self):
"""
Scenario: Expanding all sections when all sections are collapsed
Given I navigate to the outline page of a course with multiple sections
And I click the "Collapse All Sections" link
When I click the "Expand All Sections" link
Then I see the "Collapse All Sections" link
And all sections are expanded
"""
self.course_outline_page.visit()
self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
self.verify_all_sections(collapsed=False)
def test_expand_all_when_some_collapsed(self):
"""
Scenario: Expanding all sections when 1 or more sections are already expanded
Given I navigate to the outline page of a course with multiple sections
And I click the "Collapse All Sections" link
When I expand the first section
And I click the "Expand All Sections" link
Then I see the "Collapse All Sections" link
And all sections are expanded
"""
self.course_outline_page.visit()
self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        self.course_outline_page.section_at(0).expand_subsection()
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
self.verify_all_sections(collapsed=False)
@attr('shard_3')
class ExpandCollapseSingleSectionTest(CourseOutlineTest):
"""
Feature: Courses with a single section can expand and collapse all sections.
"""
__test__ = True
def test_no_expand_link_for_empty_course(self):
"""
Scenario: Collapse link is removed after last section of a course is deleted
Given I have a course with one section
And I navigate to the course outline page
            When I confirm all alerts
And I press the "section" delete icon
Then I do not see the "Collapse All Sections" link
And I will see a message that says "You haven't added any content to this course yet"
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).delete()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
self.assertTrue(self.course_outline_page.has_no_content_message)
def test_old_subsection_stays_collapsed_after_creation(self):
"""
Scenario: Collapsed subsection stays collapsed after creating a new subsection
Given I have a course with one section and subsection
And I navigate to the course outline page
Then the subsection is collapsed
And when I create a new subsection
Then the first subsection is collapsed
And the second subsection is expanded
"""
self.course_outline_page.visit()
self.assertTrue(self.course_outline_page.section_at(0).subsection_at(0).is_collapsed)
self.course_outline_page.section_at(0).add_subsection()
self.assertTrue(self.course_outline_page.section_at(0).subsection_at(0).is_collapsed)
self.assertFalse(self.course_outline_page.section_at(0).subsection_at(1).is_collapsed)
@attr('shard_3')
class ExpandCollapseEmptyTest(CourseOutlineTest):
"""
Feature: Courses with no sections initially can expand and collapse all sections after addition.
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
""" Start with an empty course """
pass
def test_no_expand_link_for_empty_course(self):
"""
Scenario: Expand/collapse for a course with no sections
Given I have a course with no sections
When I navigate to the course outline page
Then I do not see the "Collapse All Sections" link
"""
self.course_outline_page.visit()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
def test_link_appears_after_section_creation(self):
"""
Scenario: Collapse link appears after creating first section of a course
Given I have a course with no sections
When I navigate to the course outline page
And I add a section
Then I see the "Collapse All Sections" link
And all sections are expanded
"""
self.course_outline_page.visit()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
        self.course_outline_page.add_section_from_top_button()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
self.assertFalse(self.course_outline_page.section_at(0).is_collapsed)
@attr('shard_3')
class DefaultStatesEmptyTest(CourseOutlineTest):
"""
Feature: Misc course outline default states/actions when starting with an empty course
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
""" Start with an empty course """
pass
def test_empty_course_message(self):
"""
Scenario: Empty course state
Given that I am in a course with no sections, subsections, nor units
When I visit the course outline
Then I will see a message that says "You haven't added any content to this course yet"
And see a + Add Section button
"""
self.course_outline_page.visit()
self.assertTrue(self.course_outline_page.has_no_content_message)
self.assertTrue(self.course_outline_page.bottom_add_section_button.is_present())
@attr('shard_3')
class DefaultStatesContentTest(CourseOutlineTest):
"""
Feature: Misc course outline default states/actions when starting with a course with content
"""
__test__ = True
def test_view_live(self):
"""
Scenario: View Live version from course outline
Given that I am on the course outline
When I click the "View Live" button
Then a new tab will open to the course on the LMS
"""
self.course_outline_page.visit()
self.course_outline_page.view_live()
courseware = CoursewarePage(self.browser, self.course_id)
courseware.wait_for_page()
self.assertEqual(courseware.num_xblock_components, 3)
self.assertEqual(courseware.xblock_component_type(0), 'problem')
self.assertEqual(courseware.xblock_component_type(1), 'html')
self.assertEqual(courseware.xblock_component_type(2), 'discussion')
@attr('shard_3')
class UnitNavigationTest(CourseOutlineTest):
"""
Feature: Navigate to units
"""
__test__ = True
def test_navigate_to_unit(self):
"""
Scenario: Click unit name to navigate to unit page
Given that I have expanded a section/subsection so I can see unit names
When I click on a unit name
Then I will be taken to the appropriate unit page
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).subsection_at(0).expand_subsection()
unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).go_to()
self.assertTrue(unit.is_browser_on_page)
@attr('shard_3')
class PublishSectionTest(CourseOutlineTest):
"""
Feature: Publish sections.
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
"""
Sets up a course structure with 2 subsections inside a single section.
The first subsection has 2 units, and the second subsection has one unit.
"""
self.courseware = CoursewarePage(self.browser, self.course_id)
self.course_nav = CourseNavPage(self.browser)
course_fixture.add_children(
XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
XBlockFixtureDesc('vertical', UNIT_NAME),
XBlockFixtureDesc('vertical', 'Test Unit 2'),
),
XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
XBlockFixtureDesc('vertical', 'Test Unit 3'),
),
),
)
def test_unit_publishing(self):
"""
Scenario: Can publish a unit and see published content in LMS
Given I have a section with 2 subsections and 3 unpublished units
When I go to the course outline
            Then I see publish buttons for the first unit, the subsection, and the section
            When I publish the first unit
            Then I see that the publish button for the first unit disappears
            And I see publish buttons for the subsection and the section
            And I see the changed content in the LMS
"""
self._add_unpublished_content()
self.course_outline_page.visit()
section, subsection, unit = self._get_items()
self.assertTrue(unit.publish_action)
self.assertTrue(subsection.publish_action)
self.assertTrue(section.publish_action)
unit.publish()
self.assertFalse(unit.publish_action)
self.assertTrue(subsection.publish_action)
self.assertTrue(section.publish_action)
self.courseware.visit()
self.assertEqual(1, self.courseware.num_xblock_components)
def test_subsection_publishing(self):
"""
Scenario: Can publish a subsection and see published content in LMS
Given I have a section with 2 subsections and 3 unpublished units
When I go to the course outline
            Then I see publish buttons for the unit, the subsection, and the section
            When I publish the first subsection
            Then I see that the publish button for the first subsection disappears
            And I see that the publish buttons disappear for the child units of the subsection
            And I see a publish button for the section
            And I see the changed content in the LMS
"""
self._add_unpublished_content()
self.course_outline_page.visit()
section, subsection, unit = self._get_items()
self.assertTrue(unit.publish_action)
self.assertTrue(subsection.publish_action)
self.assertTrue(section.publish_action)
self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME).publish()
self.assertFalse(unit.publish_action)
self.assertFalse(subsection.publish_action)
self.assertTrue(section.publish_action)
self.courseware.visit()
self.assertEqual(1, self.courseware.num_xblock_components)
self.course_nav.go_to_sequential_position(2)
self.assertEqual(1, self.courseware.num_xblock_components)
def test_section_publishing(self):
"""
Scenario: Can publish a section and see published content in LMS
Given I have a section with 2 subsections and 3 unpublished units
When I go to the course outline
        Then I see publish buttons for the unit, subsection, and section
        When I publish the section
        Then I see that the publish buttons disappear
        And I see the changed content in the LMS
"""
self._add_unpublished_content()
self.course_outline_page.visit()
section, subsection, unit = self._get_items()
self.assertTrue(subsection.publish_action)
self.assertTrue(section.publish_action)
self.assertTrue(unit.publish_action)
self.course_outline_page.section(SECTION_NAME).publish()
self.assertFalse(subsection.publish_action)
self.assertFalse(section.publish_action)
self.assertFalse(unit.publish_action)
self.courseware.visit()
self.assertEqual(1, self.courseware.num_xblock_components)
self.course_nav.go_to_sequential_position(2)
self.assertEqual(1, self.courseware.num_xblock_components)
self.course_nav.go_to_section(SECTION_NAME, 'Test Subsection 2')
self.assertEqual(1, self.courseware.num_xblock_components)
def _add_unpublished_content(self):
"""
        Adds unpublished HTML content to the first three units in the course.
"""
for index in xrange(3):
self.course_fixture.create_xblock(
self.course_fixture.get_nested_xblocks(category="vertical")[index].locator,
XBlockFixtureDesc('html', 'Unpublished HTML Component ' + str(index)),
)
def _get_items(self):
"""
Returns first section, subsection, and unit on the page.
"""
section = self.course_outline_page.section(SECTION_NAME)
subsection = section.subsection(SUBSECTION_NAME)
unit = subsection.expand_subsection().unit(UNIT_NAME)
return (section, subsection, unit)
@attr('shard_3')
class DeprecationWarningMessageTest(CourseOutlineTest):
"""
Feature: Verify deprecation warning message.
"""
HEADING_TEXT = 'This course uses features that are no longer supported.'
COMPONENT_LIST_HEADING = 'You must delete or replace the following components.'
ADVANCE_MODULES_REMOVE_TEXT = ('To avoid errors, edX strongly recommends that you remove unsupported features '
'from the course advanced settings. To do this, go to the Advanced Settings '
'page, locate the "Advanced Module List" setting, and then delete the following '
'modules from the list.')
def _add_deprecated_advance_modules(self, block_types):
"""
Add `block_types` into `Advanced Module List`
Arguments:
block_types (list): list of block types
"""
self.advanced_settings.visit()
self.advanced_settings.set_values({"Advanced Module List": json.dumps(block_types)})
def _create_deprecated_components(self):
"""
Create deprecated components.
"""
parent_vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
self.course_fixture.create_xblock(
parent_vertical.locator,
XBlockFixtureDesc('combinedopenended', "Open", data=load_data_str('ora_peer_problem.xml'))
)
self.course_fixture.create_xblock(parent_vertical.locator, XBlockFixtureDesc('peergrading', 'Peer'))
def _verify_deprecation_warning_info(
self,
deprecated_blocks_present,
components_present,
components_display_name_list=None,
deprecated_modules_list=None
):
"""
Verify deprecation warning
Arguments:
            deprecated_blocks_present (bool): whether the deprecated-blocks
                remove text and modules list are expected to be visible
            components_present (bool): whether the components list is expected to be shown
            components_display_name_list (list): expected component display names
            deprecated_modules_list (list): expected deprecated advanced modules
"""
self.assertTrue(self.course_outline_page.deprecated_warning_visible)
self.assertEqual(self.course_outline_page.warning_heading_text, self.HEADING_TEXT)
self.assertEqual(self.course_outline_page.modules_remove_text_shown, deprecated_blocks_present)
if deprecated_blocks_present:
self.assertEqual(self.course_outline_page.modules_remove_text, self.ADVANCE_MODULES_REMOVE_TEXT)
self.assertEqual(self.course_outline_page.deprecated_advance_modules, deprecated_modules_list)
self.assertEqual(self.course_outline_page.components_visible, components_present)
if components_present:
self.assertEqual(self.course_outline_page.components_list_heading, self.COMPONENT_LIST_HEADING)
self.assertItemsEqual(self.course_outline_page.components_display_names, components_display_name_list)
def test_no_deprecation_warning_message_present(self):
"""
        Scenario: Verify that the deprecation warning message is not shown if
        ORA1 advanced modules are not present and no ORA1 components exist in
        the course outline.
        When I go to the course outline
        Then I don't see the ORA1 deprecation warning
"""
self.course_outline_page.visit()
self.assertFalse(self.course_outline_page.deprecated_warning_visible)
def test_deprecation_warning_message_present(self):
"""
        Scenario: Verify the deprecation warning message if ORA1 advanced
        modules and ORA1 components are present.
        Given I have ORA1 advanced modules present in `Advanced Module List`
        And I have created 2 ORA1 components
        When I go to the course outline
        Then I see the ORA1 deprecation warning
        And I see the correct ORA1 deprecation warning heading text
        And I see the correct ORA1 deprecation warning advanced modules remove text
        And I see the list of ORA1 components with correct display names
"""
self._add_deprecated_advance_modules(block_types=['peergrading', 'combinedopenended'])
self._create_deprecated_components()
self.course_outline_page.visit()
self._verify_deprecation_warning_info(
deprecated_blocks_present=True,
components_present=True,
components_display_name_list=['Open', 'Peer'],
deprecated_modules_list=['peergrading', 'combinedopenended']
)
def test_warning_with_ora1_advance_modules_only(self):
"""
        Scenario: Verify that the deprecation warning message is shown if only
        ORA1 advanced modules are present and no ORA1 components exist.
        Given I have ORA1 advanced modules present in `Advanced Module List`
        When I go to the course outline
        Then I see the ORA1 deprecation warning
        And I see the correct ORA1 deprecation warning heading text
        And I see the correct ORA1 deprecation warning advanced modules remove text
        And I don't see the list of ORA1 components
"""
self._add_deprecated_advance_modules(block_types=['peergrading', 'combinedopenended'])
self.course_outline_page.visit()
self._verify_deprecation_warning_info(
deprecated_blocks_present=True,
components_present=False,
deprecated_modules_list=['peergrading', 'combinedopenended']
)
def test_warning_with_ora1_components_only(self):
"""
        Scenario: Verify that the deprecation warning message is shown if only
        ORA1 components exist and no ORA1 advanced modules are present.
        Given I have created two ORA1 components
        When I go to the course outline
        Then I see the ORA1 deprecation warning
        And I see the correct ORA1 deprecation warning heading text
        And I don't see the ORA1 deprecation warning advanced modules remove text
        And I see the list of ORA1 components with correct display names
"""
self._create_deprecated_components()
self.course_outline_page.visit()
self._verify_deprecation_warning_info(
deprecated_blocks_present=False,
components_present=True,
components_display_name_list=['Open', 'Peer']
)
|
B-MOOC/edx-platform
|
common/test/acceptance/tests/studio/test_studio_outline.py
|
Python
|
agpl-3.0
| 77,904
|
[
"VisIt"
] |
5777528e51749235ca125b09074909252569ab0942e7eaf6dd05d6286abf7c2b
|
__author__ = 'robswift'
__project__ = 'blastnfilter'
import os
from BlastNFilter.PreRelease import ParsePreRelease
from BlastNFilter.Blast import ParseAlignment
import OutPut
def run(options):
non_polymer = options.non_polymer
polymer = options.polymer
out_dir = options.out
blast_dir = os.path.abspath(options.blast_db)
pdb_db = os.path.join(blast_dir, 'pdb_db')
fasta = os.path.join(blast_dir, 'pdb_seqres.txt')
target_list = ParsePreRelease.add_ligands(non_polymer)
target_list = ParsePreRelease.add_sequences(polymer, target_list)
target_list = ParseAlignment.blast_the_targets(target_list, pdb_db, fasta, out_dir)
target_list = ParseAlignment.remove_multiple_dockers(target_list)
    OutPut.write_csv(target_list, out_dir)
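# Illustrative usage (hypothetical file names; the real option parsing lives in
# the blastnfilter entry point, which passes an options namespace like this):
#
#   from argparse import Namespace
#   run(Namespace(non_polymer='nonpolymer.tsv', polymer='polymer.tsv',
#                 out='out_dir', blast_db='blast_db_dir'))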
|
rvswift/BlastNFilter
|
BlastNFilter/Utilities/Run.py
|
Python
|
bsd-3-clause
| 840
|
[
"BLAST"
] |
8679ac1a524735f374246a0251ccb9b87b062152490aa2944e4d9d4d6c1821b9
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
DOCUMENTATION = '''
---
version_added: "1.2"
module: jabber
short_description: Send a message to jabber user or chat room
description:
- Send a message to jabber
options:
user:
description:
- User as which to connect
required: true
password:
description:
- password for user to connect
required: true
to:
description:
      - user ID or name of the room; when sending to a room, append a slash and your nick.
required: true
msg:
description:
- The message body.
required: true
default: null
host:
description:
      - host to connect to; overrides the user's domain
required: false
port:
description:
- port to connect to, overrides default
required: false
default: 5222
encoding:
description:
- message encoding
required: false
# informational: requirements for nodes
requirements:
- python xmpp (xmpppy)
author: "Brian Coca (@bcoca)"
'''
EXAMPLES = '''
# send a message to a user
- jabber: user=mybot@example.net
password=secret
to=friend@example.net
msg="Ansible task finished"
# send a message to a room
- jabber: user=mybot@example.net
password=secret
to=mychaps@conference.example.net/ansiblebot
msg="Ansible task finished"
# send a message, specifying the host and port
- jabber: user=mybot@example.net
host=talk.example.net
port=5223
password=secret
to=mychaps@example.net
msg="Ansible task finished"
'''
import os
import re
import time
HAS_XMPP = True
try:
import xmpp
except ImportError:
HAS_XMPP = False
def main():
module = AnsibleModule(
argument_spec=dict(
user=dict(required=True),
password=dict(required=True, no_log=True),
to=dict(required=True),
msg=dict(required=True),
host=dict(required=False),
port=dict(required=False,default=5222),
encoding=dict(required=False),
),
supports_check_mode=True
)
if not HAS_XMPP:
module.fail_json(msg="The required python xmpp library (xmpppy) is not installed")
jid = xmpp.JID(module.params['user'])
user = jid.getNode()
server = jid.getDomain()
port = module.params['port']
password = module.params['password']
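    # 'to' may be a bare JID ("friend@example.net") or a room plus nick
    # ("room@conference.example.net/mynick"); splitting on the first slash
    # separates the nick so groupchat mode can be detected below.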
try:
to, nick = module.params['to'].split('/', 1)
except ValueError:
to, nick = module.params['to'], None
if module.params['host']:
host = module.params['host']
else:
host = server
if module.params['encoding']:
        xmpp.simplexml.ENCODING = module.params['encoding']
msg = xmpp.protocol.Message(body=module.params['msg'])
try:
conn=xmpp.Client(server, debug=[])
if not conn.connect(server=(host,port)):
module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server))
if not conn.auth(user,password,'Ansible'):
module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user,server))
# some old servers require this, also the sleep following send
conn.sendInitPresence(requestRoster=0)
if nick: # sending to room instead of user, need to join
msg.setType('groupchat')
msg.setTag('x', namespace='http://jabber.org/protocol/muc#user')
conn.send(xmpp.Presence(to=module.params['to']))
time.sleep(1)
else:
msg.setType('chat')
msg.setTo(to)
if not module.check_mode:
conn.send(msg)
time.sleep(1)
conn.disconnect()
except Exception, e:
module.fail_json(msg="unable to send msg: %s" % e)
module.exit_json(changed=False, to=to, user=user, msg=msg.getBody())
# import module snippets
from ansible.module_utils.basic import *
main()
|
milad-soufastai/ansible-modules-extras
|
notification/jabber.py
|
Python
|
gpl-3.0
| 4,578
|
[
"Brian"
] |
b02df74224de9e72993010013a0b572f73b7356fac7624f37829f7e19d8f06f7
|
##
## Copyright (C) 2011 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import gtk
from kiwi.currency import currency
from kiwi.python import Settable
from kiwi.ui.objectlist import ColoredColumn, Column, ObjectTree
from stoqlib.domain.views import Account, AccountView
from stoqlib.gui.stockicons import (STOQ_MONEY, STOQ_PAYABLE_APP, STOQ_BILLS,
STOQ_TILL_APP)
from stoqlib.lib.parameters import sysparam
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
class StockTextColumn(Column):
"A column which you can add a stock item and a text"
def __init__(self, *args, **kwargs):
Column.__init__(self, *args, **kwargs)
def attach(self, objectlist):
column = Column.attach(self, objectlist)
self._pixbuf_renderer = gtk.CellRendererPixbuf()
column.pack_start(self._pixbuf_renderer, False)
return column
def cell_data_func(self, tree_column, renderer,
model, treeiter, (column, renderer_prop)):
row = model[treeiter]
data = column.get_attribute(row[0], column.attribute, None)
text = column.as_string(data)
renderer.set_property(renderer_prop, text)
pixbuf = self._objectlist.get_pixbuf(row[0])
self._pixbuf_renderer.set_property('pixbuf', pixbuf)
def sort_models(a, b):
return cmp(a.lower(),
b.lower())
class AccountTree(ObjectTree):
__gtype_name__ = 'AccountTree'
def __init__(self, with_code=True, create_mode=False):
self.create_mode = create_mode
self._accounts = {}
columns = [StockTextColumn('description', title=_("Account name"),
data_type=str, pack_end=True, expand=True,
sorted=True, sort_func=sort_models)]
if with_code:
columns.append(Column('code', title=_("Code"), data_type=str,
width=120))
if not create_mode:
# FIXME: This needs to be much better colorized, and moved to the
# domain classes
def colorize(account):
if (account.kind == 'account' and
account.account_type == Account.TYPE_INCOME):
return False
else:
return account.total < 0
columns.append(ColoredColumn('total', title=_("Total"), width=100,
data_type=currency,
color='red',
data_func=colorize,
use_data_model=True))
ObjectTree.__init__(self, columns,
mode=gtk.SELECTION_SINGLE)
def render_icon(icon):
return self.render_icon(icon, gtk.ICON_SIZE_MENU)
self._pixbuf_money = render_icon(STOQ_MONEY)
self._pixbuf_payable = render_icon(STOQ_PAYABLE_APP)
self._pixbuf_receivable = render_icon(STOQ_BILLS)
self._pixbuf_till = render_icon(STOQ_TILL_APP)
if self.create_mode:
self.set_headers_visible(False)
# Order the accounts top to bottom so
# ObjectTree.append() works as expected
def _orderaccounts(self, all_accounts, res=None, parent=None):
if not res:
res = []
if parent is None:
accounts = [a for a in all_accounts if a.parent_id is None]
else:
accounts = [a for a in all_accounts if a.parent_id == parent.id]
res.extend(accounts)
for account in accounts:
account.selectable = True
self._orderaccounts(all_accounts, res, account)
return res
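    # Totals roll up the tree: an account's displayed total is its own combined
    # value plus the recursively computed totals of all of its descendants.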
def _calculate_total(self, all_accounts, account):
total = account.get_combined_value()
for a in all_accounts:
if a.parent_id == account.id:
total += self._calculate_total(all_accounts, a)
return total
def get_pixbuf(self, model):
kind = model.kind
if kind == 'payable':
pixbuf = self._pixbuf_payable
elif kind == 'receivable':
pixbuf = self._pixbuf_receivable
elif kind == 'account':
till_account_id = sysparam.get_object_id('TILLS_ACCOUNT')
if model.matches(till_account_id):
pixbuf = self._pixbuf_till
else:
pixbuf = self._pixbuf_money
else:
return None
return pixbuf
def insert_initial(self, store, edited_account=None):
""" Insert accounts and parent accounts in a ObjectTree.
:param store: a store
:param edited_account: If not None, this is the account being edited.
        In this case, this account (and its descendants) will not be shown in
the account tree.
"""
till_id = sysparam.get_object_id('TILLS_ACCOUNT')
if self.create_mode and edited_account:
accounts = list(store.find(AccountView,
AccountView.id != edited_account.id))
else:
accounts = list(store.find(AccountView))
accounts = self._orderaccounts(accounts)
for account in accounts:
account.total = self._calculate_total(accounts, account)
if self.create_mode and account.matches(till_id):
account.selectable = False
self.add_account(account.parent_id, account)
selectable = not self.create_mode
# Tabs cache requires unique ids
self.append(None, Settable(description=_("Accounts Payable"),
id=-1,
parent=None,
kind='payable',
selectable=selectable,
total=None))
self.append(None, Settable(description=_("Accounts Receivable"),
id=-2,
parent=None,
kind='receivable',
selectable=selectable,
total=None))
self.flush()
def add_account(self, parent_id, account):
account.kind = 'account'
parent = self._accounts.get(parent_id)
self.append(parent, account)
self._accounts[account.id] = account
def get_account_by_id(self, account_id):
return self._accounts.get(account_id)
def refresh_accounts(self, store, account=None):
self._accounts = {}
self.clear()
self.insert_initial(store)
if account:
self.select(account)
self.flush()
|
andrebellafronte/stoq
|
stoqlib/gui/widgets/accounttree.py
|
Python
|
gpl-2.0
| 7,546
|
[
"VisIt"
] |
ca2b385fbfae2433ccdfd5ba770510afae9d9b6896cce4da1d71f8046e63f257
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
#
# @file matplotlib-ref-zernike.py -- plot zernike functions with matplotlib
# Copyright (C) 2011--2013 Tim van Werkhoven (timvanwerkhoven@gmail.com)
#
# This work is licensed under the Creative Commons Attribution-Share Alike
# 3.0 Unported License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to Creative
# Commons, 171 Second Street, Suite 300, San Francisco, California, 94105, USA.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import numpy as np
from scipy.misc import factorial as fac
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import ListedColormap, Normalize
from matplotlib import cm
from matplotlib import pyplot as plt
### Init zernike generation functions (from libtim.zern)
def zernike_rad(m, n, rho):
if (np.mod(n-m, 2) == 1):
return rho*0.0
wf = rho*0.0
for k in range((n-m)/2+1):
wf += rho**(n-2.0*k) * (-1.0)**k * fac(n-k) / ( fac(k) * fac( (n+m)/2.0 - k ) * fac( (n-m)/2.0 - k ) )
return wf
def zernike(m, n, rho, phi):
if (m > 0): return zernike_rad(m, n, rho) * np.cos(m * phi)
if (m < 0): return zernike_rad(-m, n, rho) * np.sin(-m * phi)
return zernike_rad(0, n, rho)
def zernikel(j, rho, phi):
n = 0
while (j > n):
n += 1
j = j - n
m = -n+2*j
return zernike(m, n, rho, phi)
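# zernikel() implements the 0-based OSA/ANSI single-index scheme,
# j = (n*(n + 2) + m) / 2; for example j=5 -> (m, n) = (2, 2) (astigmatism),
# j=7 -> (-1, 3) (coma) and j=15 -> (-5, 5).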
# Make colormap based on Paul Tol's best visibility gradients. See
# <http://www.sron.nl/~pault/> for more info on these colors. Also see
# <http://matplotlib.sourceforge.net/api/colors_api.html>
# and <http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps> on some
# matplotlib examples
colhue = np.arange(256.)/256
colred = 0.237 - 2.13*colhue + 26.92*colhue**2 - 65.5*colhue**3 + 63.5*colhue**4 - 22.36*colhue**5
colgrn = ((0.572 + 1.524*colhue - 1.811*colhue**2)/(1 - 0.291*colhue + 0.1574*colhue**2))**2
colblu = 1/(1.579 - 4.03*colhue + 12.92*colhue**2 - 31.4*colhue**3 + 48.6*colhue**4 - 23.36*colhue**5)
colmap = ListedColormap(np.r_[[colred],[colgrn],[colblu]].T, name='sron_blue-red', N=256)
### Generate coordinate grid in polar coordinates
r = np.linspace(0, 1.0, 15)**0.5
p = np.linspace(0, 2*np.pi, 100)
R,P = np.meshgrid(r,p)
# transform them to cartesian system
X,Y = R*np.cos(P), R*np.sin(P)
circ_rad = (X**2 + Y**2)**0.5
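# Note: circ_rad equals R by construction (X**2 + Y**2 == R**2); recomputing it
# from the cartesian grid keeps this correct if the grid definition changes.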
### Generate Zernike modes 5, 7, 11, 15
for zmode in (5, 7, 11, 15):
print "Generating matplotlib-ref-zernike_%d.pdf..." % zmode
zern_wf = zernikel(zmode, circ_rad, np.arctan2(X, Y))
fig = plt.figure(1+zmode); plt.clf()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, zern_wf, rstride=1, cstride=1, cmap=colmap)
ax.set_xlabel(r'X')
ax.set_ylabel(r'Y')
ax.set_zlabel(r'$\phi$')
ax.set_title("Zernike mode j=%d" % zmode)
plt.show()
plt.savefig("matplotlib-ref-zernike_%d.pdf" % zmode)
plt.savefig("matplotlib-ref-zernike_%d.eps" % zmode)
plt.close()
# EOF
|
tvwerkhoven/pretty-plots
|
matplotlib-ref-zernike.py
|
Python
|
gpl-2.0
| 3,558
|
[
"VisIt"
] |
6a472e06ddbcdabf73b9895319f163eb3e79612a601f769ae768a7b8a93f8447
|
#!/usr/bin/env python3
import os, sys, time, traceback
import argparse, multiprocessing
if sys.version_info < (3,7):
sys.exit('Sorry, Python < 3.7 is not supported')
# get the path of this script and add it to the "pythonpath"
SCRIPT_PATH = os.path.split(os.path.realpath(os.path.abspath(__file__)))[0]
sys.path.insert(0, SCRIPT_PATH)
from eggnogmapper.emapperException import EmapperException
from eggnogmapper.utils import colorify
from eggnogmapper.emapper import Emapper
from eggnogmapper.genepred.genepred_modes import GENEPRED_MODE_SEARCH, GENEPRED_MODE_PRODIGAL
from eggnogmapper.search.search_modes import \
SEARCH_MODE_NO_SEARCH, SEARCH_MODE_DIAMOND, \
SEARCH_MODE_HMMER, SEARCH_MODE_MMSEQS2, SEARCH_MODE_CACHE
from eggnogmapper.search.diamond.diamond import SENSMODES, SENSMODE_SENSITIVE, \
ALLOW_OVERLAPS_NONE, ALLOW_OVERLAPS_ALL, ALLOW_OVERLAPS_DIFF_FRAME, ALLOW_OVERLAPS_OPPOSITE_STRAND, \
DMND_ITERATE_YES, DMND_ITERATE_NO, DMND_ITERATE_DEFAULT, \
DMND_ALGO_AUTO, DMND_ALGO_0, DMND_ALGO_1, DMND_ALGO_CTG, DMND_ALGO_DEFAULT
from eggnogmapper.search.hmmer.hmmer_search import \
QUERY_TYPE_SEQ, QUERY_TYPE_HMM, \
DB_TYPE_SEQ, DB_TYPE_HMM
from eggnogmapper.search.hmmer.hmmer_setup import DEFAULT_PORT, DEFAULT_END_PORT
from eggnogmapper.annotation.pfam.pfam_modes import PFAM_REALIGN_NONE, PFAM_REALIGN_REALIGN, PFAM_REALIGN_DENOVO
from eggnogmapper.deco.decoration import \
DECORATE_GFF_NONE, DECORATE_GFF_GENEPRED, DECORATE_GFF_FIELD_DEFAULT
from eggnogmapper.annotation.tax_scopes.tax_scopes import \
parse_tax_scope, print_taxa, \
TAX_SCOPE_MODE_BROADEST, TAX_SCOPE_MODE_INNER_BROADEST, \
TAX_SCOPE_MODE_INNER_NARROWEST, TAX_SCOPE_MODE_NARROWEST
from eggnogmapper.common import existing_file, existing_dir, set_data_path, pexists, \
get_eggnogdb_file, get_eggnog_dmnd_db, get_eggnog_mmseqs_db, \
get_version, get_full_version_info, get_citation, get_call_info, \
ITYPE_CDS, ITYPE_PROTS, ITYPE_GENOME, ITYPE_META, \
MP_START_METHOD_DEFAULT, MP_START_METHOD_FORK, MP_START_METHOD_SPAWN, MP_START_METHOD_FORKSERVER
__description__ = ('A program for bulk functional annotation of novel '
'sequences using EggNOG database orthology assignments')
__author__ = 'Jaime Huerta Cepas'
__license__ = "GPL v2"
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
pass
def create_arg_parser():
parser = argparse.ArgumentParser(formatter_class=CustomFormatter)
parser.add_argument('-v', '--version', action='store_true',
help="show version and exit.")
parser.add_argument('--list_taxa', action="store_true",
help="List taxa available for --tax_scope/--tax_scope_mode, and exit")
##
pg_exec = parser.add_argument_group('Execution Options')
pg_exec.add_argument('--cpu', type=int, default=1, metavar='NUM_CPU',
help="Number of CPUs to be used. --cpu 0 to run with all available CPUs.")
pg_exec.add_argument('--mp_start_method', type=str, default=MP_START_METHOD_DEFAULT,
choices = [MP_START_METHOD_FORK, MP_START_METHOD_SPAWN, MP_START_METHOD_FORKSERVER],
help="Sets the python multiprocessing start method. Check https://docs.python.org/3/library/multiprocessing.html. Only use if the default method is not working properly in your OS.")
pg_exec.add_argument('--resume', action="store_true",
help=("Resumes a previous emapper run, skipping results in existing output files."))
pg_exec.add_argument('--override', action="store_true",
help=(
"Overwrites output files if they exist. "
"By default, execution is aborted if conflicting files are detected."))
##
pg_input = parser.add_argument_group('Input Data Options')
pg_input.add_argument('-i', dest="input", metavar='FASTA_FILE', type=existing_file,
help=f'Input FASTA file containing query sequences (proteins by default; see --itype and --translate). '
f'Required unless -m {SEARCH_MODE_NO_SEARCH}.')
pg_input.add_argument('--itype', dest="itype", choices = [ITYPE_CDS, ITYPE_PROTS, ITYPE_GENOME, ITYPE_META],
default=ITYPE_PROTS,
help=f'Type of data in the input (-i) file.')
pg_input.add_argument('--translate', action="store_true",
help=('When --itype CDS, translate CDS to proteins before search. '
'When --itype genome/metagenome and --genepred search, '
'translate predicted CDS from blastx hits to proteins.'))
pg_input.add_argument('--annotate_hits_table', type=str, metavar='SEED_ORTHOLOGS_FILE',
help=f'Annotate TSV formatted table with 4 fields:'
f' query, hit, evalue, score. '
f' Usually, a .seed_orthologs file from a previous emapper.py run. '
f' Requires -m {SEARCH_MODE_NO_SEARCH}.')
pg_input.add_argument('-c', '--cache', dest="cache_file", metavar='FILE', type=existing_file,
help=f'File containing annotations and md5 hashes of queries, to be used as cache. '
f'Required if -m {SEARCH_MODE_CACHE}')
pg_input.add_argument("--data_dir", metavar='DIR', type=existing_dir,
help=('Path to eggnog-mapper databases. '
'By default, "data/" or the path specified in the '
'environment variable EGGNOG_DATA_DIR.')) # DATA_PATH in eggnogmapper.commons
##
pg_genepred = parser.add_argument_group('Gene Prediction Options')
pg_genepred.add_argument('--genepred', dest='genepred', type=str, choices = [GENEPRED_MODE_SEARCH, GENEPRED_MODE_PRODIGAL],
default = GENEPRED_MODE_SEARCH,
help=(
f'This is applied when --itype {ITYPE_GENOME} or --itype {ITYPE_META}. '
f'{GENEPRED_MODE_SEARCH}: gene prediction is inferred from Diamond/MMseqs2 blastx hits. '
f'{GENEPRED_MODE_PRODIGAL}: gene prediction is performed using Prodigal. '
))
pg_genepred.add_argument('--trans_table', dest='trans_table', type=str, metavar='TRANS_TABLE_CODE',
help=(
f"This option will be used for Prodigal, Diamond or MMseqs2, depending on the mode. "
f"For Diamond searches, this option corresponds to the --query-gencode option. "
f"For MMseqs2 searches, this option corresponds to the --translation-table option. "
f"For Prodigal, this option corresponds to the -g/--trans_table option. "
f"It is also used when --translate, check https://biopython.org/docs/1.75/api/Bio.Seq.html#Bio.Seq.Seq.translate. "
f"Default is the corresponding programs defaults. "
))
pg_genepred.add_argument('--training_genome', dest='training_genome', type=existing_file, metavar='FILE',
help=(
"A genome to run Prodigal in training mode. "
"If this parameter is used, Prodigal will run in two steps: "
"firstly in training mode, and secondly using the training to analize the emapper input data. "
"See Prodigal documentation about Traning mode for more info. "
f"Only used if --genepred {GENEPRED_MODE_PRODIGAL}."
))
pg_genepred.add_argument('--training_file', dest='training_file', type=str, metavar='FILE',
help=(
"A training file from Prodigal training mode. "
"If this parameter is used, Prodigal will run using this training file to analyze the emapper input data. "
f"Only used if --genepred {GENEPRED_MODE_PRODIGAL}."
))
pg_genepred.add_argument('--allow_overlaps', dest='allow_overlaps', type=str, choices = [ALLOW_OVERLAPS_NONE,
ALLOW_OVERLAPS_OPPOSITE_STRAND,
ALLOW_OVERLAPS_DIFF_FRAME,
ALLOW_OVERLAPS_ALL],
default = ALLOW_OVERLAPS_NONE,
help = ("When using 'blastx'-based genepred (--genepred search --itype genome/metagenome) "
"this option controls whether overlapping hits are reported or not, "
"or if only those overlapping hits in a different strand or frame are reported. "
"Also, the degree of accepted overlap can be controlled with --overlap_tol."))
pg_genepred.add_argument('--overlap_tol', dest='overlap_tol', type=float, default=0.0, metavar='FLOAT',
help=("This value (0-1) is the proportion such that if (overlap size / hit length) "
"> overlap_tol, hits are considered to overlap. "
"e.g: if overlap_tol is 0.0, any overlap is considered as such. "
"e.g: if overlap_tol is 1.0, one of the hits must overlap entirely to "
"consider that hits do overlap."))
##
pg_search = parser.add_argument_group('Search Options')
pg_search.add_argument('-m', dest='mode',
choices = [SEARCH_MODE_DIAMOND, SEARCH_MODE_MMSEQS2, SEARCH_MODE_HMMER, SEARCH_MODE_NO_SEARCH, SEARCH_MODE_CACHE],
default=SEARCH_MODE_DIAMOND,
help=(
f'{SEARCH_MODE_DIAMOND}: search seed orthologs using diamond (-i is required). '
f'{SEARCH_MODE_MMSEQS2}: search seed orthologs using MMseqs2 (-i is required). '
f'{SEARCH_MODE_HMMER}: search seed orthologs using HMMER. (-i is required). '
f'{SEARCH_MODE_NO_SEARCH}: skip seed orthologs search (--annotate_hits_table is required, unless --no_annot). '
f'{SEARCH_MODE_CACHE}: skip seed orthologs search and annotate based on cached results (-i and -c are required).'
))
##
pg_diamond_mmseqs = parser.add_argument_group('Search filtering common options')
pg_diamond_mmseqs.add_argument('--pident', dest='pident', type=float, default=None,
help=(
                                       f'Report only alignments above or equal to the given percentage of identity (0-100). '
f'No effect if -m {SEARCH_MODE_HMMER}.'))
pg_diamond_mmseqs.add_argument('--query_cover', dest='query_cover', type=float, default=None,
help=(
                                       f'Report only alignments above or equal to the given percentage of query cover (0-100). '
f'No effect if -m {SEARCH_MODE_HMMER}.'))
pg_diamond_mmseqs.add_argument('--subject_cover', dest='subject_cover', type=float, default=None,
help=(
                                       f'Report only alignments above or equal to the given percentage of subject cover (0-100). '
f'No effect if -m {SEARCH_MODE_HMMER}.'))
pg_diamond_mmseqs.add_argument('--evalue', dest='evalue', type=float, default=0.001,
                                   help='Report only alignments below or equal to the e-value threshold.')
pg_diamond_mmseqs.add_argument('--score', dest='score', type=float, default=None,
                                   help='Report only alignments above or equal to the score threshold.')
##
pg_diamond = parser.add_argument_group('Diamond Search Options')
pg_diamond.add_argument('--dmnd_algo', dest="dmnd_algo", choices = [DMND_ALGO_AUTO, DMND_ALGO_0, DMND_ALGO_1, DMND_ALGO_CTG],
default = DMND_ALGO_DEFAULT,
help=("Diamond's --algo option, which can be tuned to search small query sets. "
"By default, it is adjusted automatically. "
f"However, the {DMND_ALGO_CTG} option should be activated manually. "
"If you plan to search a small input set of sequences, use --dmnd_algo ctg to make it faster."
))
pg_diamond.add_argument('--dmnd_db', dest="dmnd_db", metavar='DMND_DB_FILE',
help="Path to DIAMOND-compatible database")
pg_diamond.add_argument('--sensmode', dest='sensmode',
choices = SENSMODES,
default=SENSMODE_SENSITIVE,
help=(
"Diamond's sensitivity mode. "
"Note that emapper's default is "+SENSMODE_SENSITIVE+", "
"which is different from diamond's default, which can "
"be activated with --sensmode default."
))
pg_diamond.add_argument('--dmnd_iterate', dest='dmnd_iterate', choices = [DMND_ITERATE_YES, DMND_ITERATE_NO],
default = DMND_ITERATE_DEFAULT,
help=(
f"--dmnd_iterate {DMND_ITERATE_YES} --> activates the --iterate option of diamond for iterative searches, "
f"from faster, less sensitive modes, up to the sensitivity specified with --sensmode. "
f"Available since diamond 2.0.11. --dmnd_iterate {DMND_ITERATE_NO} --> disables the --iterate mode. "
))
pg_diamond.add_argument('--matrix', dest='matrix',
choices = ['BLOSUM62', 'BLOSUM90','BLOSUM80','BLOSUM50','BLOSUM45','PAM250','PAM70','PAM30'],
default=None, help='Scoring matrix')
pg_diamond.add_argument('--dmnd_frameshift', dest='dmnd_frameshift', type=int, default=None,
help='Diamond --frameshift/-F option. Not used by default. Recommended by diamond: 15.')
pg_diamond.add_argument('--gapopen', dest='gapopen', type=int, default=None,
help='Gap open penalty')
pg_diamond.add_argument('--gapextend', dest='gapextend', type=int, default=None,
help='Gap extend penalty')
pg_diamond.add_argument('--block_size', dest='dmnd_block_size', type=float, default=None, metavar='BLOCK_SIZE',
help="Diamond -b/--block-size option. Default is the diamond's default.")
pg_diamond.add_argument('--index_chunks', dest='dmnd_index_chunks', type=int, default=None, metavar='CHUNKS',
help="Diamond -c/--index-chunks option. Default is the diamond's default.")
pg_diamond.add_argument('--outfmt_short', action="store_true",
help=(
"Diamond output will include only qseqid sseqid evalue and score. "
"This could help obtain better performance, if also no --pident, --query_cover or --subject_cover thresholds are used. "
"This option is ignored when the diamond search is run in blastx mode for gene prediction (see --genepred)."
))
pg_diamond.add_argument('--dmnd_ignore_warnings', action="store_true",
help=(
"Diamond --ignore-warnings option. "
"It avoids Diamond stopping due to warnings (e.g. when a protein contains only ATGC symbols."
))
##
pg_mmseqs = parser.add_argument_group('MMseqs2 Search Options')
pg_mmseqs.add_argument('--mmseqs_db', dest="mmseqs_db", metavar='MMSEQS_DB_FILE',
help="Path to MMseqs2-compatible database")
pg_mmseqs.add_argument('--start_sens', dest='start_sens', default=3, type=float, metavar='START_SENS',
help="Starting sensitivity.")
pg_mmseqs.add_argument('--sens_steps', dest='sens_steps', default=3, type=int, metavar='SENS_STEPS',
help="Number of sensitivity steps.")
pg_mmseqs.add_argument('--final_sens', dest='final_sens', default=7, type=float, metavar='FINAL_SENS',
help="Final sensititivy step.")
pg_mmseqs.add_argument('--mmseqs_sub_mat', dest='mmseqs_sub_mat', default=None, type=str, metavar='SUBS_MATRIX',
help="Matrix to be used for --sub-mat MMseqs2 search option. Default=default used by MMseqs2")
##
pg_hmmer = parser.add_argument_group('HMMER Search Options')
pg_hmmer.add_argument('-d', '--database', dest='db', metavar='HMMER_DB_PREFIX',
help=('specify the target database for sequence searches. '
'Choose among: euk,bact,arch, or a database loaded in a server, db.hmm:host:port (see hmm_server.py)'))
pg_hmmer.add_argument('--servers_list', dest="servers_list", metavar="FILE",
help="A FILE with a list of remote hmmpgmd servers. "
"Each row in the file represents a server, in the format 'host:port'. "
"If --servers_list is specified, host and port from -d option will be ignored.")
pg_hmmer.add_argument('--qtype', choices=[QUERY_TYPE_HMM, QUERY_TYPE_SEQ], default=QUERY_TYPE_SEQ,
help="Type of input data (-i).")
pg_hmmer.add_argument('--dbtype', dest="dbtype",
choices=[DB_TYPE_HMM, DB_TYPE_SEQ], default=DB_TYPE_HMM,
help="Type of data in DB (-db).")
pg_hmmer.add_argument('--usemem', action="store_true",
help='''Use this option to allocate the whole database (-d) in memory using hmmpgmd.
If --dbtype hmm, the database must be a hmmpress-ed database.
If --dbtype seqdb, the database must be a HMMER-format database created with esl-reformat.
Database will be unloaded after execution.
Note that this only works for HMMER based searches.
To load the eggnog-mapper annotation DB into memory use --dbmem.''')
pg_hmmer.add_argument('-p', '--port', dest='port', type=int, default=DEFAULT_PORT, metavar='PORT',
help=('Port used to setup HMM server, when --usemem. Also used for --pfam_realign modes.'))
pg_hmmer.add_argument('--end_port', dest='end_port', type=int, default=DEFAULT_END_PORT, metavar='PORT',
help=('Last port to be used to setup HMM server, when --usemem. Also used for --pfam_realign modes.'))
pg_hmmer.add_argument('--num_servers', dest='num_servers', type=int, default=1, metavar="NUM_SERVERS",
help=("When using --usemem, specify the number of servers to fire up."
"Note that cpus specified with --cpu will be distributed among servers and workers."
" Also used for --pfam_realign modes."))
pg_hmmer.add_argument('--num_workers', dest='num_workers', type=int, default=1, metavar="NUM_WORKERS",
help=("When using --usemem, specify the number of "
"workers per server (--num_servers) to fire up. "
"By default, cpus specified with --cpu will be "
"distributed among servers and workers. "
"Also used for --pfam_realign modes."))
pg_hmmer.add_argument('--hmm_maxhits', dest='maxhits', type=int, default=1, metavar='MAXHITS',
help="Max number of hits to report (0 to report all).")
pg_hmmer.add_argument('--report_no_hits', action="store_true",
help="Whether queries without hits should be included in the output table.")
pg_hmmer.add_argument('--hmm_maxseqlen', dest='maxseqlen', type=int, default=5000, metavar='MAXSEQLEN',
help="Ignore query sequences larger than `maxseqlen`.")
pg_hmmer.add_argument('--Z', dest='Z', type=float, default=40000000, metavar='DB_SIZE',
help='Fixed database size used in phmmer/hmmscan'
' (allows comparing e-values among databases).')
pg_hmmer.add_argument('--cut_ga', action="store_true",
help=("Adds the --cut_ga to hmmer commands (useful for "
"Pfam mappings, for example). See hmmer documentation."))
pg_hmmer.add_argument('--clean_overlaps', dest="clean_overlaps", type=str, default=None, metavar="none|all|clans|hmmsearch_all|hmmsearch_clans",
help=('Removes those hits which overlap, keeping only the one with best evalue. '
'Use the "all" and "clans" options when performing a '
'hmmscan type search (i.e. domains are in the database). '
'Use the "hmmsearch_all" and "hmmsearch_clans" options '
'when using a hmmsearch type search (i.e. domains are the queries from -i file). '
'The "clans" and "hmmsearch_clans" and options will only '
'have effect for hits to/from Pfam.'))
##
pg_annot = parser.add_argument_group('Annotation Options')
pg_annot.add_argument("--no_annot", action="store_true",
help="Skip functional annotation, reporting only hits.")
pg_annot.add_argument('--dbmem', action="store_true",
help='''Use this option to allocate the whole eggnog.db DB in memory.
Database will be unloaded after execution.''')
pg_annot.add_argument('--seed_ortholog_evalue', default=0.001, type=float, metavar='MIN_E-VALUE',
help='Min E-value expected when searching for seed eggNOG ortholog.'
                               ' Queries not having a significant'
                               ' seed ortholog will not be annotated.')
pg_annot.add_argument('--seed_ortholog_score', default=None, type=float, metavar='MIN_SCORE',
help='Min bit score expected when searching for seed eggNOG ortholog.'
                               ' Queries not having a significant'
                               ' seed ortholog will not be annotated.')
pg_annot.add_argument("--tax_scope", type=str, default='auto',
help=("Fix the taxonomic scope used for annotation, so only speciation events from a "
"particular clade are used for functional transfer. "
"More specifically, the --tax_scope list is intersected with the "
"seed orthologs clades, "
"and the resulting clades are used for annotation based on --tax_scope_mode. "
"Note that those seed orthologs without clades intersecting with --tax_scope "
"will be filtered out, and won't annotated. "
"Possible arguments for --tax_scope are: "
"1) A path to a file defined by the user, which contains "
"a list of tax IDs and/or tax names. "
"2) The name of a pre-configured tax scope, whose source is "
"a file stored within the 'eggnogmapper/annotation/tax_scopes/' directory "
"By default, available ones are: 'auto' ('all'), 'auto_broad' ('all_broad'), "
"'all_narrow', 'archaea', "
"'bacteria', 'bacteria_broad', 'eukaryota', 'eukaryota_broad' "
"and 'prokaryota_broad'."
"3) A comma-separated list of taxonomic names and/or taxonomic IDs, "
"sorted by preference. "
"An example of list of tax IDs would be 2759,2157,2,1 for Eukaryota, "
"Archaea, Bacteria and root, in that order of preference. "
"4) 'none': do not filter out annotations based on taxonomic scope."))
pg_annot.add_argument("--tax_scope_mode", type=str, default=TAX_SCOPE_MODE_INNER_NARROWEST,
help=("For a seed ortholog which passed the filter imposed by --tax_scope, "
"--tax_scope_mode controls which specific clade, to which the "
"seed ortholog belongs, will be used for annotation. "
f"Options: "
f"1) {TAX_SCOPE_MODE_BROADEST}: use the broadest clade. "
f"2) {TAX_SCOPE_MODE_INNER_BROADEST}: use the broadest clade "
"from the intersection with --tax_scope. "
f"3) {TAX_SCOPE_MODE_INNER_NARROWEST}: use the narrowest clade "
"from the intersection with --tax_scope. "
f"4) {TAX_SCOPE_MODE_NARROWEST}: use the narrowest clade. "
f"5) A taxonomic scope as in --tax_scope: use this second list "
"to intersect with seed ortholog clades and "
f"use the narrowest (as in inner_narrowest) from the intersection to annotate."))
pg_annot.add_argument('--target_orthologs', choices=["one2one", "many2one",
"one2many","many2many", "all"],
default="all",
help='defines what type of orthologs (in relation to the seed ortholog) should be used for functional transfer')
pg_annot.add_argument('--target_taxa', type=str, metavar="LIST_OF_TAX_IDS",
default=None,
help=("Only orthologs from the specified comma-separated list of taxa and all its descendants "
"will be used for annotation transference. By default, all taxa are used."))
pg_annot.add_argument('--excluded_taxa', type=str, metavar="LIST_OF_TAX_IDS",
default=None,
help=('Orthologs from the specified comma-separated list of taxa and all its descendants will not be '
                               'used for annotation transference. By default, no taxa are excluded.'))
pg_annot.add_argument("--report_orthologs", action="store_true",
help="Output the list of orthologs found for each query to a .orthologs file")
pg_annot.add_argument('--go_evidence', type=str, choices=('experimental', 'non-electronic', 'all'),
default='non-electronic',
help='Defines what type of GO terms should be used for annotation. '
'experimental = Use only terms inferred from experimental evidence. '
'non-electronic = Use only non-electronically curated terms')
pg_annot.add_argument('--pfam_realign', type=str,
choices=(PFAM_REALIGN_NONE, PFAM_REALIGN_REALIGN, PFAM_REALIGN_DENOVO),
default=PFAM_REALIGN_NONE,
help=('Realign the queries to the PFAM domains. '
f'{PFAM_REALIGN_NONE} = no realignment is performed. PFAM annotation will be '
                                'the one transferred as specified in the --pfam_transfer option. '
f'{PFAM_REALIGN_REALIGN} = queries will be realigned to the PFAM domains '
'found according to the --pfam_transfer option. '
f'{PFAM_REALIGN_DENOVO} = queries will be realigned to the whole PFAM database, '
'ignoring the --pfam_transfer option. '
f'Check hmmer options (--num_servers, --num_workers, --port, --end_port) '
'to change how the hmmpgmd server is run. '))
pg_annot.add_argument("--md5", action="store_true",
help="Adds the md5 hash of each query as an additional field in annotations output files.")
##
pg_out = parser.add_argument_group('Output options')
pg_out.add_argument('--output', '-o', type=str, metavar='FILE_PREFIX',
help="base name for output files")
pg_out.add_argument("--output_dir", default=os.getcwd(), type=existing_dir, metavar='DIR',
help="Where output files should be written")
pg_out.add_argument("--scratch_dir", metavar='DIR', type=existing_dir,
help='Write output files in a temporary scratch dir, move them to the final'
' output dir when finished. Speed up large computations using network file'
' systems.')
pg_out.add_argument("--temp_dir", default=os.getcwd(), type=existing_dir, metavar='DIR',
help="Where temporary files are created. Better if this is a local disk.")
pg_out.add_argument('--no_file_comments', action="store_true",
help="No header lines nor stats are included in the output files")
pg_out.add_argument('--decorate_gff', type=str, default=DECORATE_GFF_NONE,
help=(
"Add search hits and/or annotation results to GFF file from gene prediction of a user specified one. "
f"{DECORATE_GFF_NONE} = no GFF decoration at all. GFF file from blastx-based gene prediction will be created anyway. "
f"{DECORATE_GFF_GENEPRED} = add search hits and/or annotations to GFF file from Prodigal or blastx-based gene prediction. "
f"FILE = decorate the specified pre-existing GFF FILE. e.g. --decorage_gff myfile.gff "
f"You change the field interpreted as ID of the feature with --decorate_gff_ID_field. "
))
pg_out.add_argument('--decorate_gff_ID_field', type=str, default=DECORATE_GFF_FIELD_DEFAULT,
help="Change the field used in GFF files as ID of the feature.")
pg_out.add_argument("--excel", action="store_true",
help="Output annotations also in .xlsx format.")
return parser
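# Typical invocations (illustrative; only options defined above are used):
#   emapper.py -i proteins.faa -o run1 --cpu 4
#   emapper.py -m no_search --annotate_hits_table hits.seed_orthologs -o run2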
##
def parse_args(parser):
args = parser.parse_args()
if "EGGNOG_DATA_DIR" in os.environ:
set_data_path(os.environ["EGGNOG_DATA_DIR"])
if args.data_dir:
set_data_path(args.data_dir)
if args.version:
version = ""
try:
version = get_full_version_info()
except Exception:
version = get_version()
print(version)
sys.exit(0)
args.call_info = get_call_info()
if args.list_taxa:
print_taxa()
sys.exit(0)
if args.cpu == 0:
args.cpu = multiprocessing.cpu_count()
multiprocessing.set_start_method(args.mp_start_method)
if args.resume == True and args.override == True:
parser.error('Only one of --resume or --override is allowed.')
# Gene prediction
if args.training_genome is not None and args.training_file is None:
parser.error('"--training_genome requires --training_file"')
if args.training_genome is None and args.training_file is not None:
if not os.path.isfile(args.training_file):
parser.error('"--training_file must point to an existing file, if no --training_genome is provided."')
# Search modes
if args.mode == SEARCH_MODE_DIAMOND:
dmnd_db = args.dmnd_db if args.dmnd_db else get_eggnog_dmnd_db()
if not pexists(dmnd_db):
print(colorify('DIAMOND database %s not present. Use download_eggnog_database.py to fetch it' % dmnd_db, 'red'))
raise EmapperException()
if args.input is not None:
if args.annotate_hits_table is not None:
print(colorify(f"--annotate_hits_table will be ignored, due to -m {SEARCH_MODE_DIAMOND}", 'blue'))
args.annotate_hits_table = None
else:
# the default -m is diamond, but we will consider -m no_search as default when
# --annotate_hits_table has been provided and -i has not been provided
if args.annotate_hits_table is not None:
print(colorify(f"Assuming -m {SEARCH_MODE_NO_SEARCH}", 'blue'))
args.mode = SEARCH_MODE_NO_SEARCH
else:
parser.error('An input fasta file is required (-i)')
# Output file required
if not args.output:
parser.error('An output project name is required (-o)')
elif args.mode == SEARCH_MODE_MMSEQS2:
mmseqs_db = args.mmseqs_db if args.mmseqs_db else get_eggnog_mmseqs_db()
if not pexists(mmseqs_db):
print(colorify('MMseqs2 database %s not present. Use download_eggnog_database.py to fetch it' % mmseqs_db, 'red'))
raise EmapperException()
if not args.input:
parser.error('An input fasta file is required (-i)')
# Output file required
if not args.output:
parser.error('An output project name is required (-o)')
if args.annotate_hits_table is not None:
print(colorify(f"--annotate_hits_table will be ignored, due to -m {SEARCH_MODE_MMSEQS2}", 'blue'))
args.annotate_hits_table = None
elif args.mode == SEARCH_MODE_HMMER:
if not args.input:
parser.error('An input file is required (-i)')
# Output file required
if not args.output:
parser.error('An output project name is required (-o)')
# Hmmer database
# NOTE: hmmer database format, name and checking if exists is done within hmmer module
if not args.db:
parser.error('HMMER mode requires a target database (-d, --database).')
if args.itype == ITYPE_CDS:
args.translate = True
if (args.itype == ITYPE_GENOME or args.itype == ITYPE_META) and args.genepred == GENEPRED_MODE_SEARCH:
parser.error('HMMER mode is not compatible with "--genepred search" option.')
if args.annotate_hits_table is not None:
print(colorify(f"--annotate_hits_table will be ignored, due to -m {SEARCH_MODE_HMMER}", 'blue'))
args.annotate_hits_table = None
if args.clean_overlaps is not None:
if args.clean_overlaps == "none":
args.clean_overlaps = None
elif args.mode == SEARCH_MODE_CACHE:
if args.cache_file is None:
parser.error('A file with annotations and md5 of queries is required (-c FILE)')
if args.decorate_gff != DECORATE_GFF_NONE:
print(colorify("WARNING: no GFF will be created for cache-based annotations. It is not implemented yet, sorry.", 'red'))
if args.no_annot == True:
parser.error(f'Cache mode (-m {SEARCH_MODE_CACHE}) should be used to annotate.')
elif args.mode == SEARCH_MODE_NO_SEARCH:
if args.no_annot == False and not args.annotate_hits_table:
parser.error(f'No search mode (-m {SEARCH_MODE_NO_SEARCH}) requires a hits table to annotate (--annotate_hits_table FILE.seed_orthologs)')
if args.md5 == True and args.input is None:
parser.error(f'--md5 requires an input FASTA file (-i FASTA).')
else:
parser.error(f'unrecognized search mode (-m {args.mode})')
# Search thresholds
args.dmnd_evalue = args.mmseqs_evalue = args.hmm_evalue = args.evalue
    args.dmnd_score = args.mmseqs_score = args.hmm_score = args.score
args.qcov = args.query_cover
# Annotation options
if args.no_annot == False or args.report_orthologs == True:
if not pexists(get_eggnogdb_file()):
print(colorify('Annotation database data/eggnog.db not present. Use download_eggnog_database.py to fetch it', 'red'))
raise EmapperException()
args.tax_scope_ids = parse_tax_scope(args.tax_scope)
if args.target_taxa is not None:
args.target_taxa = args.target_taxa.split(",")
if args.excluded_taxa is not None:
args.excluded_taxa = args.excluded_taxa.split(",")
# Sets GO evidence bases
if args.go_evidence == 'experimental':
args.go_evidence = set(["EXP","IDA","IPI","IMP","IGI","IEP"])
args.go_excluded = set(["ND", "IEA"])
elif args.go_evidence == 'non-electronic':
args.go_evidence = None
args.go_excluded = set(["ND", "IEA"])
elif args.go_evidence == 'all':
args.go_evidence = None
args.go_excluded = None
else:
raise ValueError('Invalid --go_evidence value')
# PFAM annotation options
if args.pfam_realign == PFAM_REALIGN_NONE:
pass
elif args.pfam_realign == PFAM_REALIGN_REALIGN or args.pfam_realign == PFAM_REALIGN_DENOVO:
if not args.input:
parser.error(f'An input fasta file is required (-i) for --pfam_realign {args.pfam_realign}')
else:
raise ValueError(f'Invalid --pfam_realign option {args.pfam_realign}')
total_workers = args.num_workers * args.num_servers
if args.cpu < total_workers:
parser.error(f"Less cpus ({args.cpu}) than total workers ({total_workers}) were specified.")
if args.cpu % total_workers != 0:
parser.error(f"Number of cpus ({args.cpu}) must be a multiple of total workers ({total_workers}).")
args.cpus_per_worker = int(args.cpu / total_workers)
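    # e.g. --cpu 8 --num_servers 2 --num_workers 2 gives total_workers = 4 and
    # cpus_per_worker = 2; --cpu 6 with the same settings fails the modulo check.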
return args
if __name__ == "__main__":
__spec__ = None
try:
start_time = time.time()
parser = create_arg_parser()
args = parse_args(parser)
print('# ', get_version())
print('# emapper.py ', ' '.join(sys.argv[1:]))
emapper = Emapper(args.itype, args.genepred, args.mode, (not args.no_annot),
args.excel, args.report_orthologs, args.decorate_gff,
args.output, args.output_dir, args.scratch_dir,
args.resume, args.override)
n, elapsed_time = emapper.run(args, args.input, args.annotate_hits_table, args.cache_file)
elapsed_time = time.time() - start_time
print(get_citation([args.mode, args.genepred]))
print(f'Total hits processed: {n}')
print(f'Total time: {elapsed_time:.0f} secs')
except EmapperException as ee:
print(ee)
sys.exit(1)
except Exception as e:
traceback.print_exc()
# print(e)
sys.exit(1)
else:
print("FINISHED")
sys.exit(0)
## END
|
jhcepas/eggnog-mapper
|
emapper.py
|
Python
|
gpl-2.0
| 40,600
|
[
"Biopython"
] |
0221474c0e1240c43d190900f23e96d73dff93cc14499ba381a136024f1efdd5
|
from mc_objects import (ENCHANT_WEAPONS, ENCHANT_ARMOR, ENCHANT_HELMETS,
ENCHANT_BOOTS, ENCHANT_TOOLS, ENCHANT_BOWS, ENCHANT_SHIELDS,
ENCHANT_ELYTRA, MCEnchant, register_enchant, register_item, MCItem,
WEAPONS, BOOTS, HELMETS, ARMOR, TOOLS, BOWS, SHIELDS, ELYTRA, AXES,
ENCHANT_AXES, register_attribute, MCAttribute, ITEM_ATTRIBUTES,
MOB_ATTRIBUTES, register_potion, MCPotion, register_mob, MCMob)
from os.path import join, dirname, abspath
RESOURCE_ADD = 'minecraft'
def get_texture_location(name, cat='items'):
return join(dirname(abspath(__file__)), 'textures', cat, name)
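# e.g. get_texture_location('Blaze_Face.png', cat='mobs') resolves to
# <this module's directory>/textures/mobs/Blaze_Face.png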
def register_potions():
register_potion(MCPotion(1, "Speed"))
register_potion(MCPotion(2, "Slowness"))
register_potion(MCPotion(3, "Haste"))
register_potion(MCPotion(4, "Mining Fatigue"))
register_potion(MCPotion(5, "Strength"))
register_potion(MCPotion(6, "Instant Health"))
register_potion(MCPotion(7, "Instant Damage"))
register_potion(MCPotion(8, "Jump Boost"))
register_potion(MCPotion(9, "Nausea"))
register_potion(MCPotion(10, "Regeneration"))
register_potion(MCPotion(11, "Resistance"))
register_potion(MCPotion(12, "Fire Resistance"))
register_potion(MCPotion(13, "Water Breathing"))
register_potion(MCPotion(14, "Invisibility"))
register_potion(MCPotion(15, "Blindness"))
register_potion(MCPotion(16, "Night Vision"))
register_potion(MCPotion(17, "Hunger"))
register_potion(MCPotion(18, "Weakness"))
register_potion(MCPotion(19, "Poison"))
register_potion(MCPotion(20, "Wither"))
register_potion(MCPotion(21, "Health Boost"))
register_potion(MCPotion(22, "Absorption"))
register_potion(MCPotion(23, "Saturation"))
register_potion(MCPotion(24, "Glowing"))
register_potion(MCPotion(25, "Levitation"))
register_potion(MCPotion(26, "Luck"))
register_potion(MCPotion(27, "Bad Luck"))
def register_mobs():
register_mob(MCMob(
"minecraft", "Blaze",
image=get_texture_location('Blaze_Face.png', cat='mobs')))
register_mob(MCMob(
"minecraft", "Creeper",
image=get_texture_location('CreeperFace.png', cat='mobs'),
options=[('powered', 'boolean', None, 'Byte'),
('ExplosionRadius', 'range', (0, 10, 1), 'Byte'),
                 ('Fuse', 'range', (0, 240, 1), 'Short')]))
register_mob(MCMob(
"minecraft", "Spider",
image=get_texture_location('SpiderFace.png', cat='mobs')))
register_mob(MCMob(
"minecraft", "CaveSpider",
image=get_texture_location('CaveSpiderFace.png', cat='mobs')))
register_mob(MCMob(
"minecraft", "LavaSlime",
image=get_texture_location('Magma_Cube_Face.png', cat='mobs'),
options=[('Size', 'range', (0, 5, 1), 'Int')]))
register_mob(MCMob(
"minecraft", "Shulker",
image=get_texture_location('ShulkerFace.png', cat='mobs')))
register_mob(MCMob(
"minecraft", "Silverfish",
image=get_texture_location('SilverfishFace.png', cat='mobs')))
register_mob(MCMob(
"minecraft", "Skeleton",
image=get_texture_location('SkeletonFace.png', cat='mobs'),
has_inventory=True,
options=[('SkeletonType', 'options', [('Normal', 0), ('Wither', 1)],
'Byte')]))
register_mob(MCMob(
"minecraft", "Witch",
image=get_texture_location('Witchface2.png', cat='mobs')))
register_mob(MCMob(
"minecraft", "Bat",
image=get_texture_location('BatFace.png', cat='mobs')))
register_mob(MCMob(
"minecraft", "Zombie",
has_inventory=True,
image=get_texture_location('ZombieFace.png', cat='mobs'),
options=[('IsVillager', 'boolean', None, 'Byte'),
('IsBaby', 'boolean', None, 'Byte'),
('CanBreakDoors', 'boolean', None, 'Byte'),
('VillagerProfession', 'options',
[('Farmer', 0), ('Librarian', 1),
('Priest', 2), ('Blacksmith', 3),
('Butcher', 4)], 'Int')]))
register_mob(MCMob(
"minecraft", "PigZombie",
image=get_texture_location('ZombiePigmanFace.png', cat='mobs'),
has_inventory=True,
options=[('IsVillager', 'boolean', None, 'Byte'),
('IsBaby', 'boolean', None, 'Byte'),
('CanBreakDoors', 'boolean', None, 'Byte'),
('VillagerProfession', 'options',
[('Farmer', 0), ('Librarian', 1),
('Priest', 2), ('Blacksmith', 3),
('Butcher', 4)], 'Int'),
('Anger', 'range', (0, 32767, 20), 'Short')]))
register_mob(MCMob(
'minecraft', 'Chicken',
image=get_texture_location("ChickenFace.png", cat='mobs'),
options=[('IsChickenJockey', 'boolean', None, 'Byte'),
('EggLayTime', 'range', (-999999, 999999, 1), 'Int'),
('Age', 'range', (-999999, 999999, 1), 'Int')]))
register_mob(MCMob(
'minecraft', 'Pig',
image=get_texture_location("PigFace.png", cat='mobs'),
options=[('Saddle', 'boolean', None, 'Byte'),
('Age', 'range', (-999999, 999999, 1), 'Int')]))
register_mob(MCMob(
'minecraft', 'EntityHorse',
image=get_texture_location("HorseHead.png", cat='mobs'),
options=[('Saddle', 'boolean', None, 'Byte'),
('Tame', 'boolean', None, 'Byte'),
('Age', 'range', (-999999, 999999, 1), 'Int'),
('SkeletonTrap', 'boolean', None, 'Byte'),
('SkeletonTrapTime', 'range', (-999999, 999999, 1), 'Int'),
('Type', 'options',
[('Horse', 0), ('Donkey', 1),
('Mule', 2), ('Zombie', 3),
('Skeleton', 4)], 'Int'),
('Variant', 'options',
[('White', 0), ('Creamy', 1),
('Chestnut', 2), ('Brown', 3),
('Black', 4), ('Gray', 5),
('Dark Brown', 6), ('White-White', 256),
('White-Creamy', 257), ('White-Chestnut', 258),
('White-Brown', 259), ('White-Black', 260),
('White-Gray', 261), ('White-DarkBrown', 262),
('WhiteField-White', 512), ('WhiteField-Creamy', 513),
('WhiteField-Chestnut', 514), ('WhiteField-Brown', 515),
('WhiteField-Black', 516), ('WhiteField-Gray', 517),
('WhiteField-DarkBrown', 518),
('WhiteDots-White', 768), ('WhiteDots-Creamy', 769),
('WhiteDots-Chestnut', 770), ('WhiteDots-Brown', 771),
('WhiteDots-Black', 772), ('WhiteDots-Gray', 773),
('WhiteDots-DarkBrown', 774),
('BlackDots-White', 1024), ('BlackDots-Creamy', 1025),
('BlackDots-Chestnut', 1026), ('BlackDots-Brown', 1027),
('BlackDots-Black', 1028), ('BlackDots-Gray', 1029),
('BlackDots-DarkBrown', 1030)], 'Int'),
]))
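# Inferred from the calls above (an assumption about the MCMob API, not a
# documented contract): each entry in an options list is a 4-tuple
#   (nbt_key, widget_type, widget_params, nbt_tag_type)
# where widget_type is 'boolean' (params None), 'range' (params
# (min, max, step)) or 'options' (params [(label, value), ...]), e.g.
#   ('Fuse', 'range', (0, 240, 1), 'Short')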
def register_enchants():
register_enchant(MCEnchant(0, 'Protection', 4, [ENCHANT_ARMOR,
ENCHANT_HELMETS,
ENCHANT_BOOTS]))
register_enchant(MCEnchant(1, 'Fire Protection', 4, [ENCHANT_ARMOR,
ENCHANT_HELMETS,
ENCHANT_BOOTS]))
register_enchant(MCEnchant(2, 'Feather Falling', 4, [ENCHANT_BOOTS]))
register_enchant(MCEnchant(3, 'Blast Protection', 4, [ENCHANT_ARMOR,
ENCHANT_HELMETS,
ENCHANT_BOOTS]))
register_enchant(MCEnchant(4, 'Projectile Protection', 4, [ENCHANT_ARMOR,
ENCHANT_HELMETS,
ENCHANT_BOOTS]))
register_enchant(MCEnchant(5, 'Respiration', 3, [ENCHANT_HELMETS]))
register_enchant(MCEnchant(6, 'Aqua Affinity', 1, [ENCHANT_HELMETS]))
register_enchant(MCEnchant(7, 'Thorns', 3, [ENCHANT_ARMOR, ENCHANT_BOOTS,
ENCHANT_HELMETS]))
register_enchant(MCEnchant(8, 'Depth Strider', 3, [ENCHANT_ARMOR,
ENCHANT_HELMETS,
ENCHANT_BOOTS]))
register_enchant(MCEnchant(9, 'Frost Walker', 2, [ENCHANT_BOOTS]))
register_enchant(MCEnchant(16, 'Sharpness', 5, [ENCHANT_WEAPONS,
ENCHANT_AXES]))
register_enchant(MCEnchant(17, 'Smite', 5, [ENCHANT_WEAPONS,
ENCHANT_AXES]))
register_enchant(MCEnchant(18, 'Bane of Arthropods', 5, [ENCHANT_WEAPONS,
ENCHANT_AXES]))
register_enchant(MCEnchant(19, 'Knockback', 2, [ENCHANT_WEAPONS,
ENCHANT_AXES]))
register_enchant(MCEnchant(20, 'Fire Aspect', 2, [ENCHANT_WEAPONS,
ENCHANT_AXES]))
register_enchant(MCEnchant(21, 'Looting', 3, [ENCHANT_WEAPONS,
ENCHANT_AXES]))
register_enchant(MCEnchant(32, 'Efficiency', 5, [ENCHANT_TOOLS,
ENCHANT_AXES]))
register_enchant(MCEnchant(33, 'Silk Touch', 1, [ENCHANT_TOOLS,
ENCHANT_AXES]))
register_enchant(MCEnchant(34, 'Unbreaking', 3,
[ENCHANT_TOOLS, ENCHANT_ARMOR, ENCHANT_WEAPONS,
ENCHANT_BOWS, ENCHANT_SHIELDS,
ENCHANT_ELYTRA, ENCHANT_BOOTS,
ENCHANT_HELMETS, ENCHANT_AXES]))
register_enchant(MCEnchant(35, 'Fortune', 3, [ENCHANT_TOOLS,
ENCHANT_AXES]))
    register_enchant(MCEnchant(48, 'Power', 5, [ENCHANT_BOWS]))
    register_enchant(MCEnchant(49, 'Punch', 2, [ENCHANT_BOWS]))
register_enchant(MCEnchant(50, 'Flame', 1, [ENCHANT_BOWS]))
register_enchant(MCEnchant(51, 'Infinity', 1, [ENCHANT_BOWS]))
register_enchant(MCEnchant(70, 'Mending', 1,
[ENCHANT_TOOLS, ENCHANT_ARMOR, ENCHANT_WEAPONS,
ENCHANT_BOWS, ENCHANT_SHIELDS,
ENCHANT_ELYTRA, ENCHANT_BOOTS,
ENCHANT_HELMETS, ENCHANT_AXES]))
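# Inferred from the calls above (an assumption, not a documented API): an
# enchantment is registered as MCEnchant(numeric_id, display_name,
# max_level, applicable_categories), e.g.
#   MCEnchant(0, 'Protection', 4, [ENCHANT_ARMOR, ENCHANT_HELMETS, ENCHANT_BOOTS])
# makes Protection available at levels I-IV on the three armor categories.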
def register_items():
#wood
register_item(MCItem(RESOURCE_ADD, 'wooden_sword', WEAPONS,
get_texture_location('wood_sword.png')))
register_item(MCItem(RESOURCE_ADD, 'wooden_axe', AXES,
get_texture_location('wood_axe.png')))
register_item(MCItem(RESOURCE_ADD, 'wooden_pickaxe', TOOLS,
get_texture_location('wood_pickaxe.png')))
register_item(MCItem(RESOURCE_ADD, 'wooden_hoe', TOOLS,
get_texture_location('wood_hoe.png')))
register_item(MCItem(RESOURCE_ADD, 'wooden_shovel', TOOLS,
get_texture_location('wood_shovel.png')))
#stone
register_item(MCItem(RESOURCE_ADD, 'stone_sword', WEAPONS,
get_texture_location('stone_sword.png')))
register_item(MCItem(RESOURCE_ADD, 'stone_axe', AXES,
get_texture_location('stone_axe.png')))
register_item(MCItem(RESOURCE_ADD, 'stone_pickaxe', TOOLS,
get_texture_location('stone_pickaxe.png')))
register_item(MCItem(RESOURCE_ADD, 'stone_hoe', TOOLS,
get_texture_location('stone_hoe.png')))
register_item(MCItem(RESOURCE_ADD, 'stone_shovel', TOOLS,
get_texture_location('stone_shovel.png')))
#iron
register_item(MCItem(RESOURCE_ADD, 'iron_sword', WEAPONS,
get_texture_location('iron_sword.png')))
register_item(MCItem(RESOURCE_ADD, 'iron_axe', AXES,
get_texture_location('iron_axe.png')))
register_item(MCItem(RESOURCE_ADD, 'iron_pickaxe', TOOLS,
get_texture_location('iron_pickaxe.png')))
register_item(MCItem(RESOURCE_ADD, 'iron_hoe', TOOLS,
get_texture_location('iron_hoe.png')))
register_item(MCItem(RESOURCE_ADD, 'iron_shovel', TOOLS,
get_texture_location('iron_shovel.png')))
#gold
register_item(MCItem(RESOURCE_ADD, 'golden_sword', WEAPONS,
get_texture_location('gold_sword.png')))
register_item(MCItem(RESOURCE_ADD, 'golden_axe', AXES,
get_texture_location('gold_axe.png')))
register_item(MCItem(RESOURCE_ADD, 'golden_pickaxe', TOOLS,
get_texture_location('gold_pickaxe.png')))
register_item(MCItem(RESOURCE_ADD, 'golden_hoe', TOOLS,
get_texture_location('gold_hoe.png')))
register_item(MCItem(RESOURCE_ADD, 'golden_shovel', TOOLS,
get_texture_location('gold_shovel.png')))
#diamond
register_item(MCItem(RESOURCE_ADD, 'diamond_sword', WEAPONS,
get_texture_location('diamond_sword.png')))
register_item(MCItem(RESOURCE_ADD, 'diamond_axe', AXES,
get_texture_location('diamond_axe.png')))
register_item(MCItem(RESOURCE_ADD, 'diamond_pickaxe', TOOLS,
get_texture_location('diamond_pickaxe.png')))
register_item(MCItem(RESOURCE_ADD, 'diamond_hoe', TOOLS,
get_texture_location('diamond_hoe.png')))
register_item(MCItem(RESOURCE_ADD, 'diamond_shovel', TOOLS,
get_texture_location('diamond_shovel.png')))
#leather armor
register_item(MCItem(RESOURCE_ADD, 'leather_helmet', HELMETS,
get_texture_location('leather_helmet.png')))
register_item(MCItem(RESOURCE_ADD, 'leather_boots', BOOTS,
get_texture_location('leather_boots.png')))
register_item(MCItem(RESOURCE_ADD, 'leather_chestplate', ARMOR,
get_texture_location('leather_chestplate.png')))
register_item(MCItem(RESOURCE_ADD, 'leather_leggings', ARMOR,
get_texture_location('leather_leggings.png')))
#iron
register_item(MCItem(RESOURCE_ADD, 'iron_helmet', HELMETS,
get_texture_location('iron_helmet.png')))
register_item(MCItem(RESOURCE_ADD, 'iron_boots', BOOTS,
get_texture_location('iron_boots.png')))
register_item(MCItem(RESOURCE_ADD, 'iron_chestplate', ARMOR,
get_texture_location('iron_chestplate.png')))
register_item(MCItem(RESOURCE_ADD, 'iron_leggings', ARMOR,
get_texture_location('iron_leggings.png')))
#gold
register_item(MCItem(RESOURCE_ADD, 'golden_helmet', HELMETS,
get_texture_location('gold_helmet.png')))
register_item(MCItem(RESOURCE_ADD, 'golden_boots', BOOTS,
get_texture_location('gold_boots.png')))
register_item(MCItem(RESOURCE_ADD, 'golden_chestplate', ARMOR,
get_texture_location('gold_chestplate.png')))
register_item(MCItem(RESOURCE_ADD, 'golden_leggings', ARMOR,
get_texture_location('gold_leggings.png')))
#diamond
register_item(MCItem(RESOURCE_ADD, 'diamond_helmet', HELMETS,
get_texture_location('diamond_helmet.png')))
register_item(MCItem(RESOURCE_ADD, 'diamond_boots', BOOTS,
get_texture_location('diamond_boots.png')))
register_item(MCItem(RESOURCE_ADD, 'diamond_chestplate', ARMOR,
get_texture_location('diamond_chestplate.png')))
register_item(MCItem(RESOURCE_ADD, 'diamond_leggings', ARMOR,
get_texture_location('diamond_leggings.png')))
#chainmail
register_item(MCItem(RESOURCE_ADD, 'chainmail_helmet', HELMETS,
get_texture_location('chainmail_helmet.png')))
register_item(MCItem(RESOURCE_ADD, 'chainmail_boots', BOOTS,
get_texture_location('chainmail_boots.png')))
register_item(MCItem(RESOURCE_ADD, 'chainmail_chestplate', ARMOR,
get_texture_location('chainmail_chestplate.png')))
register_item(MCItem(RESOURCE_ADD, 'chainmail_leggings', ARMOR,
get_texture_location('chainmail_leggings.png')))
register_item(MCItem(RESOURCE_ADD, 'shield', SHIELDS))
register_item(MCItem(RESOURCE_ADD, 'elytra', ELYTRA,
get_texture_location('elytra.png')))
register_item(MCItem(RESOURCE_ADD, 'bow', BOWS,
get_texture_location('bow_standby.png')))
def register_attributes():
register_attribute(MCAttribute(RESOURCE_ADD, 'generic.maxHealth',
0.0, 100.0, 1.0, 'maxHealth',
[ITEM_ATTRIBUTES, MOB_ATTRIBUTES]))
register_attribute(MCAttribute(RESOURCE_ADD, 'generic.knockbackResistance',
0.0, 1.0, .01, 'knockbackResistance',
[ITEM_ATTRIBUTES, MOB_ATTRIBUTES]))
register_attribute(MCAttribute(RESOURCE_ADD, 'generic.movementSpeed',
-.7, 10.0, .1, 'movementSpeed',
[ITEM_ATTRIBUTES, MOB_ATTRIBUTES]))
register_attribute(MCAttribute(RESOURCE_ADD, 'generic.attackDamage',
0.0, 100.0, .5, 'attackDamage',
[ITEM_ATTRIBUTES, MOB_ATTRIBUTES]))
register_attribute(MCAttribute(RESOURCE_ADD, 'generic.armor',
0.0, 30.0, .5, 'armor',
[ITEM_ATTRIBUTES, MOB_ATTRIBUTES]))
register_attribute(MCAttribute(RESOURCE_ADD, 'generic.armorToughness',
0.0, 100.0, .5, 'armorToughness',
[ITEM_ATTRIBUTES, MOB_ATTRIBUTES]))
register_attribute(MCAttribute(RESOURCE_ADD, 'generic.attackSpeed',
-4., 1020.0, .1, 'attackSpeed',
[ITEM_ATTRIBUTES]))
register_attribute(MCAttribute(RESOURCE_ADD, 'generic.luck',
-1024., 1024.0, 1.0, 'luck',
[ITEM_ATTRIBUTES]))
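# Inferred from the calls above (an assumption): MCAttribute takes
# (namespace, attribute_id, min_value, max_value, step, label, categories),
# e.g. generic.maxHealth ranges over 0.0-100.0 in steps of 1.0 and applies
# to both the item and the mob attribute editors.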
def register():
register_enchants()
register_items()
register_attributes()
register_mobs()
register_potions()
|
Kovak/KivyNBT
|
mc_data/minecraft/__init__.py
|
Python
|
mit
| 18,812
|
[
"BLAST"
] |
252484b6935720bc14698e172c0ecd96f24c2cad4752fafb147ab0e755eacd68
|
import shutil
import CxDataHandler
import numpy as np
from pyusound.dicom.recloader import DicomRecordingBMode2dCompounding
import os
import sys
from collections import defaultdict
def RemoveRect(nav_acq):
if nav_acq[-1]=='/':
nav_acq=nav_acq[:-1]
if os.path.isdir(nav_acq+"_removed_rect"):
print "Folder already exists"
return
shutil.copytree(nav_acq,nav_acq+"_removed_rect")
CxData=CxDataHandler.cxOpenCV.from_acq_folder(nav_acq+"_removed_rect")
for frame_no in range(0,CxData.get_no_of_frames()):
print frame_no
vtk=CxData.load_mhd(frame_no)
data=vtk.loadRawData_removeRect()
CxDataHandler.rawFileWrither(vtk.getRawFilePath(),data,True)
vtk.save_to_new_file(vtk.getFilePath(),overwrite=True)
#CxDataHandler.rawFileWrither('/home/dahoiv/Dokumenter/nav_doppler/data/2014-01-26_11-21_Laboratory_17.cx3/test/'+str(frame_no)+'.raw',data,True) #
if __name__ == '__main__':
# folder='/home/dahoiv/Dokumenter/nav_doppler/data/2014-01-26_11-21_Laboratory_17.cx3/US_Acq/Acq_03_20140126T115603'
# RemoveRect(folder)
# folder='/home/dahoiv/Dokumenter/nav_doppler/data/2014-01-26_11-21_Laboratory_17.cx3/US_Acq/Acq_04_20140126T115704'
# RemoveRect(folder)
# folder='/home/dahoiv/Dokumenter/nav_doppler/data/2014-01-26_11-21_Laboratory_17.cx3/US_Acq/Acq_06_20140126T115838'
# RemoveRect(folder)
# folder='/home/dahoiv/Dokumenter/nav_doppler/data/2014-02-06_AVM.cx3/US_Acq/US_26_20140206T114213'
# RemoveRect(folder)
# folder='/home/dahoiv/Dokumenter/nav_doppler/data/2014-02-06_AVM.cx3/US_Acq/US_27_20140206T114335'
# RemoveRect(folder)
# folder='/home/dahoiv/Dokumenter/nav_doppler/data/2014-02-06_AVM.cx3/US_Acq/US_28_20140206T114435'
# RemoveRect(folder)
folder ='/home/dahoiv/Dokumenter/nav_doppler/data/2014-01-26_11-21_Laboratory_17.cx3/US_Acq/Acq_05_20140126T115746'
RemoveRect(folder)
folder='/home/dahoiv/Dokumenter/nav_doppler/data/2014-01-26_11-21_Laboratory_17.cx3/US_Acq/Acq_07_20140126T115931'
RemoveRect(folder)
|
Danielhiversen/pyCustusX
|
CxDataHandler/examples/RemoveRect.py
|
Python
|
mit
| 2,075
|
[
"VTK"
] |
a7a6bf8d1296a5174e62064a78a76493904e62e7849df2d75d69f417705e7444
|
# This file is part of cldoc. cldoc is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from .node import Node
from .method import Method
from .ctype import Type
from ..clang import cindex
class Class(Node):
kind = cindex.CursorKind.CLASS_DECL
class Base:
def __init__(self, cursor, access=cindex.AccessSpecifier.PUBLIC):
self.cursor = cursor
self.access = access
self.type = Type(cursor.type, cursor=cursor)
self.node = None
def __init__(self, cursor, comment):
super(Class, self).__init__(cursor, comment)
self.process_children = True
self.current_access = cindex.AccessSpecifier.PRIVATE
self.bases = []
self.implements = []
self.implemented_by = []
self.subclasses = []
self.name_to_method = {}
def _all_bases(self):
for b in self.bases:
yield b
for b in self.implements:
yield b
def resolve_bases(self, mapping):
for b in self.bases:
tpname = b.type.typename
if tpname in mapping:
b.node = mapping[tpname]
b.node.subclasses.append(self)
for b in self.implements:
tpname = b.type.typename
if tpname in mapping:
b.node = mapping[tpname]
b.node.implemented_by.append(self)
@property
def resolve_nodes(self):
for child in Node.resolve_nodes.fget(self):
yield child
for base in self._all_bases():
if base.node and base.access != cindex.AccessSpecifier.PRIVATE:
yield base.node
for child in base.node.resolve_nodes:
yield child
def append(self, child):
super(Class, self).append(child)
if isinstance(child, Method):
self.name_to_method[child.name] = child
@property
def methods(self):
for child in self.children:
if isinstance(child, Method):
yield child
def visit(self, cursor, citer):
if cursor.kind == cindex.CursorKind.CXX_ACCESS_SPEC_DECL:
self.current_access = cursor.access_specifier
return []
elif cursor.kind == cindex.CursorKind.CXX_BASE_SPECIFIER:
# Add base
self.bases.append(Class.Base(cursor.type.get_declaration(), cursor.access_specifier))
return []
return Node.visit(self, cursor, citer)
@property
def force_page(self):
return True
# vi:ts=4:et
|
jessevdk/cldoc
|
cldoc/nodes/cclass.py
|
Python
|
gpl-2.0
| 3,144
|
[
"VisIt"
] |
2b9b76cc4992d43d00ec78bc4cef91ab18df6e7294b8f552dbc9ad961be93f53
|
import re
import copy
import os
import tempfile
import logging
import subprocess
import shlex
import time
from threading import Timer
from pycparser import parse_file, c_generator
from pycparser.c_ast import Break
_ENV_ORIG_SRC = 'MUTATE_ORIG_SRC'
_ENV_MODIFIED_SRC = 'MUTATE_MODIFIED_SRC'
def _node_to_str(node):
'''Produce a string representation of an AST node.'''
gen = c_generator.CGenerator()
return gen.visit(node)
def _find_nodes(node, nodetype):
'''Find nodes of a given type in a given subtree.'''
nodes = []
if isinstance(node, nodetype):
nodes.append(node)
for _, c in node.children():
nodes.extend(_find_nodes(c, nodetype))
return nodes
def _get_op_swaps(op, swaps):
    '''Find the set of mutations to perform on a given op.'''
    ops = set()
    for s in swaps:
        if op in s:
            # Use {op}, not set(op): set('<=') would split the operator
            # into its characters instead of excluding the op itself.
            ops |= s - {op}
    return ops
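# Worked example (hypothetical call, using one of the swap groups below):
#   _get_op_swaps('<=', [{'<', '>', '<=', '>='}])  ->  {'<', '>', '>='}
# Every other operator in any group containing the op becomes a candidate
# mutation; the op itself is excluded.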
def _run_process(cmd, env_vars=None, timeout_sec=0, log_filename=None):
    # The timer callback runs on its own thread, so raising there would
    # never reach this function's caller. Record the timeout, kill the
    # child, and raise from the main thread after wait() returns.
    timed_out = []

    def timeout(p):
        timed_out.append(True)
        p.kill()

    env = os.environ.copy()
    if env_vars is not None:
        env.update(env_vars)
    if log_filename is not None:
        log_file = open(log_filename, 'w')
        p = subprocess.Popen(shlex.split(cmd),
                             env=env,
                             stdout=log_file,
                             stderr=log_file)
    else:
        p = subprocess.Popen(shlex.split(cmd), env=env)
    timer = Timer(timeout_sec, timeout, [p])
    try:
        if timeout_sec > 0:
            timer.start()
        p.wait()
    finally:
        timer.cancel()
    if log_filename is not None:
        log_file.close()
    if timed_out:
        raise TimeoutError()
    return p.returncode
class MutationGenerator(c_generator.CGenerator):
def __init__(self, swap_nodes=None):
self.swap_nodes = swap_nodes
super(MutationGenerator, self).__init__()
def visit(self, node):
if node is self.swap_nodes[0]:
if self.swap_nodes[1] is not None:
return super(MutationGenerator, self).visit(self.swap_nodes[1])
else:
return ''
else:
return super(MutationGenerator, self).visit(node)
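# Usage sketch (hypothetical nodes from a parsed AST): swap_nodes is a
# (target, replacement) pair. When the generator reaches exactly the
# target node it emits the replacement's source instead (or nothing when
# the replacement is None), so one shared AST can be serialized once per
# mutation without ever being modified:
#   gen = MutationGenerator((old_binop, new_binop))
#   mutated_source = gen.visit(ast)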
class Mutator:
_MIN_RUN_TIME = 5
_TIMEOUT_MULTIPLIER = 4
def __init__(self, build_cmd, test_exe, mutate_file, inject_path, log_dir,
exclude_patterns=None):
self._build_cmd = build_cmd
self._test_exe = test_exe
self._orig_filename = mutate_file
self._ast = parse_file(self._orig_filename, use_cpp=True)
self._inject_path = inject_path
self._iteration = 0
self._log_dir = log_dir
self._run_timeout = 0
self.build_failed = 0
self.crashed = 0
self.timed_out = 0
self.caught = 0
self.missed = 0
self._gen_orig_filename = '{}/{}.orig'.format(
self._log_dir, os.path.basename(self._orig_filename))
if exclude_patterns is None:
self._exclude_patterns = []
else:
self._exclude_patterns = [re.compile(p) for p in exclude_patterns]
@property
def runs(self):
return self.caught + self.missed + self.build_failed + self.crashed
def __call__(self):
if self._initial_run():
self._visit(self._ast, None)
def _visit(self, node, parent):
# Skip nodes that aren't in the original file (i.e. are in headers)
if node.coord is not None and node.coord.file != self._orig_filename:
return
method = getattr(self,
'_visit_' + node.__class__.__name__,
self._generic_visit)
descend = method(node, parent)
# Visit the children, unless the visit callback for this node
# indicated we should stop.
if descend:
for _, c in node.children():
self._visit(c, node)
def _generic_visit(self, node, parent):
return True
def _visit_Case(self, node, parent):
# Find any breaks, and remove them.
for b in _find_nodes(node, Break):
self._test((b, None))
return True
def _visit_BinaryOp(self, node, parent):
new_node = copy.copy(node)
        ops = _get_op_swaps(node.op,
                            [{'+', '-'},
                             {'<', '>', '<=', '>='},
                             {'<<', '>>'},
                             {'!=', '=='},
                             {'&', '&&'},
                             {'&', '|'},
                             {'&&', '||'},
                             {'|=', '&='}])
for op in ops:
new_node.op = op
self._test((node, new_node))
return True
def _visit_FuncDef(self, node, parent):
# Skip all of this function if its name matches one of the exclusion
# patterns.
for p in self._exclude_patterns:
if p.search(node.decl.name):
return False
return True
def _visit_UnaryOp(self, node, parent):
if node.op == '!':
self._test((node, node.expr))
else:
ops = _get_op_swaps(node.op,
[{'p++', 'p--', '++', '--'}])
new_node = copy.copy(node)
for op in ops:
new_node.op = op
self._test((node, new_node))
return True
def _iter_log_dirname(self):
'''Return the log directory name for the current iteration.'''
return self._log_dir + '/run{}'.format(self._iteration)
def _write_mutation(self, swap_nodes):
ext = os.path.splitext(self._orig_filename)[1]
with tempfile.NamedTemporaryFile(
mode='w', suffix=ext, delete=False) as f:
gen = MutationGenerator(swap_nodes)
f.write(gen.visit(self._ast))
return f.name
def _build_mutation(self, mutation_filename):
'''Build the test executable, with the mutated file subbed in.'''
env_vars = {
'LD_PRELOAD': self._inject_path,
_ENV_ORIG_SRC: self._orig_filename,
_ENV_MODIFIED_SRC: mutation_filename
}
rc = _run_process(cmd=self._build_cmd,
env_vars=env_vars,
log_filename=self._iter_log_dirname() + '/build.log')
return rc == 0
def _write_diff(self, mutation_filename):
# TODO: Check if the diff executable exists etc?
os.system('diff -c {} {} > {}/{}.diff'.format(
self._gen_orig_filename,
mutation_filename,
self._iter_log_dirname(),
os.path.basename(self._orig_filename)))
def _run_mutation(self, mutation_str, coord):
# TODO: Build the exe path correctly
# TODO: Check for crashes
        rc = None
        try:
run_log = '{}/{}.log'.format(self._iter_log_dirname(),
os.path.basename(self._test_exe))
rc = _run_process(cmd='./' + self._test_exe,
log_filename=run_log,
timeout_sec=self._run_timeout)
except TimeoutError:
self.timed_out += 1
result_str = 'timed out'
log_fn = logging.error
else:
if rc == 0:
self.missed += 1
result_str = 'missed'
log_fn = logging.error
else:
self.caught += 1
result_str = 'caught'
log_fn = logging.info
log_fn('Run {}: {} {}, test output {} - {}'.format(
self._iteration,
coord,
mutation_str,
rc,
result_str))
def _test(self, swap_nodes, mutation_str=''):
if mutation_str == '':
if swap_nodes[1] is None:
mutation_str = 'Remove {}'.format(_node_to_str(swap_nodes[0]))
else:
mutation_str = '"{}" -> "{}"'.format(
_node_to_str(swap_nodes[0]),
_node_to_str(swap_nodes[1]))
# Create the log directory for this iteration, the calls below will
# assume it exists.
os.mkdir(self._iter_log_dirname())
# Write the mutated AST to disk.
mutation_filename = self._write_mutation(swap_nodes)
# Build the test with the mutated file.
build_success = self._build_mutation(mutation_filename)
# Write out the diff before deleting the mutated file.
self._write_diff(mutation_filename)
        os.remove(mutation_filename)
if not build_success:
self.build_failed += 1
logging.error('Run {}: {} {} - build failed'.format(
self._iteration,
swap_nodes[0].coord,
mutation_str))
else:
self._run_mutation(mutation_str, swap_nodes[0].coord)
self._iteration += 1
def _initial_run(self):
# Dump the unmutated C file from the AST so that we can diff the
# mutations against it. If we just diffed against the file we parsed,
# the diffs would contain layout changes between the original file
# and the layout that the generator produces.
with open(self._gen_orig_filename, 'w') as f:
gen = c_generator.CGenerator()
f.write(gen.visit(self._ast))
build_log = self._log_dir + '/build.log.orig'
rc = _run_process(cmd=self._build_cmd, log_filename=build_log)
if rc == 0:
# Time the unmutated run so that we can time out mutated runs that
# have got stuck (e.g. a mutation leads to an infinite loop or
# deadlock).
start = time.time()
# TODO: Build the exe path correctly
run_log = '{}/{}.log.orig'.format(
self._log_dir, os.path.basename(self._test_exe))
rc = _run_process(cmd='./' + self._test_exe, log_filename=run_log)
end = time.time()
# Use a min run time - if a process runs very quickly we don't
# mind waiting a bit longer.
self._run_timeout = (Mutator._TIMEOUT_MULTIPLIER *
max(Mutator._MIN_RUN_TIME, int(end - start)))
return rc == 0
|
Juzley/Fawkes
|
mutate/mutator.py
|
Python
|
mit
| 10,400
|
[
"VisIt"
] |
54c371c4af6f02f6c2c02a60b88f50c0e3f80ba402b9d5f0517bf5e23da21d8a
|
"""
This module implements a Resource Allocating Vector Quantizer (RAVQ) as
described by Linaker and Niklasson, 2006
Adapted for FoREST-cat by Emily Dolson from code from the pyrobot project,
originally modified for CS81 at Swarthmore College
Spring 2012, by Lisa Meeden
Copyright 2012-2013, Emily Dolson, distributed under the Affero GNU Public
License.
This file is part of FoREST-cat.
FoREST-cat is free software: you can redistribute it and/or modify
it under the terms of the Affero GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
FoREST-cat is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Affero GNU General Public License for more details.
You should have received a copy of the Affero GNU General Public License
along with FoREST-cat. If not, see <http://www.gnu.org/licenses/>.
"""
from conx import *
from random import random
from math import sqrt
import math  # needed for math.isnan() below
from event import *
class Region:
"""
The region class, adapted from the GNG region class used for CBIM,
handles all state-specific features. Most notably, this includes the
"expert" (neural net in this implementation) used for predicting
sensor values on the next time step. However, it also keeps track of
running means and standard deviations for each sensor.
"""
def __init__(self, inputVectorSize, targetVectorSize, errors, n):
"""
In original IAC timeWindow was 15 and smoothing was 25.
"""
self.inputVectorSize = inputVectorSize
self.targetVectorSize = targetVectorSize
self.errors = errors #stores prediction error on each step
self.name = "R" + str(n) #region name
self.timeWindow = 15 #Time window for error tracking
self.smoothing = 25 #smoothing for error tracking
self.inputs = [] #list of input vectors
self.targets = [] #list of target vectors associated with inputs
self.trace = []
self.timeStep = 0 #keeps track of number of points in this state
self.sensitivity = 3 #number of SDs out a point needs to be flagged
#as anomalous if gauss = True, or number of
#times the average prediction error off from
#the predicted value a point must be to
                          #get flagged as anomalous if gauss = False
self.gauss = False
#Set up three layer, fully connected, feedforward neural net
self.expert = Network()
self.expert.addLayer("input", self.inputVectorSize)
self.expert.addLayer("hidden", self.inputVectorSize)
self.expert.addLayer("output", self.targetVectorSize)
self.expert.connect("input", "hidden")
self.expert.connect("hidden", "output")
self.expert.resetEpoch = 1
self.expert.resetLimit = 1
self.expert.momentum = 0
self.expert.epsilon = 0.5
#Set up running means and standard deviations
self.runningMeans = [0.0 for i in range(inputVectorSize)]
self.runningSDs = [0.0 for i in range(inputVectorSize)]
def inVec(self, vec):
self.timeStep += 1
currSDs = []
for i in range(len(vec)):
if self.runningMeans[i] == 0:
self.runningMeans[i] = vec[i]
currSDs.append(0)
else:
                #Welford's algorithm
prevMean = self.runningMeans[i]
self.runningMeans[i] += (vec[i]-prevMean)/self.timeStep
self.runningSDs[i] += (vec[i]-prevMean)*(vec[i]-self.runningMeans[i])
currSDs.append(sqrt(self.runningSDs[i]/(self.timeStep - 1)))
potErrs = []
return vec, potErrs
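    # A minimal standalone sketch of the Welford update used above
    # (illustrative only, not part of the original class):
    #   mean, m2 = 0.0, 0.0
    #   for n, x in enumerate(samples, 1):
    #       delta = x - mean
    #       mean += delta / n
    #       m2 += delta * (x - mean)
    #   sd = sqrt(m2 / (n - 1))  # sample standard deviation once n > 1
    # runningSDs[i] above plays the role of m2, which is why the current
    # standard deviation is sqrt(runningSDs[i] / (timeStep - 1)).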
def inVecCuriosity(self, vec, prevVec):
"""
Handles incoming vectors when curiosity module (which also handles
error tolerance) is active.
Input: vec - the vector of sensor values for the current timestep
prevVec - the vector of sensor values for previous time step (so that
we can so what predictions they would have elicited)
"""
self.timeStep += 1
        if prevVec is None:
            #First time step: nothing to predict from yet, so just zero
            #out missing (NaN) readings in place and report no errors
            for i in range(len(vec)):
                if math.isnan(vec[i].value):
                    vec[i].value = 0
            return vec, []
else:
#Calculate prediction error
prediction = self.askExpert([i.value for i in prevVec])
predErr = 0
errs = []
for i in range(len(vec)):
if math.isnan(vec[i].value):
vec[i].value = prediction[i]
errs.append(Error(vec[i].source+" "+vec[i].name, vec[i].time, vec[i].value, prediction[i], "missing or out of range data"))
else:
predErr += (prediction[i] - vec[i].value)**2
print "Prediction Error:", predErr, prediction
            #Compute running means and standard deviations
currSDs = []
for i in range(len(vec)):
if self.runningMeans[i] == 0:
self.runningMeans[i] = vec[i].value
currSDs.append(0)
else:
                    #Welford's algorithm (running means)
prevMean = self.runningMeans[i]
self.runningMeans[i] += (vec[i].value-prevMean)/self.timeStep
self.runningSDs[i] += (vec[i].value-prevMean)*(vec[i].value-self.runningMeans[i])
currSDs.append(sqrt(self.runningSDs[i]/(self.timeStep - 1)))
#Add this vector to the neural net's training set
self.addExemplar(prevVec, vec) #A bit self-reinforcing, but what
self.trainExpert() #can we do about it?
potErrs = []
for i in range(len(vec)):
                #If we assume readings are Gaussian around the running mean,
                #flag anything more than sensitivity standard deviations away
                if (self.gauss and abs(self.runningMeans[i] - vec[i].value) > currSDs[i]*self.sensitivity) or vec[i].replaced:
                    potErrs.append(Error(vec[i].source+" "+vec[i].name, vec[i].time, vec[i].value, vec[i].replaced, (vec[i].flag if vec[i].flag is not None else "Abnormal value")))
                #We can make fewer assumptions by just looking at deviation from prediction relative to prediction error
                elif abs(prediction[i] - vec[i].value) > sqrt(predErr)/self.timeStep*self.sensitivity:
                    #predErr is sum of squared errors so this gives us an average prediction error so far.
                    #This will be an overestimate, as prediction error will go down over time.
                    potErrs.append(Error(vec[i].source+" "+vec[i].name, vec[i].time, vec[i].value, vec[i].replaced, (vec[i].flag if vec[i].flag is not None else "Abnormal value")))
else:
vec[i].sensor.errorState = False
return vec, potErrs
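    # Flagging-rule sketch (restating the logic above with hypothetical
    # numbers): with gauss=True a reading is flagged when
    # |running mean - value| > sensitivity * sd; e.g. sensitivity=3,
    # mean=10, sd=2 flags a reading of 17 (3.5 sd away). With gauss=False
    # it is flagged when |prediction - value| exceeds
    # sensitivity * sqrt(predErr) / timeStep, predErr being this step's
    # summed squared prediction error.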
def trainExpert(self):
"""
Train the expert on most recent exemplar.
"""
print self.targets[-1]
self.expert.step(input = self.inputs[-1], output = self.targets[-1])
def trainExpertOnAll(self):
"""
Train the expert on all exemplars.
"""
self.expert.setInputs(self.inputs)
self.expert.setOutputs(self.targets)
self.expert.train()
def askExpert(self, input):
"""
Find out what the expert predicts for the given input.
"""
self.expert['input'].copyActivations(input)
self.expert.propagate()
return self.expert['output'].activation
def storeError(self, error, step):
"""
Errors are stored with the most recent at the head of the list.
"""
self.errors.insert(0, error)
n = self.timeWindow + self.smoothing
if len(self.errors) > n:
self.trace.append((step, sum(self.errors[:n])/float(n)))
def makeErrorGraph(self):
"""
Make a graph of error over time
"""
if len(self.trace) == 0:
return
fp = open(self.name + ".err", "w")
for step, err in self.trace:
fp.write("%d %.6f\n" % (step, err))
fp.flush()
fp.close()
def learningProgress(self):
"""
Returns the learning progress which is an approximation of
the first derivative of the error.
"""
if len(self.errors) < (self.timeWindow + self.smoothing + 1):
return 0
decrease = self.meanErrorRate(0) - self.meanErrorRate(self.timeWindow)
return -1 * decrease
def meanErrorRate(self, start):
"""
Returns the average error rate over self.smoothing steps
starting from the given start index.
"""
result = 0
end = start + self.smoothing + 1
if end > len(self.errors):
return 0
for i in range(start, end, 1):
result += self.errors[i]
return result / float(self.smoothing + 1)
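    # Worked example (hypothetical values smoothing=2 and timeWindow=3;
    # the class defaults are 25 and 15): with errors = [1.0, 1.2, 1.5,
    # 2.0, 2.2, 2.5] (newest first), meanErrorRate(0) = 3.7/3 ~ 1.233 and
    # meanErrorRate(3) = 6.7/3 ~ 2.233, so learningProgress() returns
    # -(1.233 - 2.233) = 1.0: positive progress because error is falling.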
def addExemplar(self, input, target):
"""
Adds the given input and target to the appropriate lists.
"""
self.inputs.append(input)
self.targets.append(target)
def exemplarsToStr(self):
if len(self.inputs) < 5:
return ""
result = ""
for i in range(1,5):
result += "Input: "
for inVal in self.inputs[-i]:
result += "%.3f " % inVal
result += "\n"
result += "Target: "
for tarVal in self.targets[-i]:
result += "%.3f " % tarVal
result += "\n"
return result
|
emilydolson/forestcat
|
netGNGRegion.py
|
Python
|
agpl-3.0
| 9,880
|
[
"Gaussian"
] |
b085e01fb3f0ecf21d7cbd8f0b7a73bc6b89307a2bae0b88e2c16d5a023f18bc
|
'''
Superclass to extract yield data from tables
and from mppnp simulations
Christian Ritter 11/2013
Two classes: One for reading and extracting of
NuGrid table data, the other one for SN1a data.
'''
import matplotlib.pyplot as plt
import numpy as np
import os
color=['r','k','b','g']
marker_type=['o','p','s','D']
line_style=['--','-','-.',':']
#global notebookmode
notebookmode=False
#class read_yields():
#
# def __init__(self,nugridtable='element_yield_table.txt',sn1a_table='sn1a_ivo12_stable_z.txt'):
#
# self.sn1a_table=sn1a_table
# self.nugridtable=nugridtable ,...
class read_nugrid_yields():
def __init__(self,nugridtable,isotopes=[],excludemass=[]):
'''
        nugridtable : specifies the filename of the table file
'''
table=nugridtable
import os
if '/' in table:
self.label=table.split('/')[-1]
else:
self.label=table
self.path=table
if notebookmode==True:
os.system('sudo python cp.py '+nugridtable)
file1=open('tmp/'+nugridtable)
lines=file1.readlines()
file1.close()
os.system('sudo python delete.py '+nugridtable)
else:
file1=open(nugridtable)
lines=file1.readlines()
file1.close()
header1=[]
table_header=[]
age=[]
yield_data=[]
#kin_e=[]
#lum_bands=[]
#m_final=[]
header_done=False
ignore=False
col_attrs_data=[]
######read through all lines
for line in lines:
if 'H' in line[0]:
if not 'Table' in line:
if header_done==False:
header1.append(line.strip())
else:
table_header[-1].append(line.strip())
else:
ignore=False
for kk in range(len(excludemass)):
if float(excludemass[kk]) == float(line.split(',')[0].split('=')[1]):
ignore=True
#print 'ignore',float(line.split(',')[0].split('=')[1])
break
#print line,'ignore',ignore
if ignore==True:
header_done=True
continue
table_header.append([])
table_header[-1].append(line.strip())
yield_data.append([])
#lum_bands.append([])
#m_final.append([])
col_attrs_data.append([])
col_attrs_data[-1].append(line.strip())
header_done=True
continue
if ignore==True:
continue
if header_done==True:
#col_attrs_data.append([])
col_attrs_data[-1].append(float(line.split(':')[1]))
#age is special col_attrs, used in chem_evol.py
if 'Lifetime' in line:
age.append(float(line.split(':')[1]))
'''
if 'kinetic energy' in line:
kin_e.append(float(line.split(':')[1]))
if 'band' in line:
lum_bands[-1].append(float(line.split(':')[1]))
if 'Mfinal' in line:
m_final[-1].append(float(line.split(':')[1]))
'''
continue
if ignore==True:
continue
if '&Isotopes &Yields' in line or '&Elements &Yields' in line:
title_line=line.split('&')[1:]
column_titles=[]
for t in title_line:
yield_data[-1].append([])
column_titles.append(t.strip())
#print column_titles
continue
#iso ,name and yields
iso_name=line.split('&')[1].strip()
#print line
#print line.split('&')
yield_data[-1][0].append(line.split('&')[1].strip())
#if len(isotopes)>0:
# if not iso_name in isotopes:
#else:
yield_data[-1][1].append(float(line.split('&')[2].strip()))
# for additional data
for t in range(2,len(yield_data[-1])):
if column_titles[t] == 'A' or column_titles[t] =='Z':
yield_data[-1][t].append(int(line.split('&')[t+1].strip()))
else:
yield_data[-1][t].append(float(line.split('&')[t+1].strip()))
        #choose only selected isotopes and keep them in the right order
######reading finished
#In [43]: tablesN.col_attrs
#Out[43]: ['Isotopes', 'Yields', 'X0', 'Z', 'A']
if len(isotopes)>0:
#print 'correct for isotopes'
data_new=[]
for k in range(len(yield_data)):
#print 'k'
data_new.append([])
#print 'len',len(yield_data[k])
#print ([[]]*len(yield_data[k]))[0]
for h in range(len(yield_data[k])):
data_new[-1].append([])
#print 'testaa',data_new[-1]
data_all=yield_data[k]
for iso_name in isotopes:
if iso_name in data_all[0]:
#print 'test',data_all[1][data_all[0].index(iso_name)]
for hh in range(1,len(data_all)):
data_new[-1][hh].append(data_all[hh][data_all[0].index(iso_name)])
#data_new[-1][1].append(data_all[2][data_all[0].index(iso_name)])
#data_new[-1][1].append(data_all[2][data_all[0].index(iso_name)])
else:
for hh in range(1,len(data_all)):
data_new[-1][hh].append(0)
#data_new[-1][1].append(0)
#print 'GRID exclude',iso_name
data_new[-1][0].append(iso_name)
#print 'new list'
#print data_new[0][0]
#print data_new[0][1]
yield_data=data_new
self.yield_data=yield_data
#table header points to element in yield_data
self.table_idx={}
i=0
self.col_attrs=[]
self.table_mz=[]
self.metallicities=[]
#self.col_attrs=table_header
#go through all MZ pairs
for table1 in table_header:
#go through col_attrs
for k in range(len(table1)):
table1[k]=table1[k][2:]
if 'Table' in table1[k]:
self.table_idx[table1[k].split(':')[1].strip()]=i
tablename=table1[k].split(':')[1].strip()
self.table_mz.append(tablename)
metal=tablename.split(',')[1].split('=')[1][:-1]
if float(metal) not in self.metallicities:
self.metallicities.append(float(metal))
if table1 ==table_header[0]:
if 'Table' in table1[k]:
table1[k] = 'Table (M,Z):'
self.col_attrs.append(table1[k].split(':')[0].strip())
#col_attrs_data
#table1.split(':')[1].strip()
i+=1
#define header
self.header_attrs={}
#print 'header1: ',header1
for h in header1:
self.header_attrs[h.split(':')[0][1:].strip()]=h.split(':')[1].strip()
self.data_cols=column_titles #previous data_attrs
self.age=age
#self.kin_e=kin_e
#self.lum_bands=lum_bands
#self.m_final=m_final
self.col_attrs_data=col_attrs_data
def set(self,M=0,Z=-1,specie='',value=0):
'''
Replace the values in column 3 which
are usually the yields with value.
Use in combination with the write routine
to write out modification into new file.
M: initial mass to be modified
        Z: initial metallicity to be modified
        specie: specie whose quantity (e.g. yield) will be modified
'''
inp='(M='+str(float(M))+',Z='+str(float(Z))+')'
idx=self.table_idx[inp]
data=self.yield_data[idx]
idx_col=self.data_cols.index('Yields')
set1=self.yield_data[idx][idx_col]
specie_all= data[0]
for k in range(len(set1)):
if specie == specie_all[k]:
#return set1[k]
self.yield_data[idx][idx_col][k] = value
def write_table(self,filename='isotope_yield_table_mod.txt'):
'''
Allows to write out table in NuGrid yield table format.
Note that method has to be generalized for all tables
and lines about NuGrid removed.
        filename: Table name
needs ascii_table.py from NuGrid python tools
'''
#part of the NuGrid python tools
import ascii_table as ascii1
import getpass
user=getpass.getuser()
import time
date=time.strftime("%d %b %Y", time.localtime())
tables=self.table_mz
#write header attrs
f=open(filename,'w')
self.header_attrs
out=''
l='H NuGrid yields Set1: '+self.header_attrs['NuGrid yields Set1']+'\n'
out = out +l
l='H Data prepared by: '+user+'\n'
out=out +l
l='H Data prepared date: '+date+'\n'
out=out +l
l='H Isotopes: '+ self.header_attrs['Isotopes'] +'\n'
out = out +l
l='H Number of metallicities: '+self.header_attrs['Number of metallicities']+'\n'
out = out +l
l='H Units: ' + self.header_attrs['Units'] + '\n'
out = out + l
f.write(out)
f.close()
for k in range(len(tables)):
print 'Write table ',tables[k]
mass=float(self.table_mz[k].split(',')[0].split('=')[1])
metallicity=float(self.table_mz[k].split(',')[1].split('=')[1][:-1])
data=self.yield_data[k]
#search data_cols
idx_y=self.data_cols.index('Yields')
yields=data[idx_y]
idx_x0=self.data_cols.index('X0')
mass_frac_ini=data[idx_x0]
idx_specie=self.data_cols.index(self.data_cols[0])
species=data[idx_specie]
#over col attrs, first is MZ pair which will be skipped, see special_header
attr_lines=[]
for h in range(1,len(self.col_attrs)):
attr=self.col_attrs[h]
idx=self.col_attrs.index(attr)
# over MZ pairs
attr_data=self.col_attrs_data[k][idx]
line=attr+': '+'{:.3E}'.format(attr_data)
attr_lines.append(line)
special_header='Table: (M='+str(mass)+',Z='+str(metallicity)+')'
dcols=[self.data_cols[0],'Yields','X0']
data=[species,list(yields),mass_frac_ini]
headers=[special_header]+attr_lines
ascii1.writeGCE_table(filename=filename,headers=headers,data=data,dcols=dcols)
'''
#add ages
#time=self.age[k]
time=[]
idx=self.col_attrs.index('Lifetime')
for k in range(len(self.table_mz)):
time.append(col_attrs_data[k][idx])
f1=open(filename,'r')
lines=f1.readlines()
f1.close()
i=-1
line1=''
while (True):
i+=1
if i>len(lines)-1:
break
line=lines[i]
line1+=lines[i]
if tables[k] in lines[i]:
line1+=('H Lifetime: '+'{:.3E}'.format(time)+'\n')
f1=open(filename,'w')
f1.write(line1)
f1.close()
'''
def get(self,M=0,Z=-1,quantity='',specie=''):
'''
Allows to extract table data in 2 Modes:
        1) Extracting table data for a
        star of mass M and metallicity Z.
Returns either table attributes,
given by yield.col_attrs
or table columns,
given by yield.data_cols.
2) For extraction of a table attribute
from all available tables. Can be
directly used in the following way:
get(tableattribute)
M: Stellar mass in Msun
Z: Stellar metallicity (e.g. solar: 0.02)
quantity: table attribute or data column/data_cols
specie: optional, return certain specie (e.g. 'H-1')
'''
#scale down to Z=0.00001
#print 'get yields ',Z
if float(Z) == 0.00001:
#scale abundance
if quantity=='Yields':
return self.get_scaled_Z(M=M,Z=Z,quantity=quantity,specie=specie)
#Take all other parameter from Z=0.0001 case
else:
Z=0.0001
all_tattrs=False
if Z ==-1:
if M ==0 and len(quantity)>0:
quantity1=quantity
all_tattrs=True
elif (M in self.col_attrs) and quantity == '':
quantity1=M
all_tattrs=True
else:
print 'Error: Wrong input'
return 0
quantity=quantity1
if (all_tattrs==False) and (not M ==0):
inp='(M='+str(float(M))+',Z='+str(float(Z))+')'
idx=self.table_idx[inp]
#print 'len tableidx:',len(self.table_idx)
#print 'len age',len(self.age)
'''
if quantity=='Lifetime':
if all_tattrs==True:
data=self.age
else:
data=self.age[idx]
return data
if quantity =='Total kinetic energy':
if all_tattrs==True:
data=self.kin_e
else:
data=self.kin_e[idx]
return data
if quantity == 'Lyman-Werner band':
if all_tattrs==True:
data=[list(i) for i in zip(*self.lum_bands)][0]
else:
data=self.lum_bands[idx][0]
return data
if quantity== 'Hydrogen-ionizing band':
if all_tattrs==True:
data=[list(i) for i in zip(*self.lum_bands)][1]
else:
data=self.lum_bands[idx][1]
return data
if quantity == 'High-energy band':
if all_tattrs==True:
data=[list(i) for i in zip(*self.lum_bands)][2]
else:
data=self.lum_bands[idx][2]
return data
if quantity == 'Mfinal':
if all_tattrs==True:
data=self.m_final
else:
data=self.m_final[idx][0]
return data
if quantity== 'Table (M,Z)':
if all_tattrs==True:
data=self.table_mz
else:
data=self.table_mz[idx]
return data
'''
if quantity in self.col_attrs:
if all_tattrs==False:
data=self.col_attrs_data[idx][self.col_attrs.index(quantity)]
return data
else:
data=[]
for k in range(len(self.table_idx)):
data.append(self.col_attrs_data[k][self.col_attrs.index(quantity)])
return data
if quantity=='masses':
data_tables=self.table_mz
masses=[]
for table in data_tables:
if str(float(Z)) in table:
masses.append(float(table.split(',')[0].split('=')[1]))
return masses
else:
data=self.yield_data[idx]
if specie=='':
idx_col=self.data_cols.index(quantity)
set1=data[idx_col]
return set1
else:
idx_col=self.data_cols.index('Yields')
set1=data[idx_col]
specie_all= data[0]
for k in range(len(set1)):
if specie == specie_all[k]: #bug was here
return set1[k]
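    # Usage sketch (hypothetical table path; assumes the table provides a
    # 'Lifetime' column attribute):
    #   table = read_nugrid_yields('yield_tables/isotope_yield_table.txt')
    #   y_h1 = table.get(M=2.0, Z=0.02, quantity='Yields', specie='H-1')
    #   masses = table.get(Z=0.02, quantity='masses')
    #   lifetimes = table.get('Lifetime')  # one value per (M,Z) table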
def get_scaled_Z(self,table, table_yields,iniabu,iniabu_scale,M=0,Z=0,quantity='Yields',specie=''):
'''
Scaled down yields of isotopes 'He','C', 'O', 'Mg', 'Ca', 'Ti', 'Fe', 'Co','Zn','H','N'
down to Z=1e-5 and Z=1e-6 (for Brian). The rest is set to zero.
'''
#print '####################################'
#print 'Enter routine get_scaled_Z'
elem_prim=['He','C', 'O', 'Mg', 'Ca', 'Ti', 'Fe', 'Co','Zn','H']
elem_sec=['N']
##Scale down
import utils as u
import re
#table=ry.read_nugrid_yields('yield_tables/prodfac_iso_table.txt')
#table_yields=ry.read_nugrid_yields('yield_tables/isotope_yield_table.txt')
#iniabu=u.iniabu('yield_tables/iniabu/iniab1.0E-05GN93_alpha.ppn')
#iniabu_scale=u.iniabu('yield_tables/iniabu/iniab1.0E-04GN93_alpha.ppn')
iniiso=[]
iniabu_massfrac=[]
for k in range(len(iniabu.habu)):
iso=iniabu.habu.keys()[k]
iniiso.append(re.split('(\d+)',iso)[0].strip().capitalize()+'-'+re.split('(\d+)',iso)[1])
iniabu_massfrac.append(iniabu.habu.values()[k])
#iniabu_scale=u.iniabu('yield_tables/iniabu/iniab1.0E-04GN93_alpha.ppn')
iniiso_scale=[]
iniabu_scale_massfrac=[]
for k in range(len(iniabu_scale.habu)):
iso=iniabu_scale.habu.keys()[k]
iniiso_scale.append(re.split('(\d+)',iso)[0].strip().capitalize()+'-'+re.split('(\d+)',iso)[1])
iniabu_scale_massfrac.append(iniabu_scale.habu.values()[k])
grid_yields=[]
grid_masses=[]
isotope_names=[]
origin_yields=[]
for k in range(len(table.table_mz)):
if 'Z=0.0001' in table.table_mz[k]:
#print table.table_mz[k]
mini=float(table.table_mz[k].split('=')[1].split(',')[0])
grid_masses.append(mini)
#this is production factor (see file name)
prodfac=table.get(M=mini,Z=0.0001,quantity='Yields')
isotopes=table.get(M=mini,Z=0.0001,quantity='Isotopes')
#this is yields
yields=table_yields.get(M=mini,Z=0.0001,quantity='Yields')
mtot_eject=sum(yields)
origin_yields.append([])
#print 'tot eject',mtot_eject
mout=[]
sumnonh=0
isotope_names.append([])
for h in range(len(isotopes)):
if not (isotopes[h].split('-')[0] in (elem_prim+elem_sec) ):
#Isotopes/elements not considered/scaled are set to 0
#mout.append(0)
#isotope_names[-1].append(isotopes[h])
continue
isotope_names[-1].append(isotopes[h])
idx=iniiso.index(isotopes[h])
inix=iniabu_massfrac[idx]
idx=iniiso_scale.index(isotopes[h])
inix_scale=iniabu_scale_massfrac[idx]
prodf=prodfac[isotopes.index(isotopes[h])]
origin_yields[-1].append(yields[isotopes.index(isotopes[h])])
if isotopes[h].split('-')[0] in elem_prim:
#primary
mout1=(prodf-1.)*(inix_scale*mtot_eject) + (inix*mtot_eject)
#check if amount destroyed was more than it was initial there
if mout1<0:
#print 'Problem with ',isotopes[h]
#print 'Was more destroyed than evailable'
#Then only what was there can be destroyed
mout1=0
#if isotopes[h] == 'C-13':
# print 'inix',inix
# print 'inixscale',inix_scale
# print 'prodf',prodf
# print (prodf)*(inix_scale*mtot_eject)
# print (inix*mtot_eject)
else:
#secondary
mout1=(prodf-1.)*(inix*mtot_eject) + (inix*mtot_eject)
                if isotopes[h] != 'H-1' and mout1 > 0:
                    sumnonh += (mout1 - (inix*mtot_eject))
mout.append(mout1)
#for mass conservation, assume total mass lost is same as in case of Z=0.0001
idx_h=isotope_names[-1].index('H-1')
mout[idx_h]-=sumnonh
for k in range(len(mout)):
mout[k] = float('{:.3E}'.format(mout[k]))
grid_yields.append(mout)
####data
idx=grid_masses.index(M)
all_tattrs=False
if specie=='':
return grid_yields[idx]
else:
            set1=grid_yields[idx]
names=isotope_names[idx]
for k in range(len(names)):
if specie in names[k]:
return set1[k]
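    # Scaling sketch (restating the formulas above): for a primary element
    # the scaled ejected mass is
    #   m_out = (prodfac - 1) * X0_scale * M_eject + X0 * M_eject
    # while for the secondary element N, X0_scale is replaced by X0, which
    # reduces to m_out = prodfac * X0 * M_eject. Negative results are
    # clipped to zero, and H-1 absorbs the net change of all other species
    # so that the total ejected mass is conserved.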
class read_yield_sn1a_tables():
def __init__(self,sn1a_table,isotopes=[]):
'''
Read SN1a tables.
Fills up missing isotope yields
with zeros.
If different Zs are available
do ...
'''
import re
if notebookmode==True:
os.system('sudo python cp.py '+sn1a_table)
f1=open('tmp/'+sn1a_table)
lines=f1.readlines()
f1.close()
os.system('sudo python delete.py '+sn1a_table)
else:
f1=open(sn1a_table)
lines=f1.readlines()
f1.close()
iso=[]
self.header=[]
self.col_attrs=[]
yields=[]
metallicities=[]
isotopes_avail=[]
for line in lines:
#for header
if 'H' in line[0]:
self.header.append(line)
continue
if ('Isotopes' in line) or ('Elements' in line):
l=line.replace('\n','').split('&')[1:]
self.col_attrs=l
metallicities=l[1:]
#print metallicities
# metallicity dependent yields
#if len(l)>2:
#else:
for k in l[1:]:
yields.append([])
continue
linesp=line.strip().split('&')[1:]
iso.append(linesp[0].strip())
#print iso
for k in range(1,len(linesp)):
yields[k-1].append(float(linesp[k]))
        #if isotope list is empty, take all isotopes
if len(isotopes)==0:
isotopes=iso
yields1=[]
#fill up the missing isotope yields with zero
for z in range(len(yields)):
yields1.append([])
for iso1 in isotopes:
#iso1=iso1.split('-')[1]+iso1.split('-')[0]
#ison= iso1+((10-len(iso1))*' ')
if iso1 in iso:
yields1[-1].append(yields[z][iso.index(iso1)])
else:
yields1[-1].append(0.)
self.yields=yields1
self.metallicities=[]
for m in metallicities:
self.metallicities.append(float(m.split('=')[1]))
#self.metallicities=metallicities
#print yields1
self.isotopes=iso
def get(self,Z=0,quantity='Yields',specie=''):
'''
Allows to extract SN1a table data.
        If metallicity dependent yield tables
        were used, data is taken from the closest
        metallicity available to the given Z
quantity: if 'Yields' return yields
if 'Isotopes' return all isotopes available
'''
if quantity=='Yields':
idx = (np.abs(np.array(self.metallicities)-Z)).argmin()
yields=self.yields[idx]
return np.array(yields)
elif quantity=='Isotopes':
return self.isotopes
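    # Nearest-metallicity sketch (hypothetical values): with
    # self.metallicities = [0.002, 0.01, 0.02], get(Z=0.014) picks
    # argmin(|[0.002, 0.01, 0.02] - 0.014|) = 1, i.e. the Z=0.01 column.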
class read_yield_rawd_tables():
def __init__(self,rawd_table,isotopes):
'''
Read RAWD tables.
Fills up missing isotope yields
with zeros.
If different Zs are available
do ...
'''
import re
if notebookmode==True:
os.system('sudo python cp.py '+rawd_table)
f1=open('tmp/'+rawd_table)
lines=f1.readlines()
f1.close()
os.system('sudo python delete.py '+rawd_table)
else:
f1=open(rawd_table)
lines=f1.readlines()
f1.close()
iso=[]
self.header=[]
self.col_attrs=[]
yields=[]
metallicities=[]
for line in lines:
#for header
if 'H' in line[0]:
self.header.append(line)
continue
if ('Isotopes' in line) or ('Elements' in line):
l=line.replace('\n','').split('&')[1:]
self.col_attrs=l
metallicities=l[1:]
#print metallicities
# metallicity dependent yields
#if len(l)>2:
#else:
for k in l[1:]:
yields.append([])
continue
linesp=line.strip().split('&')[1:]
iso.append(linesp[0].strip())
#print iso
for k in range(1,len(linesp)):
yields[k-1].append(float(linesp[k]))
yields1=[]
#fill up the missing isotope yields with zero
for z in range(len(yields)):
yields1.append([])
for iso1 in isotopes:
#iso1=iso1.split('-')[1]+iso1.split('-')[0]
#ison= iso1+((10-len(iso1))*' ')
if iso1 in iso:
yields1[-1].append(yields[z][iso.index(iso1)])
else:
yields1[-1].append(0.)
self.yields=yields1
self.metallicities=[]
for m in metallicities:
self.metallicities.append(float(m.split('=')[1]))
#self.metallicities=metallicities
#print yields1
def get(self,Z=0,quantity='Yields',specie=''):
'''
Allows to extract rawd table data.
If metallicity dependent yield tables
        were used, data is taken from the closest
        metallicity available to the given Z
quantity: yields only possible atm
'''
idx = (np.abs(np.array(self.metallicities)-Z)).argmin()
yields=self.yields[idx]
return np.array(yields)
'''
Adapted from NuGrid Utility class
'''
#import numpy as np
#import scipy as sc
#import ascii_table as att
#from scipy import optimize
#import matplotlib.pyplot as pl
#import os
class iniabu():
'''
This class in the utils package reads an abundance
distribution file of the type iniab.dat. It then provides you
with methods to change some abundances, modify, normalise and
eventually write out the final distribution in a format that
can be used as an initial abundance file for ppn. This class
also contains a method to write initial abundance files for a
    MESA run, for a given MESA network.
'''
# clean variables that we will use in this class
filename = ''
def __init__(self,filename):
'''
Init method will read file of type iniab.dat, as they are for
example found in the frames/mppnp/USEPP directory.
An instance of this class will have the following data arrays
z charge number
a mass number
abu abundance
names name of species
habu a hash array of abundances, referenced by species name
hindex hash index returning index of species from name
E.g. if x is an instance then x.names[4] gives you the
name of species 4, and x.habu['c 12'] gives you the
abundance of C12, and x.hindex['c 12'] returns
4. Note, that you have to use the species names as
they are provided in the iniabu.dat file.
Example - generate modified input file ppn calculations:
import utils
p=utils.iniabu('iniab1.0E-02.ppn_asplund05')
sp={}
sp['h 1']=0.2
sp['c 12']=0.5
sp['o 16']=0.2
p.set_and_normalize(sp)
p.write('p_ini.dat','header for this example')
p.write_mesa allows you to write this NuGrid initial abundance
file into a MESA readable initial abundance file.
'''
f0=open(filename)
sol=f0.readlines()
        f0.close()
# Now read in the whole file and create a hashed array:
names=[]
z=[]
yps=np.zeros(len(sol))
mass_number=np.zeros(len(sol))
for i in range(len(sol)):
z.append(int(sol[i][1:3]))
names.extend([sol[i].split(" ")[0][4:]])
yps[i]=float(sol[i].split(" ")[1])
try:
mass_number[i]=int(names[i][2:5])
except ValueError:
#print "WARNING:"
#print "This initial abundance file uses an element name that does"
#print "not contain the mass number in the 3rd to 5th position."
#print "It is assumed that this is the proton and we will change"
#print "the name to 'h 1' to be consistent with the notation used"
#print "in iniab.dat files"
names[i]='h 1'
mass_number[i]=int(names[i][2:5])
# now zip them together:
hash_abu={}
hash_index={}
for a,b in zip(names,yps):
hash_abu[a] = b
for i in range(len(names)):
hash_index[names[i]] = i
self.z=z
self.abu=yps
self.a=mass_number
self.names=names
self.habu=hash_abu
self.hindex=hash_index
def iso_abundance(self,isos):
'''
This routine returns the abundance of a specific isotope. Isotope given as, e.g., 'Si-28' or as list ['Si-28','Si-29','Si-30']
'''
if type(isos) == list:
dumb = []
for it in range(len(isos)):
dumb.append(isos[it].split('-'))
ssratio = []
isos = dumb
for it in range(len(isos)):
ssratio.append(self.habu[isos[it][0].ljust(2).lower() + str(int(isos[it][1])).rjust(3)])
else:
isos = isos.split('-')
ssratio = self.habu[isos[0].ljust(2).lower() + str(int(isos[1])).rjust(3)]
return ssratio
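    # Key-formatting sketch: 'Si-28' is looked up under the ppn-style key
    # 'si' (ljust to 2 chars, lowercased) + '28'.rjust(3), i.e. 'si 28';
    # 'H-1' becomes 'h   1'.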
def read_iniabu(filename,isotopes):
import read_yields as ry
if notebookmode==True:
os.system('sudo python cp.py '+'iniabu/'+filename)
iniabu_class=ry.iniabu('tmp/'+filename)
iniabu= np.array(iniabu_class.iso_abundance(isotopes))
os.system('sudo python delete.py '+filename)
else:
iniabu_class=ry.iniabu(filename)
iniabu= np.array(iniabu_class.iso_abundance(isotopes))
return iniabu
def read_strip_param(filename):
'''
To read Elses simulatin files
'''
import read_yields as ry
f1=open(filename)
lines=f1.readlines()
f1.close()
info=['timebins','SFR','Mcool','Meject','Minfall','Mreinc','Mcoldgas','Mhotgas','Mejectedgas','Mstripej','Mstriphot','Mstripcold','Mstripstar']
data=[]
for k in range(len(lines)):
#to skip header
if k <14:
continue
#to read column header
if k==14:
cheader=lines[k].split()
idx=[]
for h in info:
idx.append(cheader.index(h))
data.append([])
continue
#units line
if k==15:
continue
line=lines[k].split()
for i in range(len(idx)):
data[i].append(float(line[idx[i]]))
data_dict={}
for k in range(len(data)):
data_dict[info[k]]=data[k]
return data_dict
|
jan-rybizki/Chempy
|
Chempy/input/yields/NuGrid/read_yields.py
|
Python
|
mit
| 29,284
|
[
"Brian"
] |
c7898342a9672215f81177eda23993e3d8ef909be120e5c6b0ae44416e00134a
|
"""Echo module based on the GFAL2_StorageBase class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# from DIRAC
from DIRAC.Resources.Storage.GFAL2_StorageBase import GFAL2_StorageBase
from DIRAC import gLogger, S_ERROR, S_OK
class EchoStorage(GFAL2_StorageBase):
""" .. class:: EchoStorage
Interface to the Echo storage.
This plugin will work with both gsiftp and root protocol.
    According to the RAL admins, Echo is best used:
    * using gsiftp for all WAN transfers and LAN writes.
    * using root for LAN reads.
    Note that there are still a few issues to be sorted out with xroot.
    This would translate into a configuration such as::
RAL-ECHO
{
BackendType = Echo
SEType = T0D1
AccessProtocols = gsiftp,root
WriteProtocols = gsiftp
XRootConfig
{
Host = xrootd.echo.stfc.ac.uk
PluginName = Echo
Protocol = root
Path = lhcb:user
Access = remote
}
GidFTPConfig
{
Host = gridftp.echo.stfc.ac.uk
PluginName = Echo
Protocol = gsiftp
Path = lhcb:user
Access = remote
}
}
Operations
{
Defaults
{
DataManagement
{
ThirdPartyProtocols=srm,gsiftp,dips
RegistrationProtocols=srm,gsiftp,dips
}
}
}
"""
def __init__(self, storageName, parameters):
""" c'tor
"""
# # init base class
super(EchoStorage, self).__init__(storageName, parameters)
self.srmSpecificParse = False
self.log = gLogger.getSubLogger("EchoStorage")
self.pluginName = 'Echo'
# Because Echo considers '<host>/lhcb:prod' differently from '<host>//lhcb:prod' as it normally should be
# we need to disable the automatic normalization done by gfal2
self.ctx.set_opt_boolean("XROOTD PLUGIN", "NORMALIZE_PATH", False)
# This is in case the protocol is xroot
# Because some storages are configured to use krb5 auth first
# we end up in trouble for interactive sessions. This
# environment variable enforces the use of certificates
if self.protocolParameters['Protocol'] == 'root' and 'XrdSecPROTOCOL' not in os.environ:
os.environ['XrdSecPROTOCOL'] = 'gsi,unix'
# We don't need extended attributes for metadata
self._defaultExtendedAttributes = None
def putDirectory(self, path):
"""Not available on Echo
:returns: S_ERROR
"""
return S_ERROR("Putting directory does not exist in Echo")
def listDirectory(self, path):
"""Not available on Echo
:returns: S_ERROR
"""
return S_ERROR("Listing directory does not exist in Echo")
def isDirectory(self, path):
"""Not available on Echo
:returns: S_ERROR
"""
return S_ERROR("Stating directory does not exist in Echo")
def getDirectory(self, path, localPath=False):
"""Not available on Echo
:returns: S_ERROR
"""
return S_ERROR("Getting directory does not exist in Echo")
def removeDirectory(self, path, recursive=False):
"""Not available on Echo
:returns: S_ERROR
"""
return S_ERROR("Removing directory does not exist in Echo")
def getDirectorySize(self, path):
"""Not available on Echo
:returns: S_ERROR
"""
return S_ERROR("Getting directory size does not exist in Echo")
def getDirectoryMetadata(self, path):
"""Not available on Echo
:returns: S_ERROR
"""
return S_ERROR("Getting directory metadata does not exist in Echo")
def _createSingleDirectory(self, path):
""" Emulates creating directory on Echo by returning success (as Echo does)
:returns: S_OK()
"""
return S_OK()
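# --- Illustrative usage sketch -------------------------------------------------
# A minimal sketch of how this plugin could be exercised directly; in a real
# DIRAC installation the StorageFactory builds it from the CS configuration
# shown in the class docstring. The parameter values below are hypothetical.
#
# params = {'Protocol': 'root', 'Host': 'xrootd.echo.stfc.ac.uk',
#           'Path': 'lhcb:user', 'Port': '', 'SpaceToken': '', 'WSUrl': ''}
# se = EchoStorage('RAL-ECHO', params)
# res = se.listDirectory('/lhcb:user/some/dir')
# assert not res['OK']  # directory operations are not supported on Echo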
|
yujikato/DIRAC
|
src/DIRAC/Resources/Storage/EchoStorage.py
|
Python
|
gpl-3.0
| 3,832
|
[
"DIRAC"
] |
8a03aa86045706a96d193d544e3c9bb54d4bb68860d972bc963cd7e8ec8d43be
|
__author__ = "John D. Chodera, Frank Noe"
__copyright__ = "Copyright 2015, John D. Chodera and Frank Noe"
__credits__ = ["John D. Chodera", "Frank Noe"]
__license__ = "LGPL"
__maintainer__ = "John D. Chodera, Frank Noe"
__email__="jchodera AT gmail DOT com, frank DOT noe AT fu-berlin DOT de"
import copy
import numpy as np
from math import log
import bhmm.output_models
from bhmm.output_models import OutputModel
from bhmm.util import config
class DiscreteOutputModel(OutputModel):
"""
HMM output probability model using discrete symbols. This is the "standard" HMM that is classically used in the
literature
"""
def __init__(self, B):
"""
Create a discrete output model.
Parameters
----------
B : ndarray((N,M),dtype=float)
output probability matrix using N hidden states and M observable symbols.
This matrix needs to be row-stochastic.
Examples
--------
Create an observation model.
>>> import numpy as np
>>> B = np.array([[0.5,0.5],[0.1,0.9]])
>>> output_model = DiscreteOutputModel(B)
"""
self._output_probabilities = np.array(B, dtype=config.dtype)
nstates, self._nsymbols = self._output_probabilities.shape
# superclass constructor
OutputModel.__init__(self, nstates)
# test if row-stochastic; keep the dtype-converted copy made above as the output matrix
assert np.allclose(np.sum(self._output_probabilities, axis=1), np.ones(self.nstates)), 'B is not a stochastic matrix'
def __repr__(self):
r""" String representation of this output model
>>> import numpy as np
>>> output_model = DiscreteOutputModel(np.array([[0.5,0.5],[0.1,0.9]]))
>>> print repr(output_model)
DiscreteOutputModel(array([[ 0.5, 0.5],
[ 0.1, 0.9]]))
"""
return "DiscreteOutputModel(%s)" % repr(self._output_probabilities)
def __str__(self):
r""" Human-readable string representation of this output model
>>> output_model = DiscreteOutputModel(np.array([[0.5,0.5],[0.1,0.9]]))
>>> print str(output_model)
--------------------------------------------------------------------------------
DiscreteOutputModel
nstates: 2
nsymbols: 2
B[0] = [ 0.5 0.5]
B[1] = [ 0.1 0.9]
--------------------------------------------------------------------------------
"""
output = "--------------------------------------------------------------------------------\n"
output += "DiscreteOutputModel\n"
output += "nstates: %d\n" % self.nstates
output += "nsymbols: %d\n" % self._nsymbols
for i in range(self.nstates):
output += "B["+str(i)+"] = %s\n" % str(self._output_probabilities[i])
output += "--------------------------------------------------------------------------------"
return output
@property
def model_type(self):
r""" Model type. Returns 'discrete' """
return 'discrete'
@property
def output_probabilities(self):
r""" Row-stochastic (n,m) output probability matrix from n hidden states to m symbols. """
return self._output_probabilities
@property
def nsymbols(self):
r""" Number of symbols, or observable output states """
return self._nsymbols
# TODO: remove this code if we're sure we don't need it.
# def p_o_i(self, o, i):
# """
# Returns the output probability for symbol o given hidden state i
#
# Parameters
# ----------
# o : int
# the discrete symbol o (observation)
# i : int
# the hidden state index
#
# Return
# ------
# p_o : float
# the probability that hidden state i generates symbol o
#
# """
# # TODO: so far we don't use this method. Perhaps we don't need it.
# return self.B[i,o]
#
# def log_p_o_i(self, o, i):
# """
# Returns the logarithm of the output probability for symbol o given hidden state i
#
# Parameters
# ----------
# o : int
# the discrete symbol o (observation)
# i : int
# the hidden state index
#
# Return
# ------
# p_o : float
# the log probability that hidden state i generates symbol o
#
# """
# # TODO: check if we need the log-probabilities
# return log(self.B[i,o])
#
#
# def p_o(self, o):
# """
# Returns the output probability for symbol o from all hidden states
#
# Parameters
# ----------
# o : int
# the discrete symbol o (observation)
#
# Return
# ------
# p_o : ndarray (N)
# the probability that any of the N hidden states generates symbol o
#
# """
# # TODO: so far we don't use this method. Perhaps we don't need it.
# return self.B[:,o]
#
# def log_p_o(self, o):
# """
# Returns the logarithm of the output probabilities for symbol o from all hidden states
#
# Parameters
# ----------
# o : int
# the discrete symbol o (observation)
#
# Return
# ------
# p_o : ndarray (N)
# the log probability that any of the N hidden states generates symbol o
#
# """
# return np.log(self.B[:,o])
def p_obs(self, obs, out=None):
"""
Returns the output probabilities for an entire trajectory and all hidden states
Parameters
----------
obs : ndarray((T), dtype=int)
a discrete trajectory of length T
Return
------
p_o : ndarray (T,N)
the probability of generating the symbol at time point t from any of the N hidden states
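Examples
--------
The result has one row per time step and one column per hidden state.
>>> import numpy as np
>>> B = np.array([[0.5,0.5],[0.1,0.9]])
>>> output_model = DiscreteOutputModel(B)
>>> output_model.p_obs(np.array([0, 1, 1, 0])).shape
(4, 2)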
"""
# much faster
if out is None:
out = self._output_probabilities[:, obs].T
#out /= np.sum(out, axis=1)[:,None]
return out
else:
if obs.shape[0] == out.shape[0]:
out[:, :] = self._output_probabilities[:, obs].T
elif obs.shape[0] < out.shape[0]:
out[:obs.shape[0], :] = self._output_probabilities[:, obs].T
else:
raise ValueError('output array out is too small: ' + str(out.shape[0]) + ' < ' + str(obs.shape[0]))
#out /= np.sum(out, axis=1)[:,None]
return out
def _estimate_output_model(self, observations, weights):
"""
Fits the output model given the observations and weights
Parameters
----------
observations : [ ndarray(T_k) ] with K elements
A list of K observation trajectories, each having length T_k
weights : [ ndarray(T_k,N) ] with K elements
A list of K weight matrices, each having length T_k and containing the probability of any of the states in
the given time step
Examples
--------
Generate an observation model and samples from each state.
>>> import numpy as np
>>> ntrajectories = 3
>>> nobs = 1000
>>> B = np.array([[0.5,0.5],[0.1,0.9]])
>>> output_model = DiscreteOutputModel(B)
>>> from scipy import stats
>>> nobs = 1000
>>> obs = np.empty((nobs), dtype = object)
>>> weights = np.empty((nobs), dtype = object)
>>> gens = [stats.rv_discrete(values=(range(len(B[i])), B[i])) for i in range(B.shape[0])]
>>> obs = [gens[i].rvs(size=nobs) for i in range(B.shape[0])]
>>> weights = [np.zeros((nobs, B.shape[1])) for i in range(B.shape[0])]
>>> for i in range(B.shape[0]): weights[i][:,i] = 1.0
Update the observation model parameters by a maximum-likelihood fit.
>>> output_model._estimate_output_model(obs, weights)
"""
# sizes
N = self._output_probabilities.shape[0]
M = self._output_probabilities.shape[1]
K = len(observations)
# initialize output probability matrix
self._output_probabilities = np.zeros((N,M))
for k in range(K):
# update nominator
obs = observations[k]
for o in range(M):
times = np.where(obs == o)[0]
self._output_probabilities[:,o] += np.sum(weights[k][times,:], axis=0)
# normalize
self._output_probabilities /= np.sum(self._output_probabilities, axis=1)[:,None]
def _sample_output_mode(self, observations):
"""
Sample a new set of distribution parameters given a sample of observations from the given state.
Both the internal parameters and the attached HMM model are updated.
Parameters
----------
observations : [ numpy.array with shape (N_k,) ] with nstates elements
observations[k] is a set of observations sampled from state k
Examples
--------
initialize output model
>>> B = np.array([[0.5,0.5],[0.1,0.9]])
>>> output_model = DiscreteOutputModel(B)
sample given observation
>>> obs = [[0,0,0,1,1,1],[1,1,1,1,1,1]]
>>> output_model._sample_output_mode(obs)
"""
from numpy.random import dirichlet
# total number of observation symbols
M = self._output_probabilities.shape[1]
for i in range(len(observations)):
# count symbols found in data, padded to the full symbol space so that
# symbols missing from this state's observations get a zero count
# (the count array must be rebuilt for each state, not reused)
count_full = np.bincount(observations[i], minlength=M)
# sample dirichlet distribution
self._output_probabilities[i, :] = dirichlet(count_full + 1)
def generate_observation_from_state(self, state_index):
"""
Generate a single synthetic observation data from a given state.
Parameters
----------
state_index : int
Index of the state from which observations are to be generated.
Returns
-------
observation : int
A single observation (symbol index) from the given state.
Examples
--------
Generate an observation model.
>>> output_model = DiscreteOutputModel(np.array([[0.5,0.5],[0.1,0.9]]))
Generate sample from each state.
>>> observation = output_model.generate_observation_from_state(0)
"""
# generate random generator (note that this is inefficient - better use one of the next functions)
import scipy.stats
gen = scipy.stats.rv_discrete(values=(range(len(self._output_probabilities[state_index])), self._output_probabilities[state_index]))
return gen.rvs(size=1)[0]
def generate_observations_from_state(self, state_index, nobs):
"""
Generate synthetic observation data from a given state.
Parameters
----------
state_index : int
Index of the state from which observations are to be generated.
nobs : int
The number of observations to generate.
Returns
-------
observations : numpy.array of shape(nobs,) with type dtype
A sample of `nobs` observations from the specified state.
Examples
--------
Generate an observation model.
>>> output_model = DiscreteOutputModel(np.array([[0.5,0.5],[0.1,0.9]]))
Generate sample from each state.
>>> observations = [ output_model.generate_observations_from_state(state_index, nobs=100) for state_index in range(output_model.nstates) ]
"""
import scipy.stats
gen = scipy.stats.rv_discrete(values=(range(self._nsymbols), self._output_probabilities[state_index]))
return gen.rvs(size=nobs)
def generate_observation_trajectory(self, s_t, dtype=None):
"""
Generate synthetic observation data from a given state sequence.
Parameters
----------
s_t : numpy.array with shape (T,) of int type
s_t[t] is the hidden state sampled at time t
dtype : numpy.dtype, optional, default=None
The datatype to return the resulting observations in. If None, will select int32.
Returns
-------
o_t : numpy.array with shape (T,) of type dtype
o_t[t] is the observation associated with state s_t[t]
Examples
--------
Generate an observation model and synthetic state trajectory.
>>> nobs = 1000
>>> output_model = DiscreteOutputModel(np.array([[0.5,0.5],[0.1,0.9]]))
>>> s_t = np.random.randint(0, output_model.nstates, size=[nobs])
Generate a synthetic trajectory
>>> o_t = output_model.generate_observation_trajectory(s_t)
"""
if dtype is None:
dtype = np.int32
# Determine number of samples to generate.
T = s_t.shape[0]
nsymbols = self._output_probabilities.shape[1]
if (s_t.max() >= self.nstates) or (s_t.min() < 0):
msg = ''
msg += 's_t = %s\n' % s_t
msg += 's_t.min() = %d, s_t.max() = %d\n' % (s_t.min(), s_t.max())
msg += 's_t.argmax = %d\n' % s_t.argmax()
msg += 'self.nstates = %d\n' % self.nstates
msg += 's_t is out of bounds.\n'
raise Exception(msg)
# generate random generators
#import scipy.stats
#gens = [scipy.stats.rv_discrete(values=(range(len(self.B[state_index])), self.B[state_index])) for state_index in range(self.B.shape[0])]
#o_t = np.zeros([T], dtype=dtype)
#for t in range(T):
# s = s_t[t]
# o_t[t] = gens[s].rvs(size=1)
#return o_t
o_t = np.zeros([T], dtype=dtype)
for t in range(T):
s = s_t[t]
o_t[t] = np.random.choice(nsymbols, p=self._output_probabilities[s,:])
return o_t
|
bhmm/legacy-bhmm-force-spectroscopy-manuscript
|
bhmm/output_models/discrete.py
|
Python
|
lgpl-3.0
| 14,241
|
[
"Gaussian"
] |
fbfaa92928761c39fd3e5cca6980828adb1d4403bf63aea0d559b9e8d17d7fa9
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
"""VGG, implemented in Gluon."""
from __future__ import division
__all__ = ['VGG',
'vgg11', 'vgg13', 'vgg16', 'vgg19',
'vgg11_bn', 'vgg13_bn', 'vgg16_bn', 'vgg19_bn',
'get_vgg']
from ....context import cpu
from ....initializer import Xavier
from ...block import HybridBlock
from ... import nn
class VGG(HybridBlock):
r"""VGG model from the `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_ paper.
Parameters
----------
layers : list of int
Numbers of layers in each feature block.
filters : list of int
Numbers of filters in each feature block. List length should match the layers.
classes : int, default 1000
Number of classification classes.
batch_norm : bool, default False
Use batch normalization.
"""
def __init__(self, layers, filters, classes=1000, batch_norm=False, **kwargs):
super(VGG, self).__init__(**kwargs)
assert len(layers) == len(filters)
with self.name_scope():
self.features = self._make_features(layers, filters, batch_norm)
self.features.add(nn.Dense(4096, activation='relu',
weight_initializer='normal',
bias_initializer='zeros'))
self.features.add(nn.Dropout(rate=0.5))
self.features.add(nn.Dense(4096, activation='relu',
weight_initializer='normal',
bias_initializer='zeros'))
self.features.add(nn.Dropout(rate=0.5))
self.output = nn.Dense(classes,
weight_initializer='normal',
bias_initializer='zeros')
def _make_features(self, layers, filters, batch_norm):
featurizer = nn.HybridSequential(prefix='')
for i, num in enumerate(layers):
for _ in range(num):
featurizer.add(nn.Conv2D(filters[i], kernel_size=3, padding=1,
weight_initializer=Xavier(rnd_type='gaussian',
factor_type='out',
magnitude=2),
bias_initializer='zeros'))
if batch_norm:
featurizer.add(nn.BatchNorm())
featurizer.add(nn.Activation('relu'))
featurizer.add(nn.MaxPool2D(strides=2))
return featurizer
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
# Specification
vgg_spec = {11: ([1, 1, 2, 2, 2], [64, 128, 256, 512, 512]),
13: ([2, 2, 2, 2, 2], [64, 128, 256, 512, 512]),
16: ([2, 2, 3, 3, 3], [64, 128, 256, 512, 512]),
19: ([2, 2, 4, 4, 4], [64, 128, 256, 512, 512])}
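# Reading the specification: for VGG-16 the layer counts [2, 2, 3, 3, 3] sum to
# 13 convolutional layers, which together with the 3 dense layers added in
# VGG.__init__ give 16 weighted layers; each count is paired with the matching
# entry of the filter list.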
# Constructors
def get_vgg(num_layers, pretrained=False, ctx=cpu(), root='~/.mxnet/models', **kwargs):
r"""VGG model from the `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_ paper.
Parameters
----------
num_layers : int
Number of layers for the variant of VGG. Options are 11, 13, 16, 19.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
layers, filters = vgg_spec[num_layers]
net = VGG(layers, filters, **kwargs)
if pretrained:
from ..model_store import get_model_file
batch_norm_suffix = '_bn' if kwargs.get('batch_norm') else ''
net.load_params(get_model_file('vgg%d%s'%(num_layers, batch_norm_suffix),
root=root), ctx=ctx)
return net
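# Minimal usage sketch (the input shape follows the common ImageNet convention;
# it is not mandated by this file):
# import mxnet as mx
# net = get_vgg(16)
# net.initialize()
# out = net(mx.nd.zeros((1, 3, 224, 224)))  # -> (1, 1000) class scores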
def vgg11(**kwargs):
r"""VGG-11 model from the `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_vgg(11, **kwargs)
def vgg13(**kwargs):
r"""VGG-13 model from the `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_vgg(13, **kwargs)
def vgg16(**kwargs):
r"""VGG-16 model from the `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_vgg(16, **kwargs)
def vgg19(**kwargs):
r"""VGG-19 model from the `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_vgg(19, **kwargs)
def vgg11_bn(**kwargs):
r"""VGG-11 model with batch normalization from the
`"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
kwargs['batch_norm'] = True
return get_vgg(11, **kwargs)
def vgg13_bn(**kwargs):
r"""VGG-13 model with batch normalization from the
`"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
kwargs['batch_norm'] = True
return get_vgg(13, **kwargs)
def vgg16_bn(**kwargs):
r"""VGG-16 model with batch normalization from the
`"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
kwargs['batch_norm'] = True
return get_vgg(16, **kwargs)
def vgg19_bn(**kwargs):
r"""VGG-19 model with batch normalization from the
`"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
kwargs['batch_norm'] = True
return get_vgg(19, **kwargs)
|
kkk669/mxnet
|
python/mxnet/gluon/model_zoo/vision/vgg.py
|
Python
|
apache-2.0
| 9,301
|
[
"Gaussian"
] |
f5b5541caa9bea4fd9e5f086261354ff6141444eaad9865df3e5e3641ae41b46
|
########################################################################
# File: FTSClient.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/04/08 14:29:43
########################################################################
"""
:mod: FTSClient
.. module: FTSClient
:synopsis: FTS client
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
FTS client
"""
# #
# @file FTSClient.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/04/08 14:29:47
# @brief Definition of FTSClient class.
# # imports
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.Core.Base.Client import Client
# # from DMS
from DIRAC.DataManagementSystem.Client.FTSJob import FTSJob
from DIRAC.DataManagementSystem.Client.FTSFile import FTSFile
from DIRAC.DataManagementSystem.private.FTSHistoryView import FTSHistoryView
from DIRAC.DataManagementSystem.private.FTSValidator import FTSValidator
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
# # from Resources
from DIRAC.Resources.Storage.StorageFactory import StorageFactory
########################################################################
class FTSClient( Client ):
"""
.. class:: FTSClient
"""
def __init__( self, useCertificates = False ):
"""c'tor
:param self: self reference
:param bool useCertificates: flag to enable/disable certificates
"""
Client.__init__( self )
self.log = gLogger.getSubLogger( "DataManagement/FTSClient" )
self.setServer( "DataManagement/FTSManager" )
# getting other clients
self.ftsValidator = FTSValidator()
self.dataManager = DataManager()
self.storageFactory = StorageFactory()
url = PathFinder.getServiceURL( "DataManagement/FTSManager" )
if not url:
raise RuntimeError( "CS option DataManagement/FTSManager URL is not set!" )
self.ftsManager = RPCClient( url )
def getFTSFileList( self, statusList = None, limit = None ):
""" get list of FTSFiles with status in statusList """
statusList = statusList if statusList else [ "Waiting" ]
limit = limit if limit else 1000
getFTSFileList = self.ftsManager.getFTSFileList( statusList, limit )
if not getFTSFileList['OK']:
self.log.error( "Failed getFTSFileList", "%s" % getFTSFileList['Message'] )
return getFTSFileList
getFTSFileList = getFTSFileList['Value']
return S_OK( [ FTSFile( ftsFile ) for ftsFile in getFTSFileList ] )
def getFTSJobList( self, statusList = None, limit = None ):
""" get FTSJobs wit statues in :statusList: """
statusList = statusList if statusList else list( FTSJob.INITSTATES + FTSJob.TRANSSTATES )
limit = limit if limit else 500
getFTSJobList = self.ftsManager.getFTSJobList( statusList, limit )
if not getFTSJobList['OK']:
self.log.error( "Failed getFTSJobList", "%s" % getFTSJobList['Message'] )
return getFTSJobList
getFTSJobList = getFTSJobList['Value']
return S_OK( [ FTSJob( ftsJobDict ) for ftsJobDict in getFTSJobList ] )
def getFTSFilesForRequest( self, requestID, statusList = None ):
""" read FTSFiles for a given :requestID:
:param int requestID: ReqDB.Request.RequestID
:param list statusList: List of statuses (default: Waiting)
"""
ftsFiles = self.ftsManager.getFTSFilesForRequest( requestID, statusList )
if not ftsFiles['OK']:
self.log.error( "Failed getFTSFilesForRequest", "%s" % ftsFiles['Message'] )
return ftsFiles
return S_OK( [ FTSFile( ftsFileDict ) for ftsFileDict in ftsFiles['Value'] ] )
def getAllFTSFilesForRequest( self, requestID ):
""" read FTSFiles for a given :requestID:
:param int requestID: ReqDB.Request.RequestID
"""
ftsFiles = self.ftsManager.getAllFTSFilesForRequest( requestID )
if not ftsFiles['OK']:
self.log.error( "Failed getFTSFilesForRequest", "%s" % ftsFiles['Message'] )
return ftsFiles
return S_OK( [ FTSFile( ftsFileDict ) for ftsFileDict in ftsFiles['Value'] ] )
def getFTSJobsForRequest( self, requestID, statusList = None ):
""" get list of FTSJobs with statues in :statusList: given requestID
:param int requestID: ReqDB.Request.RequestID
:param list statusList: list with FTSJob statuses
:return: [ FTSJob, FTSJob, ... ]
"""
statusList = statusList if statusList else list( FTSJob.INITSTATES + FTSJob.TRANSSTATES )
getJobs = self.ftsManager.getFTSJobsForRequest( requestID, statusList )
if not getJobs['OK']:
self.log.error( "Failed getFTSJobsForRequest", "%s" % getJobs['Message'] )
return getJobs
return S_OK( [ FTSJob( ftsJobDict ) for ftsJobDict in getJobs['Value'] ] )
def getFTSFile( self, ftsFileID = None ):
""" get FTSFile
:param int ftsFileID: FTSFileID
"""
getFile = self.ftsManager.getFTSFile( ftsFileID )
if not getFile['OK']:
self.log.error( 'Failed to get FTS file', getFile['Message'] )
return getFile
# # de-serialize
ftsFile = FTSFile( getFile['Value'] ) if getFile['Value'] else None
return S_OK( ftsFile )
def putFTSJob( self, ftsJob ):
""" put FTSJob into FTSDB
:param FTSJob ftsJob: FTSJob instance
"""
ftsJobJSON = ftsJob.toJSON()
if not ftsJobJSON['OK']:
self.log.error( 'Failed to get JSON of an FTS job', ftsJobJSON['Message'] )
return ftsJobJSON
isValid = self.ftsValidator.validate( ftsJob )
if not isValid['OK']:
self.log.error( "Failed to validate FTS job", "%s %s" % ( isValid['Message'], str( ftsJobJSON['Value'] ) ) )
return isValid
return self.ftsManager.putFTSJob( ftsJobJSON['Value'] )
def getFTSJob( self, ftsJobID ):
""" get FTS job, change its status to 'Assigned'
:param int ftsJobID: FTSJobID
"""
getJob = self.ftsManager.getFTSJob( ftsJobID )
if not getJob['OK']:
self.log.error( 'Failed to get FTS job', getJob['Message'] )
return getJob
setStatus = self.ftsManager.setFTSJobStatus( ftsJobID, 'Assigned' )
if not setStatus['OK']:
self.log.error( 'Failed to set status of FTS job', setStatus['Message'] )
# # de-serialize
# if getJob['Value']:
# getJob = FTSJob( getJob['Value'] )
return getJob
def peekFTSJob( self, ftsJobID ):
""" just peek FTSJob
:param int ftsJobID: FTSJobID
"""
getJob = self.ftsManager.getFTSJob( ftsJobID )
if not getJob['OK']:
self.log.error( 'Failed to get FTS job', getJob['Message'] )
return getJob
return getJob
def deleteFTSJob( self, ftsJobID ):
""" delete FTSJob into FTSDB
:param int ftsJob: FTSJobID
"""
deleteJob = self.ftsManager.deleteFTSJob( ftsJobID )
if not deleteJob['OK']:
self.log.error( 'Failed to delete FTS job', deleteJob['Message'] )
return deleteJob
def getFTSJobIDs( self, statusList = None ):
""" get list of FTSJobIDs for a given status list """
statusList = statusList if statusList else [ "Submitted", "Ready", "Active" ]
ftsJobIDs = self.ftsManager.getFTSJobIDs( statusList )
if not ftsJobIDs['OK']:
self.log.error( 'Failed to get FTS job IDs', ftsJobIDs['Message'] )
return ftsJobIDs
def getFTSFileIDs( self, statusList = None ):
""" get list of FTSFileIDs for a given status list """
statusList = statusList if statusList else [ "Waiting" ]
ftsFileIDs = self.ftsManager.getFTSFileIDs( statusList )
if not ftsFileIDs['OK']:
self.log.error( 'Failed to get FTS file IDs', ftsFileIDs['Message'] )
return ftsFileIDs
def getFTSHistory( self ):
""" get FTS history snapshot """
getFTSHistory = self.ftsManager.getFTSHistory()
if not getFTSHistory['OK']:
self.log.error( 'Failed to get FTS history', getFTSHistory['Message'] )
return getFTSHistory
getFTSHistory = getFTSHistory['Value']
return S_OK( [ FTSHistoryView( ftsHistory ) for ftsHistory in getFTSHistory ] )
def getDBSummary( self ):
""" get FTDB summary """
dbSummary = self.ftsManager.getDBSummary()
if not dbSummary['OK']:
self.log.error( "Failed getDBSummary", "%s" % dbSummary['Message'] )
return dbSummary
def setFTSFilesWaiting( self, operationID, sourceSE, opFileIDList = None ):
""" update status for waiting FTSFiles from 'Waiting#SourceSE' to 'Waiting'
:param int operationID: ReqDB.Operation.OperationID
:param str sourceSE: source SE name
:param opFileIDList: [ ReqDB.File.FileID, ... ]
"""
return self.ftsManager.setFTSFilesWaiting( operationID, sourceSE, opFileIDList )
def deleteFTSFiles( self, operationID, opFileIDList = None ):
""" delete FTSFiles for rescheduling
:param int operationID: ReqDB.Operation.OperationID
:param list opFileIDList: [ ReqDB.File.FileID, ... ]
"""
return self.ftsManager.deleteFTSFiles( operationID, opFileIDList )
def ftsSchedule( self, requestID, operationID, opFileList ):
""" schedule lfn for FTS job
:param int requestID: RequestDB.Request.RequestID
:param int operationID: RequestDB.Operation.OperationID
:param list opFileList: list of tuples ( File.toJSON()['Value'], sourcesList, targetList )
"""
# Check whether there are duplicates
fList = []
for fTuple in opFileList:
if fTuple not in fList:
fList.append( fTuple )
else:
self.log.warn( 'File list for FTS scheduling has duplicates, fix it:\n', fTuple )
fileIDs = [int( fileJSON.get( 'FileID', 0 ) ) for fileJSON, _sourceSEs, _targetSEs in fList ]
res = self.ftsManager.cleanUpFTSFiles( requestID, fileIDs )
if not res['OK']:
self.log.error( "Failed ftsSchedule", "%s" % res['Message'] )
return S_ERROR( "ftsSchedule: %s" % res['Message'] )
ftsFiles = []
# # this will be returned on success
result = { "Successful": [], "Failed": {} }
for fileJSON, sourceSEs, targetSEs in fList:
lfn = fileJSON.get( "LFN", "" )
size = int( fileJSON.get( "Size", 0 ) )
fileID = int( fileJSON.get( "FileID", 0 ) )
opID = int( fileJSON.get( "OperationID", 0 ) )
self.log.verbose( "ftsSchedule: LFN=%s FileID=%s OperationID=%s sources=%s targets=%s" % ( lfn, fileID, opID,
sourceSEs,
targetSEs ) )
res = self.dataManager.getActiveReplicas( lfn )
if not res['OK']:
self.log.error( "Failed ftsSchedule", "%s" % res['Message'] )
result["Failed"][fileID] = res['Message']
continue
replicaDict = res['Value']
if lfn in replicaDict["Failed"] and lfn not in replicaDict["Successful"]:
result["Failed"][fileID] = "no active replicas found"
continue
replicaDict = replicaDict["Successful"].get( lfn, {} )
# # use valid replicas only
validReplicasDict = dict( [ ( se, pfn ) for se, pfn in replicaDict.items() if se in sourceSEs ] )
if not validReplicasDict:
self.log.warn( "No active replicas found in sources" )
result["Failed"][fileID] = "no active replicas found in sources"
continue
tree = self.ftsManager.getReplicationTree( sourceSEs, targetSEs, size )
if not tree['OK']:
self.log.error( "Failed ftsSchedule", "%s cannot be scheduled: %s" % ( lfn, tree['Message'] ) )
result["Failed"][fileID] = tree['Message']
continue
tree = tree['Value']
self.log.verbose( "LFN=%s tree=%s" % ( lfn, tree ) )
treeBranches = []
printed = False
for repDict in tree.values():
if repDict in treeBranches:
if not printed:
self.log.warn( 'Duplicate tree branch', str( tree ) )
printed = True
else:
treeBranches.append( repDict )
for repDict in treeBranches:
self.log.verbose( "Strategy=%s Ancestor=%s SourceSE=%s TargetSE=%s" % ( repDict["Strategy"],
repDict["Ancestor"],
repDict["SourceSE"],
repDict["TargetSE"] ) )
transferSURLs = self._getTransferURLs( lfn, repDict, sourceSEs, validReplicasDict )
if not transferSURLs['OK']:
result["Failed"][fileID] = transferSURLs['Message']
continue
sourceSURL, targetSURL, fileStatus = transferSURLs['Value']
if sourceSURL == targetSURL:
result["Failed"][fileID] = "sourceSURL equals to targetSURL for %s" % lfn
continue
self.log.verbose( "sourceURL=%s targetURL=%s FTSFile.Status=%s" % ( sourceSURL, targetSURL, fileStatus ) )
ftsFile = FTSFile()
for key in ( "LFN", "FileID", "OperationID", "Checksum", "ChecksumType", "Size" ):
if fileJSON.get( key ):
setattr( ftsFile, key, fileJSON.get( key ) )
ftsFile.RequestID = requestID
ftsFile.OperationID = operationID
ftsFile.SourceSURL = sourceSURL
ftsFile.TargetSURL = targetSURL
ftsFile.SourceSE = repDict["SourceSE"]
ftsFile.TargetSE = repDict["TargetSE"]
ftsFile.Status = fileStatus
ftsFiles.append( ftsFile )
if not ftsFiles:
self.log.info( "ftsSchedule: no FTSFiles to put for request %d" % requestID )
return S_OK( result )
ftsFilesJSONList = [ftsFile.toJSON()['Value'] for ftsFile in ftsFiles]
res = self.ftsManager.putFTSFileList( ftsFilesJSONList )
if not res['OK']:
self.log.error( "Failed ftsSchedule", "%s" % res['Message'] )
return S_ERROR( "ftsSchedule: %s" % res['Message'] )
result['Successful'] += [ fileID for fileID in fileIDs if fileID not in result['Failed']]
# # if we land here some files have been properly scheduled
return S_OK( result )
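# Illustrative call shape for ftsSchedule (the values below are hypothetical):
# each opFileList entry pairs a file JSON with candidate source and target SEs,
# e.g. ( {"LFN": "/lhcb/user/some.file", "FileID": 1, "Size": 1024},
#        ["CERN-USER"], ["RAL-USER"] )
# and the returned S_OK value sorts FileIDs into "Successful" and "Failed".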
################################################################################################################
# Some utilities function
def _getSurlForLFN( self, targetSE, lfn ):
""" Get the targetSURL for the storage and LFN supplied.
:param self: self reference
:param str targetSE: target SE
:param str lfn: LFN
"""
res = self.storageFactory.getStorages( targetSE, pluginList = ["SRM2"] )
if not res['OK']:
errStr = "_getSurlForLFN: Failed to create SRM2 storage for %s: %s" % ( targetSE, res['Message'] )
self.log.error( "_getSurlForLFN: Failed to create SRM2 storage",
"%s: %s" % ( targetSE, res['Message'] ) )
return S_ERROR( errStr )
storageObjects = res['Value']["StorageObjects"]
for storageObject in storageObjects:
res = storageObject.getCurrentURL( lfn )
if res['OK']:
return res
self.log.error( "_getSurlForLFN: Failed to get SRM compliant storage.", targetSE )
return S_ERROR( "_getSurlForLFN: Failed to get SRM compliant storage." )
def _getTransferURLs( self, lfn, repDict, replicas, replicaDict ):
""" prepare TURLs for given LFN and replication tree
:param self: self reference
:param str lfn: LFN
:param dict repDict: replication dictionary
:param dict replicas: LFN replicas
"""
hopSourceSE = repDict["SourceSE"]
hopTargetSE = repDict["TargetSE"]
hopAncestor = repDict["Ancestor"]
# # get targetSURL
res = self._getSurlForLFN( hopTargetSE, lfn )
if not res['OK']:
self.log.error( "Failed _getTransferURLs", "%s" % res['Message'] )
return res
targetSURL = res['Value']
status = "Waiting"
# # get the sourceSURL
if hopAncestor:
status = "Waiting#%s" % ( hopAncestor )
res = self._getSurlForLFN( hopSourceSE, lfn )
sourceSURL = res.get( 'Value', replicaDict.get( hopSourceSE, None ) )
if not sourceSURL:
self.log.error( "Failed _getTransferURLs", "%s" % res['Message'] )
return res
return S_OK( ( sourceSURL, targetSURL, status ) )
|
miloszz/DIRAC
|
DataManagementSystem/Client/FTSClient.py
|
Python
|
gpl-3.0
| 16,128
|
[
"DIRAC"
] |
16f9b915abc662d7ef06e3b6d493823f075a6af82f0a53cddadacf574fc9cd6f
|
'''
lofar_source_sorter.py
the main code for the decision tree
'''
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from astropy.table import Table, join, Column
import astropy.units as u
import astropy.coordinates as ac
import utils.plot_util as pp
import os
class Mask:
'''Mask stores a boolean mask and associated information necessary for the flowchart
mask - boolean mask to apply
trait - short description
label - long detailed description (default: same as trait)
level - level of nested masks (default: 0)
verbose - print summary output (default: True)
masterlist - add output mask to list (default: None)
qlabel - for flowchart: what question gets asked of this level (default: None)
color - for flowchart: color to plot the mask (default: None=black)
'''
def __init__(self, mask, trait, label=None, level=0, verbose=True, masterlist=None, qlabel=None, color=None):
self.mask = mask
if qlabel is not None :
self.qlabel = qlabel
else:
self.qlabel = label
if label is None:
label = trait
self.label = label
if isinstance(trait,str):
self.traits = list([trait])
else:
self.traits = list(trait)
self.name = '_'.join(self.traits)
self.color = color
self.level = level
self.N = self.total()
self.n = self.msum()
self.f = self.fraction()
self.p = self.percent()
self.has_children = False
self.has_parent = False
self.Nchildren = 0
self.children = None
self.parent = None
if masterlist is not None:
masterlist.append(self)
if verbose:
self.print_frac()
return
def percent(self):
return 100.*np.sum(self.mask)/self.N
def fraction(self):
return 1.*np.sum(self.mask)/self.N
def msum(self):
return np.sum(self.mask)
def total(self):
return len(self.mask)
def print_frac(self, vformat=True):
'''vformat = True will print with formatted spaces indicative of the hierarchical structure
'''
if vformat and self.level > 0:
vv = ' '*self.level + '-'*self.level
else:
vv = ' '
print '{n:6d} ({f:5.1f}%){vv:s}{label:s}'.format(vv=vv, n=self.n, f=self.p, label=self.label)
def __str__(self):
return self.name
def submask(self, joinmask, newtrait, label=None, edgelabel='Y', verbose=True, qlabel=None, masterlist=None, color=None):
'''create a new submask based on this instance -- join masks with AND
# qlabel is the question that will be asked
# edgelabel is the answer to the question asked to get here
'''
newmask = self.mask & joinmask
newtraits = list(self.traits) # copy list of traits - lists are mutable!!
newtraits.append(newtrait) # append new trait onto copy
newlevel = self.level + 1
if label is None:
label = newtrait
childmask = Mask(newmask, newtraits, label, level=newlevel, masterlist=masterlist, verbose=verbose, qlabel=qlabel, color=color)
childmask.has_parent = True
childmask.parent = self
childmask.edgelabel = edgelabel
if not self.has_children:
self.has_children = True
self.children = [childmask]
self.Nchildren = 1
else:
newchildren = list(self.children) # copy list of traits - lists are mutable!!
newchildren.append(childmask)
self.children = newchildren
self.Nchildren = len(newchildren)
return childmask
# make sample files
def make_sample(self, cat, Nsample=250):
'''create a random subsample of the masked catalogue 'cat'
'''
t = cat[self.mask]
if Nsample is None:
Nsample = len(t)
Nsample = np.min((Nsample, len(t)))
if Nsample ==0 : return
if Nsample < len(t):
t = t[np.random.choice(np.arange(len(t)), Nsample, replace=False)]
fitsname = 'sample_'+self.name+'.fits'
if os.path.exists(fitsname):
os.remove(fitsname)
t.write(fitsname)
return
def is_disjoint(self, othermask):
assert isinstance(othermask, Mask), 'need to compare to another Mask instance'
return np.sum(self.mask & othermask.mask) == 0
def Masks_disjoint_complete(masklist):
'''test whether a list of masks is disjoint and complete
'''
# disjoint and complete <=> every source is covered by exactly one mask
cover = np.zeros(len(masklist[0].mask), dtype=int)
for t in masklist:
cover += t.mask.astype(int)
return np.all(cover == 1)
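# Small self-check of the Mask machinery on toy data (the arrays below are
# illustrative only and not part of the LOFAR catalogue):
_toy_size = np.array([20., 5., 30., 10.])
_toy_flux = np.array([12., 1., 3., 40.])
_m_all = Mask(_toy_size > -1, 'all', verbose=False)
_m_large = _m_all.submask(_toy_size > 15., 'large', verbose=False)
_m_small = _m_all.submask(_toy_size <= 15., 'small', verbose=False)
assert _m_large.submask(_toy_flux > 10., 'bright', verbose=False).msum() == 1
assert Masks_disjoint_complete([_m_large, _m_small])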
if __name__=='__main__':
### Required INPUTS
# lofar source catalogue, gaussian catalogue and ML catalogues for each
#path = '/local/wwilliams/projects/radio_imaging/lofar_surveys/source_class/t1_dr1/'
#lofargcat_file = path+'LOFAR_HBA_T1_DR1_catalog_v0.1.gaus.fits'
#lofarcat_file = path+'LOFAR_HBA_T1_DR1_catalog_v0.1.srl.fits'
#psmlcat_file = path+'lofar_matched_all.fix.fits'
#psmlgcat_file = path+'lofar_matched_gaus.fits'
path = '/local/wwilliams/projects/radio_imaging/lofar_surveys/LoTSS-DR1-July21-2017/'
lofargcat_file = path+'LOFAR_HBA_T1_DR1_catalog_v0.9.gaus.fixed.fits'
lofarcat_file = path+'LOFAR_HBA_T1_DR1_catalog_v0.9.srl.fixed.presort.fits'
psmlcat_file = path+'lofar_pw.fixed.fits'
psmlgcat_file = path+'lofar_gaus_pw.fixed.fits'
lofarcat_file_srt = path+'LOFAR_HBA_T1_DR1_catalog_v0.9.srl.fixed.sorted.fits'
# Gaus catalogue
lofargcat = Table.read(lofargcat_file)
# only relevant gaussians are in M or C sources
lofargcat = lofargcat[lofargcat['S_Code'] != 'S']
# Source catalogue
lofarcat = Table.read(lofarcat_file)
# PS ML - matches for sources and gaussians
psmlcat = Table.read(psmlcat_file)
psmlgcat = Table.read(psmlgcat_file)
## match the gaussians to the sources
## quicker to generate new unique names than match on 2 columns
## get new unique source_id by combining mosaic and src id
## replace string mosaic ID with unique int (perhaps there is a more logical mapping of mosaic name to int value)
#mid = lofargcat['Mosaic_ID']
#mid_unique = np.unique(mid)
#mid_int = np.array([np.where(mid_unique==m)[0][0] for m in mid])
## combine with Source_id for unique ID
#g_src_id_new = 10000*mid_int + lofargcat['Source_Name']
#lofargcat.add_column(Column(g_src_id_new, 'SID'))
#mid = lofarcat['Mosaic_ID']
#mid_unique = np.unique(mid)
#mid_int = np.array([np.where(mid_unique==m)[0][0] for m in mid])
## combine with Source_id for unique ID
#src_id_new = 10000*mid_int + lofarcat['Source_Name']
#lofarcat.add_column(Column(src_id_new, 'SID'))
## get the panstarrs ML information
# join the ps ml cat - they have identical RA/DEC (source_names were wrong)
c = ac.SkyCoord(lofarcat['RA'], lofarcat['DEC'], unit="deg")
cpsml = ac.SkyCoord(psmlcat['RA'], psmlcat['DEC'], unit="deg")
f_nn_idx,f_nn_sep2d,f_nn_dist3d = ac.match_coordinates_sky(c,cpsml,nthneighbor=1)
#psmlcat = psmlcat[f_nn_idx][f_nn_sep2d==0]
#lofarcat = lofarcat[f_nn_sep2d==0]
# note the large sources are missing from the ML catalogue
lrcol = np.zeros(len(lofarcat),dtype=float)
lrcol[f_nn_sep2d==0] = psmlcat['lr'][f_nn_idx][f_nn_sep2d==0]
#lofarcat.add_column(Column(psmlcat['lr_pc_7th'], 'LR'))
lofarcat.add_column(Column(lrcol, 'cLR'))
lrcol[np.isnan(lrcol)] = 0
lofarcat.add_column(Column(lrcol, 'LR'))
lrcol = np.zeros(len(lofarcat),dtype='S19')
lrcol[f_nn_sep2d==0] = psmlcat['AllWISE'][f_nn_idx][f_nn_sep2d==0]
lofarcat.add_column(Column(lrcol, 'LR_name_wise'))
lrcol = np.zeros(len(lofarcat),dtype=int)
lrcol[f_nn_sep2d==0] = psmlcat['objID'][f_nn_idx][f_nn_sep2d==0]
lofarcat.add_column(Column(lrcol, 'LR_name_ps'))
lrcol = np.zeros(len(lofarcat),dtype=float)
lrcol[f_nn_sep2d==0] = psmlcat['ra'][f_nn_idx][f_nn_sep2d==0]
lofarcat.add_column(Column(lrcol, 'LR_ra'))
lrcol = np.zeros(len(lofarcat),dtype=float)
lrcol[f_nn_sep2d==0] = psmlcat['dec'][f_nn_idx][f_nn_sep2d==0]
lofarcat.add_column(Column(lrcol, 'LR_dec'))
# join the ps ml gaus cat - they have identical RA/DEC (source_names were wrong)
cg = ac.SkyCoord(lofargcat['RA'], lofargcat['DEC'], unit="deg")
cpsmlg = ac.SkyCoord(psmlgcat['RA'], psmlgcat['DEC'], unit="deg")
f_nn_idx_g,f_nn_sep2d_g,f_nn_dist3d_g = ac.match_coordinates_sky(cg,cpsmlg,nthneighbor=1)
# note the large sources are missing from the ML catalogue
lrgcol = np.zeros(len(lofargcat),dtype=float)
lrgcol[f_nn_sep2d_g==0] = psmlgcat['lr'][f_nn_idx_g][f_nn_sep2d_g==0]
#lofarcat.add_column(Column(psmlcat['lr_pc_7th'], 'LR'))
lofargcat.add_column(Column(lrgcol, 'LR'))
lrgcol = np.zeros(len(lofargcat),dtype=float)
lrgcol[f_nn_sep2d_g==0] = psmlgcat['ra'][f_nn_idx_g][f_nn_sep2d_g==0]
lofargcat.add_column(Column(lrgcol, 'LR_ra'))
lrgcol = np.zeros(len(lofargcat),dtype=float)
lrgcol[f_nn_sep2d_g==0] = psmlgcat['dec'][f_nn_idx_g][f_nn_sep2d_g==0]
lofargcat.add_column(Column(lrgcol, 'LR_dec'))
add_G = False # add the gaussian information
lofarcat.add_column(Column(np.ones(len(lofarcat),dtype=int), 'Ng'))
lofarcat.add_column(Column(np.ones(len(lofarcat),dtype=float), 'G_LR_max'))
lofarcat.add_column(Column(np.ones(len(lofarcat),dtype=int), 'Ng_LR_good'))
lofarcat.add_column(Column(np.zeros(len(lofarcat),dtype=bool), 'Flag_G_LR_problem'))
if add_G:
lofarcat.add_column(Column(np.zeros(len(lofarcat),dtype=list), 'G_ind'))
m_S = lofarcat['S_Code'] =='S'
minds = np.where(~m_S)[0]
for i,sid in zip(minds, lofarcat['Source_Name'][~m_S]):
ig = np.where(lofargcat['Source_Name']==sid)[0]
lofarcat['Ng'][i]= len(ig)
lofarcat['G_LR_max'][i]= np.nanmax(lofargcat['LR'][ig])
igi = np.argmax(lofargcat['LR'][ig])
# for now, if one of the gaussian LR is better, take that
if lofarcat['G_LR_max'][i] > lofarcat['LR'][i]:
lofarcat['LR'][i] = lofarcat['G_LR_max'][i]
lofarcat['LR_ra'][i] = lofargcat['LR_ra'][ig[igi]]
lofarcat['LR_dec'][i] = lofargcat['LR_dec'][ig[igi]]
# how many unique acceptable matches are there for the gaussian components
matches_ra = np.unique(lofargcat['LR_ra'][ig][np.log10(1+lofargcat['LR'][ig]) > 0.36])
n_matches_ra = len(matches_ra)
if n_matches_ra > 1:
lofarcat['Flag_G_LR_problem'][i] = True
# any different to source match
if np.sum(matches_ra != lofarcat['LR_ra'][i]):
lofarcat['Flag_G_LR_problem'][i] = True
lofarcat['Ng_LR_good'][i]= np.nansum(np.log10(1+lofargcat['LR'][ig]) > 0.36)
if add_G:
lofarcat['G_ind'][i]= ig
lofarcat['G_LR_max'][m_S] = lofarcat['LR'][m_S]
lofarcat['Ng_LR_good'][m_S] = 1*(np.log10(1+lofarcat['LR'][m_S]) > 0.36)
# some flags for mult_gaus sources:
# source has good LR match, and no gaus
# multiple matches to different sources
# source has no good LR match, but one gaus does
#sys.exit()
# get the visual flags (must run get_visual_flags for these after doing visual confirmation - classify_*.py)
if 'clustered_flag' not in lofarcat.colnames:
raise RuntimeError('need the visual flag information for the clustered sources')
clustered_flag = lofarcat['clustered_flag']
if 'Lclustered_flag' not in lofarcat.colnames:
raise RuntimeError('need the visual flag information for the large faint clustered sources')
Lclustered_flag = lofarcat['Lclustered_flag']
if 'huge_faint_flag' not in lofarcat.colnames:
raise RuntimeError('need the visual flag information for the huge faint sources')
huge_faint_flag = lofarcat['huge_faint_flag']
if 'nhuge_faint_flag' not in lofarcat.colnames:
raise RuntimeError('need the visual flag information for the huge faint sources')
nhuge_faint_flag = lofarcat['nhuge_faint_flag']
if 'nhuge_2masx_flag' not in lofarcat.colnames:
raise RuntimeError('need the visual flag information for the large_nhuge_2masx sources')
nhuge_2masx_flag = lofarcat['nhuge_2masx_flag']
# get the large 2masx sources (must run match_2masx for these)
if '2MASX_match_large' not in lofarcat.colnames:
raise RuntimeError('need the 2masx information')
big2masx = lofarcat['2MASX_match_large']
## get artefact information (must run find_artefacts for these)
if 'artefact' not in lofarcat.colnames:
raise RuntimeError('need the artefact information')
artefact = lofarcat['artefact']
# combine the artefact flags
# artefacts have been identified through various routes of visual checking
Artefact_flag = (artefact == 1) | (huge_faint_flag ==4) | (nhuge_2masx_flag==4) | (Lclustered_flag == 1) | (clustered_flag == 1) | (nhuge_faint_flag==5)
lofarcat.add_column(Column(Artefact_flag, 'Artefact_flag'))
lofarcat.add_column(Column(np.zeros(len(lofarcat),dtype=int),'ID_flag'))
lofarcat.add_column(Column(np.zeros(len(lofarcat),dtype=int),'LGZ_flag'))
#############################################################################
# ## nearest neighbour separation
# get nearest neighbour for all sources
c = ac.SkyCoord(lofarcat['RA'], lofarcat['DEC'], unit="deg")
f_nn_idx,f_nn_sep2d,_ = ac.match_coordinates_sky(c,c,nthneighbor=2)
#f_nn3_idx,f_nn3_sep2d,_ = ac.match_coordinates_sky(c,c,nthneighbor=3)
f_nn4_idx,f_nn4_sep2d,_ = ac.match_coordinates_sky(c,c,nthneighbor=4)
f_nn5_idx,f_nn5_sep2d,_ = ac.match_coordinates_sky(c,c,nthneighbor=5)
#f_nn6_idx,f_nn6_sep2d,_ = ac.match_coordinates_sky(c,c,nthneighbor=6)
# now exclude artefacts - just put them far away always at the south pole
dec = lofarcat['DEC']
dec[Artefact_flag] = -90
cclean = ac.SkyCoord(lofarcat['RA'], dec, unit="deg")
f_nnc_idx,f_nnc_sep2d,_ = ac.match_coordinates_sky(cclean,cclean,nthneighbor=2)
f_nnc4_idx,f_nnc4_sep2d,_ = ac.match_coordinates_sky(cclean,cclean,nthneighbor=4)
f_nnc5_idx,f_nnc5_sep2d,_ = ac.match_coordinates_sky(cclean,cclean,nthneighbor=5)
if 'NN_sep' not in lofarcat.colnames:
lofarcat.add_column(Column(lofarcat['LR'][f_nn_idx], 'NN_LR'))
lofarcat.add_column(Column(f_nn_sep2d.to(u.arcsec).value, 'NN_sep'))
lofarcat.add_column(Column(f_nn_idx, 'NN_idx'))
lofarcat.add_column(Column(f_nn5_sep2d.to(u.arcsec).value, 'NN5_sep'))
lofarcat.add_column(Column(f_nn4_sep2d.to(u.arcsec).value, 'NN4_sep'))
lofarcat.add_column(Column(lofarcat['Total_flux'][f_nn_idx], 'NN_Total_flux'))
lofarcat.add_column(Column(lofarcat['Total_flux']/lofarcat['NN_Total_flux'], 'NN_Frat'))
lofarcat.add_column(Column(lofarcat['Maj'][f_nn_idx], 'NN_Maj'))
#'clean' nearest neighbour
if 'NNC_sep' not in lofarcat.colnames:
lofarcat.add_column(Column(lofarcat['LR'][f_nnc_idx], 'NNC_LR'))
lofarcat.add_column(Column(f_nnc_sep2d.to(u.arcsec).value, 'NNC_sep'))
lofarcat.add_column(Column(f_nnc_idx, 'NNC_idx'))
lofarcat.add_column(Column(f_nnc5_sep2d.to(u.arcsec).value, 'NNC5_sep'))
lofarcat.add_column(Column(f_nnc4_sep2d.to(u.arcsec).value, 'NNC4_sep'))
lofarcat.add_column(Column(lofarcat['Total_flux'][f_nnc_idx], 'NNC_Total_flux'))
lofarcat.add_column(Column(lofarcat['Total_flux']/lofarcat['NNC_Total_flux'], 'NNC_Frat'))
lofarcat.add_column(Column(lofarcat['Maj'][f_nnc_idx], 'NNC_Maj'))
########################################################
# make samples
# # source classes
#
# clases from draft flowchart
#
# source classes - parameters & masks
# >15 " and 10mJY -2%
size_large = 15. # in arcsec
separation1 = 45. # in arcsec
size_huge = 25. # in arcsec
#separation2 = 30. # in arcsec
lLR_thresh = 0.36 # LR threshold
lLR_thresh2 = 0.72 # LR threshold - stricter
fluxcut = 10 # in mJy
fluxcut2 = 2.5 # in mJy
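# Note: the LR cuts below are applied as np.log10(1 + LR) > lLR_thresh, i.e. a
# source counts as a good match when LR > 10**0.36 - 1, roughly 1.29.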
Ncat = len(lofarcat)
#m_all = lofarcat['RA'] > -1
masterlist = []
M_all = Mask(lofarcat['RA'] > -1,
'all',
qlabel='artefact?\n(visual confirmation)',
masterlist=masterlist)
# artefacts
M_all_artefact = M_all.submask(artefact,
'artefact',
'Artefact\n(visually confirmed)',
edgelabel='Y',
color='gray',
masterlist=masterlist)
lofarcat['ID_flag'][M_all_artefact.mask] = -1
# sources
M_all_clean = M_all.submask(~artefact,
'src',
'Clean',
edgelabel='N',
qlabel='Huge 2MASX?\n(r(2MASX)>60")',
masterlist=masterlist)
# big optical gal
M_all_biggal = M_all_clean.submask((big2masx),
'big2MASX',
'Huge 2MASX source\n(64 r(2MASX)>60" galaxies)',
edgelabel='Y',
masterlist=masterlist)
lofarcat['ID_flag'][M_all_biggal.mask] = 2
# sources - clean
M_all_clean2 = M_all_clean.submask(~big2masx,
'clean',
'Clean2',
edgelabel='N',
qlabel='Large?\n(s>{s:.0f}")'.format(s=size_large),
masterlist=masterlist)
# large
M_large = M_all_clean2.submask(lofarcat['Maj'] > size_large,
'large',
'large (s>{s:.0f}")'.format(s=size_large),
qlabel='Bright?\n(S>{f:.0f} mJy)'.format(f=fluxcut, s=size_large),
masterlist=masterlist)
# large bright
M_large_bright = M_large.submask(lofarcat['Total_flux'] > fluxcut,
'bright',
'large (s>{s:.0f}") & bright (S>{f:.0f} mJy)'.format(f=fluxcut, s=size_large),
qlabel='LGZ v1',
color='green',
masterlist=masterlist)
lofarcat['ID_flag'][M_large_bright.mask] = 3
lofarcat['LGZ_flag'][M_large_bright.mask] = 1
# large faint
M_large_faint = M_large.submask(lofarcat['Total_flux'] <= fluxcut,
'faint',
'large (s>{s:.0f}") & faint (S<={f:.0f} mJy)'.format(f=fluxcut, s=size_large),
edgelabel='N',
qlabel='Visual sorting',
#color='orange',
masterlist=masterlist)
# huge faint
huge = (lofarcat['Maj'] > 2*size_large)
# artefacts
lf_artefacts = (huge & (huge_faint_flag ==4)) | \
(lofarcat['2MASX'] & (nhuge_2masx_flag==4)) | \
((lofarcat['NN4_sep'] <= separation1+size_large) & (Lclustered_flag == 1)) | \
(nhuge_faint_flag == 5)
# complex
lf_complex = (huge & (huge_faint_flag ==1)) | \
(lofarcat['2MASX'] & (nhuge_2masx_flag==2)) | \
((lofarcat['NN4_sep'] <= separation1+size_large) & (Lclustered_flag == 2)) | \
(nhuge_faint_flag == 1)
# complex-zoom
lf_complex_zoom = (nhuge_faint_flag == 4)
# bright galaxy
lf_bright = (lofarcat['2MASX'] & (nhuge_2masx_flag==1)) | \
(huge & (huge_faint_flag ==2))
# no match possible
lf_nomatch = ( huge & (huge_faint_flag ==3)) | \
(nhuge_faint_flag == 3)
# good lr
lf_match = (nhuge_faint_flag == 2)
M_large_faint_artefact = M_large_faint.submask(lf_artefacts,
'artefact',
edgelabel='N(r)',
qlabel='artefact\n(visually confirmed)',
color='gray',
masterlist=masterlist)
lofarcat['ID_flag'][M_large_faint_artefact.mask] = -1
M_large_faint_complex = M_large_faint.submask(lf_complex,
'complex',
edgelabel='Y(*)',
qlabel='complex\n(LGZ)',
color='green',
masterlist=masterlist)
lofarcat['ID_flag'][M_large_faint_complex.mask] = 3210
lofarcat['LGZ_flag'][M_large_faint_complex.mask] = 2
M_large_faint_complex_zoom = M_large_faint.submask(lf_complex_zoom,
'lgzz',
edgelabel='complex-zoom',
qlabel='LGZ-zoom',
color='green',
masterlist=masterlist)
lofarcat['ID_flag'][M_large_faint_complex_zoom.mask] = 3220
lofarcat['LGZ_flag'][M_large_faint_complex_zoom.mask] = 20
M_large_faint_match = M_large_faint.submask(lf_match,
'match',
edgelabel='Y(m)',
qlabel='Accept ML\n(visually confirmed)',
color='blue',
masterlist=masterlist)
lofarcat['ID_flag'][M_large_faint_match.mask] = 1
M_large_faint_nomatch = M_large_faint.submask(lf_nomatch,
'nomatch',
edgelabel='Y(nm)',
qlabel='No match possible\n(visually confirmed)',
color='red',
masterlist=masterlist)
lofarcat['ID_flag'][M_large_faint_nomatch.mask] = 4
M_large_faint_2masx = M_large_faint.submask(lf_bright,
'2masx',
edgelabel='Y',
qlabel='bright galaxy\n(visually confirmed)',
color='blue',
masterlist=masterlist)
lofarcat['ID_flag'][M_large_faint_2masx.mask] = 2
#####
# compact
M_small = M_all_clean2.submask(lofarcat['Maj'] <= size_large,
'small',
'compact (s<{s:.0f}")'.format(s=size_large),
edgelabel='N',
qlabel='Isolated?\n(NN>{nn:.0f}")'.format(nn=separation1),
masterlist=masterlist)
# compact isolated
M_small_isol = M_small.submask(lofarcat['NN_sep'] > separation1,
'isol',
'compact isolated (s<{s:.0f}", NN>{nn:.0f}")'.format(s=size_large, nn=separation1),
qlabel='S?',
masterlist=masterlist)
# compact isolated
M_small_isol_S = M_small_isol.submask(lofarcat['S_Code'] == 'S',
'S',
'compact isolated (s<{s:.0f}", NN>{nn:.0f}") S'.format(s=size_large, nn=separation1),
qlabel='LR > {l:.2f}?'.format(l=lLR_thresh),
masterlist=masterlist)
# compact isolated good lr
M_small_isol_S_lr = M_small_isol_S.submask(np.log10(1+lofarcat['LR']) > lLR_thresh,
'lr',
'compact isolated good LR (s<{s:.0f}", NN>{nn:.0f}")'.format(s=size_large, nn=separation1),
color='blue',
qlabel='Accept LR',
masterlist=masterlist)
lofarcat['ID_flag'][M_small_isol_S_lr.mask] = 1
# compact isolated badd lr
M_small_isol_S_nlr = M_small_isol_S.submask(np.log10(1+lofarcat['LR']) <= lLR_thresh,
'nlr',
'compact isolated bad LR (s<{s:.0f}", NN>{nn:.0f}")'.format(s=size_large, nn=separation1),
edgelabel='N',
color='red',
qlabel='Accept no LR',
masterlist=masterlist)
lofarcat['ID_flag'][M_small_isol_S_nlr.mask] = 1
# compact isolated nS
M_small_isol_nS = M_small_isol.submask(lofarcat['S_Code'] != 'S',
'nS',
'compact isolated (s<{s:.0f}", NN>{nn:.0f}") !S'.format(s=size_large, nn=separation1),
edgelabel='N',
color='orange',
qlabel='TBC?\nprob w LR',
masterlist=masterlist)
lofarcat['ID_flag'][M_small_isol_nS.mask] = 5
# compact isolated good lr
M_small_isol_nS_gprob = M_small_isol_nS.submask(lofarcat['Flag_G_LR_problem'],
'gprob',
color='orange',
qlabel='problem',
edgelabel='Y',
masterlist=masterlist)
# compact isolated good lr
M_small_isol_nS_nprob = M_small_isol_nS.submask(~lofarcat['Flag_G_LR_problem'],
'nprob',
qlabel='LR?',
edgelabel='N',
masterlist=masterlist)
# compact isolated good lr
M_small_isol_nS_nprob_lr = M_small_isol_nS_nprob.submask(np.log10(1+lofarcat['LR']) > lLR_thresh,
'lr',
qlabel='accept LR',
edgelabel='Y',
color='blue',
masterlist=masterlist)
# compact isolated good lr
M_small_isol_nS_nprob_nlr = M_small_isol_nS_nprob.submask(np.log10(1+lofarcat['LR']) <= lLR_thresh,
'nlr',
qlabel='accept no LR',
edgelabel='N',
color='red',
masterlist=masterlist)
# compact not isolated
M_small_nisol = M_small.submask(lofarcat['NN_sep'] <= separation1,
'nisol',
'compact not isolated (s<{s:.0f}", NN<{nn:.0f}")'.format(s=size_large, nn=separation1),
edgelabel='N',
qlabel='Clustered?\n(NN5<{nn:.0f}")\n(visual confirmation)'.format(nn=separation1),
masterlist=masterlist)
M_small_nisol_artefact = M_small_nisol.submask((lofarcat['NN5_sep'] <= separation1) & (clustered_flag == 1),
'artefact',
edgelabel='N(r)',
qlabel='artefact\n(visually confirmed)',
color='gray',
masterlist=masterlist)
lofarcat['ID_flag'][M_small_nisol_artefact.mask] = -1
M_small_nisol_complex = M_small_nisol.submask((lofarcat['NN5_sep'] <= separation1) & (clustered_flag == 2),
'complex',
edgelabel='Y',
qlabel='complex\n(LGZ)',
color='green',
masterlist=masterlist)
lofarcat['ID_flag'][M_small_nisol_complex.mask] = 3210
lofarcat['LGZ_flag'][M_small_nisol_complex.mask] = 2
M_small_nisol_nclustered = M_small_nisol.submask((lofarcat['NN5_sep'] > separation1) | ((lofarcat['NN5_sep'] <= separation1) & (clustered_flag == 3)),
'nclustered',
'compact not isolated (s<{s:.0f}", NN5>{nn:.0f}")'.format(s=size_large, nn=separation1),
edgelabel='N',
qlabel='NN Large?',
masterlist=masterlist)
# compact not isolated, nnlarge
M_small_nisol_nclustered_NNlarge = M_small_nisol_nclustered.submask(lofarcat['NN_Maj'] > size_large,
'NNlarge',
'compact not isolated (s<{s:.0f}", NN<{nn:.0f}") NN large (s>{s:.0f}")'.format(s=size_large, nn=separation1),
edgelabel='Y',
qlabel='NN in VC list\nLR > {l:.2f}?'.format(l=lLR_thresh),
masterlist=masterlist)
# compact not isolated, nnlarge, nnbright
in_LGZ = (lofarcat['LGZ_flag'][lofarcat['NN_idx']] == 1) | (lofarcat['LGZ_flag'][lofarcat['NN_idx']] == 2) | (lofarcat['LGZ_flag'][lofarcat['NN_idx']] == 20)
M_small_nisol_nclustered_NNlarge_NNvc = M_small_nisol_nclustered_NNlarge.submask(in_LGZ,
'NNvc',
edgelabel='Y',
color='orange',
masterlist=masterlist)
lofarcat['ID_flag'][M_small_nisol_nclustered_NNlarge_NNvc.mask] = 1
# compact not isolated, nnlarge, nnbright
M_small_nisol_nclustered_NNlarge_NNnvc = M_small_nisol_nclustered_NNlarge.submask(~in_LGZ,
'NNnvc',
edgelabel='N',
color='orange',
masterlist=masterlist)
lofarcat['ID_flag'][M_small_nisol_nclustered_NNlarge_NNnvc.mask] = 1
# compact not isolated, nnsmall
M_small_nisol_nclustered_NNsmall = M_small_nisol_nclustered.submask(lofarcat['NN_Maj'] <= size_large,
'NNsmall',
'compact not isolated (s<{s:.0f}", NN<{nn:.0f}") NN small (s<={s:.0f}")'.format(s=size_large, nn=separation1),
edgelabel='N',
qlabel='LR > {l:.2f}?'.format(l=lLR_thresh),
masterlist=masterlist)
# compact not isolated, nnsmall, lr
M_small_nisol_nclustered_NNsmall_lr = M_small_nisol_nclustered_NNsmall.submask(np.log10(1+lofarcat['LR']) > lLR_thresh,
'lr',
'compact not isolated (s<{s:.0f}", NN<{nn:.0f}") NN small (s<={s:.0f}"), good LR'.format(s=size_large, nn=separation1),
edgelabel='Y',
qlabel='NN LR > {l:.2f}?'.format(l=lLR_thresh),
masterlist=masterlist)
# compact not isolated, nnsmall, lr, NNlr
M_small_nisol_nclustered_NNsmall_lr_NNlr = M_small_nisol_nclustered_NNsmall_lr.submask(np.log10(1+lofarcat['NN_LR']) > lLR_thresh,
'NNlr',
'compact not isolated (s<{s:.0f}", NN<{nn:.0f}") NN small (s<={s:.0f}"), good LR, NN good lr'.format(s=size_large, nn=separation1),
edgelabel='Y',
color='blue',
qlabel='accept LR',
masterlist=masterlist)
lofarcat['ID_flag'][M_small_nisol_nclustered_NNsmall_lr_NNlr.mask] = 1
# compact not isolated, nnsmall, lr, NNnlr
M_small_nisol_nclustered_NNsmall_lr_NNnlr = M_small_nisol_nclustered_NNsmall_lr.submask(np.log10(1+lofarcat['NN_LR']) <= lLR_thresh,
'NNnlr',
'compact not isolated (s<{s:.0f}", NN<{nn:.0f}") NN small (s<={s:.0f}"), good LR, NN bad lr'.format(s=size_large, nn=separation1),
edgelabel='N',
color='blue',
qlabel='accept LR',
masterlist=masterlist)
lofarcat['ID_flag'][M_small_nisol_nclustered_NNsmall_lr_NNnlr.mask] = 1
# compact not isolated, nnsmall, nlr
M_small_nisol_nclustered_NNsmall_nlr = M_small_nisol_nclustered_NNsmall.submask(np.log10(1+lofarcat['LR']) <= lLR_thresh,
'nlr',
'compact not isolated (s<{s:.0f}", NN<{nn:.0f}") NN small (s<={s:.0f}"), bad LR'.format(s=size_large, nn=separation1),
edgelabel='N',
qlabel='NN LR > {l:.2f}?'.format(l=lLR_thresh),
masterlist=masterlist)
# compact not isolated, nnsmall, nlr, NNlr
M_small_nisol_nclustered_NNsmall_nlr_NNlr = M_small_nisol_nclustered_NNsmall_nlr.submask(np.log10(1+lofarcat['NN_LR']) > lLR_thresh,
'NNlr',
'compact not isolated (s<{s:.0f}", NN<{nn:.0f}") NN small (s<={s:.0f}"), bad LR, NN good lr'.format(s=size_large, nn=separation1),
edgelabel='Y',
color='red',
qlabel='accept no LR',
masterlist=masterlist)
lofarcat['ID_flag'][M_small_nisol_nclustered_NNsmall_nlr_NNlr.mask] = 1
# compact not isolated, nnsmall, nlr, NNnlr - there are possible doubles here!!
M_small_nisol_nclustered_NNsmall_nlr_NNnlr = M_small_nisol_nclustered_NNsmall_nlr.submask(np.log10(1+lofarcat['NN_LR']) <= lLR_thresh,
'NNnlr',
'compact not isolated (s<{s:.0f}", NN<{nn:.0f}") NN small (s<={s:.0f}"), bad LR, NN bad lr'.format(s=size_large, nn=separation1),
edgelabel='N',
#color='orange',
qlabel='0.1 < flux ratio < 10?',
masterlist=masterlist)
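# pairs where neither the source nor its NN has a good LR may be two components
# of a single double source; as a first criterion, require their total fluxes to
# be within a factor of 10 of each other (NN_Frat is presumably the flux ratio)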
C1_simflux = (lofarcat['NN_Frat'] <= 10) & (lofarcat['NN_Frat'] > 0.1)
M_small_nisol_nclustered_NNsmall_nlr_NNnlr_simflux = M_small_nisol_nclustered_NNsmall_nlr_NNnlr.submask(C1_simflux,
'simflux',
'compact not isolated (s<{s:.0f}", NN<{nn:.0f}") NN small (s<={s:.0f}"), bad LR, NN bad lr, sim flux'.format(s=size_large, nn=separation1),
edgelabel='Y',
#color='red',
qlabel='S1+S2 >= 50*(sep/100)**2 ?',
masterlist=masterlist)
lofarcat['ID_flag'][M_small_nisol_nclustered_NNsmall_nlr_NNnlr_simflux.mask] = 5
M_small_nisol_nclustered_NNsmall_nlr_NNnlr_diffflux = M_small_nisol_nclustered_NNsmall_nlr_NNnlr.submask(~C1_simflux,
'diffflux',
'compact not isolated (s<{s:.0f}", NN<{nn:.0f}") NN small (s<={s:.0f}"), bad LR, NN bad lr, diffflux'.format(s=size_large, nn=separation1),
edgelabel='N',
color='red',
qlabel='accept no LR',
masterlist=masterlist)
lofarcat['ID_flag'][M_small_nisol_nclustered_NNsmall_nlr_NNnlr_diffflux.mask] = 1
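# second criterion for candidate doubles: the combined flux must be high for the
# given separation, S1+S2 >= 50*(sep/100)^2 -- flux in mJy and separation in
# arcsec, assuming the catalogue unit conventions used in the plots below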
C2_dist = ((lofarcat['NN_Total_flux']+lofarcat['Total_flux']) >= 50*(lofarcat['NN_sep']/100.)**2.)
M_small_nisol_nclustered_NNsmall_nlr_NNnlr_simflux_sep = M_small_nisol_nclustered_NNsmall_nlr_NNnlr_simflux.submask(C2_dist,
'dist',
'compact not isolated (s<{s:.0f}", NN<{nn:.0f}") NN small (s<={s:.0f}"), bad LR, NN bad lr, sim flux, high combined flux'.format(s=size_large, nn=separation1),
edgelabel='Y',
color='cyan',
qlabel='check?',
masterlist=masterlist)
lofarcat['ID_flag'][M_small_nisol_nclustered_NNsmall_nlr_NNnlr_simflux_sep.mask] = 5
M_small_nisol_nclustered_NNsmall_nlr_NNnlr_simflux_nsep = M_small_nisol_nclustered_NNsmall_nlr_NNnlr_simflux.submask(~C2_dist,
'ndist',
'compact not isolated (s<{s:.0f}", NN<{nn:.0f}") NN small (s<={s:.0f}"), bad LR, NN bad lr, sim flux, low combined flux'.format(s=size_large, nn=separation1),
edgelabel='N',
color='red',
qlabel='accept no LR?',
masterlist=masterlist)
lofarcat['ID_flag'][M_small_nisol_nclustered_NNsmall_nlr_NNnlr_simflux_nsep.mask] = 1
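# summary of the ID_flag values assigned in the tree above: -1 = artefact,
# 1 = decided by LR (accept match or no match), 3210 = send to LGZ,
# 5 = to be confirmed (possible doubles / LR problems)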
# other masks
#maskDC0 = lofarcat['DC_Maj'] == 0
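# sources with Maj == 0 have no measured size and are excluded from the size
# distributions plotted below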
maskDC0 = lofarcat['Maj'] == 0
M_S = Mask(lofarcat['S_Code'] == 'S', 'single')
M_M = Mask(lofarcat['S_Code'] == 'M', 'multiple')
M_C = Mask(lofarcat['S_Code'] == 'C', 'complex')
M_Ngaus = []
for i in range(1,6):
M_Ngaus.append(Mask(lofarcat['Ng'] == i, 'Ng='+str(i)))
M_huge = Mask(lofarcat['Maj'] > size_huge, 'huge')
M_small = Mask(lofarcat['Maj'] <= size_large, 'small')
M_isol = Mask(lofarcat['NN_sep'] > separation1, 'isol')
M_cluster = Mask(lofarcat['NN5_sep'] < separation1, 'clustered',
'Clustered (5 sources within sep1)')
M_bright = Mask(lofarcat['Total_flux'] > fluxcut, 'bright')
M_lr = Mask(np.log10(1+lofarcat['LR']) > lLR_thresh, 'lr')
M_nlr = Mask(np.log10(1+lofarcat['LR']) <= lLR_thresh, 'nlr')
M_huge = Mask(lofarcat['Maj'] > 100., 'huge')
M_LGZ2 = Mask(lofarcat['LGZ_flag'] == 2., 'LGZv2')
M_LGZz2 = Mask(lofarcat['LGZ_flag'] == 20., 'LGZv2_zoom')
# make a test sample for each final mask
makesample = 1
if makesample:
for t in masterlist:
if not t.has_children :
print t.name
t.make_sample(lofarcat,Nsample=None)
# test that the final masks are indeed mutually disjoint and cover all sources
endlist = []
for t in masterlist:
if not t.has_children:
endlist.append(t)
if not Masks_disjoint_complete(endlist):
print 'WARNING: children aren\'t disjoint and complete'
if 'FC_flag' not in lofarcat.colnames:
lofarcat.add_column(Column(-1*np.ones(len(lofarcat),dtype=int), 'FC_flag'))
i = 0
for t in masterlist:
if not t.has_children:
lofarcat['FC_flag'][t.mask] = i
i += 1
## write output file
if os.path.exists(lofarcat_file_srt):
os.remove(lofarcat_file_srt)
lofarcat.write(lofarcat_file_srt)
# make flowchart from list of masks
plot_flowchart = True
plot_verbose = False
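# the decision tree is rendered with pygraphviz: question nodes are drawn as
# diamonds, final classes as parallelograms, and each edge's width scales with
# the fraction of sources taking that branch (penwidth = f * PW)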
try:
import pygraphviz as pgv
except ImportError:
print 'no pygraphviz; cannot make visual flowchart'
plot_flowchart = False
if plot_flowchart:
PW = 75.
A=pgv.AGraph(directed=True, strict=True)
A.edge_attr['arrowhead']='none'
A.node_attr['start']='south'
A.node_attr['end']='north'
A.node_attr['style']='filled'
A.node_attr['fillcolor']='white'
    A.node_attr['fixedsize']='true'
A.node_attr['width']='2.5'
A.node_attr['height']='2'
A.edge_attr['color']='gray'
A.edge_attr['tailclip']='false'
A.edge_attr['headclip']='false'
A.graph_attr['outputorder'] = 'edgesfirst'
#A.graph_attr['splines'] = 'ortho' # orthogonal
A.graph_attr['rankdir'] = 'TB'
A.add_node('start', label='ALL\n{n:n}'.format(n=M_all.N), shape='parallelogram')
#A.add_node('m_all', label='Large?'.format(n=M_all.N), shape='diamond')
A.add_edge('start', 'all', label='', penwidth=M_all.f*PW)
i = 0
for t in masterlist:
if t.has_children:
shape='diamond' # intermediate point is a question
            if t.p < 1.:
                label='{lab:s}\n{n:n}\n{p:.2f}%'.format(lab=t.qlabel,n=t.n,p=t.p)
            elif t.p < 10.:
                label='{lab:s}\n{n:n}\n{p:.1f}%'.format(lab=t.qlabel,n=t.n,p=t.p)
            else:
                label='{lab:s}\n{n:n}\n{p:.0f}%'.format(lab=t.qlabel,n=t.n,p=t.p)
else:
shape='parallelogram' # end point is a final mask
if t.p < 1.:
label='- {i:n} -\n{lab:s}\n{n:n}\n{p:.2f}%'.format(i=i,lab=t.qlabel,n=t.n,p=t.p)
elif t.p < 10.:
label='- {i:n} -\n{lab:s}\n{n:n}\n{p:.1f}%'.format(i=i,lab=t.qlabel,n=t.n,p=t.p)
else:
label='- {i:n} -\n{lab:s}\n{n:n}\n{p:.0f}%'.format(i=i,lab=t.qlabel,n=t.n,p=t.p)
i += 1
if t.color:
c = t.color
else:
c = 'black'
# add node
A.add_node(t.name, label=label, shape=shape, color=c)
# add edge to parent
if t.has_parent:
A.add_edge(t.parent.name, t.name, label=t.edgelabel, penwidth=t.f*PW)
if plot_verbose:
print(A.string()) # print dot file to standard output
# make the flowchart
#Optional prog=['neato'|'dot'|'twopi'|'circo'|'fdp'|'nop']
#neato, dot, twopi, circo, fdp, nop, wc, acyclic, gvpr, gvcolor, ccomps, sccmap, tred, sfdp.
A.layout('dot') # layout with dot
A.draw('flow_s{s:.0f}_nn{nn:.0f}.png'.format(s=size_large,nn=separation1)) # write to file
A.write('flow_s{s:.0f}_nn{nn:.0f}.dot'.format(s=size_large,nn=separation1)) # write to file
## TESTING ##
## check gaus ML
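# for multi-Gaussian ('M') sources, compare the source-level LR with the LRs of
# the constituent Gaussians; classes: 1 = source and >=1 Gaussian accepted,
# 2 = source only, 3 = >=1 Gaussian only, 4 = neither. sepmaxes records the
# maximum pairwise separation of the Gaussians making up each source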
if add_G:
lofarmcat = lofarcat[M_M.mask]
sepmaxes = np.zeros(len(lofarmcat))
classes = np.zeros(len(lofarmcat))
for i in range(len(lofarmcat)):
lr = lofarmcat['LR'][i]
alr = np.log10(1+lr) >= lLR_thresh
c = ac.SkyCoord(lofargcat[lofarmcat['G_ind'][i]]['RA'], lofargcat[lofarmcat['G_ind'][i]]['DEC'], unit='deg')
sepmax = 0
#print np.array(lofargcat[lofarmcat['G_ind'][i]]['RA'])
for ci in c:
#print ci.separation(c).to('arcsec')
sepmax = np.max((sepmax, np.max(ci.separation(c).to('arcsec').value)))
glr = np.array(lofargcat[lofarmcat['G_ind'][i]]['LR'] )
aglr = np.log10(1+glr) >= lLR_thresh
#print lr, glr
if alr:
if np.any(aglr):
classes[i] = 1
else:
# accept source match
classes[i] = 2
elif ~alr:
if np.any(aglr):
classes[i] = 3
else:
classes[i] = 4
# accept no source match and no gaus match
sepmaxes[i] = sepmax
#print alr, aglr, sepmax, lofarmcat['Maj'][i]
f,axs = plt.subplots(2,2,sharex=True, sharey=True)
axs = axs.flatten()
for ic,lab in [[1,'A LR ; G LR'],[2,'A LR ; G !LR'],[3,'A !LR ; G LR'],[4,'A !LR ; G !LR']]:
ax = axs[ic-1]
ax.plot(lofarmcat['Maj'][classes==ic], sepmaxes[classes==ic], '.', label=lab)
ax.legend()
ax.set_ylabel('max G separation [arcsec]')
ax.set_xlabel('size [arcsec]')
plt.savefig('gaus_size_separation')
fluxcuts = np.logspace(-4, 0, 1000)
nS_fluxcuts = np.nan*np.zeros(len(fluxcuts))
for fi,fluxcut in enumerate(fluxcuts):
m = lofarcat['Total_flux']/1e3 > fluxcut
nS_fluxcuts[fi] = 1.*np.sum(lofarcat['S_Code'][m] == 'S') /np.sum(m)
#nS_fluxcuts[fi] = 1.*np.sum(m)
f,ax = pp.paper_single_ax()
ax.plot(fluxcuts, nS_fluxcuts)
ax.set_ylabel('f(Single) ($S>S_{cut}$)')
ax.set_xlabel('$\log S_{cut}$ [Jy]')
plt.savefig('fraction_single_vs_S')
sizecuts = np.linspace(15, 60, 10)
fluxcuts = np.logspace(-3, 1, 1000)
f,ax = pp.paper_single_ax()
for si,sizecut in enumerate(sizecuts):
ms = lofarcat['Maj'] > sizecut
nS_fluxcuts = np.nan*np.zeros(len(fluxcuts))
for fi,fluxcut in enumerate(fluxcuts):
m = ms & (lofarcat['Total_flux']/1e3 > fluxcut)
nS_fluxcuts[fi] = 1.*np.sum(m) /np.sum(ms)
ax.plot(fluxcuts, nS_fluxcuts)
#ax.set_ylabel('$f(Maj>Maj_{cut})$ ($S>S_{cut}$)')
#ax.set_xlabel('$\log S_{cut}$ [Jy]')
plt.savefig('fraction_large_vs_S')
sizecuts = np.arange(10, 35, 1)
NNcuts = np.arange(20, 125, 5)
IM = np.zeros((len(sizecuts), len(NNcuts)))
fluxcuts = np.logspace(-3, 1, 1000)
f,ax = pp.paper_single_ax()
for si,sizecut in enumerate(sizecuts):
for ni,NNcut in enumerate(NNcuts):
m = (lofarcat['Maj'] <= sizecut) & (lofarcat['NN_sep'] >= NNcut)
IM[si,ni] = np.sum(m)
IM = IM/Ncat
c = ax.imshow(IM.T, origin='lower', extent=(10,60, 20,120))
cbar = plt.colorbar(c)
cbar.set_label('fraction')
ax.invert_xaxis()
ax.set_xlabel(r'$<$ size [arcsec]')
ax.set_ylabel(r'$>$ NN separation [arcsec]')
plt.savefig('number_compact_isolated')
f,axs = plt.subplots(1,2,sharex=False,sharey=True,figsize=(12,6))
ax=axs[0]
ax.plot(NNcuts,IM.T)
ax.set_ylabel('fraction')
ax.set_xlabel(r'$>$ NN separation [arcsec]')
ax=axs[1]
ax.plot(sizecuts,IM)
ax.set_xlabel(r'$<$ size [arcsec]')
nb=100
# plot LR distribuion for different classes
f,ax = pp.paper_single_ax()
_ =ax.hist(np.log10(1.+lofarcat['LR']), bins=100, normed=True, log=False,histtype='step',color='k',linewidth=2,label='All')
_ =ax.hist(np.log10(1.+lofarcat['LR'][M_small_isol_S.mask]), bins=100, normed=True, histtype='step', label=M_small_isol_S.name.replace('_','\_'))
#_ =ax.hist(np.log10(1.+lofarcat['LR'][m_small_isol_nS]), bins=100, normed=True, histtype='step', label=l_small_isol_nS)
_ =ax.hist(np.log10(1.+lofarcat['LR'][M_small_nisol.mask]), bins=100, normed=True, histtype='step', label=M_small_nisol.name.replace('_','\_'))
_ =ax.hist(np.log10(1.+lofarcat['LR'][M_large.mask]), bins=100, normed=True, histtype='step', label=M_large.name.replace('_','\_'))
_ =ax.hist(np.log10(1.+lofarcat['LR'][M_large_faint_complex.mask]), bins=100, normed=True, histtype='step', label=M_large_faint_complex.name.replace('_','\_'))
#_ =ax.hist(np.log10(1.+lofarcat['LR'][M_large_faint_nhuge_n2masx.mask]), bins=100, normed=True, histtype='step', label=M_large_faint_nhuge_n2masx.name.replace('_','\_'))
ax.legend()
ax.set_ylim(0,2)
ax.set_xlabel('$\log (1+LR)$')
ax.set_ylabel('$N$')
plt.savefig('lr_dist_classes')
# plot LR distribuion for different classes
f,ax = pp.paper_single_ax()
counts, xedges, yedges, im =ax.hist2d(np.log10(1.+lofarcat['LR']), np.log10(lofarcat['Maj']), bins=100, normed=True, vmin=0, vmax=2, label='')
cbar = plt.colorbar(im, ax=ax)
ax.legend()
#ax.set_ylim(0,2)
ax.set_xlabel('$\log (1+LR)$')
ax.set_ylabel('$\log$ Maj [arcsec]')
cbar.set_label('$N$')
plt.savefig('lr_dist_size')
f,ax = pp.paper_single_ax()
counts, xedges, yedges, im =ax.hist2d(np.log10(1.+lofarcat['LR']), np.log10(lofarcat['Total_flux']), bins=100, normed=True, vmin=0, vmax=2, label='')
cbar = plt.colorbar(im, ax=ax)
ax.legend()
#ax.set_ylim(0,2)
ax.set_xlabel('$\log (1+LR)$')
ax.set_ylabel('$\log S$ [Jy]')
cbar.set_label('$N$')
plt.savefig('lr_dist_flux')
#f,ax = pp.paper_single_ax()
#f = plt.figure()
f,axs = plt.subplots(1,2,sharex=True,sharey=True,figsize=(12,6))
ax = axs[0]
counts, xedges, yedges, im =ax.hist2d(np.log10(lofarcat['Maj'][M_S.mask]), np.log10(lofarcat['Total_flux'][M_S.mask]), bins=100, label='')
cbar = plt.colorbar(im, ax=ax)
x1,x2 = ax.get_xlim()
y1,y2 = ax.get_ylim()
ax.vlines(np.log10(15.),y1,y2)
ax.hlines(np.log10(10.),x1,x2)
ax.legend()
ax.set_title('S')
#ax.set_ylim(0,2)
ax.set_xlabel('$\log $ Maj [arcsec]')
ax.set_ylabel('$\log S$ [mJy]')
cbar.set_label('$N$')
ax = axs[1]
counts, xedges, yedges, im =ax.hist2d(np.log10(lofarcat['Maj'][~M_S.mask]), np.log10(lofarcat['Total_flux'][~M_S.mask]), bins=100, label='')
cbar = plt.colorbar(im, ax=ax)
x1,x2 = ax.get_xlim()
y1,y2 = ax.get_ylim()
ax.vlines(np.log10(15.),y1,y2)
ax.hlines(np.log10(10.),x1,x2)
ax.legend()
ax.set_title('!S')
#ax.set_ylim(0,2)
ax.set_xlabel('$\log $ Maj [arcsec]')
#ax.set_ylabel('$\log S$ [mJy]')
cbar.set_label('$N$')
plt.savefig('lr_dist_size_flux')
#f,ax = pp.paper_single_ax()
#f = plt.figure()
f,axs = plt.subplots(1,2,sharex=True,sharey=True,figsize=(12,6))
ax = axs[0]
counts, xedges, yedges, im =ax.hist2d((lofarcat['Maj'][M_lr.mask]), (lofarcat['NN_sep'][M_lr.mask]), bins=200, range=((0,50),(0,200)), label='')
cbar = plt.colorbar(im, ax=ax)
x1,x2 = ax.get_xlim()
y1,y2 = ax.get_ylim()
ax.vlines((15.),y1,y2)
ax.hlines((10.),x1,x2)
ax.legend()
ax.set_title('good LR')
#ax.set_ylim(0,2)
ax.set_xlabel('Maj [arcsec]')
ax.set_ylabel('NN separation [arcsec]')
cbar.set_label('$N$')
ax = axs[1]
counts, xedges, yedges, im =ax.hist2d((lofarcat['Maj'][~M_lr.mask]), (lofarcat['NN_sep'][~M_lr.mask]), bins=200, range=((0,50),(0,200)), label='')
cbar = plt.colorbar(im, ax=ax)
x1,x2 = ax.get_xlim()
y1,y2 = ax.get_ylim()
ax.vlines((15.),y1,y2)
ax.hlines((10.),x1,x2)
ax.legend()
ax.set_title('bad LR')
#ax.set_ylim(0,2)
ax.set_xlabel('Maj [arcsec]')
#ax.set_ylabel('$\log S$ [mJy]')
cbar.set_label('$N$')
plt.savefig('lr_dist_size_nnsep')
# # diagnostic plots
# plot size distribution
f, ax = pp.paper_single_ax()
ax.hist(lofarcat['Maj'][~maskDC0], range=(0,80), bins=100, histtype='step', label='All')
ax.hist(lofarcat['Maj'][~maskDC0&M_S.mask], range=(0,80), bins=100, histtype='step', label='S')
ax.hist(lofarcat['Maj'][~maskDC0&M_M.mask], range=(0,80), bins=100, histtype='step', label='M')
ax.hist(lofarcat['Maj'][~maskDC0&M_C.mask], range=(0,80), bins=100, histtype='step', label='C')
ax.set_xlabel('Major Axis [arcsec]')
ax.set_ylabel('N')
ax.legend()
plt.savefig('size_dist_classes')
# plot nearest neighbour distribution
f,ax = pp.paper_single_ax()
ax.hist(lofarcat['NN_sep'], bins=100, histtype='step', label='All')
ax.hist(lofarcat['NN_sep'][M_S.mask], bins=100, histtype='step', label='S')
ax.set_xlabel('Nearest source [arcsec]')
ax.set_ylabel('N')
ax.legend()
plt.savefig('NNdist_dist')
# 2D histogram : size-nearest neighbour distance
# for 'S' sources
f,ax = pp.paper_single_ax()
X = lofarcat['NN_sep'][~maskDC0&M_S.mask]
Y = lofarcat['Maj'][~maskDC0&M_S.mask]
H, xe, ye = np.histogram2d( X, Y, bins=(100,100), normed=True)
H2 = H.T
xc = (xe[1:] +xe[:-1] )/2.
yc = (ye[1:] +ye[:-1] )/2.
c = ax.contour(xc, yc, H2, [0.5])
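# colour each point by its local density: map every (X, Y) point to its bin in
# the 2D histogram and look up the normalised count there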
xind = np.sum(X>xe[:,np.newaxis],axis=0)-1
yind = np.sum(Y>ye[:,np.newaxis],axis=0)-1
Hval = H2[yind,xind]
c = ax.scatter(X, Y,c=Hval,s=10, edgecolor='none',zorder=1)
x1,x2 = ax.get_xlim()
y1,y2 = ax.get_ylim()
ax.hlines(size_large,x1,x2,colors='k',linestyle='dashed')
ax.vlines(separation1,y1,y2,colors='k',linestyle='dashed')
ax.set_xlabel('NN separation [arcsec]')
ax.set_ylabel('Maj [arcsec]')
ax.contour(xc, yc, H2)
plt.savefig('size_NNdist_dist_s')
# and 'M' sources
f,ax = pp.paper_single_ax()
X = lofarcat['NN_sep'][~maskDC0&M_M.mask]
Y = lofarcat['Maj'][~maskDC0&M_M.mask]
H, xe, ye = np.histogram2d( X, Y, bins=(100,100), normed=True)
H2 = H.T
xc = (xe[1:] +xe[:-1] )/2.
yc = (ye[1:] +ye[:-1] )/2.
c = ax.contour(xc, yc, H2, [0.5])
xind = np.sum(X>xe[:,np.newaxis],axis=0)-1
yind = np.sum(Y>ye[:,np.newaxis],axis=0)-1
Hval = H2[yind,xind]
c = ax.scatter(X, Y,c=Hval,s=10, edgecolor='none',zorder=1)
x1,x2 = ax.get_xlim()
y1,y2 = ax.get_ylim()
ax.hlines(size_large,x1,x2,colors='k',linestyle='dashed')
ax.vlines(separation1,y1,y2,colors='k',linestyle='dashed')
ax.set_xlabel('NN separation [arcsec]')
ax.set_ylabel('Maj [arcsec]')
ax.contour(xc, yc, H2)
plt.savefig('size_NNdist_dist_m')
|
wllwen007/lofar-sources
|
flowchart/lofar_source_sorter.py
|
Python
|
gpl-3.0
| 51,851
|
[
"Galaxy",
"Gaussian"
] |
11695ccb22034eb0b7073222498dea1fd2106c27fe6b1a57eb0adc4ab6df408c
|
# proxy module
from __future__ import absolute_import
from mayavi.filters.cut_plane import *
|
enthought/etsproxy
|
enthought/mayavi/filters/cut_plane.py
|
Python
|
bsd-3-clause
| 93
|
[
"Mayavi"
] |
fcc1a73f8c2ebc89839cf2654f61e6ca9a1c97a384608b090057c55ac920d3df
|
# Autodetecting setup.py script for building the Python extensions
#
__version__ = "$Revision: 60581 $"
import sys, os, imp, re, optparse
from distutils import log
from distutils import sysconfig
from distutils import text_file
from distutils.errors import *
from distutils.core import Extension, setup
from distutils.command.build_ext import build_ext
from distutils.command.install import install
from distutils.command.install_lib import install_lib
# This global variable is used to hold the list of modules to be disabled.
disabled_module_list = []
def add_dir_to_list(dirlist, dir):
"""Add the directory 'dir' to the list 'dirlist' (at the front) if
1) 'dir' is not already in 'dirlist'
2) 'dir' actually exists, and is a directory."""
if dir is not None and os.path.isdir(dir) and dir not in dirlist:
dirlist.insert(0, dir)
def find_file(filename, std_dirs, paths):
"""Searches for the directory where a given file is located,
and returns a possibly-empty list of additional directories, or None
if the file couldn't be found at all.
'filename' is the name of a file, such as readline.h or libcrypto.a.
'std_dirs' is the list of standard system directories; if the
file is found in one of them, no additional directives are needed.
'paths' is a list of additional locations to check; if the file is
found in one of them, the resulting list will contain the directory.
"""
# Check the standard locations
for dir in std_dirs:
f = os.path.join(dir, filename)
if os.path.exists(f): return []
# Check the additional directories
for dir in paths:
f = os.path.join(dir, filename)
if os.path.exists(f):
return [dir]
# Not found anywhere
return None
def find_library_file(compiler, libname, std_dirs, paths):
result = compiler.find_library_file(std_dirs + paths, libname)
if result is None:
return None
# Check whether the found file is in one of the standard directories
dirname = os.path.dirname(result)
for p in std_dirs:
# Ensure path doesn't end with path separator
p = p.rstrip(os.sep)
if p == dirname:
return [ ]
# Otherwise, it must have been in one of the additional directories,
# so we have to figure out which one.
for p in paths:
# Ensure path doesn't end with path separator
p = p.rstrip(os.sep)
if p == dirname:
return [p]
else:
assert False, "Internal error: Path not found in std_dirs or paths"
def module_enabled(extlist, modname):
"""Returns whether the module 'modname' is present in the list
of extensions 'extlist'."""
extlist = [ext for ext in extlist if ext.name == modname]
return len(extlist)
def find_module_file(module, dirlist):
"""Find a module in a set of possible folders. If it is not found
return the unadorned filename"""
list = find_file(module, [], dirlist)
if not list:
return module
if len(list) > 1:
log.info("WARNING: multiple copies of %s found"%module)
return os.path.join(list[0], module)
class PyBuildExt(build_ext):
def build_extensions(self):
# Detect which modules should be compiled
self.detect_modules()
# Remove modules that are present on the disabled list
self.extensions = [ext for ext in self.extensions
if ext.name not in disabled_module_list]
# Fix up the autodetected modules, prefixing all the source files
# with Modules/ and adding Python's include directory to the path.
(srcdir,) = sysconfig.get_config_vars('srcdir')
if not srcdir:
# Maybe running on Windows but not using CYGWIN?
raise ValueError("No source directory; cannot proceed.")
# Figure out the location of the source code for extension modules
moddir = os.path.join(os.getcwd(), srcdir, 'Modules')
moddir = os.path.normpath(moddir)
srcdir, tail = os.path.split(moddir)
srcdir = os.path.normpath(srcdir)
moddir = os.path.normpath(moddir)
moddirlist = [moddir]
incdirlist = ['./Include']
# Platform-dependent module source and include directories
platform = self.get_platform()
if platform in ('darwin', 'mac') and ("--disable-toolbox-glue" not in
sysconfig.get_config_var("CONFIG_ARGS")):
# Mac OS X also includes some mac-specific modules
macmoddir = os.path.join(os.getcwd(), srcdir, 'Mac/Modules')
moddirlist.append(macmoddir)
incdirlist.append('./Mac/Include')
alldirlist = moddirlist + incdirlist
# Fix up the paths for scripts, too
self.distribution.scripts = [os.path.join(srcdir, filename)
for filename in self.distribution.scripts]
for ext in self.extensions[:]:
ext.sources = [ find_module_file(filename, moddirlist)
for filename in ext.sources ]
if ext.depends is not None:
ext.depends = [find_module_file(filename, alldirlist)
for filename in ext.depends]
ext.include_dirs.append( '.' ) # to get config.h
for incdir in incdirlist:
ext.include_dirs.append( os.path.join(srcdir, incdir) )
# If a module has already been built statically,
# don't build it here
if ext.name in sys.builtin_module_names:
self.extensions.remove(ext)
if platform != 'mac':
# Parse Modules/Setup and Modules/Setup.local to figure out which
# modules are turned on in the file.
remove_modules = []
for filename in ('Modules/Setup', 'Modules/Setup.local'):
input = text_file.TextFile(filename, join_lines=1)
while 1:
line = input.readline()
if not line: break
line = line.split()
remove_modules.append(line[0])
input.close()
for ext in self.extensions[:]:
if ext.name in remove_modules:
self.extensions.remove(ext)
# When you run "make CC=altcc" or something similar, you really want
# those environment variables passed into the setup.py phase. Here's
# a small set of useful ones.
compiler = os.environ.get('CC')
args = {}
# unfortunately, distutils doesn't let us provide separate C and C++
# compilers
if compiler is not None:
(ccshared,cflags) = sysconfig.get_config_vars('CCSHARED','CFLAGS')
args['compiler_so'] = compiler + ' ' + ccshared + ' ' + cflags
self.compiler.set_executables(**args)
build_ext.build_extensions(self)
def build_extension(self, ext):
if ext.name == '_ctypes':
if not self.configure_ctypes(ext):
return
try:
build_ext.build_extension(self, ext)
except (CCompilerError, DistutilsError), why:
self.announce('WARNING: building of extension "%s" failed: %s' %
(ext.name, sys.exc_info()[1]))
return
# Workaround for Mac OS X: The Carbon-based modules cannot be
# reliably imported into a command-line Python
if 'Carbon' in ext.extra_link_args:
self.announce(
'WARNING: skipping import check for Carbon-based "%s"' %
ext.name)
return
# Workaround for Cygwin: Cygwin currently has fork issues when many
# modules have been imported
if self.get_platform() == 'cygwin':
self.announce('WARNING: skipping import check for Cygwin-based "%s"'
% ext.name)
return
ext_filename = os.path.join(
self.build_lib,
self.get_ext_filename(self.get_ext_fullname(ext.name)))
try:
imp.load_dynamic(ext.name, ext_filename)
except ImportError, why:
self.announce('*** WARNING: renaming "%s" since importing it'
' failed: %s' % (ext.name, why), level=3)
assert not self.inplace
basename, tail = os.path.splitext(ext_filename)
newname = basename + "_failed" + tail
if os.path.exists(newname):
os.remove(newname)
os.rename(ext_filename, newname)
# XXX -- This relies on a Vile HACK in
# distutils.command.build_ext.build_extension(). The
# _built_objects attribute is stored there strictly for
# use here.
# If there is a failure, _built_objects may not be there,
# so catch the AttributeError and move on.
try:
for filename in self._built_objects:
os.remove(filename)
except AttributeError:
self.announce('unable to remove files (ignored)')
except:
exc_type, why, tb = sys.exc_info()
self.announce('*** WARNING: importing extension "%s" '
'failed with %s: %s' % (ext.name, exc_type, why),
level=3)
def get_platform(self):
# Get value of sys.platform
for platform in ['cygwin', 'beos', 'darwin', 'atheos', 'osf1']:
if sys.platform.startswith(platform):
return platform
return sys.platform
def detect_modules(self):
# Ensure that /usr/local is always used
add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')
add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
# Add paths specified in the environment variables LDFLAGS and
# CPPFLAGS for header and library files.
# We must get the values from the Makefile and not the environment
# directly since an inconsistently reproducible issue comes up where
# the environment variable is not set even though the value were passed
# into configure and stored in the Makefile (issue found on OS X 10.3).
for env_var, arg_name, dir_list in (
('LDFLAGS', '-L', self.compiler.library_dirs),
('CPPFLAGS', '-I', self.compiler.include_dirs)):
env_val = sysconfig.get_config_var(env_var)
if env_val:
# To prevent optparse from raising an exception about any
                # options in env_val that it doesn't know about we strip out
# all double dashes and any dashes followed by a character
# that is not for the option we are dealing with.
#
# Please note that order of the regex is important! We must
# strip out double-dashes first so that we don't end up with
# substituting "--Long" to "-Long" and thus lead to "ong" being
# used for a library directory.
env_val = re.sub(r'(^|\s+)-(-|(?!%s))' % arg_name[1],
' ', env_val)
parser = optparse.OptionParser()
# Make sure that allowing args interspersed with options is
# allowed
parser.allow_interspersed_args = True
parser.error = lambda msg: None
parser.add_option(arg_name, dest="dirs", action="append")
options = parser.parse_args(env_val.split())[0]
if options.dirs:
for directory in reversed(options.dirs):
add_dir_to_list(dir_list, directory)
if os.path.normpath(sys.prefix) != '/usr':
add_dir_to_list(self.compiler.library_dirs,
sysconfig.get_config_var("LIBDIR"))
add_dir_to_list(self.compiler.include_dirs,
sysconfig.get_config_var("INCLUDEDIR"))
try:
have_unicode = unicode
except NameError:
have_unicode = 0
# lib_dirs and inc_dirs are used to search for files;
# if a file is found in one of those directories, it can
# be assumed that no additional -I,-L directives are needed.
lib_dirs = self.compiler.library_dirs + [
'/lib64', '/usr/lib64',
'/lib', '/usr/lib',
]
inc_dirs = self.compiler.include_dirs + ['/usr/include']
exts = []
config_h = sysconfig.get_config_h_filename()
config_h_vars = sysconfig.parse_config_h(open(config_h))
platform = self.get_platform()
(srcdir,) = sysconfig.get_config_vars('srcdir')
# Check for AtheOS which has libraries in non-standard locations
if platform == 'atheos':
lib_dirs += ['/system/libs', '/atheos/autolnk/lib']
lib_dirs += os.getenv('LIBRARY_PATH', '').split(os.pathsep)
inc_dirs += ['/system/include', '/atheos/autolnk/include']
inc_dirs += os.getenv('C_INCLUDE_PATH', '').split(os.pathsep)
# OSF/1 and Unixware have some stuff in /usr/ccs/lib (like -ldb)
if platform in ['osf1', 'unixware7', 'openunix8']:
lib_dirs += ['/usr/ccs/lib']
if platform == 'darwin':
# This should work on any unixy platform ;-)
# If the user has bothered specifying additional -I and -L flags
# in OPT and LDFLAGS we might as well use them here.
# NOTE: using shlex.split would technically be more correct, but
# also gives a bootstrap problem. Let's hope nobody uses directories
# with whitespace in the name to store libraries.
cflags, ldflags = sysconfig.get_config_vars(
'CFLAGS', 'LDFLAGS')
for item in cflags.split():
if item.startswith('-I'):
inc_dirs.append(item[2:])
for item in ldflags.split():
if item.startswith('-L'):
lib_dirs.append(item[2:])
# Check for MacOS X, which doesn't need libm.a at all
math_libs = ['m']
if platform in ['darwin', 'beos', 'mac']:
math_libs = []
# XXX Omitted modules: gl, pure, dl, SGI-specific modules
#
# The following modules are all pretty straightforward, and compile
# on pretty much any POSIXish platform.
#
# Some modules that are normally always on:
exts.append( Extension('_weakref', ['_weakref.c']) )
# array objects
exts.append( Extension('array', ['arraymodule.c']) )
# complex math library functions
exts.append( Extension('cmath', ['cmathmodule.c'],
libraries=math_libs) )
# math library functions, e.g. sin()
exts.append( Extension('math', ['mathmodule.c'],
libraries=math_libs) )
# fast string operations implemented in C
exts.append( Extension('strop', ['stropmodule.c']) )
# time operations and variables
exts.append( Extension('time', ['timemodule.c'],
libraries=math_libs) )
exts.append( Extension('datetime', ['datetimemodule.c', 'timemodule.c'],
libraries=math_libs) )
# random number generator implemented in C
exts.append( Extension("_random", ["_randommodule.c"]) )
# fast iterator tools implemented in C
exts.append( Extension("itertools", ["itertoolsmodule.c"]) )
# high-performance collections
exts.append( Extension("collections", ["collectionsmodule.c"]) )
# bisect
exts.append( Extension("_bisect", ["_bisectmodule.c"]) )
# heapq
exts.append( Extension("_heapq", ["_heapqmodule.c"]) )
# operator.add() and similar goodies
exts.append( Extension('operator', ['operator.c']) )
# _functools
exts.append( Extension("_functools", ["_functoolsmodule.c"]) )
# Python C API test module
exts.append( Extension('_testcapi', ['_testcapimodule.c']) )
# profilers (_lsprof is for cProfile.py)
exts.append( Extension('_hotshot', ['_hotshot.c']) )
exts.append( Extension('_lsprof', ['_lsprof.c', 'rotatingtree.c']) )
# static Unicode character database
if have_unicode:
exts.append( Extension('unicodedata', ['unicodedata.c']) )
# access to ISO C locale support
data = open('pyconfig.h').read()
        m = re.search(r"#\s*define\s+WITH_LIBINTL\s+1\s*", data)
if m is not None:
locale_libs = ['intl']
else:
locale_libs = []
if platform == 'darwin':
locale_extra_link_args = ['-framework', 'CoreFoundation']
else:
locale_extra_link_args = []
exts.append( Extension('_locale', ['_localemodule.c'],
libraries=locale_libs,
extra_link_args=locale_extra_link_args) )
# Modules with some UNIX dependencies -- on by default:
# (If you have a really backward UNIX, select and socket may not be
# supported...)
# fcntl(2) and ioctl(2)
exts.append( Extension('fcntl', ['fcntlmodule.c']) )
if platform not in ['mac']:
# pwd(3)
exts.append( Extension('pwd', ['pwdmodule.c']) )
# grp(3)
exts.append( Extension('grp', ['grpmodule.c']) )
# spwd, shadow passwords
if (config_h_vars.get('HAVE_GETSPNAM', False) or
config_h_vars.get('HAVE_GETSPENT', False)):
exts.append( Extension('spwd', ['spwdmodule.c']) )
# select(2); not on ancient System V
exts.append( Extension('select', ['selectmodule.c']) )
# Helper module for various ascii-encoders
exts.append( Extension('binascii', ['binascii.c']) )
# Fred Drake's interface to the Python parser
exts.append( Extension('parser', ['parsermodule.c']) )
# cStringIO and cPickle
exts.append( Extension('cStringIO', ['cStringIO.c']) )
exts.append( Extension('cPickle', ['cPickle.c']) )
# Memory-mapped files (also works on Win32).
if platform not in ['atheos', 'mac']:
exts.append( Extension('mmap', ['mmapmodule.c']) )
# Lance Ellinghaus's syslog module
if platform not in ['mac']:
# syslog daemon interface
exts.append( Extension('syslog', ['syslogmodule.c']) )
# George Neville-Neil's timing module:
# Deprecated in PEP 4 http://www.python.org/peps/pep-0004.html
# http://mail.python.org/pipermail/python-dev/2006-January/060023.html
#exts.append( Extension('timing', ['timingmodule.c']) )
#
# Here ends the simple stuff. From here on, modules need certain
# libraries, are platform-specific, or present other surprises.
#
# Multimedia modules
# These don't work for 64-bit platforms!!!
# These represent audio samples or images as strings:
# Operations on audio samples
# According to #993173, this one should actually work fine on
# 64-bit platforms.
exts.append( Extension('audioop', ['audioop.c']) )
# Disabled on 64-bit platforms
if sys.maxint != 9223372036854775807L:
# Operations on images
exts.append( Extension('imageop', ['imageop.c']) )
# Read SGI RGB image files (but coded portably)
exts.append( Extension('rgbimg', ['rgbimgmodule.c']) )
# readline
do_readline = self.compiler.find_library_file(lib_dirs, 'readline')
if platform == 'darwin':
# MacOSX 10.4 has a broken readline. Don't try to build
# the readline module unless the user has installed a fixed
# readline package
if find_file('readline/rlconf.h', inc_dirs, []) is None:
do_readline = False
if do_readline:
if sys.platform == 'darwin':
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
                # for dynamic libraries on the entire path.
                # This way a statically linked custom readline gets picked up
# before the (broken) dynamic library in /usr/lib.
readline_extra_link_args = ('-Wl,-search_paths_first',)
else:
readline_extra_link_args = ()
readline_libs = ['readline']
if self.compiler.find_library_file(lib_dirs,
'ncursesw'):
readline_libs.append('ncursesw')
elif self.compiler.find_library_file(lib_dirs,
'ncurses'):
readline_libs.append('ncurses')
elif self.compiler.find_library_file(lib_dirs, 'curses'):
readline_libs.append('curses')
elif self.compiler.find_library_file(lib_dirs +
['/usr/lib/termcap'],
'termcap'):
readline_libs.append('termcap')
exts.append( Extension('readline', ['readline.c'],
library_dirs=['/usr/lib/termcap'],
extra_link_args=readline_extra_link_args,
libraries=readline_libs) )
if platform not in ['mac']:
# crypt module.
if self.compiler.find_library_file(lib_dirs, 'crypt'):
libs = ['crypt']
else:
libs = []
exts.append( Extension('crypt', ['cryptmodule.c'], libraries=libs) )
# CSV files
exts.append( Extension('_csv', ['_csv.c']) )
# socket(2)
exts.append( Extension('_socket', ['socketmodule.c'],
depends = ['socketmodule.h']) )
# Detect SSL support for the socket module (via _ssl)
search_for_ssl_incs_in = [
'/usr/local/ssl/include',
'/usr/contrib/ssl/include/'
]
ssl_incs = find_file('openssl/ssl.h', inc_dirs,
search_for_ssl_incs_in
)
if ssl_incs is not None:
krb5_h = find_file('krb5.h', inc_dirs,
['/usr/kerberos/include'])
if krb5_h:
ssl_incs += krb5_h
ssl_libs = find_library_file(self.compiler, 'ssl',lib_dirs,
['/usr/local/ssl/lib',
'/usr/contrib/ssl/lib/'
] )
if (ssl_incs is not None and
ssl_libs is not None):
exts.append( Extension('_ssl', ['_ssl.c'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto'],
depends = ['socketmodule.h']), )
# find out which version of OpenSSL we have
openssl_ver = 0
openssl_ver_re = re.compile(
'^\s*#\s*define\s+OPENSSL_VERSION_NUMBER\s+(0x[0-9a-fA-F]+)' )
for ssl_inc_dir in inc_dirs + search_for_ssl_incs_in:
name = os.path.join(ssl_inc_dir, 'openssl', 'opensslv.h')
if os.path.isfile(name):
try:
incfile = open(name, 'r')
for line in incfile:
m = openssl_ver_re.match(line)
if m:
openssl_ver = eval(m.group(1))
break
except IOError:
pass
# first version found is what we'll use (as the compiler should)
if openssl_ver:
break
#print 'openssl_ver = 0x%08x' % openssl_ver
if (ssl_incs is not None and
ssl_libs is not None and
openssl_ver >= 0x00907000):
# The _hashlib module wraps optimized implementations
# of hash functions from the OpenSSL library.
exts.append( Extension('_hashlib', ['_hashopenssl.c'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto']) )
else:
# The _sha module implements the SHA1 hash algorithm.
exts.append( Extension('_sha', ['shamodule.c']) )
# The _md5 module implements the RSA Data Security, Inc. MD5
# Message-Digest Algorithm, described in RFC 1321. The
# necessary files md5.c and md5.h are included here.
exts.append( Extension('_md5',
sources = ['md5module.c', 'md5.c'],
depends = ['md5.h']) )
if (openssl_ver < 0x00908000):
# OpenSSL doesn't do these until 0.9.8 so we'll bring our own hash
exts.append( Extension('_sha256', ['sha256module.c']) )
exts.append( Extension('_sha512', ['sha512module.c']) )
# Modules that provide persistent dictionary-like semantics. You will
# probably want to arrange for at least one of them to be available on
# your machine, though none are defined by default because of library
# dependencies. The Python module anydbm.py provides an
# implementation independent wrapper for these; dumbdbm.py provides
# similar functionality (but slower of course) implemented in Python.
# Sleepycat^WOracle Berkeley DB interface.
# http://www.oracle.com/database/berkeley-db/db/index.html
#
# This requires the Sleepycat^WOracle DB code. The supported versions
# are set below. Visit the URL above to download
# a release. Most open source OSes come with one or more
# versions of BerkeleyDB already installed.
max_db_ver = (4, 5)
# NOTE: while the _bsddb.c code links against BerkeleyDB 4.6.x
# we leave that version disabled by default as it has proven to be
# quite a buggy library release on many platforms.
min_db_ver = (3, 3)
db_setup_debug = False # verbose debug prints from this script?
# construct a list of paths to look for the header file in on
# top of the normal inc_dirs.
db_inc_paths = [
'/usr/include/db4',
'/usr/local/include/db4',
'/opt/sfw/include/db4',
'/sw/include/db4',
'/usr/include/db3',
'/usr/local/include/db3',
'/opt/sfw/include/db3',
'/sw/include/db3',
]
# 4.x minor number specific paths
for x in range(max_db_ver[1]+1):
db_inc_paths.append('/usr/include/db4%d' % x)
db_inc_paths.append('/usr/include/db4.%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.4.%d/include' % x)
db_inc_paths.append('/usr/local/include/db4%d' % x)
db_inc_paths.append('/pkg/db-4.%d/include' % x)
db_inc_paths.append('/opt/db-4.%d/include' % x)
# 3.x minor number specific paths
for x in (3,):
db_inc_paths.append('/usr/include/db3%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.3.%d/include' % x)
db_inc_paths.append('/usr/local/include/db3%d' % x)
db_inc_paths.append('/pkg/db-3.%d/include' % x)
db_inc_paths.append('/opt/db-3.%d/include' % x)
# Add some common subdirectories for Sleepycat DB to the list,
# based on the standard include directories. This way DB3/4 gets
# picked up when it is installed in a non-standard prefix and
# the user has added that prefix into inc_dirs.
std_variants = []
for dn in inc_dirs:
std_variants.append(os.path.join(dn, 'db3'))
std_variants.append(os.path.join(dn, 'db4'))
for x in range(max_db_ver[1]+1):
std_variants.append(os.path.join(dn, "db4%d"%x))
std_variants.append(os.path.join(dn, "db4.%d"%x))
for x in (2,3):
std_variants.append(os.path.join(dn, "db3%d"%x))
std_variants.append(os.path.join(dn, "db3.%d"%x))
db_inc_paths = std_variants + db_inc_paths
db_ver_inc_map = {}
class db_found(Exception): pass
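        # db_found is raised inside the nested loops below as a structured way
        # of breaking out of all of them once a usable db library is located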
try:
# See whether there is a Sleepycat header in the standard
# search path.
for d in inc_dirs + db_inc_paths:
f = os.path.join(d, "db.h")
if db_setup_debug: print "db: looking for db.h in", f
if os.path.exists(f):
f = open(f).read()
m = re.search(r"#define\WDB_VERSION_MAJOR\W(\d+)", f)
if m:
db_major = int(m.group(1))
m = re.search(r"#define\WDB_VERSION_MINOR\W(\d+)", f)
db_minor = int(m.group(1))
db_ver = (db_major, db_minor)
# Avoid 4.6 prior to 4.6.21 due to a BerkeleyDB bug
if db_ver == (4, 6):
m = re.search(r"#define\WDB_VERSION_PATCH\W(\d+)", f)
db_patch = int(m.group(1))
if db_patch < 21:
print "db.h:", db_ver, "patch", db_patch,
print "being ignored (4.6.x must be >= 4.6.21)"
continue
if ( (not db_ver_inc_map.has_key(db_ver)) and
(db_ver <= max_db_ver and db_ver >= min_db_ver) ):
# save the include directory with the db.h version
                            # (first occurrence only)
db_ver_inc_map[db_ver] = d
print "db.h: found", db_ver, "in", d
else:
# we already found a header for this library version
if db_setup_debug: print "db.h: ignoring", d
else:
# ignore this header, it didn't contain a version number
if db_setup_debug: print "db.h: unsupported version", db_ver, "in", d
db_found_vers = db_ver_inc_map.keys()
db_found_vers.sort()
while db_found_vers:
db_ver = db_found_vers.pop()
db_incdir = db_ver_inc_map[db_ver]
# check lib directories parallel to the location of the header
db_dirs_to_check = [
os.path.join(db_incdir, '..', 'lib64'),
os.path.join(db_incdir, '..', 'lib'),
os.path.join(db_incdir, '..', '..', 'lib64'),
os.path.join(db_incdir, '..', '..', 'lib'),
]
db_dirs_to_check = filter(os.path.isdir, db_dirs_to_check)
                # Look for a version specific db-X.Y before an ambiguous dbX
# XXX should we -ever- look for a dbX name? Do any
# systems really not name their library by version and
# symlink to more general names?
for dblib in (('db-%d.%d' % db_ver),
('db%d%d' % db_ver),
('db%d' % db_ver[0])):
dblib_file = self.compiler.find_library_file(
db_dirs_to_check + lib_dirs, dblib )
if dblib_file:
dblib_dir = [ os.path.abspath(os.path.dirname(dblib_file)) ]
raise db_found
else:
if db_setup_debug: print "db lib: ", dblib, "not found"
except db_found:
print "db lib: using", db_ver, dblib
if db_setup_debug: print "db: lib dir", dblib_dir, "inc dir", db_incdir
db_incs = [db_incdir]
dblibs = [dblib]
# We add the runtime_library_dirs argument because the
# BerkeleyDB lib we're linking against often isn't in the
# system dynamic library search path. This is usually
# correct and most trouble free, but may cause problems in
# some unusual system configurations (e.g. the directory
# is on an NFS server that goes away).
exts.append(Extension('_bsddb', ['_bsddb.c'],
library_dirs=dblib_dir,
runtime_library_dirs=dblib_dir,
include_dirs=db_incs,
libraries=dblibs))
else:
if db_setup_debug: print "db: no appropriate library found"
db_incs = None
dblibs = []
dblib_dir = None
# The sqlite interface
sqlite_setup_debug = False # verbose debug prints from this script?
# We hunt for #define SQLITE_VERSION "n.n.n"
# We need to find >= sqlite version 3.0.8
sqlite_incdir = sqlite_libdir = None
sqlite_inc_paths = [ '/usr/include',
'/usr/include/sqlite',
'/usr/include/sqlite3',
'/usr/local/include',
'/usr/local/include/sqlite',
'/usr/local/include/sqlite3',
]
MIN_SQLITE_VERSION_NUMBER = (3, 0, 8)
MIN_SQLITE_VERSION = ".".join([str(x)
for x in MIN_SQLITE_VERSION_NUMBER])
# Scan the default include directories before the SQLite specific
# ones. This allows one to override the copy of sqlite on OSX,
# where /usr/include contains an old version of sqlite.
for d in inc_dirs + sqlite_inc_paths:
f = os.path.join(d, "sqlite3.h")
if os.path.exists(f):
if sqlite_setup_debug: print "sqlite: found %s"%f
incf = open(f).read()
m = re.search(
r'\s*.*#\s*.*define\s.*SQLITE_VERSION\W*"(.*)"', incf)
if m:
sqlite_version = m.group(1)
sqlite_version_tuple = tuple([int(x)
for x in sqlite_version.split(".")])
if sqlite_version_tuple >= MIN_SQLITE_VERSION_NUMBER:
# we win!
print "%s/sqlite3.h: version %s"%(d, sqlite_version)
sqlite_incdir = d
break
else:
if sqlite_setup_debug:
print "%s: version %d is too old, need >= %s"%(d,
sqlite_version, MIN_SQLITE_VERSION)
elif sqlite_setup_debug:
print "sqlite: %s had no SQLITE_VERSION"%(f,)
if sqlite_incdir:
sqlite_dirs_to_check = [
os.path.join(sqlite_incdir, '..', 'lib64'),
os.path.join(sqlite_incdir, '..', 'lib'),
os.path.join(sqlite_incdir, '..', '..', 'lib64'),
os.path.join(sqlite_incdir, '..', '..', 'lib'),
]
sqlite_libfile = self.compiler.find_library_file(
sqlite_dirs_to_check + lib_dirs, 'sqlite3')
sqlite_libdir = [os.path.abspath(os.path.dirname(sqlite_libfile))]
if sqlite_incdir and sqlite_libdir:
sqlite_srcs = ['_sqlite/cache.c',
'_sqlite/connection.c',
'_sqlite/cursor.c',
'_sqlite/microprotocols.c',
'_sqlite/module.c',
'_sqlite/prepare_protocol.c',
'_sqlite/row.c',
'_sqlite/statement.c',
'_sqlite/util.c', ]
sqlite_defines = []
if sys.platform != "win32":
sqlite_defines.append(('MODULE_NAME', '"sqlite3"'))
else:
sqlite_defines.append(('MODULE_NAME', '\\"sqlite3\\"'))
if sys.platform == 'darwin':
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
                # for dynamic libraries on the entire path.
                # This way a statically linked custom sqlite gets picked up
# before the dynamic library in /usr/lib.
sqlite_extra_link_args = ('-Wl,-search_paths_first',)
else:
sqlite_extra_link_args = ()
exts.append(Extension('_sqlite3', sqlite_srcs,
define_macros=sqlite_defines,
include_dirs=["Modules/_sqlite",
sqlite_incdir],
library_dirs=sqlite_libdir,
runtime_library_dirs=sqlite_libdir,
extra_link_args=sqlite_extra_link_args,
libraries=["sqlite3",]))
# Look for Berkeley db 1.85. Note that it is built as a different
# module name so it can be included even when later versions are
# available. A very restrictive search is performed to avoid
# accidentally building this module with a later version of the
        # underlying db library. Many BSD-ish Unixes incorporate db 1.85
# symbols into libc and place the include file in /usr/include.
#
# If the better bsddb library can be built (db_incs is defined)
# we do not build this one. Otherwise this build will pick up
# the more recent berkeleydb's db.h file first in the include path
# when attempting to compile and it will fail.
f = "/usr/include/db.h"
if os.path.exists(f) and not db_incs:
data = open(f).read()
            m = re.search(r"#\s*define\s+HASHVERSION\s+2\s*", data)
if m is not None:
# bingo - old version used hash file format version 2
### XXX this should be fixed to not be platform-dependent
### but I don't have direct access to an osf1 platform and
### seemed to be muffing the search somehow
libraries = platform == "osf1" and ['db'] or None
if libraries is not None:
exts.append(Extension('bsddb185', ['bsddbmodule.c'],
libraries=libraries))
else:
exts.append(Extension('bsddb185', ['bsddbmodule.c']))
# The standard Unix dbm module:
if platform not in ['cygwin']:
if find_file("ndbm.h", inc_dirs, []) is not None:
# Some systems have -lndbm, others don't
if self.compiler.find_library_file(lib_dirs, 'ndbm'):
ndbm_libs = ['ndbm']
else:
ndbm_libs = []
exts.append( Extension('dbm', ['dbmmodule.c'],
define_macros=[('HAVE_NDBM_H',None)],
libraries = ndbm_libs ) )
elif (self.compiler.find_library_file(lib_dirs, 'gdbm')
and find_file("gdbm/ndbm.h", inc_dirs, []) is not None):
exts.append( Extension('dbm', ['dbmmodule.c'],
define_macros=[('HAVE_GDBM_NDBM_H',None)],
libraries = ['gdbm'] ) )
elif db_incs is not None:
exts.append( Extension('dbm', ['dbmmodule.c'],
library_dirs=dblib_dir,
runtime_library_dirs=dblib_dir,
include_dirs=db_incs,
define_macros=[('HAVE_BERKDB_H',None),
('DB_DBM_HSEARCH',None)],
libraries=dblibs))
# Anthony Baxter's gdbm module. GNU dbm(3) will require -lgdbm:
if (self.compiler.find_library_file(lib_dirs, 'gdbm')):
exts.append( Extension('gdbm', ['gdbmmodule.c'],
libraries = ['gdbm'] ) )
# Unix-only modules
if platform not in ['mac', 'win32']:
# Steen Lumholt's termios module
exts.append( Extension('termios', ['termios.c']) )
# Jeremy Hylton's rlimit interface
if platform not in ['atheos']:
exts.append( Extension('resource', ['resource.c']) )
# Sun yellow pages. Some systems have the functions in libc.
if platform not in ['cygwin', 'atheos']:
if (self.compiler.find_library_file(lib_dirs, 'nsl')):
libs = ['nsl']
else:
libs = []
exts.append( Extension('nis', ['nismodule.c'],
libraries = libs) )
# Curses support, requiring the System V version of curses, often
# provided by the ncurses library.
panel_library = 'panel'
if (self.compiler.find_library_file(lib_dirs, 'ncursesw')):
curses_libs = ['ncursesw']
# Bug 1464056: If _curses.so links with ncursesw,
# _curses_panel.so must link with panelw.
panel_library = 'panelw'
exts.append( Extension('_curses', ['_cursesmodule.c'],
libraries = curses_libs) )
elif (self.compiler.find_library_file(lib_dirs, 'ncurses')):
curses_libs = ['ncurses']
exts.append( Extension('_curses', ['_cursesmodule.c'],
libraries = curses_libs) )
elif (self.compiler.find_library_file(lib_dirs, 'curses')
and platform != 'darwin'):
# OSX has an old Berkeley curses, not good enough for
# the _curses module.
if (self.compiler.find_library_file(lib_dirs, 'terminfo')):
curses_libs = ['curses', 'terminfo']
elif (self.compiler.find_library_file(lib_dirs, 'termcap')):
curses_libs = ['curses', 'termcap']
else:
curses_libs = ['curses']
exts.append( Extension('_curses', ['_cursesmodule.c'],
libraries = curses_libs) )
# If the curses module is enabled, check for the panel module
if (module_enabled(exts, '_curses') and
self.compiler.find_library_file(lib_dirs, panel_library)):
exts.append( Extension('_curses_panel', ['_curses_panel.c'],
libraries = [panel_library] + curses_libs) )
# Andrew Kuchling's zlib module. Note that some versions of zlib
# 1.1.3 have security problems. See CERT Advisory CA-2002-07:
# http://www.cert.org/advisories/CA-2002-07.html
#
# zlib 1.1.4 is fixed, but at least one vendor (RedHat) has decided to
# patch its zlib 1.1.3 package instead of upgrading to 1.1.4. For
# now, we still accept 1.1.3, because we think it's difficult to
# exploit this in Python, and we'd rather make it RedHat's problem
# than our problem <wink>.
#
# You can upgrade zlib to version 1.1.4 yourself by going to
# http://www.gzip.org/zlib/
zlib_inc = find_file('zlib.h', [], inc_dirs)
if zlib_inc is not None:
zlib_h = zlib_inc[0] + '/zlib.h'
version = '"0.0.0"'
version_req = '"1.1.3"'
fp = open(zlib_h)
while 1:
line = fp.readline()
if not line:
break
if line.startswith('#define ZLIB_VERSION'):
version = line.split()[2]
break
if version >= version_req:
if (self.compiler.find_library_file(lib_dirs, 'z')):
if sys.platform == "darwin":
zlib_extra_link_args = ('-Wl,-search_paths_first',)
else:
zlib_extra_link_args = ()
exts.append( Extension('zlib', ['zlibmodule.c'],
libraries = ['z'],
extra_link_args = zlib_extra_link_args))
# Gustavo Niemeyer's bz2 module.
if (self.compiler.find_library_file(lib_dirs, 'bz2')):
if sys.platform == "darwin":
bz2_extra_link_args = ('-Wl,-search_paths_first',)
else:
bz2_extra_link_args = ()
exts.append( Extension('bz2', ['bz2module.c'],
libraries = ['bz2'],
extra_link_args = bz2_extra_link_args) )
# Interface to the Expat XML parser
#
# Expat was written by James Clark and is now maintained by a
# group of developers on SourceForge; see www.libexpat.org for
# more information. The pyexpat module was written by Paul
# Prescod after a prototype by Jack Jansen. The Expat source
# is included in Modules/expat/. Usage of a system
# shared libexpat.so/expat.dll is not advised.
#
# More information on Expat can be found at www.libexpat.org.
#
expatinc = os.path.join(os.getcwd(), srcdir, 'Modules', 'expat')
define_macros = [
('HAVE_EXPAT_CONFIG_H', '1'),
]
exts.append(Extension('pyexpat',
define_macros = define_macros,
include_dirs = [expatinc],
sources = ['pyexpat.c',
'expat/xmlparse.c',
'expat/xmlrole.c',
'expat/xmltok.c',
],
))
# Fredrik Lundh's cElementTree module. Note that this also
# uses expat (via the CAPI hook in pyexpat).
if os.path.isfile(os.path.join(srcdir, 'Modules', '_elementtree.c')):
define_macros.append(('USE_PYEXPAT_CAPI', None))
exts.append(Extension('_elementtree',
define_macros = define_macros,
include_dirs = [expatinc],
sources = ['_elementtree.c'],
))
# Hye-Shik Chang's CJKCodecs modules.
if have_unicode:
exts.append(Extension('_multibytecodec',
['cjkcodecs/multibytecodec.c']))
for loc in ('kr', 'jp', 'cn', 'tw', 'hk', 'iso2022'):
exts.append(Extension('_codecs_' + loc,
['cjkcodecs/_codecs_%s.c' % loc]))
# Dynamic loading module
if sys.maxint == 0x7fffffff:
# This requires sizeof(int) == sizeof(long) == sizeof(char*)
dl_inc = find_file('dlfcn.h', [], inc_dirs)
if (dl_inc is not None) and (platform not in ['atheos']):
exts.append( Extension('dl', ['dlmodule.c']) )
# Thomas Heller's _ctypes module
self.detect_ctypes(inc_dirs, lib_dirs)
# Platform-specific libraries
if platform == 'linux2':
# Linux-specific modules
exts.append( Extension('linuxaudiodev', ['linuxaudiodev.c']) )
if platform in ('linux2', 'freebsd4', 'freebsd5', 'freebsd6',
'freebsd7'):
exts.append( Extension('ossaudiodev', ['ossaudiodev.c']) )
if platform == 'sunos5':
# SunOS specific modules
exts.append( Extension('sunaudiodev', ['sunaudiodev.c']) )
if platform == 'darwin' and ("--disable-toolbox-glue" not in
sysconfig.get_config_var("CONFIG_ARGS")):
if os.uname()[2] > '8.':
# We're on Mac OS X 10.4 or later, the compiler should
# support '-Wno-deprecated-declarations'. This will
                # suppress deprecation warnings for the Carbon extensions,
# these extensions wrap the Carbon APIs and even those
# parts that are deprecated.
carbon_extra_compile_args = ['-Wno-deprecated-declarations']
else:
carbon_extra_compile_args = []
# Mac OS X specific modules.
def macSrcExists(name1, name2=''):
if not name1:
return None
names = (name1,)
if name2:
names = (name1, name2)
path = os.path.join(srcdir, 'Mac', 'Modules', *names)
return os.path.exists(path)
def addMacExtension(name, kwds, extra_srcs=[]):
dirname = ''
if name[0] == '_':
dirname = name[1:].lower()
cname = name + '.c'
cmodulename = name + 'module.c'
# Check for NNN.c, NNNmodule.c, _nnn/NNN.c, _nnn/NNNmodule.c
if macSrcExists(cname):
srcs = [cname]
elif macSrcExists(cmodulename):
srcs = [cmodulename]
elif macSrcExists(dirname, cname):
# XXX(nnorwitz): If all the names ended with module, we
# wouldn't need this condition. ibcarbon is the only one.
srcs = [os.path.join(dirname, cname)]
elif macSrcExists(dirname, cmodulename):
srcs = [os.path.join(dirname, cmodulename)]
else:
raise RuntimeError("%s not found" % name)
# Here's the whole point: add the extension with sources
exts.append(Extension(name, srcs + extra_srcs, **kwds))
# Core Foundation
core_kwds = {'extra_compile_args': carbon_extra_compile_args,
'extra_link_args': ['-framework', 'CoreFoundation'],
}
addMacExtension('_CF', core_kwds, ['cf/pycfbridge.c'])
addMacExtension('autoGIL', core_kwds)
# Carbon
carbon_kwds = {'extra_compile_args': carbon_extra_compile_args,
'extra_link_args': ['-framework', 'Carbon'],
}
CARBON_EXTS = ['ColorPicker', 'gestalt', 'MacOS', 'Nav',
'OSATerminology', 'icglue',
# All these are in subdirs
'_AE', '_AH', '_App', '_CarbonEvt', '_Cm', '_Ctl',
'_Dlg', '_Drag', '_Evt', '_File', '_Folder', '_Fm',
'_Help', '_Icn', '_IBCarbon', '_List',
'_Menu', '_Mlte', '_OSA', '_Res', '_Qd', '_Qdoffs',
'_Scrap', '_Snd', '_TE', '_Win',
]
for name in CARBON_EXTS:
addMacExtension(name, carbon_kwds)
# Application Services & QuickTime
app_kwds = {'extra_compile_args': carbon_extra_compile_args,
'extra_link_args': ['-framework','ApplicationServices'],
}
addMacExtension('_Launch', app_kwds)
addMacExtension('_CG', app_kwds)
exts.append( Extension('_Qt', ['qt/_Qtmodule.c'],
extra_compile_args=carbon_extra_compile_args,
extra_link_args=['-framework', 'QuickTime',
'-framework', 'Carbon']) )
self.extensions.extend(exts)
# Call the method for detecting whether _tkinter can be compiled
self.detect_tkinter(inc_dirs, lib_dirs)
def detect_tkinter_darwin(self, inc_dirs, lib_dirs):
# The _tkinter module, using frameworks. Since frameworks are quite
# different the UNIX search logic is not sharable.
from os.path import join, exists
framework_dirs = [
'/System/Library/Frameworks/',
'/Library/Frameworks',
            # no leading slash here: join() would otherwise discard $HOME
            join(os.getenv('HOME'), 'Library/Frameworks')
]
# Find the directory that contains the Tcl.framework and Tk.framework
# bundles.
# XXX distutils should support -F!
for F in framework_dirs:
# both Tcl.framework and Tk.framework should be present
for fw in 'Tcl', 'Tk':
if not exists(join(F, fw + '.framework')):
break
else:
                # OK, F is now the directory with both frameworks. Continue
                # building.
break
else:
# Tk and Tcl frameworks not found. Normal "unix" tkinter search
# will now resume.
return 0
        # For 8.4a2, we must add -I options that point inside the Tcl and Tk
        # frameworks. In later releases we should hopefully be able to pass
        # the -F option to gcc, which specifies a framework lookup path.
#
include_dirs = [
join(F, fw + '.framework', H)
for fw in 'Tcl', 'Tk'
for H in 'Headers', 'Versions/Current/PrivateHeaders'
]
# For 8.4a2, the X11 headers are not included. Rather than include a
# complicated search, this is a hard-coded path. It could bail out
# if X11 libs are not found...
include_dirs.append('/usr/X11R6/include')
frameworks = ['-framework', 'Tcl', '-framework', 'Tk']
ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
define_macros=[('WITH_APPINIT', 1)],
include_dirs = include_dirs,
libraries = [],
extra_compile_args = frameworks,
extra_link_args = frameworks,
)
self.extensions.append(ext)
return 1
def detect_tkinter(self, inc_dirs, lib_dirs):
# The _tkinter module.
# Rather than complicate the code below, detecting and building
# AquaTk is a separate method. Only one Tkinter will be built on
# Darwin - either AquaTk, if it is found, or X11 based Tk.
platform = self.get_platform()
if (platform == 'darwin' and
self.detect_tkinter_darwin(inc_dirs, lib_dirs)):
return
# Assume we haven't found any of the libraries or include files
# The versions with dots are used on Unix, and the versions without
# dots on Windows, for detection by cygwin.
tcllib = tklib = tcl_includes = tk_includes = None
for version in ['8.5', '85', '8.4', '84', '8.3', '83', '8.2',
'82', '8.1', '81', '8.0', '80']:
tklib = self.compiler.find_library_file(lib_dirs, 'tk' + version)
tcllib = self.compiler.find_library_file(lib_dirs, 'tcl' + version)
if tklib and tcllib:
# Exit the loop when we've found the Tcl/Tk libraries
break
# Now check for the header files
if tklib and tcllib:
# Check for the include files on Debian and {Free,Open}BSD, where
# they're put in /usr/include/{tcl,tk}X.Y
dotversion = version
if '.' not in dotversion and "bsd" in sys.platform.lower():
# OpenBSD and FreeBSD use Tcl/Tk library names like libtcl83.a,
# but the include subdirs are named like .../include/tcl8.3.
dotversion = dotversion[:-1] + '.' + dotversion[-1]
tcl_include_sub = []
tk_include_sub = []
for dir in inc_dirs:
tcl_include_sub += [dir + os.sep + "tcl" + dotversion]
tk_include_sub += [dir + os.sep + "tk" + dotversion]
tk_include_sub += tcl_include_sub
tcl_includes = find_file('tcl.h', inc_dirs, tcl_include_sub)
tk_includes = find_file('tk.h', inc_dirs, tk_include_sub)
if (tcllib is None or tklib is None or
tcl_includes is None or tk_includes is None):
self.announce("INFO: Can't locate Tcl/Tk libs and/or headers", 2)
return
# OK... everything seems to be present for Tcl/Tk.
include_dirs = [] ; libs = [] ; defs = [] ; added_lib_dirs = []
for dir in tcl_includes + tk_includes:
if dir not in include_dirs:
include_dirs.append(dir)
# Check for various platform-specific directories
if platform == 'sunos5':
include_dirs.append('/usr/openwin/include')
added_lib_dirs.append('/usr/openwin/lib')
elif os.path.exists('/usr/X11R6/include'):
include_dirs.append('/usr/X11R6/include')
added_lib_dirs.append('/usr/X11R6/lib64')
added_lib_dirs.append('/usr/X11R6/lib')
elif os.path.exists('/usr/X11R5/include'):
include_dirs.append('/usr/X11R5/include')
added_lib_dirs.append('/usr/X11R5/lib')
else:
# Assume default location for X11
include_dirs.append('/usr/X11/include')
added_lib_dirs.append('/usr/X11/lib')
# If Cygwin, then verify that X is installed before proceeding
if platform == 'cygwin':
x11_inc = find_file('X11/Xlib.h', [], include_dirs)
if x11_inc is None:
return
# Check for BLT extension
if self.compiler.find_library_file(lib_dirs + added_lib_dirs,
'BLT8.0'):
defs.append( ('WITH_BLT', 1) )
libs.append('BLT8.0')
elif self.compiler.find_library_file(lib_dirs + added_lib_dirs,
'BLT'):
defs.append( ('WITH_BLT', 1) )
libs.append('BLT')
# Add the Tcl/Tk libraries
libs.append('tk'+ version)
libs.append('tcl'+ version)
if platform in ['aix3', 'aix4']:
libs.append('ld')
# Finally, link with the X11 libraries (not appropriate on cygwin)
if platform != "cygwin":
libs.append('X11')
ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
define_macros=[('WITH_APPINIT', 1)] + defs,
include_dirs = include_dirs,
libraries = libs,
library_dirs = added_lib_dirs,
)
self.extensions.append(ext)
## # Uncomment these lines if you want to play with xxmodule.c
## ext = Extension('xx', ['xxmodule.c'])
## self.extensions.append(ext)
# XXX handle these, but how to detect?
# *** Uncomment and edit for PIL (TkImaging) extension only:
# -DWITH_PIL -I../Extensions/Imaging/libImaging tkImaging.c \
# *** Uncomment and edit for TOGL extension only:
# -DWITH_TOGL togl.c \
# *** Uncomment these for TOGL extension only:
# -lGL -lGLU -lXext -lXmu \
def configure_ctypes(self, ext):
if not self.use_system_libffi:
(srcdir,) = sysconfig.get_config_vars('srcdir')
ffi_builddir = os.path.join(self.build_temp, 'libffi')
ffi_srcdir = os.path.abspath(os.path.join(srcdir, 'Modules',
'_ctypes', 'libffi'))
ffi_configfile = os.path.join(ffi_builddir, 'fficonfig.py')
from distutils.dep_util import newer_group
config_sources = [os.path.join(ffi_srcdir, fname)
for fname in os.listdir(ffi_srcdir)
if os.path.isfile(os.path.join(ffi_srcdir, fname))]
if self.force or newer_group(config_sources,
ffi_configfile):
from distutils.dir_util import mkpath
mkpath(ffi_builddir)
config_args = []
# Pass empty CFLAGS because we'll just append the resulting
# CFLAGS to Python's; -g or -O2 is to be avoided.
cmd = "cd %s && env CFLAGS='' '%s/configure' %s" \
% (ffi_builddir, ffi_srcdir, " ".join(config_args))
res = os.system(cmd)
if res or not os.path.exists(ffi_configfile):
print "Failed to configure _ctypes module"
return False
fficonfig = {}
execfile(ffi_configfile, globals(), fficonfig)
ffi_srcdir = os.path.join(fficonfig['ffi_srcdir'], 'src')
# Add .S (preprocessed assembly) to C compiler source extensions.
self.compiler.src_extensions.append('.S')
include_dirs = [os.path.join(ffi_builddir, 'include'),
ffi_builddir, ffi_srcdir]
extra_compile_args = fficonfig['ffi_cflags'].split()
ext.sources.extend(fficonfig['ffi_sources'])
ext.include_dirs.extend(include_dirs)
ext.extra_compile_args.extend(extra_compile_args)
return True
def detect_ctypes(self, inc_dirs, lib_dirs):
self.use_system_libffi = False
include_dirs = []
extra_compile_args = []
extra_link_args = []
sources = ['_ctypes/_ctypes.c',
'_ctypes/callbacks.c',
'_ctypes/callproc.c',
'_ctypes/stgdict.c',
'_ctypes/cfield.c',
'_ctypes/malloc_closure.c']
depends = ['_ctypes/ctypes.h']
if sys.platform == 'darwin':
sources.append('_ctypes/darwin/dlfcn_simple.c')
include_dirs.append('_ctypes/darwin')
# XXX Is this still needed?
## extra_link_args.extend(['-read_only_relocs', 'warning'])
elif sys.platform == 'sunos5':
            # XXX This shouldn't be necessary; it appears that some
            # of the assembler code is non-PIC (i.e., it has relocations
            # when it shouldn't). The proper fix would be to rewrite
            # the assembler code to be PIC.
# This only works with GCC; the Sun compiler likely refuses
# this option. If you want to compile ctypes with the Sun
# compiler, please research a proper solution, instead of
# finding some -z option for the Sun compiler.
extra_link_args.append('-mimpure-text')
ext = Extension('_ctypes',
include_dirs=include_dirs,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
libraries=[],
sources=sources,
depends=depends)
ext_test = Extension('_ctypes_test',
sources=['_ctypes/_ctypes_test.c'])
self.extensions.extend([ext, ext_test])
if not '--with-system-ffi' in sysconfig.get_config_var("CONFIG_ARGS"):
return
ffi_inc = find_file('ffi.h', [], inc_dirs)
if ffi_inc is not None:
ffi_h = ffi_inc[0] + '/ffi.h'
fp = open(ffi_h)
while 1:
line = fp.readline()
if not line:
ffi_inc = None
break
if line.startswith('#define LIBFFI_H'):
break
ffi_lib = None
if ffi_inc is not None:
for lib_name in ('ffi_convenience', 'ffi_pic', 'ffi'):
if (self.compiler.find_library_file(lib_dirs, lib_name)):
ffi_lib = lib_name
break
if ffi_inc and ffi_lib:
ext.include_dirs.extend(ffi_inc)
ext.libraries.append(ffi_lib)
self.use_system_libffi = True
class PyBuildInstall(install):
# Suppress the warning about installation into the lib_dynload
# directory, which is not in sys.path when running Python during
# installation:
def initialize_options (self):
install.initialize_options(self)
self.warn_dir=0
class PyBuildInstallLib(install_lib):
    # Do exactly what install_lib does but make sure correct access modes get
    # set on installed directories and files. All installed files will get
    # mode 644 unless they are a shared library, in which case they will get
    # mode 755. All installed directories will get mode 755.
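    # For example (illustrative): Lib/os.py would be installed with mode 644,
    # while lib-dynload/math.so would be installed with mode 755.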
so_ext = sysconfig.get_config_var("SO")
def install(self):
outfiles = install_lib.install(self)
self.set_file_modes(outfiles, 0644, 0755)
self.set_dir_modes(self.install_dir, 0755)
return outfiles
def set_file_modes(self, files, defaultMode, sharedLibMode):
if not self.is_chmod_supported(): return
if not files: return
for filename in files:
if os.path.islink(filename): continue
mode = defaultMode
if filename.endswith(self.so_ext): mode = sharedLibMode
log.info("changing mode of %s to %o", filename, mode)
if not self.dry_run: os.chmod(filename, mode)
def set_dir_modes(self, dirname, mode):
if not self.is_chmod_supported(): return
os.path.walk(dirname, self.set_dir_modes_visitor, mode)
def set_dir_modes_visitor(self, mode, dirname, names):
if os.path.islink(dirname): return
log.info("changing mode of %s to %o", dirname, mode)
if not self.dry_run: os.chmod(dirname, mode)
def is_chmod_supported(self):
return hasattr(os, 'chmod')
SUMMARY = """
Python is an interpreted, interactive, object-oriented programming
language. It is often compared to Tcl, Perl, Scheme or Java.
Python combines remarkable power with very clear syntax. It has
modules, classes, exceptions, very high level dynamic data types, and
dynamic typing. There are interfaces to many system calls and
libraries, as well as to various windowing systems (X11, Motif, Tk,
Mac, MFC). New built-in modules are easily written in C or C++. Python
is also usable as an extension language for applications that need a
programmable interface.
The Python implementation is portable: it runs on many brands of UNIX,
on Windows, DOS, OS/2, Mac, Amiga... If your favorite system isn't
listed here, it may still be supported, if there's a C compiler for
it. Ask around on comp.lang.python -- or just try compiling Python
yourself.
"""
CLASSIFIERS = """
Development Status :: 3 - Alpha
Development Status :: 6 - Mature
License :: OSI Approved :: Python Software Foundation License
Natural Language :: English
Programming Language :: C
Programming Language :: Python
Topic :: Software Development
"""
def main():
# turn off warnings when deprecated modules are imported
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
setup(# PyPI Metadata (PEP 301)
name = "Python",
version = sys.version.split()[0],
url = "http://www.python.org/%s" % sys.version[:3],
maintainer = "Guido van Rossum and the Python community",
maintainer_email = "python-dev@python.org",
description = "A high-level object-oriented programming language",
long_description = SUMMARY.strip(),
license = "PSF license",
classifiers = filter(None, CLASSIFIERS.split("\n")),
platforms = ["Many"],
# Build info
cmdclass = {'build_ext':PyBuildExt, 'install':PyBuildInstall,
'install_lib':PyBuildInstallLib},
# The struct module is defined here, because build_ext won't be
# called unless there's at least one extension module defined.
ext_modules=[Extension('_struct', ['_struct.c'])],
# Scripts to install
scripts = ['Tools/scripts/pydoc', 'Tools/scripts/idle',
'Lib/smtpd.py']
)
# --install-platlib
if __name__ == '__main__':
main()
|
TathagataChakraborti/resource-conflicts
|
PLANROB-2015/seq-sat-lama/Python-2.5.2/setup.py
|
Python
|
mit
| 69,044
|
[
"VisIt"
] |
eebc37d2c4cb0cfa26462bfabff9f9fcf58d75c3d87cdf1537d08a559ac414b9
|
#!/usr/bin/python3
# coding: utf-8
# # Initialization
#
# Welcome to the first assignment of "Improving Deep Neural Networks".
#
# Training your neural network requires specifying an initial value of the weights. A well chosen initialization method will help learning.
#
# If you completed the previous course of this specialization, you probably followed our instructions for weight initialization, and it has worked out so far. But how do you choose the initialization for a new neural network? In this notebook, you will see how different initializations lead to different results.
#
# A well chosen initialization can:
# - Speed up the convergence of gradient descent
# - Increase the odds of gradient descent converging to a lower training (and generalization) error
#
# To get started, run the following cell to load the packages and the planar dataset you will try to classify.
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
from init_utils import sigmoid, relu, compute_loss, forward_propagation, backward_propagation
from init_utils import update_parameters, predict, load_dataset, plot_decision_boundary, predict_dec
#get_ipython().magic('matplotlib inline')
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# load image dataset: blue/red dots in circles
train_X, train_Y, test_X, test_Y = load_dataset()
# You would like a classifier to separate the blue dots from the red dots.
# ## 1 - Neural Network model
# You will use a 3-layer neural network (already implemented for you). Here are the initialization methods you will experiment with:
# - *Zeros initialization* -- setting `initialization = "zeros"` in the input argument.
# - *Random initialization* -- setting `initialization = "random"` in the input argument. This initializes the weights to large random values.
# - *He initialization* -- setting `initialization = "he"` in the input argument. This initializes the weights to random values scaled according to a paper by He et al., 2015.
#
# **Instructions**: Please quickly read over the code below, and run it. In the next part you will implement the three initialization methods that this `model()` calls.
# In[5]:
def model(X, Y, learning_rate = 0.01, num_iterations = 15000, print_cost = True, initialization = "he"):
"""
Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.
Arguments:
X -- input data, of shape (2, number of examples)
Y -- true "label" vector (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples)
learning_rate -- learning rate for gradient descent
num_iterations -- number of iterations to run gradient descent
print_cost -- if True, print the cost every 1000 iterations
initialization -- flag to choose which initialization to use ("zeros","random" or "he")
Returns:
parameters -- parameters learnt by the model
"""
grads = {}
costs = [] # to keep track of the loss
m = X.shape[1] # number of examples
layers_dims = [X.shape[0], 10, 5, 1]
# Initialize parameters dictionary.
if initialization == "zeros":
parameters = initialize_parameters_zeros(layers_dims)
elif initialization == "random":
parameters = initialize_parameters_random(layers_dims)
elif initialization == "he":
parameters = initialize_parameters_he(layers_dims)
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
a3, cache = forward_propagation(X, parameters)
# Loss
cost = compute_loss(a3, Y)
# Backward propagation.
grads = backward_propagation(X, Y, cache)
# Update parameters.
parameters = update_parameters(parameters, grads, learning_rate)
# Print the loss every 1000 iterations
if print_cost and i % 1000 == 0:
print("Cost after iteration {}: {}".format(i, cost))
costs.append(cost)
# plot the loss
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
# ## 2 - Zero initialization
#
# There are two types of parameters to initialize in a neural network:
# - the weight matrices $(W^{[1]}, W^{[2]}, W^{[3]}, ..., W^{[L-1]}, W^{[L]})$
# - the bias vectors $(b^{[1]}, b^{[2]}, b^{[3]}, ..., b^{[L-1]}, b^{[L]})$
#
# **Exercise**: Implement the following function to initialize all parameters to zeros. You'll see later that this does not work well since it fails to "break symmetry", but let's try it anyway and see what happens. Use np.zeros((..,..)) with the correct shapes.
# In[2]:
# GRADED FUNCTION: initialize_parameters_zeros
def initialize_parameters_zeros(layers_dims):
"""
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
"""
parameters = {}
L = len(layers_dims) # number of layers in the network
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.zeros((layers_dims[l], layers_dims[l-1]))
parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
### END CODE HERE ###
return parameters
# In[3]:
parameters = initialize_parameters_zeros([3,2,1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **W1**
# </td>
# <td>
# [[ 0. 0. 0.]
# [ 0. 0. 0.]]
# </td>
# </tr>
# <tr>
# <td>
# **b1**
# </td>
# <td>
# [[ 0.]
# [ 0.]]
# </td>
# </tr>
# <tr>
# <td>
# **W2**
# </td>
# <td>
# [[ 0. 0.]]
# </td>
# </tr>
# <tr>
# <td>
# **b2**
# </td>
# <td>
# [[ 0.]]
# </td>
# </tr>
#
# </table>
# Run the following code to train your model on 15,000 iterations using zeros initialization.
# In[6]:
parameters = model(train_X, train_Y, initialization = "zeros")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
# The performance is really bad: the cost does not really decrease, and the algorithm performs no better than random guessing. Why? Let's look at the details of the predictions and the decision boundary:
# In[7]:
print ("predictions_train = " + str(predictions_train))
print ("predictions_test = " + str(predictions_test))
# In[8]:
plt.title("Model with Zeros initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# The model is predicting 0 for every example.
#
# In general, initializing all the weights to zero results in the network failing to break symmetry. This means that every neuron in each layer will learn the same thing, so you might as well be training a neural network with $n^{[l]}=1$ for every layer; the network is no more powerful than a linear classifier such as logistic regression. (The short demo below illustrates this.)
# <font color='blue'>
# **What you should remember**:
# - The weights $W^{[l]}$ should be initialized randomly to break symmetry.
# - It is however okay to initialize the biases $b^{[l]}$ to zeros. Symmetry is still broken so long as $W^{[l]}$ is initialized randomly.
#
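# As a quick, self-contained illustration (a sketch, not part of the graded
# assignment; every name below is local to this demo): with zero weights, every
# hidden unit computes exactly the same activation on every input, so nothing
# in the gradient can ever differentiate the units.
X_demo = np.random.randn(2, 5)                        # 2 features, 5 examples
W1_demo, b1_demo = np.zeros((4, 2)), np.zeros((4, 1))
A1_demo = np.maximum(0, np.dot(W1_demo, X_demo) + b1_demo)
print(np.allclose(A1_demo, A1_demo[0]))               # True: all 4 units agree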
# ## 3 - Random initialization
#
# To break symmetry, let's initialize the weights randomly. Following random initialization, each neuron can then proceed to learn a different function of its inputs. In this exercise, you will see what happens if the weights are initialized randomly, but to very large values.
#
# **Exercise**: Implement the following function to initialize your weights to large random values (scaled by \*10) and your biases to zeros. Use `np.random.randn(..,..) * 10` for weights and `np.zeros((.., ..))` for biases. We are using a fixed `np.random.seed(..)` to make sure your "random" weights match ours, so don't worry if running your code several times always gives you the same initial values for the parameters.
# In[9]:
# GRADED FUNCTION: initialize_parameters_random
def initialize_parameters_random(layers_dims):
"""
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
"""
    np.random.seed(3)               # This seed makes sure your "random" numbers will be the same as ours
parameters = {}
L = len(layers_dims) # integer representing the number of layers
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.random.randn(layers_dims[l],layers_dims[l-1])*10
parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
### END CODE HERE ###
return parameters
# In[10]:
parameters = initialize_parameters_random([3, 2, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **W1**
# </td>
# <td>
# [[ 17.88628473 4.36509851 0.96497468]
# [-18.63492703 -2.77388203 -3.54758979]]
# </td>
# </tr>
# <tr>
# <td>
# **b1**
# </td>
# <td>
# [[ 0.]
# [ 0.]]
# </td>
# </tr>
# <tr>
# <td>
# **W2**
# </td>
# <td>
# [[-0.82741481 -6.27000677]]
# </td>
# </tr>
# <tr>
# <td>
# **b2**
# </td>
# <td>
# [[ 0.]]
# </td>
# </tr>
#
# </table>
# Run the following code to train your model on 15,000 iterations using random initialization.
# In[11]:
parameters = model(train_X, train_Y, initialization = "random")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
# If you see "inf" as the cost after iteration 0, this is because of numerical roundoff; a more numerically sophisticated implementation would fix this. But this isn't worth worrying about for our purposes.
#
# Anyway, it looks like you have broken symmetry, and this gives better results than before. The model is no longer outputting all 0s.
# In[12]:
print (predictions_train)
print (predictions_test)
# In[13]:
plt.title("Model with large random initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# **Observations**:
# - The cost starts very high. This is because with large random-valued weights, the last activation (sigmoid) outputs results that are very close to 0 or 1 for some examples, and when it gets such an example wrong it incurs a very high loss for that example. Indeed, when $\log(a^{[3]}) = \log(0)$, the loss goes to infinity (see the short sketch after this list).
# - Poor initialization can lead to vanishing/exploding gradients, which also slows down the optimization algorithm.
# - If you train this network longer you will see better results, but initializing with overly large random numbers slows down the optimization.
#
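# A tiny numeric sketch (illustrative only) of the first observation above:
# with a saturated sigmoid, the cross-entropy term -log(a) blows up as a -> 0.
a_sat = 1e-15                     # activation pushed toward 0 by huge weights
y_true = 1
print(-y_true * np.log(a_sat))    # ~34.5 already; tends to infinity as a_sat -> 0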
# <font color='blue'>
# **In summary**:
# - Initializing weights to very large random values does not work well.
# - Hopefully initializing with small random values does better. The important question is: how small should these random values be? Let's find out in the next part!
# ## 4 - He initialization
#
# Finally, try "He Initialization"; this is named for the first author of He et al., 2015. (If you have heard of "Xavier initialization", this is similar except Xavier initialization uses a scaling factor for the weights $W^{[l]}$ of `sqrt(1./layers_dims[l-1])` where He initialization would use `sqrt(2./layers_dims[l-1])`.)
#
# **Exercise**: Implement the following function to initialize your parameters with He initialization.
#
# **Hint**: This function is similar to the previous `initialize_parameters_random(...)`. The only difference is that instead of multiplying `np.random.randn(..,..)` by 10, you will multiply it by $\sqrt{\frac{2}{\text{dimension of the previous layer}}}$, which is what He initialization recommends for layers with a ReLU activation.
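# A small side-by-side of the two scale factors mentioned above (n_prev is an
# illustrative previous-layer width, not a value from the assignment):
n_prev = 10
xavier_scale = np.sqrt(1. / n_prev)
he_scale = np.sqrt(2. / n_prev)   # larger than Xavier by a factor of sqrt(2)
print(xavier_scale, he_scale)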
# In[14]:
# GRADED FUNCTION: initialize_parameters_he
def initialize_parameters_he(layers_dims):
"""
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
"""
np.random.seed(3)
parameters = {}
L = len(layers_dims) - 1 # integer representing the number of layers
for l in range(1, L + 1):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.random.randn(layers_dims[l],layers_dims[l-1])*np.sqrt(2/layers_dims[l-1])
parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
### END CODE HERE ###
return parameters
# In[15]:
parameters = initialize_parameters_he([2, 4, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **W1**
# </td>
# <td>
# [[ 1.78862847 0.43650985]
# [ 0.09649747 -1.8634927 ]
# [-0.2773882 -0.35475898]
# [-0.08274148 -0.62700068]]
# </td>
# </tr>
# <tr>
# <td>
# **b1**
# </td>
# <td>
# [[ 0.]
# [ 0.]
# [ 0.]
# [ 0.]]
# </td>
# </tr>
# <tr>
# <td>
# **W2**
# </td>
# <td>
# [[-0.03098412 -0.33744411 -0.92904268 0.62552248]]
# </td>
# </tr>
# <tr>
# <td>
# **b2**
# </td>
# <td>
# [[ 0.]]
# </td>
# </tr>
#
# </table>
# Run the following code to train your model on 15,000 iterations using He initialization.
# In[16]:
parameters = model(train_X, train_Y, initialization = "he")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
# In[17]:
plt.title("Model with He initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# **Observations**:
# - The model with He initialization separates the blue and the red dots very well in a small number of iterations.
#
# ## 5 - Conclusions
# You have seen three different types of initializations. For the same number of iterations and same hyperparameters the comparison is:
#
# <table>
# <tr>
# <td>
# **Model**
# </td>
# <td>
# **Train accuracy**
# </td>
# <td>
# **Problem/Comment**
# </td>
#
# </tr>
# <td>
# 3-layer NN with zeros initialization
# </td>
# <td>
# 50%
# </td>
# <td>
# fails to break symmetry
# </td>
# <tr>
# <td>
# 3-layer NN with large random initialization
# </td>
# <td>
# 83%
# </td>
# <td>
# too large weights
# </td>
# </tr>
# <tr>
# <td>
# 3-layer NN with He initialization
# </td>
# <td>
# 99%
# </td>
# <td>
# recommended method
# </td>
# </tr>
# </table>
# <font color='blue'>
# **What you should remember from this notebook**:
# - Different initializations lead to different results
# - Random initialization is used to break symmetry and make sure different hidden units can learn different things
# - Don't initialize to values that are too large
# - He initialization works well for networks with ReLU activations.
|
jinzishuai/learn2deeplearn
|
deeplearning.ai/C2.ImproveDeepNN/week1-hw/Initialization/Initialization.py
|
Python
|
gpl-3.0
| 17,528
|
[
"NEURON"
] |
7c44cdeae07f4850de4555055a427b602237962681af0a04a76e4acd2acb923e
|
#!/usr/bin/env python
#pylint: disable-msg=C0301,C0103,R0914,R0903
"""
DAS command line tool
"""
__author__ = "Valentin Kuznetsov"
import sys
if sys.version_info < (2, 6):
raise Exception("DAS requires python 2.6 or greater")
DAS_CLIENT = 'das-client/1.0::python/%s.%s' % sys.version_info[:2]
import os
import re
import time
import json
import urllib
import urllib2
import httplib
import cookielib
from optparse import OptionParser
from math import log
# define exit codes according to Linux sysexits.h
EX_OK = 0 # successful termination
EX__BASE = 64 # base value for error messages
EX_USAGE = 64 # command line usage error
EX_DATAERR = 65 # data format error
EX_NOINPUT = 66 # cannot open input
EX_NOUSER = 67 # addressee unknown
EX_NOHOST = 68 # host name unknown
EX_UNAVAILABLE = 69 # service unavailable
EX_SOFTWARE = 70 # internal software error
EX_OSERR = 71 # system error (e.g., can't fork)
EX_OSFILE = 72 # critical OS file missing
EX_CANTCREAT = 73 # can't create (user) output file
EX_IOERR = 74 # input/output error
EX_TEMPFAIL = 75 # temp failure; user is invited to retry
EX_PROTOCOL = 76 # remote error in protocol
EX_NOPERM = 77 # permission denied
EX_CONFIG = 78 # configuration error
class HTTPSClientAuthHandler(urllib2.HTTPSHandler):
"""
Simple HTTPS client authentication class based on provided
key/ca information
"""
def __init__(self, key=None, cert=None, level=0):
if level > 1:
urllib2.HTTPSHandler.__init__(self, debuglevel=1)
else:
urllib2.HTTPSHandler.__init__(self)
self.key = key
self.cert = cert
def https_open(self, req):
"""Open request method"""
#Rather than pass in a reference to a connection class, we pass in
# a reference to a function which, for all intents and purposes,
# will behave as a constructor
return self.do_open(self.get_connection, req)
def get_connection(self, host, timeout=300):
"""Connection method"""
if self.key:
return httplib.HTTPSConnection(host, key_file=self.key,
cert_file=self.cert)
return httplib.HTTPSConnection(host)
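# Hedged usage sketch (comment only; get_data below does essentially this,
# plus proxy and cookie handlers). The key/cert paths are placeholders that
# in practice come from the --key and --cert command line options:
#
#     handler = HTTPSClientAuthHandler('/path/to/userkey.pem',
#                                      '/path/to/usercert.pem')
#     opener  = urllib2.build_opener(handler)
#     opener.open('https://cmsweb.cern.ch/das/cache?...')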
class DASOptionParser:
"""
DAS cache client option parser
"""
def __init__(self):
usage = "Usage: %prog [options]\n"
usage += "For more help please visit https://cmsweb.cern.ch/das/faq"
self.parser = OptionParser(usage=usage)
self.parser.add_option("-v", "--verbose", action="store",
type="int", default=0, dest="verbose",
help="verbose output")
self.parser.add_option("--query", action="store", type="string",
default=False, dest="query",
help="specify query for your request")
msg = "host name of DAS cache server, default is https://cmsweb.cern.ch"
self.parser.add_option("--host", action="store", type="string",
default='https://cmsweb.cern.ch', dest="host", help=msg)
msg = "start index for returned result set, aka pagination,"
msg += " use w/ limit (default is 0)"
self.parser.add_option("--idx", action="store", type="int",
default=0, dest="idx", help=msg)
msg = "number of returned results (default is 10),"
msg += " use --limit=0 to show all results"
self.parser.add_option("--limit", action="store", type="int",
default=10, dest="limit", help=msg)
msg = 'specify return data format (json or plain), default plain.'
self.parser.add_option("--format", action="store", type="string",
default="plain", dest="format", help=msg)
msg = 'query waiting threshold in sec, default is 5 minutes'
self.parser.add_option("--threshold", action="store", type="int",
default=300, dest="threshold", help=msg)
msg = 'specify private key file name'
self.parser.add_option("--key", action="store", type="string",
default="", dest="ckey", help=msg)
msg = 'specify private certificate file name'
self.parser.add_option("--cert", action="store", type="string",
default="", dest="cert", help=msg)
msg = 'specify number of retries upon busy DAS server message'
self.parser.add_option("--retry", action="store", type="string",
default=0, dest="retry", help=msg)
msg = 'show DAS headers in JSON format'
msg += ' (obsolete, keep for backward compatibility)'
self.parser.add_option("--das-headers", action="store_true",
default=False, dest="das_headers", help=msg)
msg = 'specify power base for size_format, default is 10 (can be 2)'
self.parser.add_option("--base", action="store", type="int",
default=0, dest="base", help=msg)
def get_opt(self):
"""
        Returns the parsed list of options
"""
return self.parser.parse_args()
def convert_time(val):
"Convert given timestamp into human readable format"
if isinstance(val, int) or isinstance(val, float):
return time.strftime('%d/%b/%Y_%H:%M:%S_GMT', time.gmtime(val))
return val
def size_format(uinput, ibase=0):
"""
    File size formatting utility; it converts a file size into KB, MB, GB, TB, PB units
"""
if not ibase:
return uinput
try:
num = float(uinput)
except Exception as _exc:
return uinput
if ibase == 2.: # power of 2
base = 1024.
xlist = ['', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB']
else: # default base is 10
base = 1000.
xlist = ['', 'KB', 'MB', 'GB', 'TB', 'PB']
    for xxx in xlist:
        if num < base:
            return "%3.1f%s" % (num, xxx)
        num /= base
    # Fall through for sizes beyond PB: report in the largest known unit
    # rather than silently returning None.
    return "%3.1f%s" % (num * base, xlist[-1])
def unique_filter(rows):
"""
    Unique filter; drops adjacent duplicate rows (DAS bookkeeping keys are ignored).
"""
old_row = {}
row = None
for row in rows:
row_data = dict(row)
try:
del row_data['_id']
del row_data['das']
del row_data['das_id']
del row_data['cache_id']
        except KeyError:
            pass
old_data = dict(old_row)
try:
del old_data['_id']
del old_data['das']
del old_data['das_id']
del old_data['cache_id']
        except KeyError:
            pass
if row_data == old_data:
continue
if old_row:
yield old_row
old_row = row
yield row
def get_value(data, filters, base=10):
"""Filter data from a row for given list of filters"""
for ftr in filters:
if ftr.find('>') != -1 or ftr.find('<') != -1 or ftr.find('=') != -1:
continue
row = dict(data)
values = set()
for key in ftr.split('.'):
if isinstance(row, dict) and key in row:
if key == 'creation_time':
row = convert_time(row[key])
elif key == 'size':
row = size_format(row[key], base)
else:
row = row[key]
if isinstance(row, list):
for item in row:
if isinstance(item, dict) and key in item:
if key == 'creation_time':
row = convert_time(item[key])
elif key == 'size':
row = size_format(item[key], base)
else:
row = item[key]
values.add(row)
else:
if isinstance(item, basestring):
values.add(item)
if len(values) == 1:
yield str(values.pop())
else:
yield str(list(values))
def fullpath(path):
"Expand path to full path"
if path and path[0] == '~':
path = path.replace('~', '')
path = path[1:] if path[0] == '/' else path
path = os.path.join(os.environ['HOME'], path)
return path
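# Note (illustrative): for the common '~/...' case fullpath() matches the
# standard library, e.g. fullpath('~/x') == os.path.expanduser('~/x').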
def get_data(host, query, idx, limit, debug, threshold=300, ckey=None,
cert=None, das_headers=True):
"""Contact DAS server and retrieve data for given DAS query"""
params = {'input':query, 'idx':idx, 'limit':limit}
path = '/das/cache'
pat = re.compile('http[s]{0,1}://')
if not pat.match(host):
msg = 'Invalid hostname: %s' % host
raise Exception(msg)
url = host + path
headers = {"Accept": "application/json", "User-Agent": DAS_CLIENT}
encoded_data = urllib.urlencode(params, doseq=True)
url += '?%s' % encoded_data
req = urllib2.Request(url=url, headers=headers)
if ckey and cert:
ckey = fullpath(ckey)
cert = fullpath(cert)
http_hdlr = HTTPSClientAuthHandler(ckey, cert, debug)
else:
http_hdlr = urllib2.HTTPHandler(debuglevel=debug)
proxy_handler = urllib2.ProxyHandler({})
cookie_jar = cookielib.CookieJar()
cookie_handler = urllib2.HTTPCookieProcessor(cookie_jar)
opener = urllib2.build_opener(http_hdlr, proxy_handler, cookie_handler)
fdesc = opener.open(req)
data = fdesc.read()
fdesc.close()
pat = re.compile(r'^[a-z0-9]{32}')
if data and isinstance(data, str) and pat.match(data) and len(data) == 32:
pid = data
else:
pid = None
iwtime = 2 # initial waiting time in seconds
wtime = 20 # final waiting time in seconds
sleep = iwtime
time0 = time.time()
while pid:
params.update({'pid':data})
encoded_data = urllib.urlencode(params, doseq=True)
url = host + path + '?%s' % encoded_data
req = urllib2.Request(url=url, headers=headers)
try:
fdesc = opener.open(req)
data = fdesc.read()
fdesc.close()
except urllib2.HTTPError as err:
return {"status":"fail", "reason":str(err)}
if data and isinstance(data, str) and pat.match(data) and len(data) == 32:
pid = data
else:
pid = None
time.sleep(sleep)
if sleep < wtime:
sleep *= 2
elif sleep == wtime:
sleep = iwtime # start new cycle
else:
sleep = wtime
if (time.time()-time0) > threshold:
reason = "client timeout after %s sec" % int(time.time()-time0)
return {"status":"fail", "reason":reason}
jsondict = json.loads(data)
return jsondict
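def _backoff_schedule(steps, iwtime=2, wtime=20):
    """
    Illustrative helper, not used by the client: reproduces the polling
    sleep schedule of get_data above. E.g. steps=7 -> [2, 4, 8, 16, 32, 20, 2].
    """
    sleep, schedule = iwtime, []
    for _ in range(steps):
        schedule.append(sleep)
        if sleep < wtime:
            sleep *= 2
        elif sleep == wtime:
            sleep = iwtime # start new cycle
        else:
            sleep = wtime
    return schedule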
def prim_value(row):
"""Extract primary key value from DAS record"""
prim_key = row['das']['primary_key']
if prim_key == 'summary':
return row[prim_key]
key, att = prim_key.split('.')
if isinstance(row[key], list):
for item in row[key]:
if att in item:
return item[att]
else:
return row[key][att]
def print_summary(rec):
"Print summary record information on stdout"
if 'summary' not in rec:
        msg = 'Summary information is not found in record:\n%s' % rec
        raise Exception(msg)
for row in rec['summary']:
keys = [k for k in row.keys()]
maxlen = max([len(k) for k in keys])
for key, val in row.items():
pkey = '%s%s' % (key, ' '*(maxlen-len(key)))
print '%s: %s' % (pkey, val)
print
def main():
"""Main function"""
optmgr = DASOptionParser()
opts, _ = optmgr.get_opt()
host = opts.host
debug = opts.verbose
query = opts.query
idx = opts.idx
limit = opts.limit
thr = opts.threshold
ckey = opts.ckey
cert = opts.cert
base = opts.base
if not query:
print 'Input query is missing'
sys.exit(EX_USAGE)
if opts.format == 'plain':
jsondict = get_data(host, query, idx, limit, debug, thr, ckey, cert)
cli_msg = jsondict.get('client_message', None)
if cli_msg:
print "DAS CLIENT WARNING: %s" % cli_msg
if 'status' not in jsondict:
print 'DAS record without status field:\n%s' % jsondict
sys.exit(EX_PROTOCOL)
if jsondict['status'] != 'ok':
print "status: %s, reason: %s" \
% (jsondict.get('status'), jsondict.get('reason', 'N/A'))
if opts.retry:
found = False
                for attempt in xrange(1, int(opts.retry) + 1):
interval = log(attempt)**5
print "Retry in %5.3f sec" % interval
time.sleep(interval)
                    # get_data already returns a decoded dict; no json.loads needed
                    jsondict = get_data(host, query, idx, limit, debug, thr, ckey, cert)
if jsondict.get('status', 'fail') == 'ok':
found = True
break
else:
sys.exit(EX_TEMPFAIL)
if not found:
sys.exit(EX_TEMPFAIL)
nres = jsondict['nresults']
if not limit:
drange = '%s' % nres
else:
drange = '%s-%s out of %s' % (idx+1, idx+limit, nres)
if opts.limit:
msg = "\nShowing %s results" % drange
msg += ", for more results use --idx/--limit options\n"
print msg
mongo_query = jsondict['mongo_query']
unique = False
fdict = mongo_query.get('filters', {})
filters = fdict.get('grep', [])
aggregators = mongo_query.get('aggregators', [])
if 'unique' in fdict.keys():
unique = True
if filters and not aggregators:
data = jsondict['data']
if isinstance(data, dict):
rows = [r for r in get_value(data, filters, base)]
print ' '.join(rows)
elif isinstance(data, list):
if unique:
data = unique_filter(data)
for row in data:
rows = [r for r in get_value(row, filters, base)]
print ' '.join(rows)
else:
print(json.dumps(jsondict))
elif aggregators:
data = jsondict['data']
if unique:
data = unique_filter(data)
for row in data:
if row['key'].find('size') != -1 and \
row['function'] == 'sum':
val = size_format(row['result']['value'], base)
else:
val = row['result']['value']
print '%s(%s)=%s' \
% (row['function'], row['key'], val)
else:
data = jsondict['data']
if isinstance(data, list):
old = None
val = None
for row in data:
prim_key = row.get('das', {}).get('primary_key', None)
if prim_key == 'summary':
print_summary(row)
return
val = prim_value(row)
if not opts.limit:
if val != old:
print val
old = val
else:
print val
if val != old and not opts.limit:
print val
elif isinstance(data, dict):
print prim_value(data)
else:
print data
else:
jsondict = get_data(\
host, query, idx, limit, debug, thr, ckey, cert)
print(json.dumps(jsondict))
#
# main
#
if __name__ == '__main__':
main()
|
rovere/utilities
|
das.py
|
Python
|
mit
| 15,924
|
[
"VisIt"
] |
af291784c58443d11cbecc674ad566a68a7bf5fc57e240e624cf657a63c02e54
|
# Shifter will be a strictly 2-dimensional game. It will have a start menu when first loaded,
# music, sounds, and graphics.
import pygame
from pygame import *
import CONST
from CONST import *
import random
screen = display.set_mode(WINSIZE)
clock = pygame.time.Clock()
caption("SHIFTER")
pygame.font.init()
#=============
#=============
# Sound things
if(checkfile("284253_BLIPPBLIPP.mp3")):
musiclode("284253_BLIPPBLIPP.mp3")
bloop = FXLODE("selectL.wav")
blip = FXLODE("selectM.wav")
bleep = FXLODE("selectH.wav")
bomb = FXLODE("Blast.wav")
bomb1 = FXLODE("blast2.wav")
bomb2 = FXLODE("blast3.wav")
pulse = FXLODE("deeppulse.wav")
laser = FXLODE("laser.wav")
jet = FXLODE("JET.wav")
deflect = FXLODE("blast1.wav",0.7)
siren = FXLODE("warning2.wav")
goof = FXLODE("warning.wav")
damage = FXLODE("damage.wav")
attention = FXLODE("attention.wav")
ready1 = FXLODE("ready.wav",1)
ready2rock = FXLODE("ready2.wav",1)
textblip = FXLODE("textblit.wav")
powerup = FXLODE("ufo.wav")
charge = FXLODE("ufo2.wav")
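# NOTE: the 'powerup' name bound to "ufo.wav" above is overwritten by the
# "powerup1.wav" binding on the next line; only the latter effect is played.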
powerup = FXLODE("powerup1.wav")
die = FXLODE("die.wav")
slice = FXLODE("slice.wav")
bladekill = FXLODE("bladekill.wav")
spinblade = FXLODE("spinblade.wav")
bigblade = FXLODE("bigblader.wav")
bigblast = FXLODE("bigblast.wav")
roar = FXLODE("roar.wav")
transform = FXLODE("transform.wav")
bigcharge = FXLODE("charging.wav")
slowcharge = FXLODE("slow evil charge.wav")
opticcharge = FXLODE("opticcharge.wav")
opticblast = FXLODE("opticblast.wav")
hammersmash = FXLODE("hammersmash.wav",0.7)
hammerspawn = FXLODE("hammerspawn.wav",1)
genspawn = FXLODE("genspawn.wav")
invis = FXLODE("invis.wav",1)
rush = FXLODE("rush.wav")
shiny = FXLODE("shiny.wav")
supershift = FXLODE("supershift.wav")
BOOM = FXLODE("cineboom.wav")
pygame.mixer.music.set_volume(0.5)
soundstatus = True
musicstatus = True
# The ground you run on
erection = pygame.Surface((WINSIZE[0],WINSIZE[1]))
erectpos = (WINSIZE[0],0)
erection.fill(BLACK)
# The stars in the back
Stars = pygame.sprite.Group()
Starinterval = 6
MAXSTARS = 20
STARSPEED = 20
STARSIZE = (4,4)
MAXSPEED = 60
# Some dots
DOTZ = pygame.sprite.Group()
# Some explosions
BLASTS = pygame.sprite.Group()
SHIELDS = pygame.sprite.Group()
# Some gens
GENES = pygame.sprite.Group()
# Some walls
walmart = pygame.sprite.Group()
blastmart = pygame.sprite.Group()
powermart = pygame.sprite.Group()
spinmart = pygame.sprite.Group()
#TESTING=============
invincible = False
startlevel = 1
superpowered = False
bossdead = False
currentsong = ""
#====================
class Maine:
def __init__(self):
self.running = 1
self.INTRO()
def INTRO(self):
# Text Info
Texty = texty("REDENSEK.TTF",20)
firstpart = Texty.render("STATICA PRESENTS",0,BLACK)
secondpart = Texty.render("THE THIRD GAME BY MICHAEL AREVALO",0,BLACK)
thirdpart = Texty.render("WITH MUSIC BY MAJS",0,RED)
scripty = 0
timer = 0
if(checkfile("284253_BLIPPBLIPP.mp3")):
musicplay(musicstatus)
eventimer = 0
erection = pygame.Surface((WINSIZE[0],WINSIZE[1]))
erectpos = (WINSIZE[0],0)
erection.fill(BLACK)
checkok = 0
while self.running:
clock.tick(FPS)
screen.fill(RED)
eventimer+=1
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = 0
elif event.type == KEYDOWN:
if (event.key == K_ESCAPE):
self.MENU()
if (event.key == K_SPACE):
self.MENU()
if (scripty == 0):
if (eventimer != 120):
screen.blit(firstpart,(50,240))
if (erectpos[0] > 490):
erectpos = (erectpos[0]-4,erectpos[1])
else:
print "first transition"
scripty = 1
elif(scripty == 1):
if (erectpos[1] > -330):
erectpos = (erectpos[0],erectpos[1]-20)
else:
scripty = 2
elif(scripty == 2):
if (eventimer != 300):
if (erectpos[0] != 0):
erectpos = (erectpos[0]-8,erectpos[1])
else:
screen.blit(secondpart,(320,400))
else:
print "second transition"
scripty = 3
checkok = 1
elif(scripty == 3):
if (timer < 200):
if (erectpos[1] != 0):
erectpos = (0,erectpos[1]+5)
timer+=1
else:
scripty = 4
elif(scripty == 4):
if (eventimer != 600):
if (erectpos[1] < 420):
erectpos = (0, erectpos[1]+5)
else:
print "final transition"
self.MENU()
screen.blit(erection,erectpos)
if (checkok):
screen.blit(thirdpart,(240,240))
display.flip()
def MENU(self):
Bigtex = texty("REDENSEK.TTF",60) #large text
TITLE = Bigtex.render("SHIFTER",0,BLACK)
titlepos = (20,30)
selection = 0
if superpowered:
START = TEXT("[START]+",BLACK,(535,360),1.5)
else:
START = TEXT("START",BLACK,(535,360),1.5)
OPTIONS = TEXT("OPTIONS",BLACK,(520,380),1.5)
EXIT = TEXT("EXIT",BLACK,(520,400),1.5)
movethem = 0
timer = 0
startimer = 0
box1 = (18,48)
box2 = (185,35)
boxrect = pygame.Rect(box1,box2)
erectpos = (0,420)
submenu = False
subselect = 0
global soundstatus
global musicstatus
SUB1 = TEXT("SOUND: ON",BLACK,(640,320))
SUB2 = TEXT("MUSIC: ON",BLACK,(640,340))
newthing = 0
playgame = 0
shifter = 0
selectok = 0
lightspeed = (5,0.5)
tp = 0.7
color = BLACK
changingmodes = False
changed = 0
fader = FADED(0)
faderok = 0
#some cleanup
for final in finals:
final.kill()
while self.running:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = 0
pygame.quit()
sys.exit()
elif event.type == KEYDOWN:
if ((event.key == K_ESCAPE)and not changingmodes):
if (selection == -1):
fxplay(blip,soundstatus)
subselect = 0
selection = 1
selectok = 1
TEXT.move(SUB1,640,20)
TEXT.move(SUB2,640,20)
lightspeed = (20,tp)
if ((event.key == K_UP)and not changingmodes):
selectok = 1
fxplay(bloop,soundstatus)
if (selection == -1):
if (subselect == 1):
subselect = 2
else: subselect = 1
elif (selection == 0):
selection = 2
else:
selection -= 1
if ((event.key == K_DOWN)and not changingmodes):
selectok = 1
fxplay(bloop,soundstatus)
if (selection == -1):
if (subselect == 2):
subselect = 1
else: subselect = 2
elif (selection == 2):
selection = 0
else:
selection += 1
if ((event.key == K_LEFT)and not changingmodes):
if (selection == -1):
fxplay(blip,soundstatus)
subselect = 0
selection = 1
selectok = 1
TEXT.move(SUB1,640,20)
TEXT.move(SUB2,640,20)
lightspeed = (20,tp)
if ((event.key == K_RIGHT)and not changingmodes):
if (selection == 1):
fxplay(blip,soundstatus)
subselect = 1
selection = -1
TEXT.move(START,300,20,tp)
TEXT.move(OPTIONS,315,20,tp)
TEXT.move(EXIT,300,20,tp)
TEXT.move(SUB1,410,20,tp)
TEXT.move(SUB2,400,20,tp)
print "OPTIONS"
if ((event.key == K_RETURN)and not changingmodes):
if (selection == 0):
fxplay(bleep,soundstatus)
print "START"
if mixer.music.get_busy():
mixer.music.fadeout(600)
changingmodes = True
elif (selection == 1):
fxplay(blip,soundstatus)
selection = -1
subselect = 1
TEXT.move(START,300,20,tp)
TEXT.move(OPTIONS,315,20,tp)
TEXT.move(EXIT,300,20,tp)
TEXT.move(SUB1,410,20,tp)
TEXT.move(SUB2,400,20,tp)
print "OPTIONS"
elif (selection == 2):
print "EXIT"
self.running = 0
elif (selection == 3):
fxplay(bleep,soundstatus)
elif (subselect == 1):
if(soundstatus == True):
TEXT.changetext(SUB1, "SOUND: OFF")
soundstatus = False
else:
TEXT.changetext(SUB1, "SOUND: ON")
soundstatus = True
elif (subselect == 2):
if(musicstatus == True):
mixer.music.set_volume(0.0)
TEXT.changetext(SUB2, "MUSIC: OFF")
musicstatus = False
else:
mixer.music.set_volume(0.5)
TEXT.changetext(SUB2, "MUSIC: ON")
musicstatus = True
if selectok:
if (selection == 0):
TEXT.move(START,535)
TEXT.move(OPTIONS,520)
TEXT.move(EXIT,520)
elif (selection == 1):
TEXT.move(START,520,lightspeed[0],lightspeed[1])
TEXT.move(OPTIONS,535,lightspeed[0],lightspeed[1])
TEXT.move(EXIT,520,lightspeed[0],lightspeed[1])
lightspeed = (5,0.5)
elif (selection == 2):
TEXT.move(START,520)
TEXT.move(OPTIONS,520)
TEXT.move(EXIT,535)
elif (subselect == 1):
TEXT.move(SUB1,410)
TEXT.move(SUB2,400)
elif (subselect == 2):
TEXT.move(SUB1,400)
TEXT.move(SUB2,410)
selectok = 0
screen.fill(RED)
# Title flash
if (timer == 3):
TITLE = Bigtex.render("SHIFTER",0,WHITE)
draw.rect(screen,WHITE,boxrect,2)
elif(timer == 6):
TITLE = Bigtex.render("SHIFTER",0,BLACK)
#draw.rect(screen,BLACK,boxrect,2)
timer = 0
timer+=1
if(changingmodes):
if(changed == 0):
TEXT.move(START,650,15)
TEXT.move(OPTIONS,650,15)
TEXT.move(EXIT,650,15)
if(changed < 20):
box1 = (18,box1[1]-5)
boxrect = pygame.Rect(box1,box2)
titlepos = (titlepos[0],titlepos[1]-5)
else:
self.TUT()
changed+=1
screen.blit(TITLE,titlepos)
screen.blit(erection,erectpos)
TEXT.update(START)
TEXT.update(OPTIONS)
TEXT.update(EXIT)
TEXT.update(SUB1)
TEXT.update(SUB2)
if (len(Stars) != 0):
Stars.update()
if (startimer == Starinterval-(newthing/10)):
startimer = 0
if (len(Stars) < MAXSTARS):
if (color == BLACK): color = WHITE
else: color = BLACK
Stars.add(STARZ(-(STARSPEED+newthing),
Starpos(),
(STARSPEED+newthing,2),
color))
if faderok:
if(fader.alphasz != 255):
fader.update()
else:
self.ROLLCREDITS()
startimer+=1
display.flip()
def TUT(self):
startimer = 0
color = BLACK
mastertimer = 0
pauser = False
playerpos = 0
player = pygame.Rect((50,420),(100,100))
# Text Info
Texty = texty("REDENSEK.TTF",20)
part1 = Texty.render("WHEN PLAYING, PRESS K TO SHIFT OBJECTS",0,BLACK)
part2 = Texty.render("YOUR RANGE IS LIMITED, SO TIME YOUR SHOTS",0,BLACK)
part2a = Texty.render("RANGE",0,BLACK)
part3 = Texty.render("GOOD LUCK!",0,BLACK)
TUTORIAL = TEXT("TUTORIAL",RED,(550,430))
eventz = 0
blinky = 0
pausetimer = 0
while self.running:
mastertimer+=1
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = 0
elif event.type == KEYDOWN:
if (event.key == K_ESCAPE):
self.RESET("PLAY")
screen.fill(RED)
if pauser:
pausetimer += 1
if(eventz == 1):
if(pausetimer < 200):
screen.blit(part1,(100,50))
elif(pausetimer > 200):
pauser = False
pausetimer = 0
elif(eventz == 2):
if(pausetimer < 200):
screen.blit(part2,(100,50))
blinky+=1
if(blinky < 6):
draw.circle(screen,BLACK,(100,400),200,2)
elif(blinky > 12):
blinky = 0
draw.circle(screen,RED,(100,400),200,2)
elif(pausetimer > 200):
pauser = False
pausetimer = 0
elif(eventz == 3):
if(pausetimer < 200):
screen.blit(part3,(100,50))
elif(pausetimer > 200):
pauser = False
pausetimer = 0
else:
Stars.update()
BLASTS.update()
TEXT.update(TUTORIAL)
#STAR SHIT
if (startimer == Starinterval):
startimer = 0
if (len(Stars) < MAXSTARS):
if (color == BLACK): color = WHITE
else: color = BLACK
Stars.add(STARZ(-(STARSPEED),
Starpos(),
(STARSPEED,2),
color))
startimer+=1
#scripted events
if(mastertimer > 200):
pauser = True
eventz += 1
fxplay(textblip,soundstatus)
mastertimer = 0
if(eventz == 4):
print "PLAY BALL"
self.PLAY()
bar.update()
player = pygame.Rect((50,bar.ypos-playerpos+1),(100,102))
draw.rect(screen,BLACK,player)
if(playerpos < 100):
playerpos += 2
elif(playerpos == 100):
playerpos+=1
fxplay(damage,soundstatus)
                for xval in (20,40,60,80):  # renamed from 'xrange' to avoid shadowing the builtin
                    DOTZ.add(DOTS((xval,410)))
BLASTS.add(EXPLOSION((100,400),50,0,1))
DOTZ.update()
display.flip()
def PLAY(self):
if remote.endgame:
remote.RESET()
startimer = 0
playerb = pygame.Rect((50,320),(100,100))
Texty = texty("REDENSEK.TTF",20)
newthing = 0
newtimer = 0
walltimer = 0
wallinterval = 100
pauseloop = 0
PAUSER = Texty.render("PAUSED",0,RED)
INSTRUCT = Texty.render("PRESS X TO EXIT",0,RED)
INSTRUCT2 = Texty.render("R FOR MAIN MENU",0,RED)
color = BLACK
#Laser stuff
#==================
lightfromheaven = 0
#==================
lightflash = 0
lightwid = 100
lightxpos = 50
lightrect = pygame.Rect((50,0),(100,420))
lightcount = 100
timedill = 10
ticktock = 3
cv = 0
fader = FADED(0)
darthfader = FADED(0,WHITE)
finals.add(FINALLITY())
#===============
global bossdead
bossdead = False
#===============
while self.running:
clock.tick(FPS)
if(newtimer < 100):
newtimer+=1
else:
newtimer = 0
if(newthing < 20):
newthing+=1
for event in pygame.event.get():
if event.type == pygame.QUIT or (event.type == KEYDOWN and event.key == K_q):
self.running = 0
pygame.quit()
sys.exit()
elif event.type == KEYDOWN:
if(event.key == K_RSHIFT) or (event.key == K_LSHIFT):
if(dude.ultrapowered == True):
dude.ultrapowered = False
for boss in bawss:
boss.DIEBOX()
if ((event.key == K_k) and dude.alive):
dude.STRIKU()
for wall in walmart:
if superpowered and (wall.pos[0] < 500):
newpos = (wall.pos[0]+50,wall.pos[1]+145)
shocks.add(SHOCKWAVE(newpos[0]))
BLASTS.add(EXPLOSION(newpos,20,0,1,BLACK))
randar = random.randrange(50,80)
swords.add(SWORDOFLIGHT(newpos[0],newpos[1]-randar,1))
wall.dead(1)
fxplay(bomb,soundstatus)
elif(wall.pos[0] < 300):
newpos = (wall.pos[0]+50,wall.pos[1]+145)
BLASTS.add(EXPLOSION(newpos,20,0,1,BLACK))
wall.dead(1)
fxplay(bomb,soundstatus)
for wall in blastmart:
if superpowered and (wall.pos[0] < 500):
newpos = (wall.pos[0],wall.pos[1]+145)
shocks.add(SHOCKWAVE(newpos[0]))
BLASTS.add(EXPLOSION(newpos,20,0,1,BLACK))
randar = random.randrange(50,80)
swords.add(SWORDOFLIGHT(newpos[0],newpos[1]-randar,1))
wall.dead(1)
elif(wall.pos[0] < 300):
newpos = (wall.pos[0],wall.pos[1]+145)
BLASTS.add(EXPLOSION(newpos,20,0,1,BLACK))
wall.pushback()
for wall in spinmart:
newrand = random.randrange(0,4)
if superpowered and (wall.pos[0] < 500):
newpos = (wall.pos[0],wall.pos[1]+55)
BLASTS.add(EXPLOSION(newpos,20,0,1,BLACK))
shocks.add(SHOCKWAVE(newpos[0]))
if(newrand == 0):
wall.REFLECT()
else:
BLASTS.add(EXPLOSION(newpos,50,0,1,BLACK))
wall.dead()
elif(wall.pos[0] < 400):
newpos = (wall.pos[0],wall.pos[1]+55)
BLASTS.add(EXPLOSION(newpos,20,0,1,BLACK))
if(newrand == 0):
wall.REFLECT()
else:
BLASTS.add(EXPLOSION(newpos,50,0,1,BLACK))
wall.dead()
for gen in bawls:
if not gen.initiate and superpowered and gen.eyeflash:
gen.damage(1)
newpos = (gen.pos[0],gen.pos[1])
BLASTS.add(EXPLOSION(newpos,20,0,1,BLACK))
randar = random.randrange(0,50)
swords.add(SWORDOFLIGHT(newpos[0]-80,newpos[1]-randar,1))
shocks.add(SHOCKWAVE(newpos[0]))
for ball in balls:
if(ball.pos[0] < 200):
fxplay(bomb1,soundstatus)
ball.tag()
for baws in bawss:
if(baws.attacktype == 2)and(baws.atktmr > 0):
ypwn = random.randrange(-40,40)
newpos = (baws.pos[0]+70,baws.pos[1]+128+ypwn)
BLASTS.add(EXPLOSION(newpos,60,0,1,BLACK))
swords.add(SWORDOFLIGHT(newpos[0]-80,newpos[1]-50,1))
baws.damage(0.2)
for lasic in lazor:
lasic.xpos += 30
if(baws.pos[0] < 265) and (baws.attacktype == 4):
baws.damage(1)
fxplay(bomb1,soundstatus)
fxplay(bomb2,soundstatus)
baws.flash = True
newpos = (baws.pos[0]+70,baws.pos[1]+128)
BLASTS.add(EXPLOSION(newpos,20,0,1,BLACK))
randar = random.randrange(40,80)
swords.add(SWORDOFLIGHT(newpos[0],newpos[1]-randar,1))
baws.moveamount = -40
for turd in littleshit:
turd.shake()
newpos = (turd.pos[0],turd.pos[1]-40)
randar = random.randrange(-10,10)
swords.add(SWORDOFLIGHT(newpos[0]-50,newpos[1]+randar,1))
#==========================
if ((event.key == K_j)):
print remote.MasterTimer/30
#powermart.add(POWERBALL())
#==========================
if ((event.key == K_x)):
if pauseloop:
self.running = 0
if ((event.key == K_ESCAPE) or
(event.key == K_p)):
# PAUSE MENU
if(pauseloop == 0):
pauseloop = 1
elif(pauseloop == 1):
mixer.music.unpause()
mixer.unpause()
pauseloop = 0
if (event.key == K_r):
if pauseloop:
self.RESET()
#CHECK TO SEE IF THE GAME IS UNFOCUSED:
if not pygame.display.get_active():
# PAUSE MENU
if(pauseloop == 0):
pauseloop = 1
if (pauseloop == 0):
screen.fill(RED)
if (len(Stars) != 0):
Stars.update()
if (startimer == Starinterval-(newthing/10)):
startimer = 0
if (len(Stars) < MAXSTARS):
if (color == BLACK): color = WHITE
else: color = BLACK
Stars.add(STARZ(-(STARSPEED+newthing),
Starpos(),
(STARSPEED+newthing,2),
color))
startimer+=1
SHIELDS.update()
BLASTS.update()
dude.update(bar.ypos)
powermart.update()
balls.update()
lazor.update()
waves.update()
bar.update()
spinmart.update()
bawls.update()
swords.update()
bawss.update()
gdamnhammers.update(bar.ypos)
shocks.update(bar.ypos)
littleshit.update()
if bossdead:
for final in finals:
if(final.color > 200):
darthfader.update()
if(darthfader.alphasz == 255):
global RED
RED = (255,0,0)
self.PLANETBLAST()
finals.update()
for item in walmart:
for thing in BLASTS:
if (item.rect.colliderect(thing.rect)and
thing.pain):
item.dead(0)
if (item.pos[0] < 70):
item.dead(0)
screen.fill(WHITE)
dude.DAMAGE()
DOTZ.update()
GENES.update()
if dude.alive:
walmart.update((STARSPEED),(bar.ypos-420))
blastmart.update((bar.ypos-420))
remote.update()
alarm.update()
#INTRODUCTION
if (lightfromheaven == 0):
draw.rect(screen,BLACK,playerb)
if(lightcount == 0):
fxplay(charge,soundstatus)
lightfromheaven = 1
else:
lightcount -= 1
if (lightfromheaven == 1):
if(lightwid > 0):
                        lightwid -= 1
lightxpos+=0.5
lightrect = pygame.Rect((lightxpos,0),(lightwid,420))
else:
lightfromheaven = 2
lightflash = 0
newcolor = (cv,cv,cv)
if (cv < 255):
cv+=1
if(lightflash == 3):
draw.rect(screen,WHITE,lightrect)
elif(lightflash == 6):
lightflash = 0
lightflash += 1
draw.rect(screen,newcolor,playerb)
elif(lightfromheaven == 2):
charge.stop()
for yval in (400,380,360):
DOTZ.add(DOTS((100,yval)))
screen.fill(WHITE)
fxplay(pulse,soundstatus)
BLASTS.add(EXPLOSION((100,400),50,0,1,WHITE))
dude.STRIKU()
lightfromheaven = 3
else:
mixer.pause()
mixer.music.pause()
screen.blit(PAUSER,(550,430))
screen.blit(INSTRUCT,(480,445))
screen.blit(INSTRUCT2,(480,455))
if remote.endgame:
if(fader.alphasz != 255):
fader.update()
else:
self.ROLLCREDITS()
if not dude.alive:
alarm.RESET()
# Kill EVERYTHING
for wall in walmart:
wall.kill()
for wall in blastmart:
wall.kill()
for shit in powermart:
shit.kill()
for gen in GENES:
gen.kill()
for gen in bawls:
gen.pause = True
for wall in spinmart:
wall.kill()
display.flip()
def ROLLCREDITS(self):
finallity = imglode("END","concepts",False)
finallity2 = imglode("final copy","concepts",False)
picalpha = 0
picalpha2 = 0
Texty = texty("REDENSEK.TTF",20)
#TEXTS
lines = []
lines2 = []
linesb = ("CREDITS:",
"ART, CODING: MICHAEL AREVALO",
"PLAYTESTING, IDEA MACHINE: JAY DIHENIA",
" MATTHEW GENG",
"LEVEL DESIGN: ZENON CUELLAR",
"MORAL SUPPORT: MY GIRLFRIEND",
"",
"SPECIAL THANKS: MAJS, BJRA, AND ALL THE",
"MUSICIANS FROM NEWGROUNDS.COM AUDIO PORTAL",
"SOUNDS MADE USING SFXR WRITTEN BY:",
"DR PETTER",
"",
"AND YOU, THE PLAYER!")
lines2b = ("MADE USING PYTHON AND PYGAME",
"DISTRIBUTED FREE OF CHARGE AND LICENSED",
"UNDER GNU LESSER GENERAL PUBLIC LICENSE",
"",
"SHIFTER - COPYRIGHT (C) MICHAEL AREVALO 2010",
"",
"THANKS FOR PLAYING!")
for line in linesb:
lines.append(Texty.render(line,0,RED))
for line in lines2b:
lines2.append(Texty.render(line,0,RED))
shiftval = 420
if(checkfile("202672_Excellent_use_of_Magic__my.mp3")):
musiclode("202672_Excellent_use_of_Magic__my.mp3")
musicplay(musicstatus,1)
while self.running:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = 0
elif event.type == KEYDOWN:
if (event.key == K_ESCAPE):
mixer.music.fadeout(600)
self.RESET()
screen.fill(BLACK)
if (picalpha < 255):
picalpha += 1
finallity.set_alpha(picalpha)
screen.blit(finallity,(0,0))
else:
screen.blit(finallity,(0,0))
shiftval-= 0.2
if(shiftval > -300):
shiftval -= 0.1
    for line in range(1, len(lines)+1):
        screen.blit(lines[line-1],(20,(line*20)+shiftval))
else:
    for line in range(1, len(lines2)+1):
        screen.blit(lines2[line-1],(20,(line*20)-10))
if(shiftval < -400):
if(picalpha2 < 255):
picalpha2 += 1
finallity2.set_alpha(picalpha2)
screen.blit(finallity2,(0,0))
display.flip()
def RESET(self,mode="MENU"):
remote.RESET()
alarm.RESET()
# Kill EVERYTHING
for wall in walmart:
wall.kill()
for gen in GENES:
gen.kill()
for bawl in bawls:
bawl.kill()
for wall in blastmart:
wall.kill()
for shit in powermart:
shit.kill() #>:3
for dot in DOTZ:
dot.kill()
for boss in bawss:
boss.kill()
if(mode == "MENU"):
self.MENU()
else: self.PLAY()
def PLANETBLAST(self):
self.endtimer = 0
planeet = imglode("Planet.bmp")
STARS = imglode("stars.bmp")
planeetpos = (470,200)
planetoffset = 0
blastradius = 50
blastwid = 0
greenrad = 20
greenwid = 2
increment = 0
linewidth = 10
linepos1 = (150,0)
linepos2 = (710,480)
lineshow = 0
fader = FADED(0)
darthfader = FADED(0,WHITE)
while self.running:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = 0
screen.fill(RED)
screen.blit(STARS,(0,0))
if((self.endtimer < 200) and
(self.endtimer%2 == 0) and
(self.endtimer > 70)):
draw.line(screen,WHITE,linepos1,linepos2,linewidth)
planetoffset = 5
else:
planetoffset = 0
if(self.endtimer > 60) and (self.endtimer < 70):
screen.fill(WHITE)
if(self.endtimer > 70) and (self.endtimer < 150):
if(increment == 6):
greenrad+= 7
increment = 0
blastradius+=3
if(linewidth > 0):
linewidth -= 1
increment+=1
draw.circle(screen,GREEN,(570,350),greenrad,greenwid)
draw.circle(screen,WHITE,(570,350),blastradius,blastwid)
if(self.endtimer > 150) and (self.endtimer%5 == 0):
if(increment == 6):
greenrad+= 7
increment = 0
blastradius+=3
increment+=1
draw.circle(screen,BLACK,(570,350),greenrad,greenwid)
draw.circle(screen,WHITE,(570,350),blastradius,blastwid)
screen.blit(planeet,
(planeetpos[0],planeetpos[1]+planetoffset))
if(self.endtimer > 100):
darthfader.update()
if(self.endtimer > 600):
if(fader.alphasz != 255):
fader.update()
else:
self.ROLLCREDITS()
self.endtimer+=1
display.flip()
class STARZ(pygame.sprite.Sprite):
def __init__(self, speed, start, size, color):
pygame.sprite.Sprite.__init__(self)
self.Spd = speed
self.Pos = start
self.image = Surface(size)
self.image.fill(color)
def update(self):
self.Pos = (self.Pos[0]+self.Spd, self.Pos[1])
screen.blit(self.image, self.Pos)
if (self.Pos[0]< -30):
self.kill()
class WALL(pygame.sprite.Sprite):
def __init__(self,xval=WINSIZE[0],warn=False,super=False):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.surface.Surface((60,200))
self.image.fill(BLACK)
self.pos = (xval,220)
self.rect = pygame.rect.Rect(self.pos,(60,200))
self.warning = warn
self.warntimer = 0
self.superwall = super
def update(self,speed,alt):
if self.warning:
self.warntimer+=1
if(self.warntimer == 1):
alarm.sound()
if(self.warntimer > 100):
self.warning = 0
else:
self.rect = pygame.rect.Rect(self.pos,(60,200))
self.pos = (self.pos[0]-speed,220+alt)
screen.blit(self.image,self.pos)
def dead(self, kill=0):
self.image.fill(WHITE)
bar.FUXXOR()
screen.blit(self.image,self.pos)
if kill:
clown = 40
for n in (270,280,290,300,310):
DOTZ.add(DOTS((self.pos[0],n),clown*2,5))
else:
clown = 20
for n in (220,260,300,340,380):
DOTZ.add(DOTS((self.pos[0],n),clown))
if(self.superwall):
remote.RESET(True)
self.kill()
class BLASTWALL(pygame.sprite.Sprite):
def __init__(self,warn = 0):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.surface.Surface((60,200))
self.image.fill(WHITE)
self.pos = (WINSIZE[0],220)
self.flash = 0
self.timebomb = 200
self.speed = 10
self.warning = warn
self.warntimer = 0
def update(self,alt):
if self.warning:
self.warntimer+=1
if(self.warntimer == 1):
alarm.sound()
if(self.warntimer > 100):
self.warning = 0
else:
if (self.pos[0] < 100):
self.speed = self.speed/2
if (self.pos[0]>20):
self.pos = (self.pos[0]-self.speed,220+alt)
screen.blit(self.image,self.pos)
if(self.timebomb == 0):
self.dead(1)
self.speed += 1
self.flash += 1
self.timebomb -= 1
if(self.flash == 6):
self.image.fill(WHITE)
elif(self.flash == 12):
self.image.fill(BLACK)
self.flash = 0
def pushback(self):
self.speed = -25
self.pos = (self.pos[0]+10,self.pos[1])
fxplay(deflect,soundstatus)
def dead(self,detonate):
if (self.pos[0] < 200):
screen.fill(WHITE)
dude.DAMAGE()
fxplay(bomb2,soundstatus)
fxplay(pulse,soundstatus)
if detonate:
BLASTS.add(EXPLOSION((self.pos[0]+30,400),100,1))
self.kill()
else:
self.kill()
class POWERBALL(pygame.sprite.Sprite):
def __init__(self,startx = 640,yad = 0):
pygame.sprite.Sprite.__init__(self)
self.cpos = (startx,360-yad)
self.rad = 10
self.blindingflash = 0
def update(self):
self.rad-=1
if self.blindingflash:
pygame.draw.circle(screen,WHITE,self.cpos,30)
self.cpos = (self.cpos[0]-10,self.cpos[1])
pygame.draw.circle(screen,WHITE,self.cpos,10,self.rad)
if(self.cpos[0] < 95):
fxplay(powerup,soundstatus)
SHIELDS.add(EXPLOSION((100,400),50,0,0,WHITE))
pygame.draw.circle(screen,WHITE,(100,400),70)
dude.HEAL()
self.kill()
if(self.rad == 0):
self.rad = 10
class BLADEWALL(pygame.sprite.Sprite):
def __init__(self,pos,ymove=5,sound=True):
pygame.sprite.Sprite.__init__(self)
self.imgs = imglode("Spinwall.bmp")
self.imgs = imgscale(self.imgs,3)
self.imgs = getsub(self.imgs,32*3)
self.frame = 0
self.pos = pos
self.xmove = 15
self.xchange = 2
self.ymove = ymove
self.ychange = True
self.framechange = 100
self.framerate = 100
if sound:
fxplay(spinblade,soundstatus)
self.rect = pygame.Rect(self.pos,(96,96))
self.valid = True
def update(self):
self.rect = pygame.Rect(self.pos,(96,96))
self.pos = (self.pos[0]+self.xmove,
self.pos[1]+self.ymove)
screen.blit(self.imgs[self.frame],self.pos)
self.xmove-=self.xchange
if (self.ymove != 0) and self.ychange:
self.ymove -= 0.1
if(self.ymove < 0) and self.ychange:
self.ymove = 0
if (self.framechange >= 0):
self.framechange-=self.framerate
else:
#switch frame
if(self.frame < 6):
self.frame += 1
else:
if(self.frame == 6):
self.frame = 7
else: self.frame = 6
self.framechange = 100
if (self.framerate < 50):
self.framerate += 5
if(self.pos[0] < 100):
self.dead()
dude.DAMAGE()
if(self.pos[0] > 700):
self.kill()
def REFLECT(self):
self.valid = False
screen.fill(WHITE)
self.xmove = 30
self.xchange = 0
for x in (0,5,10,15):
DOTZ.add(DOTS((self.pos[0]+x,self.pos[1]+30)))
fxplay(bladekill,soundstatus)
self.ychange = False
self.ymove = random.randrange(-20,0)
def dead(self):
if self.valid:
fxplay(slice,soundstatus)
for x in (0,5,10,15):
DOTZ.add(DOTS((self.pos[0]+x,self.pos[1]+30)))
self.kill()
class MINIGEN(pygame.sprite.Sprite):
def __init__(self,type="nothing",xpos=560):
pygame.sprite.Sprite.__init__(self)
self.pos = (xpos,-50)
self.img = pygame.Surface((50,50))
self.img2 = pygame.Surface((10,10))
self.img.fill(WHITE)
self.img2.fill(BLACK)
self.rect = pygame.Rect(self.pos,(50,50))
self.ychange = 21
self.timer = 200
self.timer2 = 12
self.blades = 4
if(type == "ROTATE"):
self.rotateok = True
self.xadd = 5
self.yadd = 0
self.switch = 0
self.img.fill(GREEN)
else:
self.rotateok = False
Texty = texty("REDENSEK.TTF",50)
self.warn = Texty.render("X",0,BLACK)
def update(self):
if self.rotateok:
self.switch += 1
if(self.switch == 20):
self.yadd = 5
self.xadd = 0
elif(self.switch == 40):
self.yadd = 0
self.xadd = 5
elif(self.switch == 60):
self.yadd = -5
self.xadd = 0
elif(self.switch == 80):
self.yadd = 0
self.xadd = -5
self.switch = 0
self.pos = (self.pos[0]+self.xadd,
self.pos[1]+self.yadd)
self.rect = pygame.Rect(self.pos,(50,50))
for wall in spinmart:
if(wall.rect.colliderect(self.rect) and
not wall.valid):
self.die()
if(self.timer == 0):
if(self.timer2 == 0):
self.timer2 = 12
if(self.blades > 0):
self.blades -= 1
spinmart.add(BLADEWALL(self.pos))
else:
self.timer = 100
self.blades = 4
else:
self.timer2 -= 1
else:
self.timer -= 1
if(self.timer < 20):
screen.blit(self.warn,
(self.pos[0]+13,self.pos[1]-50))
if(self.pos[1] < 150):
self.pos = (self.pos[0],self.pos[1]+self.ychange)
if(self.ychange > 1):
self.ychange -= 1
screen.blit(self.img, self.pos)
screen.blit(self.img2,
(self.pos[0]+55,self.pos[1]+10))
screen.blit(self.img2,
(self.pos[0]+55,self.pos[1]+30))
def die(self):
fxplay(bomb2,soundstatus)
BLASTS.add(EXPLOSION((self.pos[0]+13,self.pos[1]+25),
50,0,0,BLACK))
for xpos in (0,5,10,15,20):
DOTZ.add(DOTS((self.pos[0]+xpos,self.pos[1])))
self.kill()
class GENERATOR(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.images = imglode("THEEYE.bmp")
self.images = imgscale(self.images,2)
self.images = getsub(self.images,64)
self.imglen = len(self.images)
self.imgno = 0
self.imgdelay = 0
self.imgtrigger = False
self.eyeflash = False
self.pos = (560,0)
self.image1 = pygame.Surface((60,60))
self.image2 = pygame.Surface((10,10))
self.image1.fill(BLACK)
self.image2.fill(BLACK)
self.initiate = 1
self.mod = 5
self.flash = 0
# ATTACK STUFF
self.warning = 0
self.atkimg = pygame.Surface((6,15))
self.atkimg.fill(BLACK)
self.atktimer = 0
self.attacking = 0
self.atkseqZERO = 0
self.atkseqONE = 0
self.atkseqTWO = 0
#HEALTH STUFF
self.HP = 100
self.hpbar = pygame.Surface((600,16))
self.hpbarback = pygame.Surface((608,24))
self.hpbarflash = pygame.Surface((608,24))
self.hpbarflash.fill(WHITE)
self.hpbar.fill(WHITE)
self.hpbarback.fill(BLACK)
self.hpflash = 0
self.cv = 255
#TEXT STUFF
Texty = texty("REDENSEK.TTF",20)
Bigtex = texty("REDENSEK.TTF",50)
self.nametext = Texty.render("GENERATOR",0,BLACK)
self.question = Bigtex.render("?",0,BLACK)
self.lol = Bigtex.render(":)",0,BLACK)
self.loltimer = 100
#BIG ENTRANCE
fxplay(jet,soundstatus)
if mixer.music.get_busy():
mixer.music.stop()
if(checkfile("DANCE39.mp3")):
musiclode("DANCE39.mp3")
self.pause = False
def update(self):
screen.blit(self.image1,self.pos)
screen.blit(self.image2,
(self.pos[0]-15,self.pos[1]+10))
screen.blit(self.image2,
(self.pos[0]-15,self.pos[1]+40))
if (self.mod > 1):
self.mod-=.04
if self.initiate:
if(self.pos[1] < 300):
self.pos = (560,self.pos[1]+self.mod)
else:
self.image1.fill(WHITE)
BLASTS.add(EXPLOSION((self.pos[0]+30,self.pos[1]+30),50))
derp = 1
while(derp < 6):
derp+=1
DOTZ.add(DOTS((self.pos[0],self.pos[1]+90),20,2))
self.initiate = 0
screen.fill(WHITE)
fxplay(pulse,soundstatus)
if(checkfile("DANCE39.mp3")):
musicplay(musicstatus)
self.imgtrigger = True
else:
if not self.pause:
self.atktimer+=1
else:
if(self.loltimer == 0):
self.loltimer = -1
fxplay(textblip,soundstatus)
elif(self.loltimer == -1):
screen.blit(self.lol,(self.pos[0]+20,self.pos[1]-50))
else:
self.loltimer -= 1
# ATTACK SEQUENCES
if(self.atktimer == 100):
self.atkNO = random.randrange(0,4)
self.atktimer = 0
self.warning = 1
if(self.atkNO < 2):
self.atkseqZERO = 1
elif(self.atkNO == 2):
self.atkseqONE = 1
elif(self.atkNO == 3):
self.atkseqTWO = 1
#WARNING
if self.warning:
self.attacking+=1
if(self.attacking == 1):
fxplay(laser,soundstatus)
if(self.attacking < 10):
if self.atkseqZERO:
screen.blit(self.atkimg,(self.pos[0]+20,self.pos[1]-20))
screen.blit(self.atkimg,(self.pos[0]+30,self.pos[1]-20))
screen.blit(self.atkimg,(self.pos[0]+40,self.pos[1]-20))
elif self.atkseqONE:
pygame.draw.circle(screen,BLACK,
(self.pos[0]+30,self.pos[1]-18),
10)
elif self.atkseqTWO:
screen.blit(self.question,
(self.pos[0]+18,self.pos[1]-50))
elif(self.attacking>20) and (self.attacking<30):
if self.atkseqZERO:
screen.blit(self.atkimg,(self.pos[0]+20,self.pos[1]-20))
screen.blit(self.atkimg,(self.pos[0]+30,self.pos[1]-20))
screen.blit(self.atkimg,(self.pos[0]+40,self.pos[1]-20))
elif self.atkseqONE:
pygame.draw.circle(screen,BLACK,
(self.pos[0]+30,self.pos[1]-18),
10)
elif self.atkseqTWO:
screen.blit(self.question,
(self.pos[0]+18,self.pos[1]-50))
elif(self.attacking>40) and (self.attacking<50):
if self.atkseqZERO:
screen.blit(self.atkimg,(self.pos[0]+20,self.pos[1]-20))
screen.blit(self.atkimg,(self.pos[0]+30,self.pos[1]-20))
screen.blit(self.atkimg,(self.pos[0]+40,self.pos[1]-20))
elif self.atkseqONE:
pygame.draw.circle(screen,BLACK,
(self.pos[0]+30,self.pos[1]-18),
10)
elif self.atkseqTWO:
screen.blit(self.question,
(self.pos[0]+18,self.pos[1]-50))
# ATTACKS
if self.atkseqZERO :
self.eyeflash = True
if(self.attacking == 60):
walmart.add(WALL(540))
if(self.attacking == 70):
walmart.add(WALL(540))
if(self.attacking == 80):
walmart.add(WALL(540))
self.atkseqZERO = 0
self.warning = 0
self.atktimer = 0
self.attacking = 0
self.eyeflash = False
elif self.atkseqONE:
self.eyeflash = True
if(self.attacking == 60):
blastmart.add(BLASTWALL())
if(self.attacking == 100):
self.warning = 0
self.atkseqONE = 0
self.atktimer = 0
self.attacking = 0
self.eyeflash = False
elif self.atkseqTWO:
self.eyeflash = True
if(self.attacking == 80):
if(random.randint(0,10) == 1):
powermart.add(POWERBALL(520,10))
elif(random.randint(0,20) == 0):
fxplay(bomb,soundstatus)
BLASTS.add(EXPLOSION((self.pos[0]+30,self.pos[1]+30),
20,0,0,BLACK))
self.damage(5)
else:
walmart.add(WALL(540))
self.eyeflash = False
self.warning = 0
self.atkseqTWO = 0
self.atktimer = 0
self.attacking = 0
screen.blit(self.hpbarback,(20,20))
screen.blit(self.hpbar,(24,24))
screen.blit(self.nametext,(24,22))
if self.hpflash:
self.hpflash = 0
screen.blit(self.hpbarflash,(20,20))
#COLLISION
for explosion in BLASTS:
if(explosion.pos[0]>400) and explosion.pain:
self.damage(10)
explosion.pain = 0
if self.imgtrigger:
if(self.imgno < self.imglen-2):
self.imgdelay += 1
if(self.imgdelay == 3):
self.imgdelay = 0
self.imgno+=1
if(self.imgno == self.imglen-2):
self.imgtrigger = False
if self.eyeflash:
screen.blit(self.images[7],
(self.pos[0]-2,self.pos[1]))
if(self.flash == 3):
draw.circle(screen,BLACK,
(self.pos[0]+30,self.pos[1]+30),10,2)
draw.circle(screen,WHITE,
(self.pos[0]+30,self.pos[1]+30),
70,2)
else:
screen.blit(self.images[self.imgno],
(self.pos[0]-2,self.pos[1]))
self.flash+=1
if(self.flash == 3):
self.image2.fill(BLACK)
elif(self.flash == 6):
self.image2.fill(WHITE)
self.flash = 0
def damage(self,amount):
self.eyeflash = True
self.cv-=2*amount
if(self.cv < 0): self.cv = 0
self.image1.fill((self.cv,self.cv,self.cv))
self.HP -= amount
if(self.HP < 0): self.HP = 0
i = 1
while(i < 5):
i+=1
DOTZ.add(DOTS((self.pos[0]+40,self.pos[1]+40)))
self.hpbar = pygame.Surface(((6*self.HP),16))
self.hpbar.fill(WHITE)
#DEATH
if(self.HP == 0):
i = 1
while(i < 5):
i+=1
DOTZ.add(DOTS((self.pos[0]+40,self.pos[1]+40)))
fxplay(BOOM,soundstatus)
global FPS
FPS = 15
BLASTS.add(EXPLOSION(self.pos,100,0,1,WHITE))
remote.RESET(True)
mixer.music.fadeout(600)
self.kill()
bawls = pygame.sprite.Group()
class HAMMER(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.img = imglode("HAMMER.bmp")
self.img = imgscale(self.img,4)
self.img2 = imglode("HAMMER2.bmp")
self.img2 = imgscale(self.img2,4)
self.image = self.img2
self.flickerok = True
self.flicker = 0
self.timer = 0
self.pos = (400,0)
self.ymove = 1
self.shock = True
def update(self,ypos):
self.timer+=1
if(self.timer > 30) and (self.timer < 100):
self.flickerok = False
if(self.pos[1] < 200):
if(self.ymove < 20):
self.ymove+=5
self.pos = (self.pos[0],
self.pos[1]+self.ymove)
else:
if self.shock:
screen.fill(WHITE)
self.shock = False
fxplay(hammersmash,soundstatus)
fxplay(bomb,soundstatus)
shocks.add(SHOCKWAVE(self.pos[0]-35))
self.pos = (self.pos[0],230)
if(self.timer == 60):
fxplay(hammersmash,soundstatus)
fxplay(bomb,soundstatus)
shocks.add(SHOCKWAVE(self.pos[0]-50))
elif(self.timer == 80):
fxplay(hammersmash,soundstatus)
fxplay(bomb,soundstatus)
shocks.add(SHOCKWAVE(self.pos[0]-100))
elif(self.timer == 100):
fxplay(hammersmash,soundstatus)
fxplay(bomb,soundstatus)
shocks.add(SHOCKWAVE(self.pos[0]-150,True))
if(self.timer > 130):
self.flickerok = True
if(self.timer > 200):
self.kill()
if self.flickerok:
self.flicker+=1
if(self.flicker < 3):
self.image = self.img2
elif(self.flicker > 4):
self.flicker = 0
self.image = self.img
screen.blit(self.image,(self.pos[0],self.pos[1]+(430-ypos)))
gdamnhammers = pygame.sprite.Group()
class ENERGYBLAST(pygame.sprite.Sprite):
def __init__(self,xpos,type="NORMAL"):
pygame.sprite.Sprite.__init__(self)
if(type == "NORMAL"):
self.pos = (xpos, 420)
self.xmove = -5
self.ymove = -10
elif(type == "ABNORMAL"):
self.pos = (xpos,200)
self.xmove = -20
self.ymove = 10
self.type = type
self.addok = 0
self.reflecting = False
self.tagging = False
self.tagcount = 0
def update(self):
if(self.type == "NORMAL"):
if self.tagging:
if(self.xmove > -10):
self.xmove-=1
if not self.reflecting and not self.tagging:
if(self.ymove < 4):
self.ymove += 0.5
self.pos = (self.pos[0]+self.xmove,
self.pos[1]+self.ymove)
if(self.addok < 4):
self.addok+=1
else:
self.addok = 0
BLASTS.add(EXPLOSION(self.pos,50,0,0,GREEN))
if(self.type == "NORMAL"):
draw.circle(screen,GREEN,self.pos,30)
else:
draw.circle(screen,WHITE,self.pos,30)
if((self.pos[0] > 400) and
(self.pos[1] < 300) and
(self.type == "NORMAL")):
fxplay(BOOM,soundstatus)
BLASTS.add(EXPLOSION(self.pos,100,0,0,GREEN))
for boss in bawss:
boss.damage(20)
fxplay(damage,soundstatus)
screen.fill(WHITE)
self.kill()
if(self.pos[1] > 420) or (self.pos[0] < 0):
fxplay(BOOM,soundstatus)
BLASTS.add(EXPLOSION(self.pos,100,0,0,GREEN))
dude.DEATH()
fxplay(damage,soundstatus)
screen.fill(WHITE)
self.kill()
def tag(self):
if(self.type == "ABNORMAL"):
BLASTS.add(EXPLOSION(self.pos,70,0,1,WHITE))
self.kill()
if(self.tagcount < 8):
self.tagcount+=1
self.tagging = True
self.xmove = 10
self.ymove = 0
BLASTS.add(EXPLOSION(self.pos,40,0,1,BLACK))
else:
self.tagging = False
self.REFLECT()
def REFLECT(self):
BLASTS.add(EXPLOSION(self.pos,80,0,0,GREEN,))
self.reflecting = True
self.xmove = 20
self.ymove = -10
balls = pygame.sprite.Group()
class SHOCKWAVE(pygame.sprite.Sprite):
def __init__(self,xpos,blast=False,detonate=False,size=3):
pygame.sprite.Sprite.__init__(self)
self.imgs = imglode("Shockwave.bmp")
self.imgs = imgscale(self.imgs,size)
self.imgs = getsub(self.imgs,64*size)
self.timer = 0
self.frame = 0
self.pos = (xpos,268)
if not detonate:
BLASTS.add(EXPLOSION((self.pos[0]+90,self.pos[1]+160),
40,0,1,BLACK))
self.detonate = detonate
if blast:
balls.add(ENERGYBLAST(self.pos[0]))
if self.detonate:
BLASTS.add(EXPLOSION((self.pos[0],self.pos[1]+160),
100,1,1,GREEN))
def update(self,yalt):
if(self.timer < 1):
self.timer += 1
else:
self.timer=0
self.frame+=1
if(self.frame == 0):
for y in (0,20,40,60,80,100,120,140):
DOTZ.add(DOTS((self.pos[0],
self.pos[1]+y),20,5))
DOTZ.add(DOTS((self.pos[0]+20,
self.pos[1]+y),20,5))
bar.FUXXOR()
elif(self.frame == 5):
self.kill()
else:
if self.detonate:
self.pos = (self.pos[0]-128,self.pos[1])
screen.blit(self.imgs[self.frame],self.pos)
shocks = pygame.sprite.Group()
class LASER(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.rect = pygame.Rect((100,0),(2,450))
self.xpos = 100
self.width = 2
self.flash = 0
self.color = WHITE
def update(self):
self.flash += 1
if(self.flash < 3):
self.color = WHITE
else:
self.flash = 0
self.color = GREEN
if(self.width > 2):
self.width -= 4
if(self.width <= 0):
self.kill()
if(self.xpos > 100):
self.xpos -= 1
self.rect = pygame.Rect((self.xpos-(self.width/2),0),(self.width,450))
draw.rect(screen,self.color,self.rect)
def grow(self):
self.width = 50
lazor = pygame.sprite.Group()
class WAVE(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.img = pygame.Surface((100,480))
self.img.fill(WHITE)
self.width = 100
self.xpos = -100
bar.FUXXOR()
def update(self):
if(self.width > 0):
self.xpos += 20
self.width -= 2
else:
self.kill()
self.img = pygame.Surface((self.width,480))
self.img.fill(WHITE)
screen.blit(self.img,(self.xpos,0))
waves = pygame.sprite.Group()
class FINALLITY(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.MasterTimer = 0
self.color = 0
self.interval = 40
def update(self):
self.MasterTimer+=1
if(self.MasterTimer < 500):
if(self.color < 255):
self.color+=1
global RED
RED = (255,self.color,self.color)
if(self.MasterTimer%self.interval == 0):
if(self.interval > 20):
self.interval-=1
waves.add(WAVE())
finals = pygame.sprite.Group()
#FUCK YEAR LET'S DO THIS
class BOSS(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
#FIRST THE OBVIOUS STUFF
self.HP = 100
self.die = False
self.dying = False
self.deathsequence = 0
self.deathx = 0
self.summonok = True
#Now the imaging
self.alphasz = 255
self.imgs = []
#first the stills
img1 = imglode("boxfirstform.bmp")
img2 = imglode("boxdead.bmp")
#then the animations
imgs1 = imglode("boxtransform.bmp")
imgs2 = imglode("boxsecondform.bmp")
imgs3 = imglode("boxatk1.bmp")
imgs4 = imglode("boxatk2.bmp")
imgs5 = imglode("boxatk3.bmp")
#next expand them
img1 = imgscale(img1,4)
img2 = imgscale(img2,4)
imgs1 = imgscale(imgs1,4)
imgs2 = imgscale(imgs2,4)
imgs3 = imgscale(imgs3,4)
imgs4 = imgscale(imgs4,4)
imgs5 = imgscale(imgs5,4)
#next split the ones that need splitting
imgs1 = getsub(imgs1,256)
imgs2 = getsub(imgs2,256)
imgs3 = getsub(imgs3,256)
imgs4 = getsub(imgs4,256)
imgs5 = getsub(imgs5,256)
for thing in (img1,img2,imgs1,imgs2,imgs3,
imgs4,imgs5):
self.imgs.append(thing)
#self.imgs now follows the format:
# (first idle,dead,[transform],[second idle],
# [first attack],[second attack], [third attack])
#other things that will need blitting
self.hpbar1 = pygame.Surface((600,16))
self.hpbar1.fill(GREEN)
self.hpbar2 = pygame.Surface((600,16))
self.hpbar2.fill(RED)
self.hpbarback = pygame.Surface((608,24))
self.hpbarback.fill(BLACK)
self.hpbarflash = pygame.Surface((608,24))
self.hpbarflash.fill(GREEN)
self.hpshow = False
#TEXT STUFF
Texty = texty("REDENSEK.TTF",20)
Bigtex = texty("REDENSEK.TTF",40)
Liltex = texty("REDENSEK.TTF",10)
self.name = Texty.render("BOX",0,BLACK)
self.warn1 = Bigtex.render("WARNING!",0,BLACK)
self.warn2 = Liltex.render("MASSIVE ENEMY APPROACHING",0,BLACK)
#BIG ENTRANCE
self.entrancetimer = 0
self.me = pygame.Surface((20,20))
self.mepos = (400,0)
self.color = (0,0,0)
self.cv = 0
self.frame = 0
self.frameswitch = 6
self.pos = (400,100)
self.bob = 0
self.bobval = 1
self.image = img1
self.imgtype = -1
self.flash = False
self.fuck = False
#========================
self.transforming = False
#========================
self.transformtimer = 0
self.firstform = True
#Attack info
self.spawninterval = 200
self.paused = True
self.attacktype = 0
self.attacktimer2 = 0
self.attacktimer = 0
self.atktmr = 0
self.moveok = 0
self.moveamount = 0
self.flicker = False
self.alphasz = 255
self.shockpos = 0
#sounds
mixer.music.stop()
def update(self):
#BIG ENTRANCE
if(self.imgtype == -1):
if(self.cv < 255):
self.cv+=1
self.color = (0,self.cv,0)
if(self.entrancetimer == 100):
screen.fill(self.color)
fxplay(bomb2,soundstatus)
self.me = pygame.Surface((20,20))
self.me.fill(self.color)
if(self.entrancetimer == 200):
screen.fill(self.color)
fxplay(bomb2,soundstatus)
self.me = pygame.Surface((40,40))
self.me.fill(self.color)
if(self.entrancetimer == 250):
screen.fill(self.color)
fxplay(bomb2,soundstatus)
self.me = pygame.Surface((70,70))
self.me.fill(self.color)
self.mepos = (self.mepos[0]+10,self.mepos[1]+10)
if(self.entrancetimer == 270):
screen.fill(self.color)
fxplay(bomb2,soundstatus)
self.me = pygame.Surface((100,100))
self.me.fill(self.color)
self.mepos = (self.mepos[0]+10,self.mepos[1]+10)
if(self.entrancetimer == 300):
screen.fill(self.color)
fxplay(bomb2,soundstatus)
self.me = self.imgs[0]
self.mepos = (400,100)
if(self.mepos[1] < 100):
self.mepos = (self.mepos[0],self.mepos[1]+10)
if(self.entrancetimer == 350):
screen.fill(WHITE)
fxplay(shiny,soundstatus)
BLASTS.add(EXPLOSION(self.mepos,100))
if(self.entrancetimer == 400):
if(checkfile("296234_Awake___Supernova.mp3")):
musiclode("296234_Awake___Supernova.mp3")
musicplay(musicstatus)
self.flash = True
self.paused = False
self.imgtype = 0
screen.blit(self.me,self.mepos)
self.entrancetimer+=1
#HEALTH SHITS
elif(self.imgtype >= 0) and not self.die:
screen.blit(self.hpbarback,(20,20))
screen.blit(self.hpbar2,(24,24))
if self.firstform:
screen.blit(self.hpbar1,(24,24))
screen.blit(self.name,(24,20))
#ATTACKS
if(self.attacktimer2 > self.spawninterval) and not self.paused:
BLASTS.add(EXPLOSION((self.pos[0]+128,self.pos[1]+128),50,0,0,GREEN))
thing = random.randrange(0,2)
fxplay(bomb1,soundstatus)
if(thing == 0):
walmart.add(WALL(600))
elif(thing == 1):
blastmart.add(BLASTWALL())
self.attacktimer2 = 0
else:
self.attacktimer2 += 1
if(self.paused == False):
self.attacktimer += 1
#regular mode attacks
if(self.imgtype == 0):
if(self.attacktimer == 220):
self.paused = True
self.attacktimer+=1
self.attacktype = self.makechoice()
print self.attacktype
elif(self.attacktimer == 221):
#SMASH YOU
if(self.attacktype == 4):
if(self.pos[0] < 100):
self.flash = True
dude.DAMAGE()
self.atktmr = 201
self.bob = 0
self.atktmr+=1
if(self.atktmr < 200):
if(self.moveok == 1):
if(self.moveamount < 20):
self.moveamount+=5
self.pos = (self.pos[0]-self.moveamount,self.pos[1])
else:
if(self.pos[1] < 210):
self.pos=(self.pos[0],self.pos[1]+5)
else:
for x in (0,50,100,150,200):
DOTZ.add(DOTS((x,self.pos[1]+256)))
fxplay(hammersmash,soundstatus)
fxplay(rush,soundstatus)
shocks.add(SHOCKWAVE(300))
BLASTS.add(EXPLOSION((self.pos[0]+128,self.pos[1]+128),
70,0,0,BLACK))
self.moveok = 1
elif(self.atktmr > 200) and (self.atktmr < 250):
if(self.pos[0] != 400):
self.pos = (correct(self.pos[0],400,20,10),
self.pos[1])
if(self.pos[1] != 100):
self.pos = (self.pos[0],
correct(self.pos[1],100,20,10))
elif(self.atktmr > 250):
self.moveamount = 0
self.paused = False
self.atktmr = 0
self.attacktimer = 0
self.moveok = 0
#SLICE N DICE YOU
if(self.attacktype == 5):
self.bob = 0
if(self.flicker == False):
self.flicker = True
if (self.alphasz > 50):
self.alphasz -= 5
self.atktmr += 1
if((self.atktmr == 50)or
(self.atktmr == 70)or
(self.atktmr == 90)or
(self.atktmr == 110)):
fxplay(genspawn,soundstatus)
GENES.add(MINIGEN("ROTATE",350))
if(len(GENES) == 0) and (self.atktmr > 50):
self.alphasz = 255
self.paused = False
self.atktmr = 0
self.attacktimer = 0
#HAMMER TIME
if(self.attacktype == 6):
self.bob = 0
if(self.atktmr < 100):
self.atktmr += 1
elif(self.atktmr == 100):
self.atktmr += 1
fxplay(hammerspawn,soundstatus)
gdamnhammers.add(HAMMER())
else:
if(len(balls) == 0):
self.paused = False
self.atktmr = 0
self.attacktimer = 0
#WTF mode attacks
if(self.imgtype > 0):
if(self.attacktimer == 150):
self.paused = True
self.attacktimer+=1
self.attacktype = self.makechoice()
print self.attacktype
elif(self.attacktimer == 151):
#THROW A FUCKTON OF BLADES
if(self.attacktype == 0):
if(self.atktmr == 0):
fxplay(transform,soundstatus)
self.frameswitch = 0
self.imgtype = 3
elif((self.atktmr == 30)or
(self.atktmr == 40)or
(self.atktmr == 50)or
(self.atktmr == 60)or
(self.atktmr == 70)or
(self.atktmr == 80)):
fxplay(bigblade,soundstatus)
spinmart.add(BLADEWALL((self.pos[0]+100,self.pos[1]),
10,False))
self.atktmr+=1
if(self.atktmr == 200):
self.atktmr = 0
self.attacktimer = 0
self.imgtype = 2
screen.fill(GREEN)
self.paused = False
#GIGA FLARE
if(self.attacktype == 1):
if(self.atktmr == 0):
fxplay(roar,soundstatus)
self.flash = True
self.frameswitch = 0
self.imgtype = 4
if(self.atktmr == 40):
fxplay(slowcharge,soundstatus)
fxplay(bigcharge,soundstatus)
if((self.atktmr > 40) and
(self.atktmr%5 == 0) and
(self.atktmr < 500)):
BLASTS.add(EXPLOSION((self.pos[0]+128,self.pos[1]+128),
50,0,0,GREEN))
draw.circle(screen,GREEN,
(self.pos[0]+128,self.pos[1]+128),40)
self.atktmr+=1
if(self.atktmr%50 == 0):
blastmart.add(BLASTWALL())
if(self.atktmr == 500):
fxplay(bigblast,soundstatus)
fxplay(BOOM,soundstatus)
fxplay(pulse,soundstatus)
balls.add(ENERGYBLAST(self.pos[0]+128,"ABNORMAL"))
if(self.atktmr > 510):
global FPS
FPS = 5
self.atktmr = 0
self.attacktimer = 0
self.imgtype = 2
screen.fill(GREEN)
self.paused = False
#LASER RAIN
if(self.attacktype == 2):
if(self.atktmr == 0):
self.frameswitch = 0
self.imgtype = 5
if(self.atktmr == 20):
lazor.add(LASER())
fxplay(opticcharge,soundstatus)
if(self.atktmr == 200):
self.flash = True
for lasic in lazor:
lasic.grow()
self.shockpos = lasic.xpos
fxplay(opticblast,soundstatus)
if(self.atktmr == 220):
for lasic in lazor:
lasic.kill()
global FPS
FPS = 10
self.flash = True
fxplay(BOOM,soundstatus)
shocks.add(SHOCKWAVE(self.shockpos,False,True))
if(self.shockpos < 250):
dude.DEATH()
elif(self.shockpos > 400):
self.damage(40)
if(self.atktmr%5 == 0):
SHIELDS.add(EXPLOSION((self.pos[0],self.pos[1]+128),
50,0,0,GREEN))
self.atktmr+=1
if(self.atktmr == 260):
self.atktmr = 0
self.attacktimer = 0
self.imgtype = 2
screen.fill(GREEN)
self.paused = False
if not self.die:
if(self.bob > 20):
self.bobval = -1
elif(self.bob < -20):
self.bobval = 1
self.bob+=self.bobval
#TRANSFORMING SEQUENCE
if self.transforming:
self.bob = 0
global RED
if self.alphasz > 150:
self.alphasz-=1
RED = (self.alphasz,0,0)
if(self.transformtimer == 0):
mixer.music.fadeout(600)
if((self.transformtimer == 60)or
(self.transformtimer == 100)or
(self.transformtimer == 160)or
(self.transformtimer == 180)or
(self.transformtimer == 200)or
(self.transformtimer == 230)):
fxplay(bomb1,soundstatus)
screen.fill(WHITE)
bar.FUXXOR()
if(self.transformtimer > 270):
fxplay(transform,soundstatus)
fxplay(roar,soundstatus)
self.imgtype = 1
if(self.transformtimer > 300):
mixer.music.stop()
if(checkfile("252492_The_Hate_Patrol_With_Army_.mp3")):
musiclode("252492_The_Hate_Patrol_With_Army_.mp3")
musicplay(musicstatus)
self.spawninterval = 100
BLASTS.add(EXPLOSION((self.pos[0]+128,self.pos[1]+128),
200,0,1,GREEN))
self.transforming = False
self.paused = False
self.firstform = False
self.transformtimer+=1
#ANIMATIONS
if self.flash:
screen.fill(WHITE)
self.flash = False
if(self.frameswitch == 0):
self.frameswitch = 6
#first idle animation: just the box
if(self.imgtype == 0):
self.image = self.imgs[0]
#transformation animation
elif(self.imgtype == 1):
if(self.frame < 6):
self.frame+=1
self.image = self.imgs[2][self.frame]
else:
self.imgtype = 2
self.frame = 0
#second idle animation
elif(self.imgtype == 2):
if(self.frame < 2):
self.frame+=1
else: self.frame = 0
self.image = self.imgs[3][self.frame]
#first attack
elif(self.imgtype == 3):
if(self.frame < 4):
self.frame+=1
self.image = self.imgs[4][self.frame]
#second attack
elif(self.imgtype == 4):
if(self.frame > 2):
self.frame = 0
if(self.frame < 2):
self.frame+=1
self.image = self.imgs[5][self.frame]
#third attack
elif(self.imgtype == 5):
if(self.frame < 4):
self.frame+=1
self.image = self.imgs[6][self.frame]
#DEATH
elif(self.imgtype == 6):
self.image = self.imgs[1]
else:
self.frameswitch -= 1
#OH FUCK
if(self.imgtype > 0):
if not self.fuck:
bar.FUXXOR()
self.fuck = True
#SPECIAL EFFECTS
if self.flicker:
self.flicker = False
self.image.set_alpha(self.alphasz)
if(self.imgtype >= 0) and not self.die:
if((self.frameswitch < 6) and (self.fuck==True)):
self.pos2 = ((self.pos[0]+random.randrange(-10,10)),
(self.pos[1]+random.randrange(-10,10)+self.bob))
screen.blit(self.image,self.pos2,None,BLEND_MIN)
else:
screen.blit(self.image,(self.pos[0],self.pos[1]+self.bob))
#DEATH SEQUENCE
if(self.dying) and not self.die:
self.deathsequence+=1
if(self.deathsequence%100 == 0):
self.flash = True
fxplay(bigcharge,soundstatus)
if(self.deathsequence%5 == 0) or (self.deathsequence == 0):
#fxplay()
BLASTS.add(EXPLOSION((self.pos[0]+128,self.pos[1]+128),
50,0,0,GREEN))
draw.circle(screen,GREEN,
(self.pos[0]+128,self.pos[1]+128),40)
if(self.deathsequence%40 == 0):
BLASTS.add(EXPLOSION((self.pos[0]+128,self.pos[1]+128),
200,0,0,GREEN))
if(self.deathsequence == 400):
if self.summonok:
self.summonok = False
littleshit.add(LILHELPER())
self.deathsequence = 0
if(self.die):
screen.blit(self.image,self.pos)
if(self.deathsequence > 100) and (self.deathsequence%2 == 0):
self.deathx+=1
if(self.deathsequence > 100):
self.pos = (self.pos[0]-self.deathx,self.pos[1])
self.deathsequence+=1
if(self.pos[0] < -400):
global bossdead
bossdead = True
self.kill()
def switch(self,type):
self.imgtype = type
def makechoice(self):
if(self.imgtype > 0):
i = random.randrange(0,3)
else:
i = random.randrange(4,7)
if(i == 5):
fxplay(invis,soundstatus)
return i
def damage(self,amount):
if(self.firstform == True):
if(self.HP-amount < 0):
self.hpbar1 = pygame.Surface((0,16))
self.paused = True
self.transforming = True
self.atktmr = 0
self.attacktimer = 0
self.attacktimer2 = 0
#reset values
self.flicker = True
self.alphasz = 255
self.pos = (400,100)
screen.fill(WHITE)
self.HP = 100
else:
self.HP-=amount
self.hpbar1 = pygame.Surface((self.HP * 6,16))
self.hpbar1.fill(GREEN)
else:
if(self.HP-amount < 0):
self.hpbar2 = pygame.Surface((0,20))
self.dying = True
self.imgtype = 4
self.paused = True
self.atktmr = 0
self.attacktimer = 0
self.attacktimer2 = 0
# Kill EVERYTHING
for wall in walmart:
wall.kill()
for wall in blastmart:
wall.kill()
for shit in powermart:
shit.kill()
for wall in spinmart:
wall.kill()
fxplay(slowcharge,soundstatus)
self.HP = 0
self.hpbar2 = pygame.Surface((self.HP * 6,16))
self.hpbar2.fill((255,0,0))
else:
self.HP-=amount
self.hpbar2 = pygame.Surface((self.HP * 6,16))
self.hpbar2.fill((255,0,0))
screen.blit(self.hpbarflash,(20,20))
def DIEBOX(self):
global FPS
FPS = 1
global RED
for wave in waves:
wave.kill()
RED = (255,0,0)
self.imgtype = 6
mixer.music.fadeout(600)
self.flash = True
self.die = True
self.paused = True
self.atktmr = 0
self.attacktimer = 0
self.attacktimer2 = 0
swords.add(SWORDOFLIGHT(self.pos[0]-50,80,0))
self.deathsequence = 0
bawss = pygame.sprite.Group()
class SWORDOFLIGHT(pygame.sprite.Sprite):
def __init__(self,xpos,ypos=100,type=1):
pygame.sprite.Sprite.__init__(self)
self.type = type
if(self.type == 0):
self.img = imglode("sword.bmp")
self.img = imgscale(self.img,6)
elif(self.type == 1):
self.img = imglode("sword2.bmp")
self.img = imgscale(self.img,2)
self.timer = 0
self.pos = (xpos,ypos)
def update(self):
if(self.timer == 0):
fxplay(bladekill,soundstatus)
if(self.type == 0):
screen.fill(WHITE)
mixer.music.fadeout(600)
shocks.add(SHOCKWAVE(self.pos[0]+80))
if(self.timer < 100):
if(self.type == 1):
if(self.timer%3 == 0):
self.pos = (self.pos[0]-1,self.pos[1])
screen.blit(self.img,self.pos)
elif(self.type == 0):
screen.blit(self.img,self.pos)
else:
if(self.type == 0):
for ypos in (0,20,40,60,80,100):
DOTZ.add(DOTS((self.pos[0]+80,ypos+200)))
self.kill()
self.timer+=1
swords = pygame.sprite.Group()
class LILHELPER(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.img = pygame.Surface((40,40))
self.img2 = pygame.Surface((10,10))
self.img.fill(WHITE)
self.img2.fill(BLACK)
Bigtex = texty("REDENSEK.TTF",50)
self.question = Bigtex.render("?",0,WHITE)
self.lol = Bigtex.render(":)",0,WHITE)
self.pos = (700,300)
self.timer = 0
self.xoffset = 0
def update(self):
if(self.xoffset > 0):
self.xoffset-=1
if(self.pos[0] > 400):
self.pos = (self.pos[0]-1,self.pos[1])
else:
self.timer+=1
if(self.timer == 50):
fxplay(goof, soundstatus)
if(self.timer < 200) and (self.timer > 50):
screen.blit(self.question,
(self.pos[0]+10,self.pos[1]-50))
if(self.timer == 200):
powermart.add(POWERSWORD(self.pos[0]))
if(self.timer > 250) and (self.timer < 400):
screen.blit(self.lol,
(self.pos[0]+10,self.pos[1]-50))
if(self.timer > 400):
if(self.pos[1] > -50):
self.pos = (self.pos[0],self.pos[1]-20)
else:
self.kill()
screen.blit(self.img,(self.pos[0]+self.xoffset,
self.pos[1]))
screen.blit(self.img2,(self.pos[0]-15+self.xoffset,
self.pos[1]+25))
screen.blit(self.img2,(self.pos[0]-15+self.xoffset,
self.pos[1]+5))
def shake(self):
self.xoffset = 10
littleshit = pygame.sprite.Group()
class POWERSWORD(pygame.sprite.Sprite):
def __init__(self,xpos=640):
pygame.sprite.Sprite.__init__(self)
self.imgs = imglode("powerup.bmp")
self.imgs = imgscale(self.imgs,3)
self.imgs = getsub(self.imgs,3*16)
self.frame = 0
self.pos = (xpos,300)
def update(self,spd=0):
if(self.pos[0] > 100):
self.pos = (self.pos[0]-5, self.pos[1])
else:
self.collect()
screen.blit(self.imgs[self.frame],self.pos)
if(self.frame == 3):
self.frame = 0
else: self.frame += 1
def collect(self):
fxplay(powerup,soundstatus)
screen.fill(WHITE)
# declare the global up front; reading the name before the global
# statement is a SyntaxWarning in Python 2
global superpowered
if not superpowered:
    superpowered = True
elif(len(bawss) > 0):
dude.ultrapowered = True
self.collected = True
self.kill()
class PLAYER:
def __init__(self):
self.imgs = imglode("Kobold.bmp")
self.imgs = imgscale(self.imgs, 2)
self.imgs = getsub(self.imgs, 48*2)
self.imgs2 = imglode("Kobold2.bmp")
self.imgs2 = imgscale(self.imgs2, 2)
self.imgs2 = getsub(self.imgs2, 48*2)
Texty = texty("REDENSEK.TTF",60)
self.imglength = len(self.imgs)
self.changeok = 0
self.frame = 0
self.ready = 0
self.delay = 0
self.ultrapowered = False
self.ultratimer = 0
#HEALTH SHIT
self.HP = 90
self.shieldflash = 0
self.shieldpos = (100,400)
self.breakshield = True
self.exclaim = Texty.render("!",0,WHITE)
self.warning = Texty.render("WARNING LOW SHIELD",0,WHITE)
self.SHIFT = Texty.render("[SHIFT]!",0,WHITE)
self.beepok = True
self.beeptimer = 0
#Death animation
self.alive = True
self.deathtimer = 200
def update(self,ypos):
global FPS
if self.alive:
if self.ultrapowered:
if(self.ultratimer == 1):
fxplay(attention,soundstatus)
if(self.ultratimer < 10):
screen.blit(self.SHIFT,(320,200))
elif(self.ultratimer > 10):
self.ultratimer = 0
self.ultratimer+=1
self.delay+=1
if (self.ready == 0):
self.changeok = 0
else:
self.ready-=1
if(self.delay == 3):
self.delay = 0
if self.frame < self.imglength-2:
self.frame += 1
else:
self.frame = 0
self.pos = (50,ypos-90)
if(self.changeok == 0):
screen.blit(self.imgs[self.frame],self.pos)
else:
screen.blit(self.imgs2[self.frame],self.pos)
#HP SHIT
if(self.HP == 60):
self.shieldflash+=1
if(self.shieldflash == 60):
self.shieldflash = 0
SHIELDS.add(EXPLOSION(self.shieldpos,50,0,0,BLACK))
elif(self.HP == 30):
self.shieldflash+=1
if(self.shieldflash == 30):
self.shieldflash = 0
SHIELDS.add(EXPLOSION(self.shieldpos,50,0,0,BLACK))
elif(self.HP == 0):
if self.beepok:
if((self.beeptimer == 0)or
(self.beeptimer == 5)or
(self.beeptimer == 10) or
(self.beeptimer == 15)):
screen.blit(self.warning,(50,400))
fxplay(attention,soundstatus)
self.beeptimer+=1
if(self.beeptimer > 15):
self.beepok = False
self.beeptimer = 0
if self.breakshield:
self.breakshield = False
SHIELDS.add(EXPLOSION(self.shieldpos,200,0,1,BLACK))
for xpos in (0,50,100,150):
DOTZ.add(DOTS((xpos,420)))
else:
self.shieldflash+=1
if (self.shieldflash > 15):
screen.blit(self.exclaim,(110,290))
if (self.shieldflash == 30):
self.shieldflash = 0
else:
if(self.deathtimer > 0):
self.deathtimer-=1
else:
remote.RESETLEVEL()
if(FPS < 60):
FPS+=1
def STRIKU(self):
if superpowered:
SHIELDS.add(EXPLOSION((self.pos[0]+90,self.pos[1]+40),
10,0,0,BLACK))
self.ready = 50
self.changeok = 1
def DAMAGE(self):
global FPS
FPS=FPS/3
SHIELDS.add(EXPLOSION(self.shieldpos,100,0,1,BLACK))
fxplay(damage,soundstatus)
self.shieldflash = 0
if(self.HP > 0):
self.HP-=30
else: self.DEATH()
if(self.HP == 0):
self.beepok = True
def HEAL(self):
if(self.HP < 90):
self.HP+=30
def DEATH(self):
if invincible:
self.HP = 90
self.breakshield = True
self.shieldflash = 0
else:
global FPS
FPS = 60
screen.fill(WHITE)
SHIELDS.add(EXPLOSION(self.shieldpos,100,0,1,BLACK))
fxplay(die, soundstatus)
mixer.music.pause()
i = 1
while(i < 8):
i+=1
DOTZ.add(DOTS(self.shieldpos))
self.alive = False
mixer.music.fadeout(600)
dude = PLAYER()
#Ease inputval toward inputgoal by coreval per call; snap to the goal once
#the value is within tollerance.
def correct(inputval,inputgoal,coreval,tollerance=2):
if (inputval < inputgoal-tollerance):
return inputval + coreval
elif (inputval > inputgoal+tollerance):
return inputval - coreval
else:
return inputgoal
class DOTS(pygame.sprite.Sprite):
def __init__(self,pos,xspd = 20,yspd = 20):
pygame.sprite.Sprite.__init__(self)
self.pos = pos
self.color = (0,0,0)
self.xspd = random.randrange(xspd/2,xspd)
self.yspd = random.randrange(-yspd,yspd)
self.image = pygame.Surface((20,20))
def update(self):
self.image.fill(self.color)
self.pos = (self.pos[0]+self.xspd,self.pos[1]+self.yspd)
self.xspd-=3
if (self.color[0]>=230):
self.kill()
else:
self.color = (self.color[0]+10,self.color[1],self.color[2])
screen.blit(self.image,self.pos)
class EXPLOSION(pygame.sprite.Sprite):
def __init__(self,pos,size,pain=0,shakeok=1,color=WHITE):
pygame.sprite.Sprite.__init__(self)
self.size = size
self.pos = pos
self.rad = 0
self.pain = pain
self.rect = pygame.rect.Rect((pos[0]-size,400),(size*2,2))
if shakeok:
bar.FUXXOR(40)
self.color = color
def update(self):
pygame.draw.circle(screen,self.color,self.pos,self.size)
pygame.draw.circle(screen,RED,self.pos,self.rad)
self.rect = pygame.rect.Rect((self.pos[0]-self.size,400),
(self.size*2,2))
if(self.rad <= self.size):
self.rad += 15
self.size+= 10
else:
self.kill()
class FLOOR:
def __init__(self):
self.ypos = 420
self.correcter = 1
self.imagio = pygame.surface.Surface((WINSIZE[0],70))
self.imagio.fill(BLACK)
def update(self):
self.ypos = correct(self.ypos,420,self.correcter)
screen.blit(self.imagio, (0,self.ypos))
if (self.correcter > 1):
self.correcter-=1
def FUXXOR(self,correction = 20):
self.ypos = 410
self.correcter = correction
bar = FLOOR()
class WARNING:
def __init__(self):
self.timer = 0
self.warning = imglode("warning.bmp")
self.warning = imgscale(self.warning,2)
self.playok = True
self.playcount = 0
self.longenough = 0
def update(self):
if(self.longenough == 0):
if not self.playok:
self.playok = True
else:
self.longenough -= 1
if self.playok:
if(self.timer%20 == 0) and (self.timer > 0):
fxplay(siren,soundstatus)
if(self.timer > 55):
screen.blit(self.warning,(550,200))
elif(self.timer < 40) and (self.timer > 35):
screen.blit(self.warning,(550,200))
elif(self.timer < 20) and (self.timer > 15):
screen.blit(self.warning,(550,200))
self.longenough = 600
self.playok = False
if(self.timer > 0):
self.timer-=1
def sound(self):
self.longenough = 600
self.timer = 60
def RESET(self):
self.longenough = 600
self.timer = 0
self.playok = True
alarm = WARNING()
class LEVELCONTROL:
def __init__(self):
self.SubTimer = 0
self.MasterTimer = 0
#=========================
self.curlevel = startlevel
#=========================
self.level = loadlevel('LEVEL'+str(self.curlevel))
self.position = 0
self.active = 0
self.multi = False
self.multimer = 0
self.wait = 100
#TEXT STUFF
Texty = texty("REDENSEK.TTF",40)
self.text = Texty.render(self.level[0],0,RED)
self.textcount = 0
self.textshow = True
if(checkfile(self.level[3])):
global currentsong
currentsong = self.level[3]
self.init = (300,50)
Texty = texty("REDENSEK.TTF",60)
self.ready1 = Texty.render("3",0,BLACK)
self.ready2 = Texty.render("2",0,BLACK)
self.ready3 = Texty.render("1",0,BLACK)
self.ready4 = Texty.render("GO!",0,BLACK)
self.endgame = False
self.resettinglevel = False
self.resetmusic = True
def update(self):
if self.active:
self.MasterTimer+=1
self.position = 0
for event in self.level[1]:
if(self.MasterTimer == event):
addin = self.level[2][self.position]
if((addin=='w') or (addin=='W')):
walmart.add(WALL(640,1))
elif((addin=='b') or (addin=='B')):
blastmart.add(BLASTWALL(1))
elif((addin=='g') or (addin=='G')):
mixer.music.fadeout(600)
bawls.add(GENERATOR())
self.active = 0
elif((addin=='3w') or (addin=='3W')):
self.active = 0
self.multi = True
elif((addin=='k') or (addin=='K')):
walmart.add(WALL(640,1,1))
elif((addin=='x') or (addin=='X')):
fxplay(genspawn,soundstatus)
GENES.add(MINIGEN())
elif((addin=='f') or (addin=='F')):
bawss.add(BOSS())
elif((addin=='p') or (addin=='P')):
powermart.add(POWERBALL())
elif((addin=='s') or (addin=='S')):
powermart.add(POWERSWORD())
self.position += 1
if self.textshow:
self.textcount+=1
if((self.textcount > 20)and
(self.textcount < 60)):
screen.blit(self.ready1,self.init)
if((self.textcount > 60)and
(self.textcount < 100)):
screen.blit(self.ready2,self.init)
if((self.textcount > 100)and
(self.textcount < 140)):
screen.blit(self.ready3,self.init)
if((self.textcount > 140)and
(self.textcount < 180)):
screen.blit(self.ready4,self.init)
#SOUNDS
if((self.textcount == 20)or
(self.textcount == 60)or
(self.textcount == 100)):
fxplay(ready1,soundstatus)
if(self.textcount == 140):
fxplay(ready2rock,soundstatus)
if self.resetmusic:
mixer.music.stop()
musiclode(self.level[3])
self.resetmusic = False
musicplay(musicstatus)
if(self.textcount < 100):
screen.blit(self.text,(10,425))
elif(self.textcount > 140):
self.active = 1
self.textcount = 0
self.textshow = 0
if self.multi:
if(self.multimer == 1):
walmart.add(WALL(640,1))
elif(self.multimer == 15):
walmart.add(WALL(640,1))
elif(self.multimer == 30):
walmart.add(WALL(640,1))
self.multimer = 0
self.multi = False
self.active = 1
self.multimer+=1
def RESET(self,switch=False):
if switch:
self.curlevel += 1
print "LEVEL"+str(self.curlevel)+".txt"
#CHECK IF THE NEXT LEVEL FILE EXISTS
if checkfile("LEVEL"+str(self.curlevel)+".txt","levels"):
self.level = loadlevel('LEVEL'+str(self.curlevel))
self.wait = 100
self.SubTimer = 0
self.MasterTimer = 0
self.position = 0
self.active = 0
#TEXT STUFF
Texty = texty("REDENSEK.TTF",40)
self.text = Texty.render(self.level[0],0,RED)
self.textcount = 0
self.textshow = True
self.multimer = 0
self.multi = False
#MUSIC SHIT
if(checkfile(self.level[3])):
global currentsong
if(currentsong != self.level[3]):
mixer.music.fadeout(600)
self.resetmusic = True
currentsong = self.level[3]
else:
print "NO NEXT LEVEL"
self.active = 0
self.endgame = True
else:
self.endgame = False
self.curlevel = startlevel
self.level = loadlevel('LEVEL'+str(self.curlevel))
self.wait = 100
self.SubTimer = 0
self.MasterTimer = 0
self.position = 0
self.active = 0
self.resetmusic = True
global bossdead
bossdead = False
#TEXT STUFF
Texty = texty("REDENSEK.TTF",40)
self.text = Texty.render(self.level[0],0,RED)
self.textcount = 0
self.textshow = True
self.multimer = 0
self.multi = False
def RESETLEVEL(self):
if not dude.alive:
dude.alive = True
dude.HP = 90
dude.deathtimer = 200
self.resettinglevel = True
self.resetmusic = True
self.wait = 100
self.SubTimer = 0
self.MasterTimer = 0
self.position = 0
self.active = 0
alarm.playcount = 0
alarm.longenough = 0
global RED
RED = (255,0,0)
for l in lazor:
l.kill()
#TEXT STUFF
Texty = texty("REDENSEK.TTF",40)
self.text = Texty.render(self.level[0],0,RED)
self.textcount = 0
self.textshow = True
#CLEANUP
for g in bawls:
g.kill()
for b in bawss:
b.kill()
remote = LEVELCONTROL()
class TEXT:
def __init__(self,textname,color,goal,decay=0.5,side="RIGHT",size=20):
Texty = texty("REDENSEK.TTF",size)
self.newtext = Texty.render(textname,0,color)
self.size = size
self.color = color
if(side == "LEFT"):
self.pos = (-100,goal[1])
self.initializer = 1
self.move = 20
elif(side == "RIGHT"):
self.pos = (640,goal[1])
self.initializer = 1
self.move = 20
self.goal = goal[0]
self.dk = decay
def update(self):
if(self.pos[0] != self.goal):
if(self.move > 1):
self.move -= self.dk
self.pos = (correct(self.pos[0],self.goal,self.move),self.pos[1])
screen.blit(self.newtext,self.pos)
def move(self,newvalue,move=5,decay=0.5):
    # NOTE: the numeric self.move assigned in __init__ shadows this method
    # on instances, so it is only reachable through the class itself
    self.dk = decay
    if(newvalue != 0):
        self.goal = newvalue
        self.move = move
def changetext(self,newtext):
Texty = texty("REDENSEK.TTF",self.size)
self.newtext = Texty.render(newtext,0,self.color)
class FADED:
def __init__(self,option=1,color=BLACK):
if(option == 1):
self.alphasz = 255
else: self.alphasz = 0
self.option = option
self.surfnturf = pygame.Surface((640,480))
self.color = color
def update(self):
screen.blit(self.surfnturf,(0,0))
if(self.option == 1):
if(self.alphasz > 0):
self.alphasz -= 1
else:
if(self.alphasz < 255):
self.alphasz += 1
self.surfnturf.fill(self.color)
self.surfnturf.set_alpha(self.alphasz)
def Starpos():
return (WINSIZE[0], random.randrange(0,WINSIZE[1]-60))
Maine()
|
iPatso/PyGameProjs
|
PYex/RunnerEx/main.py
|
Python
|
apache-2.0
| 106750
|
[
"BLAST"
] |
345c065b176f67fa1cbed66c4da826b27a20616397a984d5c661dfb6e754f88c
|
""" This is the admin interface to the LFC. It exposes the functionality not available through the standard
LcgFileCatalogCombinedClient """
from DIRAC.Resources.Catalog.LcgFileCatalogCombinedClient import LcgFileCatalogCombinedClient
class LcgFileCatalogAdminClient(LcgFileCatalogCombinedClient):
LcgFileCatalogCombinedClient.ro_methods.extend(['getUserDirectory'])
LcgFileCatalogCombinedClient.write_methods.extend(['createUserDirectory', 'changeDirectoryOwner',
'createUserMapping', 'removeUserDirectory'])
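# Usage sketch (the argument names are assumptions, not part of this module):
# extending the parent's ro_methods/write_methods whitelists is what lets
# the combined client dispatch these extra admin calls, e.g.
#   admin = LcgFileCatalogAdminClient()
#   res = admin.getUserDirectory(username)      # read-only method
#   res = admin.removeUserDirectory(username)   # write method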
|
Sbalbp/DIRAC
|
Resources/Catalog/LcgFileCatalogAdminClient.py
|
Python
|
gpl-3.0
| 578
|
[
"DIRAC"
] |
dffc1438cba4775c6f6b19d8bd9a71743238981c2fc257fc99564cd123b5db3c
|
# -*- coding: utf-8 -*-
## src/disco.py
##
## Copyright (C) 2005-2006 Stéphan Kochen <stephan AT kochen.nl>
## Copyright (C) 2005-2007 Nikos Kouremenos <kourem AT gmail.com>
## Copyright (C) 2005-2008 Yann Leboulanger <asterix AT lagaule.org>
## Copyright (C) 2006 Dimitur Kirov <dkirov AT gmail.com>
## Copyright (C) 2006-2008 Jean-Marie Traissard <jim AT lapin.org>
## Copyright (C) 2007 Stephan Erb <steve-e AT h3c.de>
##
## This file is part of Gajim.
##
## Gajim is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 3 only.
##
## Gajim is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Gajim. If not, see <http://www.gnu.org/licenses/>.
##
# The appearance of the treeview, and parts of the dialog, are controlled by
# AgentBrowser (sub-)classes. Methods that probably should be overridden when
# subclassing are: (look at the docstrings and source for additional info)
# - def cleanup(self) *
# - def _create_treemodel(self) *
# - def _add_actions(self)
# - def _clean_actions(self)
# - def update_theme(self) *
# - def update_actions(self)
# - def default_action(self)
# - def _find_item(self, jid, node)
# - def _add_item(self, jid, node, item, force)
# - def _update_item(self, iter, jid, node, item)
# - def _update_info(self, iter, jid, node, identities, features, data)
# - def _update_error(self, iter, jid, node)
#
# * Should call the super class for this method.
# All others do not have to call back to the super class (but can, if they
# want the base functionality).
# There are more methods, of course, but this is a basic set.
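# A minimal subclass sketch of the convention above (hypothetical names,
# shown for orientation only; starred methods chain up to the base class):
#
#   class ExampleBrowser(AgentBrowser):
#       def update_theme(self):
#           AgentBrowser.update_theme(self)  # starred method: chain up
#           # ...then apply theming for any extra columns
#       def update_actions(self):
#           pass  # unstarred method: chaining up is optional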
import os
import inspect
import weakref
import gobject
import gtk
import pango
import dialogs
import tooltips
import gtkgui_helpers
import groups
import adhoc_commands
import search_window
from common import gajim
from common import xmpp
from common.exceptions import GajimGeneralException
from common import helpers
# Dictionary mapping category, type pairs to browser class, image pairs.
# This is a function, so we can call it after the classes are declared.
# For the browser class, None means that the service will only be browsable
# when it advertises disco as its feature; False means it's never browsable.
def _gen_agent_type_info():
return {
# Defaults
(0, 0): (None, None),
# Jabber server
('server', 'im'): (ToplevelAgentBrowser, 'jabber.png'),
('services', 'jabber'): (ToplevelAgentBrowser, 'jabber.png'),
('hierarchy', 'branch'): (AgentBrowser, 'jabber.png'),
# Services
('conference', 'text'): (MucBrowser, 'conference.png'),
('headline', 'rss'): (AgentBrowser, 'rss.png'),
('headline', 'weather'): (False, 'weather.png'),
('gateway', 'weather'): (False, 'weather.png'),
('_jid', 'weather'): (False, 'weather.png'),
('gateway', 'sip'): (False, 'sip.png'),
('directory', 'user'): (None, 'jud.png'),
('pubsub', 'generic'): (PubSubBrowser, 'pubsub.png'),
('pubsub', 'service'): (PubSubBrowser, 'pubsub.png'),
('proxy', 'bytestreams'): (None, 'bytestreams.png'), # Socks5 FT proxy
('headline', 'newmail'): (ToplevelAgentBrowser, 'mail.png'),
# Transports
('conference', 'irc'): (ToplevelAgentBrowser, 'irc.png'),
('_jid', 'irc'): (False, 'irc.png'),
('gateway', 'aim'): (False, 'aim.png'),
('_jid', 'aim'): (False, 'aim.png'),
('gateway', 'gadu-gadu'): (False, 'gadu-gadu.png'),
('_jid', 'gadugadu'): (False, 'gadu-gadu.png'),
('gateway', 'http-ws'): (False, 'http-ws.png'),
('gateway', 'icq'): (False, 'icq.png'),
('_jid', 'icq'): (False, 'icq.png'),
('gateway', 'msn'): (False, 'msn.png'),
('_jid', 'msn'): (False, 'msn.png'),
('gateway', 'sms'): (False, 'sms.png'),
('_jid', 'sms'): (False, 'sms.png'),
('gateway', 'smtp'): (False, 'mail.png'),
('gateway', 'yahoo'): (False, 'yahoo.png'),
('_jid', 'yahoo'): (False, 'yahoo.png'),
('gateway', 'mrim'): (False, 'mrim.png'),
('_jid', 'mrim'): (False, 'mrim.png'),
}
# Category type to "human-readable" description string, and sort priority
_cat_to_descr = {
'other': (_('Others'), 2),
'gateway': (_('Transports'), 0),
'_jid': (_('Transports'), 0),
# conference is a category for listing mostly groupchats in service discovery
'conference': (_('Conference'), 1),
}
class CacheDictionary:
'''A dictionary that keeps items around for only a specific time.
Lifetime is in minutes. Getrefresh specifies whether to refresh the
timeout when an item is merely accessed as well, instead of only when
it is set.'''
def __init__(self, lifetime, getrefresh = True):
self.lifetime = lifetime * 1000 * 60
self.getrefresh = getrefresh
self.cache = {}
class CacheItem:
'''An object to store cache items and their timeouts.'''
def __init__(self, value):
self.value = value
self.source = None
def __call__(self):
return self.value
def cleanup(self):
for key in self.cache.keys():
item = self.cache[key]
if item.source:
gobject.source_remove(item.source)
del self.cache[key]
def _expire_timeout(self, key):
'''The timeout has expired, remove the object.'''
if key in self.cache:
del self.cache[key]
return False
def _refresh_timeout(self, key):
'''The object was accessed, refresh the timeout.'''
item = self.cache[key]
if item.source:
gobject.source_remove(item.source)
if self.lifetime:
source = gobject.timeout_add_seconds(self.lifetime/1000, self._expire_timeout, key)
item.source = source
def __getitem__(self, key):
item = self.cache[key]
if self.getrefresh:
self._refresh_timeout(key)
return item()
def __setitem__(self, key, value):
item = self.CacheItem(value)
self.cache[key] = item
self._refresh_timeout(key)
def __delitem__(self, key):
item = self.cache[key]
if item.source:
gobject.source_remove(item.source)
del self.cache[key]
def __contains__(self, key):
return key in self.cache
has_key = __contains__
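# Usage sketch (illustrative values only): entries die `lifetime` minutes
# after they are set; a lifetime of 0 disables expiry entirely. Reads
# refresh the timer only when getrefresh is True; membership tests never do.
#   cache = CacheDictionary(15)   # 15 minute lifetime
#   cache['x'] = some_value       # stored, expiry timer (re)armed
#   value = cache['x']            # read refreshes the timer (getrefresh True)
#   'x' in cache                  # no refresh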
_icon_cache = CacheDictionary(15)
def get_agent_address(jid, node = None):
'''Returns an agent's address for displaying in the GUI.'''
if node:
return '%s@%s' % (node, str(jid))
else:
return str(jid)
class Closure(object):
'''A weak reference to a callback with arguments as an object.
Weak references to methods immediately die, even if the object is still
alive. Besides being a handy way to store a callback, this provides a
workaround that keeps a reference to the object instead.
Userargs and removeargs must be tuples.'''
def __init__(self, cb, userargs = (), remove = None, removeargs = ()):
self.userargs = userargs
self.remove = remove
self.removeargs = removeargs
if inspect.ismethod(cb):
self.meth_self = weakref.ref(cb.im_self, self._remove)
self.meth_name = cb.func_name
elif callable(cb):
self.meth_self = None
self.cb = weakref.ref(cb, self._remove)
else:
raise TypeError('Object is not callable')
def _remove(self, ref):
if self.remove:
self.remove(self, *self.removeargs)
def __call__(self, *args, **kwargs):
if self.meth_self:
obj = self.meth_self()
cb = getattr(obj, self.meth_name)
else:
cb = self.cb()
args = args + self.userargs
return cb(*args, **kwargs)
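# A minimal usage sketch for Closure (illustrative, not from the original
# source). The closure stays callable while 'obj' is alive, and 'on_dead'
# runs once the weakly-referenced object has been collected:
#
#     def on_dead(closure):
#         print 'callback owner died'
#     closure = Closure(obj.method, userargs = (42,), remove = on_dead)
#     closure('hello')   # calls obj.method('hello', 42)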
class ServicesCache:
'''Class that caches our query results. Each connection will have its own
ServicesCache instance.'''
def __init__(self, account):
self.account = account
self._items = CacheDictionary(0, getrefresh = False)
self._info = CacheDictionary(0, getrefresh = False)
self._subscriptions = CacheDictionary(5, getrefresh=False)
self._cbs = {}
def cleanup(self):
self._items.cleanup()
self._info.cleanup()
def _clean_closure(self, cb, type_, addr):
# A closure died, clean up
cbkey = (type_, addr)
try:
self._cbs[cbkey].remove(cb)
except KeyError:
return
except ValueError:
return
# Clean an empty list
if not self._cbs[cbkey]:
del self._cbs[cbkey]
def get_icon(self, identities = []):
'''Return the icon for an agent.'''
# Grab the first identity with an icon
for identity in identities:
try:
cat, type = identity['category'], identity['type']
info = _agent_type_info[(cat, type)]
except KeyError:
continue
filename = info[1]
if filename:
break
else:
# Loop fell through, default to unknown
cat = type = 0
info = _agent_type_info[(0, 0)]
filename = info[1]
if not filename: # we don't have an image to show for this type
return
# Use the cache if possible
if filename in _icon_cache:
return _icon_cache[filename]
# Or load it
filepath = os.path.join(gajim.DATA_DIR, 'pixmaps', 'agents', filename)
pix = gtk.gdk.pixbuf_new_from_file(filepath)
# Store in cache
_icon_cache[filename] = pix
return pix
def get_browser(self, identities=[], features=[]):
'''Return the browser class for an agent.'''
# First pass, we try to find a ToplevelAgentBrowser
for identity in identities:
try:
cat, type_ = identity['category'], identity['type']
info = _agent_type_info[(cat, type_)]
except KeyError:
continue
browser = info[0]
if browser and browser == ToplevelAgentBrowser:
return browser
# second pass, we haven't found a ToplevelAgentBrowser
for identity in identities:
try:
cat, type_ = identity['category'], identity['type']
info = _agent_type_info[(cat, type_)]
except KeyError:
continue
browser = info[0]
if browser:
return browser
# NS_BROWSE is deprecated, but we check for it anyway.
# Some services list it in features and respond to
# NS_DISCO_ITEMS anyway.
# Allow browsing for unknown types as well.
if (not features and not identities) or \
xmpp.NS_DISCO_ITEMS in features or xmpp.NS_BROWSE in features:
return AgentBrowser
return None
def get_info(self, jid, node, cb, force = False, nofetch = False, args = ()):
'''Get info for an agent.'''
addr = get_agent_address(jid, node)
# Check the cache (unless a refresh is forced)
if not force and addr in self._info:
args = self._info[addr] + args
cb(jid, node, *args)
return
if nofetch:
return
# Create a closure object
cbkey = ('info', addr)
cb = Closure(cb, userargs = args, remove = self._clean_closure,
removeargs = cbkey)
# Are we already fetching this?
if cbkey in self._cbs:
self._cbs[cbkey].append(cb)
else:
self._cbs[cbkey] = [cb]
gajim.connections[self.account].discoverInfo(jid, node)
def get_items(self, jid, node, cb, force = False, nofetch = False, args = ()):
'''Get a list of items in an agent.'''
addr = get_agent_address(jid, node)
# Check the cache (unless a refresh is forced)
if not force and addr in self._items:
args = (self._items[addr],) + args
cb(jid, node, *args)
return
if nofetch:
return
# Create a closure object
cbkey = ('items', addr)
cb = Closure(cb, userargs = args, remove = self._clean_closure,
removeargs = cbkey)
# Are we already fetching this?
if cbkey in self._cbs:
self._cbs[cbkey].append(cb)
else:
self._cbs[cbkey] = [cb]
gajim.connections[self.account].discoverItems(jid, node)
def agent_info(self, jid, node, identities, features, data):
'''Callback for when we receive an agent's info.'''
addr = get_agent_address(jid, node)
# Store in cache
self._info[addr] = (identities, features, data)
# Call callbacks
cbkey = ('info', addr)
if cbkey in self._cbs:
for cb in self._cbs[cbkey]:
cb(jid, node, identities, features, data)
# clean_closure may have beaten us to it
if cbkey in self._cbs:
del self._cbs[cbkey]
def agent_items(self, jid, node, items):
'''Callback for when we receive an agent's items.'''
addr = get_agent_address(jid, node)
# Store in cache
self._items[addr] = items
# Call callbacks
cbkey = ('items', addr)
if cbkey in self._cbs:
for cb in self._cbs[cbkey]:
cb(jid, node, items)
# clean_closure may have beaten us to it
if cbkey in self._cbs:
del self._cbs[cbkey]
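# Typical flow (illustrative): a browser calls get_info()/get_items() with a
# callback; if the answer is not cached yet, the query is sent and the
# callback is parked in self._cbs until agent_info()/agent_items() delivers
# the result. A sketch of a caller:
#
#     def _on_info(jid, node, identities, features, data):
#         pass  # update the UI here
#     cache.get_info('example.org', '', _on_info)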
def agent_info_error(self, jid):
'''Callback for when a query fails. (even after the browse and agents
namespaces)'''
addr = get_agent_address(jid)
# Call callbacks
cbkey = ('info', addr)
if cbkey in self._cbs:
for cb in self._cbs[cbkey]:
cb(jid, '', 0, 0, 0)
# clean_closure may have beaten us to it
if cbkey in self._cbs:
del self._cbs[cbkey]
def agent_items_error(self, jid):
'''Callback for when a query fails. (even after the browse and agents
namespaces)'''
addr = get_agent_address(jid)
# Call callbacks
cbkey = ('items', addr)
if cbkey in self._cbs:
for cb in self._cbs[cbkey]:
cb(jid, '', 0)
# clean_closure may have beaten us to it
if cbkey in self._cbs:
del self._cbs[cbkey]
# subclassing object is needed so that property() works (new-style class)
class ServiceDiscoveryWindow(object):
'''Class that represents the Services Discovery window.'''
def __init__(self, account, jid = '', node = '',
address_entry = False, parent = None):
self._account = account
self.parent = parent
if not jid:
jid = gajim.config.get_per('accounts', account, 'hostname')
node = ''
self.jid = None
self.browser = None
self.children = []
self.dying = False
# Check connection
if gajim.connections[account].connected < 2:
dialogs.ErrorDialog(_('You are not connected to the server'),
_('Without a connection, you can not browse available services'))
raise RuntimeError, 'You must be connected to browse services'
# Get a ServicesCache object.
try:
self.cache = gajim.connections[account].services_cache
except AttributeError:
self.cache = ServicesCache(account)
gajim.connections[account].services_cache = self.cache
self.xml = gtkgui_helpers.get_glade('service_discovery_window.glade')
self.window = self.xml.get_widget('service_discovery_window')
self.services_treeview = self.xml.get_widget('services_treeview')
self.model = None
# This is more reliable than the cursor-changed signal.
selection = self.services_treeview.get_selection()
selection.connect_after('changed',
self.on_services_treeview_selection_changed)
self.services_scrollwin = self.xml.get_widget('services_scrollwin')
self.progressbar = self.xml.get_widget('services_progressbar')
self.banner = self.xml.get_widget('banner_agent_label')
self.banner_icon = self.xml.get_widget('banner_agent_icon')
self.banner_eventbox = self.xml.get_widget('banner_agent_eventbox')
self.style_event_id = 0
self.banner.realize()
self.paint_banner()
self.action_buttonbox = self.xml.get_widget('action_buttonbox')
# Address combobox
self.address_comboboxentry = None
address_table = self.xml.get_widget('address_table')
if address_entry:
self.address_comboboxentry = self.xml.get_widget(
'address_comboboxentry')
self.address_comboboxentry_entry = self.address_comboboxentry.child
self.address_comboboxentry_entry.set_activates_default(True)
liststore = gtk.ListStore(str)
self.address_comboboxentry.set_model(liststore)
self.latest_addresses = gajim.config.get(
'latest_disco_addresses').split()
if jid in self.latest_addresses:
self.latest_addresses.remove(jid)
self.latest_addresses.insert(0, jid)
if len(self.latest_addresses) > 10:
self.latest_addresses = self.latest_addresses[0:10]
for j in self.latest_addresses:
self.address_comboboxentry.append_text(j)
self.address_comboboxentry.child.set_text(jid)
else:
# Don't show it at all if we didn't ask for it
address_table.set_no_show_all(True)
address_table.hide()
self._initial_state()
self.xml.signal_autoconnect(self)
self.travel(jid, node)
self.window.show_all()
def _get_account(self):
return self._account
def _set_account(self, value):
self._account = value
self.cache.account = value
if self.browser:
self.browser.account = value
account = property(_get_account, _set_account)
def _initial_state(self):
'''Set some initial state on the window. Separated in a method because
it's handy to use within browser's cleanup method.'''
self.progressbar.hide()
title_text = _('Service Discovery using account %s') % self.account
self.window.set_title(title_text)
self._set_window_banner_text(_('Service Discovery'))
self.banner_icon.clear()
self.banner_icon.hide() # Just clearing it doesn't work
def _set_window_banner_text(self, text, text_after = None):
theme = gajim.config.get('roster_theme')
bannerfont = gajim.config.get_per('themes', theme, 'bannerfont')
bannerfontattrs = gajim.config.get_per('themes', theme,
'bannerfontattrs')
if bannerfont:
font = pango.FontDescription(bannerfont)
else:
font = pango.FontDescription('Normal')
if bannerfontattrs:
# B attribute is set by default
if 'B' in bannerfontattrs:
font.set_weight(pango.WEIGHT_HEAVY)
if 'I' in bannerfontattrs:
font.set_style(pango.STYLE_ITALIC)
font_attrs = 'font_desc="%s"' % font.to_string()
font_size = font.get_size()
# in case no font size is specified, we use a large font size
if font_size == 0:
font_attrs = '%s size="large"' % font_attrs
markup = '<span %s>%s</span>' % (font_attrs, text)
if text_after:
font.set_weight(pango.WEIGHT_NORMAL)
markup = '%s\n<span font_desc="%s" size="small">%s</span>' % \
(markup, font.to_string(), text_after)
self.banner.set_markup(markup)
def paint_banner(self):
'''Repaint the banner with theme color'''
theme = gajim.config.get('roster_theme')
bgcolor = gajim.config.get_per('themes', theme, 'bannerbgcolor')
textcolor = gajim.config.get_per('themes', theme, 'bannertextcolor')
self.disconnect_style_event()
if bgcolor:
color = gtk.gdk.color_parse(bgcolor)
self.banner_eventbox.modify_bg(gtk.STATE_NORMAL, color)
default_bg = False
else:
default_bg = True
if textcolor:
color = gtk.gdk.color_parse(textcolor)
self.banner.modify_fg(gtk.STATE_NORMAL, color)
default_fg = False
else:
default_fg = True
if default_fg or default_bg:
self._on_style_set_event(self.banner, None, default_fg, default_bg)
if self.browser:
self.browser.update_theme()
def disconnect_style_event(self):
if self.style_event_id:
self.banner.disconnect(self.style_event_id)
self.style_event_id = 0
def connect_style_event(self, set_fg = False, set_bg = False):
self.disconnect_style_event()
self.style_event_id = self.banner.connect('style-set',
self._on_style_set_event, set_fg, set_bg)
def _on_style_set_event(self, widget, style, *opts):
''' set style of widget from style class *.Frame.Eventbox
opts[0] == True -> set fg color
opts[1] == True -> set bg color '''
self.disconnect_style_event()
if opts[1]:
bg_color = widget.style.bg[gtk.STATE_SELECTED]
self.banner_eventbox.modify_bg(gtk.STATE_NORMAL, bg_color)
if opts[0]:
fg_color = widget.style.fg[gtk.STATE_SELECTED]
self.banner.modify_fg(gtk.STATE_NORMAL, fg_color)
self.banner.ensure_style()
self.connect_style_event(opts[0], opts[1])
def destroy(self, chain = False):
'''Close the browser. This can optionally close its children and
propagate to the parent. This should happen on actions like register
or join, to kill off the entire browser chain.'''
if self.dying:
return
self.dying = True
# self.browser._get_agent_address() would break when no browser.
addr = get_agent_address(self.jid, self.node)
del gajim.interface.instances[self.account]['disco'][addr]
if self.browser:
self.window.hide()
self.browser.cleanup()
self.browser = None
self.window.destroy()
for child in self.children[:]:
child.parent = None
if chain:
child.destroy(chain = chain)
self.children.remove(child)
if self.parent:
if self in self.parent.children:
self.parent.children.remove(self)
if chain and not self.parent.children:
self.parent.destroy(chain = chain)
self.parent = None
else:
self.cache.cleanup()
def travel(self, jid, node):
'''Travel to an agent within the current services window.'''
if self.browser:
self.browser.cleanup()
self.browser = None
# Update the window list
if self.jid:
old_addr = get_agent_address(self.jid, self.node)
if old_addr in gajim.interface.instances[self.account]['disco']:
del gajim.interface.instances[self.account]['disco'][old_addr]
addr = get_agent_address(jid, node)
gajim.interface.instances[self.account]['disco'][addr] = self
# We need to store these, self.browser is not always available.
self.jid = jid
self.node = node
self.cache.get_info(jid, node, self._travel)
def _travel(self, jid, node, identities, features, data):
'''Continuation of travel.'''
if self.dying or jid != self.jid or node != self.node:
return
if not identities:
if not self.address_comboboxentry:
# We can't travel anywhere else.
self.destroy()
dialogs.ErrorDialog(_('The service could not be found'),
_('There is no service at the address you entered, or it is not responding. Check the address and try again.'))
return
klass = self.cache.get_browser(identities, features)
if klass is None:
# get_browser() found no matching browser class
dialogs.ErrorDialog(_('The service is not browsable'),
_('This type of service does not contain any items to browse.'))
return
self.browser = klass(self.account, jid, node)
self.browser.prepare_window(self)
self.browser.browse()
def open(self, jid, node):
'''Open an agent. By default, this happens in a new window.'''
try:
win = gajim.interface.instances[self.account]['disco']\
[get_agent_address(jid, node)]
win.window.present()
return
except KeyError:
pass
try:
win = ServiceDiscoveryWindow(self.account, jid, node, parent=self)
except RuntimeError:
# Disconnected, perhaps
return
self.children.append(win)
def on_service_discovery_window_destroy(self, widget):
self.destroy()
def on_close_button_clicked(self, widget):
self.destroy()
def on_address_comboboxentry_changed(self, widget):
if self.address_comboboxentry.get_active() != -1:
# user selected one of the entries so do auto-visit
jid = self.address_comboboxentry.child.get_text().decode('utf-8')
try:
jid = helpers.parse_jid(jid)
except helpers.InvalidFormat, s:
pritext = _('Invalid Server Name')
dialogs.ErrorDialog(pritext, str(s))
return
self.travel(jid, '')
def on_go_button_clicked(self, widget):
jid = self.address_comboboxentry.child.get_text().decode('utf-8')
try:
jid = helpers.parse_jid(jid)
except helpers.InvalidFormat, s:
pritext = _('Invalid Server Name')
dialogs.ErrorDialog(pritext, str(s))
return
if jid == self.jid: # jid has not changed
return
if jid in self.latest_addresses:
self.latest_addresses.remove(jid)
self.latest_addresses.insert(0, jid)
if len(self.latest_addresses) > 10:
self.latest_addresses = self.latest_addresses[0:10]
self.address_comboboxentry.get_model().clear()
for j in self.latest_addresses:
self.address_comboboxentry.append_text(j)
gajim.config.set('latest_disco_addresses',
' '.join(self.latest_addresses))
gajim.interface.save_config()
self.travel(jid, '')
def on_services_treeview_row_activated(self, widget, path, col = 0):
self.browser.default_action()
def on_services_treeview_selection_changed(self, widget):
self.browser.update_actions()
class AgentBrowser:
'''Class that deals with browsing agents and appearance of the browser
window. This class and subclasses should basically be treated as "part"
of the ServiceDiscoveryWindow class, but had to be separated because this part
is dynamic.'''
def __init__(self, account, jid, node):
self.account = account
self.jid = jid
self.node = node
self._total_items = 0
self.browse_button = None
# This is for some timeout callbacks
self.active = False
def _get_agent_address(self):
'''Returns the agent's address for displaying in the GUI.'''
return get_agent_address(self.jid, self.node)
def _set_initial_title(self):
'''Set the initial window title based on agent address.'''
self.window.window.set_title(_('Browsing %(address)s using account '
'%(account)s') % {'address': self._get_agent_address(),
'account': self.account})
self.window._set_window_banner_text(self._get_agent_address())
def _create_treemodel(self):
'''Create the treemodel for the services treeview. When subclassing,
note that the first two columns should ALWAYS be of type string and
contain the JID and node of the item respectively.'''
# JID, node, name, address
self.model = gtk.ListStore(str, str, str, str)
self.model.set_sort_column_id(3, gtk.SORT_ASCENDING)
self.window.services_treeview.set_model(self.model)
# Name column
col = gtk.TreeViewColumn(_('Name'))
renderer = gtk.CellRendererText()
col.pack_start(renderer)
col.set_attributes(renderer, text = 2)
self.window.services_treeview.insert_column(col, -1)
col.set_resizable(True)
# Address column
col = gtk.TreeViewColumn(_('JID'))
renderer = gtk.CellRendererText()
col.pack_start(renderer)
col.set_attributes(renderer, text = 3)
self.window.services_treeview.insert_column(col, -1)
col.set_resizable(True)
self.window.services_treeview.set_headers_visible(True)
def _clean_treemodel(self):
self.model.clear()
for col in self.window.services_treeview.get_columns():
self.window.services_treeview.remove_column(col)
self.window.services_treeview.set_headers_visible(False)
def _add_actions(self):
'''Add the action buttons to the buttonbox for actions the browser can
perform.'''
self.browse_button = gtk.Button()
image = gtk.image_new_from_stock(gtk.STOCK_OPEN, gtk.ICON_SIZE_BUTTON)
label = gtk.Label(_('_Browse'))
label.set_use_underline(True)
hbox = gtk.HBox()
hbox.pack_start(image, False, True, 6)
hbox.pack_end(label, True, True)
self.browse_button.add(hbox)
self.browse_button.connect('clicked', self.on_browse_button_clicked)
self.window.action_buttonbox.add(self.browse_button)
self.browse_button.show_all()
def _clean_actions(self):
'''Remove the action buttons specific to this browser.'''
if self.browse_button:
self.browse_button.destroy()
self.browse_button = None
def _set_title(self, jid, node, identities, features, data):
'''Set the window title based on agent info.'''
# Set the banner and window title
if 'name' in identities[0]:
name = identities[0]['name']
self.window._set_window_banner_text(self._get_agent_address(), name)
# Add an icon to the banner.
pix = self.cache.get_icon(identities)
self.window.banner_icon.set_from_pixbuf(pix)
self.window.banner_icon.show()
def _clean_title(self):
# Everything done here is done in window._initial_state
# This is for subclasses.
pass
def prepare_window(self, window):
'''Prepare the service discovery window. Called when a browser is hooked
up with a ServiceDiscoveryWindow instance.'''
self.window = window
self.cache = window.cache
self._set_initial_title()
self._create_treemodel()
self._add_actions()
# This is a hack. The buttonbox apparently doesn't care about pack_start
# or pack_end, so we repack the close button here to make sure it's last
close_button = self.window.xml.get_widget('close_button')
self.window.action_buttonbox.remove(close_button)
self.window.action_buttonbox.pack_end(close_button)
close_button.show_all()
self.update_actions()
self.active = True
self.cache.get_info(self.jid, self.node, self._set_title)
def cleanup(self):
'''Cleanup when the window intends to switch browsers.'''
self.active = False
self._clean_actions()
self._clean_treemodel()
self._clean_title()
self.window._initial_state()
def update_theme(self):
'''Called when the default theme is changed.'''
pass
def on_browse_button_clicked(self, widget = None):
'''When we want to browse an agent:
Open a new services window with a browser for the agent type.'''
model, iter = self.window.services_treeview.get_selection().get_selected()
if not iter:
return
jid = model[iter][0].decode('utf-8')
if jid:
node = model[iter][1].decode('utf-8')
self.window.open(jid, node)
def update_actions(self):
'''When we select a row:
activate action buttons based on the agent's info.'''
if self.browse_button:
self.browse_button.set_sensitive(False)
model, iter = self.window.services_treeview.get_selection().get_selected()
if not iter:
return
jid = model[iter][0].decode('utf-8')
node = model[iter][1].decode('utf-8')
if jid:
self.cache.get_info(jid, node, self._update_actions, nofetch = True)
def _update_actions(self, jid, node, identities, features, data):
'''Continuation of update_actions.'''
if not identities or not self.browse_button:
return
klass = self.cache.get_browser(identities, features)
if klass:
self.browse_button.set_sensitive(True)
def default_action(self):
'''When we double-click a row:
perform the default action on the selected item.'''
model, iter = self.window.services_treeview.get_selection().get_selected()
if not iter:
return
jid = model[iter][0].decode('utf-8')
node = model[iter][1].decode('utf-8')
if jid:
self.cache.get_info(jid, node, self._default_action, nofetch = True)
def _default_action(self, jid, node, identities, features, data):
'''Continuation of default_action.'''
if self.cache.get_browser(identities, features):
# Browse if we can
self.on_browse_button_clicked()
return True
return False
def browse(self, force = False):
'''Fill the treeview with agents, fetching the info if necessary.'''
self.model.clear()
self._total_items = self._progress = 0
self.window.progressbar.show()
self._pulse_timeout = gobject.timeout_add(250, self._pulse_timeout_cb)
self.cache.get_items(self.jid, self.node, self._agent_items,
force = force, args = (force,))
def _pulse_timeout_cb(self, *args):
'''Simple callback to keep the progressbar pulsing.'''
if not self.active:
return False
self.window.progressbar.pulse()
return True
def _find_item(self, jid, node):
'''Check if an item is already in the treeview. Return an iter to it
if so, None otherwise.'''
iter = self.model.get_iter_root()
while iter:
cjid = self.model.get_value(iter, 0).decode('utf-8')
cnode = self.model.get_value(iter, 1).decode('utf-8')
if jid == cjid and node == cnode:
break
iter = self.model.iter_next(iter)
if iter:
return iter
return None
def _agent_items(self, jid, node, items, force):
'''Callback for when we receive a list of agent items.'''
self.model.clear()
self._total_items = 0
gobject.source_remove(self._pulse_timeout)
self.window.progressbar.hide()
# The server returned an error
if items == 0:
if not self.window.address_comboboxentry:
# We can't travel anywhere else.
self.window.destroy()
dialogs.ErrorDialog(_('The service is not browsable'),
_('This service does not contain any items to browse.'))
return
# We got a list of items
self.window.services_treeview.set_model(None)
for item in items:
jid = item['jid']
node = item.get('node', '')
# If such an item is already here: don't add it
if self._find_item(jid, node):
continue
self._total_items += 1
self._add_item(jid, node, item, force)
self.window.services_treeview.set_model(self.model)
def _agent_info(self, jid, node, identities, features, data):
'''Callback for when we receive info about an agent's item.'''
addr = get_agent_address(jid, node)
iter = self._find_item(jid, node)
if not iter:
# Not in the treeview, stop
return
if identities == 0:
# The server returned an error
self._update_error(iter, jid, node)
else:
# We got our info
self._update_info(iter, jid, node, identities, features, data)
self.update_actions()
def _add_item(self, jid, node, item, force):
'''Called when an item should be added to the model. The result of a
disco#items query.'''
self.model.append((jid, node, item.get('name', ''),
get_agent_address(jid, node)))
self.cache.get_info(jid, node, self._agent_info, force = force)
def _update_item(self, iter_, jid, node, item):
'''Called when an item should be updated in the model. The result of a
disco#items query. (seldom)'''
if 'name' in item:
self.model[iter_][2] = item['name']
def _update_info(self, iter_, jid, node, identities, features, data):
'''Called when an item should be updated in the model with further info.
The result of a disco#info query.'''
name = identities[0].get('name', '')
if name:
self.model[iter_][2] = name
def _update_error(self, iter_, jid, node):
'''Called when a disco#info query failed for an item.'''
pass
class ToplevelAgentBrowser(AgentBrowser):
'''This browser is used at the top level of a jabber server to browse
services such as transports, conference servers, etc.'''
def __init__(self, *args):
AgentBrowser.__init__(self, *args)
self._progressbar_sourceid = None
self._renderer = None
self._progress = 0
self.tooltip = tooltips.ServiceDiscoveryTooltip()
self.register_button = None
self.join_button = None
self.execute_button = None
self.search_button = None
# Keep track of our treeview signals
self._view_signals = []
self._scroll_signal = None
def _pixbuf_renderer_data_func(self, col, cell, model, iter_):
'''Callback for setting the pixbuf renderer's properties.'''
jid = model.get_value(iter_, 0)
if jid:
pix = model.get_value(iter_, 2)
cell.set_property('visible', True)
cell.set_property('pixbuf', pix)
else:
cell.set_property('visible', False)
def _text_renderer_data_func(self, col, cell, model, iter_):
'''Callback for setting the text renderer's properties.'''
jid = model.get_value(iter_, 0)
markup = model.get_value(iter_, 3)
state = model.get_value(iter_, 4)
cell.set_property('markup', markup)
if jid:
cell.set_property('cell_background_set', False)
if state > 0:
# 1 = fetching, 2 = error
cell.set_property('foreground_set', True)
else:
# Normal/success
cell.set_property('foreground_set', False)
else:
theme = gajim.config.get('roster_theme')
bgcolor = gajim.config.get_per('themes', theme, 'groupbgcolor')
if bgcolor:
cell.set_property('cell_background_set', True)
cell.set_property('foreground_set', False)
def _treemodel_sort_func(self, model, iter1, iter2):
'''Sort function for our treemodel.'''
# Compare state
statecmp = cmp(model.get_value(iter1, 4), model.get_value(iter2, 4))
if statecmp == 0:
# These can be None, apparently
descr1 = model.get_value(iter1, 3)
if descr1:
descr1 = descr1.decode('utf-8')
descr2 = model.get_value(iter2, 3)
if descr2:
descr2 = descr2.decode('utf-8')
# Compare strings
return cmp(descr1, descr2)
return statecmp
def _show_tooltip(self, state):
view = self.window.services_treeview
pointer = view.get_pointer()
props = view.get_path_at_pos(pointer[0], pointer[1])
# check if the current pointer is at the same path
# as it was before setting the timeout
if props and self.tooltip.id == props[0]:
# bounding rectangle of coordinates for the cell within the treeview
rect = view.get_cell_area(props[0], props[1])
# position of the treeview on the screen
position = view.window.get_origin()
self.tooltip.show_tooltip(state, rect.height, position[1] + rect.y)
else:
self.tooltip.hide_tooltip()
# These are all callbacks to make tooltips work
def on_treeview_leave_notify_event(self, widget, event):
props = widget.get_path_at_pos(int(event.x), int(event.y))
if self.tooltip.timeout > 0:
if not props or self.tooltip.id == props[0]:
self.tooltip.hide_tooltip()
def on_treeview_motion_notify_event(self, widget, event):
props = widget.get_path_at_pos(int(event.x), int(event.y))
if self.tooltip.timeout > 0:
if not props or self.tooltip.id != props[0]:
self.tooltip.hide_tooltip()
if props:
[row, col, x, y] = props
iter = None
try:
iter = self.model.get_iter(row)
except Exception:
self.tooltip.hide_tooltip()
return
jid = self.model[iter][0]
state = self.model[iter][4]
# Not a category, and we have something to say about state
if jid and state > 0 and \
(self.tooltip.timeout == 0 or self.tooltip.id != props[0]):
self.tooltip.id = row
self.tooltip.timeout = gobject.timeout_add(500,
self._show_tooltip, state)
def on_treeview_event_hide_tooltip(self, widget, event):
''' This happens on scroll_event, key_press_event
and button_press_event '''
self.tooltip.hide_tooltip()
def _create_treemodel(self):
# JID, node, icon, description, state
# State is 2 on error, 1 while fetching, 0 on success.
view = self.window.services_treeview
self.model = gtk.TreeStore(str, str, gtk.gdk.Pixbuf, str, int)
self.model.set_sort_func(4, self._treemodel_sort_func)
self.model.set_sort_column_id(4, gtk.SORT_ASCENDING)
view.set_model(self.model)
col = gtk.TreeViewColumn()
# Icon Renderer
renderer = gtk.CellRendererPixbuf()
renderer.set_property('xpad', 6)
col.pack_start(renderer, expand = False)
col.set_cell_data_func(renderer, self._pixbuf_renderer_data_func)
# Text Renderer
renderer = gtk.CellRendererText()
col.pack_start(renderer, expand = True)
col.set_cell_data_func(renderer, self._text_renderer_data_func)
renderer.set_property('foreground', 'dark gray')
# Save this so we can go along with theme changes
self._renderer = renderer
self.update_theme()
view.insert_column(col, -1)
col.set_resizable(True)
# Connect signals
scrollwin = self.window.services_scrollwin
self._view_signals.append(view.connect('leave-notify-event',
self.on_treeview_leave_notify_event))
self._view_signals.append(view.connect('motion-notify-event',
self.on_treeview_motion_notify_event))
self._view_signals.append(view.connect('key-press-event',
self.on_treeview_event_hide_tooltip))
self._view_signals.append(view.connect('button-press-event',
self.on_treeview_event_hide_tooltip))
self._scroll_signal = scrollwin.connect('scroll-event',
self.on_treeview_event_hide_tooltip)
def _clean_treemodel(self):
# Disconnect signals
view = self.window.services_treeview
for sig in self._view_signals:
view.disconnect(sig)
self._view_signals = []
if self._scroll_signal:
scrollwin = self.window.services_scrollwin
scrollwin.disconnect(self._scroll_signal)
self._scroll_signal = None
AgentBrowser._clean_treemodel(self)
def _add_actions(self):
AgentBrowser._add_actions(self)
self.execute_button = gtk.Button()
image = gtk.image_new_from_stock(gtk.STOCK_EXECUTE, gtk.ICON_SIZE_BUTTON)
label = gtk.Label(_('_Execute Command'))
label.set_use_underline(True)
hbox = gtk.HBox()
hbox.pack_start(image, False, True, 6)
hbox.pack_end(label, True, True)
self.execute_button.add(hbox)
self.execute_button.connect('clicked', self.on_execute_button_clicked)
self.window.action_buttonbox.add(self.execute_button)
self.execute_button.show_all()
self.register_button = gtk.Button(label=_("Re_gister"),
use_underline=True)
self.register_button.connect('clicked', self.on_register_button_clicked)
self.window.action_buttonbox.add(self.register_button)
self.register_button.show_all()
self.join_button = gtk.Button()
image = gtk.image_new_from_stock(gtk.STOCK_CONNECT, gtk.ICON_SIZE_BUTTON)
label = gtk.Label(_('_Join'))
label.set_use_underline(True)
hbox = gtk.HBox()
hbox.pack_start(image, False, True, 6)
hbox.pack_end(label, True, True)
self.join_button.add(hbox)
self.join_button.connect('clicked', self.on_join_button_clicked)
self.window.action_buttonbox.add(self.join_button)
self.join_button.show_all()
self.search_button = gtk.Button()
image = gtk.image_new_from_stock(gtk.STOCK_FIND, gtk.ICON_SIZE_BUTTON)
label = gtk.Label(_('_Search'))
label.set_use_underline(True)
hbox = gtk.HBox()
hbox.pack_start(image, False, True, 6)
hbox.pack_end(label, True, True)
self.search_button.add(hbox)
self.search_button.connect('clicked', self.on_search_button_clicked)
self.window.action_buttonbox.add(self.search_button)
self.search_button.show_all()
def _clean_actions(self):
if self.execute_button:
self.execute_button.destroy()
self.execute_button = None
if self.register_button:
self.register_button.destroy()
self.register_button = None
if self.join_button:
self.join_button.destroy()
self.join_button = None
if self.search_button:
self.search_button.destroy()
self.search_button = None
AgentBrowser._clean_actions(self)
def on_search_button_clicked(self, widget = None):
'''When we want to search something:
open search window'''
model, iter = self.window.services_treeview.get_selection().get_selected()
if not iter:
return
service = model[iter][0].decode('utf-8')
if service in gajim.interface.instances[self.account]['search']:
gajim.interface.instances[self.account]['search'][service].present()
else:
gajim.interface.instances[self.account]['search'][service] = \
search_window.SearchWindow(self.account, service)
def cleanup(self):
self.tooltip.hide_tooltip()
AgentBrowser.cleanup(self)
def update_theme(self):
theme = gajim.config.get('roster_theme')
bgcolor = gajim.config.get_per('themes', theme, 'groupbgcolor')
if bgcolor:
self._renderer.set_property('cell-background', bgcolor)
self.window.services_treeview.queue_draw()
def on_execute_button_clicked(self, widget = None):
'''When we want to execute a command:
open adhoc command window'''
model, iter = self.window.services_treeview.get_selection().get_selected()
if not iter:
return
service = model[iter][0].decode('utf-8')
adhoc_commands.CommandWindow(self.account, service)
def on_register_button_clicked(self, widget = None):
'''When we want to register an agent:
request information about registering with the agent and close the
window.'''
model, iter = self.window.services_treeview.get_selection().get_selected()
if not iter:
return
jid = model[iter][0].decode('utf-8')
if jid:
gajim.connections[self.account].request_register_agent_info(jid)
self.window.destroy(chain = True)
def on_join_button_clicked(self, widget):
'''When we want to join an IRC room or create a new MUC room:
Opens the join_groupchat_window.'''
model, iter = self.window.services_treeview.get_selection().get_selected()
if not iter:
return
service = model[iter][0].decode('utf-8')
if 'join_gc' not in gajim.interface.instances[self.account]:
try:
dialogs.JoinGroupchatWindow(self.account, service)
except GajimGeneralException:
pass
else:
gajim.interface.instances[self.account]['join_gc'].window.present()
self.window.destroy(chain = True)
def update_actions(self):
if self.execute_button:
self.execute_button.set_sensitive(False)
if self.register_button:
self.register_button.set_sensitive(False)
if self.browse_button:
self.browse_button.set_sensitive(False)
if self.join_button:
self.join_button.set_sensitive(False)
if self.search_button:
self.search_button.set_sensitive(False)
model, iter = self.window.services_treeview.get_selection().get_selected()
if not iter:
return
if not model[iter][0]:
# We're on a category row
return
if model[iter][4] != 0:
# We don't have the info (yet)
# It's either unknown or a transport, register button should be active
if self.register_button:
self.register_button.set_sensitive(True)
# Guess what kind of service we're dealing with
if self.browse_button:
jid = model[iter][0].decode('utf-8')
type = gajim.get_transport_name_from_jid(jid,
use_config_setting = False)
if type:
identity = {'category': '_jid', 'type': type}
klass = self.cache.get_browser([identity])
if klass:
self.browse_button.set_sensitive(True)
else:
# We couldn't guess
self.browse_button.set_sensitive(True)
else:
# Normal case, we have info
AgentBrowser.update_actions(self)
def _update_actions(self, jid, node, identities, features, data):
AgentBrowser._update_actions(self, jid, node, identities, features, data)
if self.execute_button and xmpp.NS_COMMANDS in features:
self.execute_button.set_sensitive(True)
if self.search_button and xmpp.NS_SEARCH in features:
self.search_button.set_sensitive(True)
if self.register_button and xmpp.NS_REGISTER in features:
# We can register this agent
registered_transports = []
jid_list = gajim.contacts.get_jid_list(self.account)
for jid_ in jid_list:
contact = gajim.contacts.get_first_contact_from_jid(
self.account, jid_)
if _('Transports') in contact.groups:
registered_transports.append(jid_)
if jid in registered_transports:
self.register_button.set_label(_('_Edit'))
else:
self.register_button.set_label(_('Re_gister'))
self.register_button.set_sensitive(True)
if self.join_button and xmpp.NS_MUC in features:
self.join_button.set_sensitive(True)
def _default_action(self, jid, node, identities, features, data):
if AgentBrowser._default_action(self, jid, node, identities, features, data):
return True
if xmpp.NS_REGISTER in features:
# Register if we can't browse
self.on_register_button_clicked()
return True
return False
def browse(self, force = False):
self._progress = 0
AgentBrowser.browse(self, force = force)
def _expand_all(self):
'''Expand all items in the treeview'''
# GTK apparently screws up here occasionally. :/
#def expand_all(*args):
# self.window.services_treeview.expand_all()
# self.expanding = False
# return False
#self.expanding = True
#gobject.idle_add(expand_all)
self.window.services_treeview.expand_all()
def _update_progressbar(self):
'''Update the progressbar.'''
# Refresh this every update
if self._progressbar_sourceid:
gobject.source_remove(self._progressbar_sourceid)
fraction = 0
if self._total_items:
self.window.progressbar.set_text(_("Scanning %(current)d / %(total)d.."
) % {'current': self._progress, 'total': self._total_items})
fraction = float(self._progress) / float(self._total_items)
if self._progress >= self._total_items:
# We show the progressbar for just a bit before hiding it.
id = gobject.timeout_add_seconds(2, self._hide_progressbar_cb)
self._progressbar_sourceid = id
else:
self.window.progressbar.show()
# Hide the progressbar if we're timing out anyway. (20 secs)
id = gobject.timeout_add_seconds(20, self._hide_progressbar_cb)
self._progressbar_sourceid = id
self.window.progressbar.set_fraction(fraction)
def _hide_progressbar_cb(self, *args):
'''Simple callback to hide the progressbar a second after we finish.'''
if self.active:
self.window.progressbar.hide()
return False
def _friendly_category(self, category, type_=None):
'''Get the friendly category name and priority.'''
cat = None
if type_:
# Try type-specific override
try:
cat, prio = _cat_to_descr[(category, type_)]
except KeyError:
pass
if not cat:
try:
cat, prio = _cat_to_descr[category]
except KeyError:
cat, prio = _cat_to_descr['other']
return cat, prio
def _create_category(self, cat, type_=None):
'''Creates a category row.'''
cat, prio = self._friendly_category(cat, type_)
return self.model.append(None, ('', '', None, cat, prio))
def _find_category(self, cat, type_=None):
'''Looks up a category row and returns the iterator to it, or None.'''
cat, prio = self._friendly_category(cat, type_)
iter = self.model.get_iter_root()
while iter:
if self.model.get_value(iter, 3).decode('utf-8') == cat:
break
iter = self.model.iter_next(iter)
if iter:
return iter
return None
def _find_item(self, jid, node):
iter = None
cat_iter = self.model.get_iter_root()
while cat_iter and not iter:
iter = self.model.iter_children(cat_iter)
while iter:
cjid = self.model.get_value(iter, 0).decode('utf-8')
cnode = self.model.get_value(iter, 1).decode('utf-8')
if jid == cjid and node == cnode:
break
iter = self.model.iter_next(iter)
cat_iter = self.model.iter_next(cat_iter)
if iter:
return iter
return None
def _add_item(self, jid, node, item, force):
# Row text
addr = get_agent_address(jid, node)
# Escape, since the values end up in Pango markup
if 'name' in item:
descr = "<b>%s</b>\n%s" % (gobject.markup_escape_text(item['name']),
gobject.markup_escape_text(addr))
else:
descr = "<b>%s</b>" % gobject.markup_escape_text(addr)
# Guess which kind of service this is
identities = []
type = gajim.get_transport_name_from_jid(jid,
use_config_setting = False)
if type:
identity = {'category': '_jid', 'type': type}
identities.append(identity)
cat_args = ('_jid', type)
else:
# Put it in the 'other' category for now
cat_args = ('other',)
# Set the pixmap for the row
pix = self.cache.get_icon(identities)
# Put it in the right category
cat = self._find_category(*cat_args)
if not cat:
cat = self._create_category(*cat_args)
self.model.append(cat, (item['jid'], item.get('node', ''), pix, descr, 1))
self._expand_all()
# Grab info on the service
self.cache.get_info(jid, node, self._agent_info, force = force)
self._update_progressbar()
def _update_item(self, iter_, jid, node, item):
addr = get_agent_address(jid, node)
# Escape, since the values end up in Pango markup
if 'name' in item:
descr = "<b>%s</b>\n%s" % (gobject.markup_escape_text(item['name']),
gobject.markup_escape_text(addr))
else:
descr = "<b>%s</b>" % gobject.markup_escape_text(addr)
self.model[iter_][3] = descr
def _update_info(self, iter_, jid, node, identities, features, data):
addr = get_agent_address(jid, node)
name = identities[0].get('name', '')
# Escape, since the values end up in Pango markup
if name:
descr = "<b>%s</b>\n%s" % (gobject.markup_escape_text(name),
gobject.markup_escape_text(addr))
else:
descr = "<b>%s</b>" % gobject.markup_escape_text(addr)
# Update progress
self._progress += 1
self._update_progressbar()
# Search for an icon and category we can display
pix = self.cache.get_icon(identities)
cat = type = None
for identity in identities:
try:
cat, type = identity['category'], identity['type']
except KeyError:
continue
break
# Check if we have to move categories
old_cat_iter = self.model.iter_parent(iter_)
old_cat = self.model.get_value(old_cat_iter, 3).decode('utf-8')
if old_cat == self._friendly_category(cat, type)[0]:
# Already in the right category, just update
self.model[iter_][2] = pix
self.model[iter_][3] = descr
self.model[iter_][4] = 0
return
# Not in the right category, move it.
self.model.remove(iter_)
# Check if the old category is empty
if not self.model.iter_is_valid(old_cat_iter):
old_cat_iter = self._find_category(old_cat)
if not self.model.iter_children(old_cat_iter):
self.model.remove(old_cat_iter)
cat_iter = self._find_category(cat, type)
if not cat_iter:
cat_iter = self._create_category(cat, type)
self.model.append(cat_iter, (jid, node, pix, descr, 0))
self._expand_all()
def _update_error(self, iter_, jid, node):
addr = get_agent_address(jid, node)
self.model[iter_][4] = 2
self._progress += 1
self._update_progressbar()
class MucBrowser(AgentBrowser):
def __init__(self, *args, **kwargs):
AgentBrowser.__init__(self, *args, **kwargs)
self.join_button = None
def _create_treemodel(self):
# JID, node, name, users_int, users_str, description, fetched
# This is rather long; I'd rather not use a data_func here, though.
# Users is a string, because we want to be able to leave it empty.
self.model = gtk.ListStore(str, str, str, int, str, str, bool)
self.model.set_sort_column_id(2, gtk.SORT_ASCENDING)
self.window.services_treeview.set_model(self.model)
# Name column
col = gtk.TreeViewColumn(_('Name'))
col.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
col.set_fixed_width(100)
renderer = gtk.CellRendererText()
col.pack_start(renderer)
col.set_attributes(renderer, text = 2)
col.set_sort_column_id(2)
self.window.services_treeview.insert_column(col, -1)
col.set_resizable(True)
# Users column
col = gtk.TreeViewColumn(_('Users'))
renderer = gtk.CellRendererText()
col.pack_start(renderer)
col.set_attributes(renderer, text = 4)
col.set_sort_column_id(3)
self.window.services_treeview.insert_column(col, -1)
col.set_resizable(True)
# Description column
col = gtk.TreeViewColumn(_('Description'))
renderer = gtk.CellRendererText()
col.pack_start(renderer)
col.set_attributes(renderer, text = 5)
col.set_sort_column_id(4)
self.window.services_treeview.insert_column(col, -1)
col.set_resizable(True)
# Id column
col = gtk.TreeViewColumn(_('Id'))
renderer = gtk.CellRendererText()
col.pack_start(renderer)
col.set_attributes(renderer, text = 0)
col.set_sort_column_id(0)
self.window.services_treeview.insert_column(col, -1)
col.set_resizable(True)
self.window.services_treeview.set_headers_visible(True)
self.window.services_treeview.set_headers_clickable(True)
# Source id for idle callback used to start disco#info queries.
self._fetch_source = None
# Query failure counter
self._broken = 0
# Connect to scrollwindow scrolling
self.vadj = self.window.services_scrollwin.get_property('vadjustment')
self.vadj_cbid = self.vadj.connect('value-changed', self.on_scroll)
# And to size changes
self.size_cbid = self.window.services_scrollwin.connect(
'size-allocate', self.on_scroll)
def _clean_treemodel(self):
if self.size_cbid:
self.window.services_scrollwin.disconnect(self.size_cbid)
self.size_cbid = None
if self.vadj_cbid:
self.vadj.disconnect(self.vadj_cbid)
self.vadj_cbid = None
AgentBrowser._clean_treemodel(self)
def _add_actions(self):
self.join_button = gtk.Button(label=_('_Join'), use_underline=True)
self.join_button.connect('clicked', self.on_join_button_clicked)
self.window.action_buttonbox.add(self.join_button)
self.join_button.show_all()
def _clean_actions(self):
if self.join_button:
self.join_button.destroy()
self.join_button = None
def on_join_button_clicked(self, *args):
'''When we want to join a conference:
ask for specific information about the selected agent and close the window'''
model, iter = self.window.services_treeview.get_selection().get_selected()
if not iter:
return
service = model[iter][0].decode('utf-8')
if 'join_gc' not in gajim.interface.instances[self.account]:
try:
dialogs.JoinGroupchatWindow(self.account, service)
except GajimGeneralException:
pass
else:
gajim.interface.instances[self.account]['join_gc'].window.present()
self.window.destroy(chain = True)
def update_actions(self):
if self.join_button:
sens = self.window.services_treeview.get_selection().count_selected_rows()
self.join_button.set_sensitive(sens > 0)
def default_action(self):
self.on_join_button_clicked()
def _start_info_query(self):
'''Idle callback to start checking for visible rows.'''
self._fetch_source = None
self._query_visible()
return False
def on_scroll(self, *args):
'''Scrollwindow callback to trigger new queries on scrolling.'''
# This apparently happens when inactive sometimes
self._query_visible()
def _query_visible(self):
'''Query the next visible row for info.'''
if self._fetch_source:
# We're already fetching
return
view = self.window.services_treeview
if not view.flags() & gtk.REALIZED:
# Prevent a silly warning, try again in a bit.
self._fetch_source = gobject.timeout_add(100, self._start_info_query)
return
# We have to do this in a pygtk <2.8 compatible way :/
#start, end = self.window.services_treeview.get_visible_range()
rect = view.get_visible_rect()
iter = end = None
# Top row
try:
sx, sy = view.tree_to_widget_coords(rect.x, rect.y)
spath = view.get_path_at_pos(sx, sy)[0]
iter = self.model.get_iter(spath)
except TypeError:
self._fetch_source = None
return
# Bottom row
# Iter comparison is broken, use the path instead
try:
ex, ey = view.tree_to_widget_coords(rect.x + rect.height,
rect.y + rect.height)
end = view.get_path_at_pos(ex, ey)[0]
# end is the last visible row; we want to query that as well
end = (end[0] + 1,)
except TypeError:
# We're at the end of the model, we can leave end=None though.
pass
while iter and self.model.get_path(iter) != end:
if not self.model.get_value(iter, 6):
jid = self.model.get_value(iter, 0).decode('utf-8')
node = self.model.get_value(iter, 1).decode('utf-8')
self.cache.get_info(jid, node, self._agent_info)
self._fetch_source = True
return
iter = self.model.iter_next(iter)
self._fetch_source = None
def _channel_altinfo(self, jid, node, items, name = None):
'''Callback for the alternate disco#items query. We try to at least get
the number of users in the room if the service does not support MUC
dataforms.'''
if items == 0:
# The server returned an error
self._broken += 1
if self._broken >= 3:
# Disable queries completely after 3 failures
if self.size_cbid:
self.window.services_scrollwin.disconnect(self.size_cbid)
self.size_cbid = None
if self.vadj_cbid:
self.vadj.disconnect(self.vadj_cbid)
self.vadj_cbid = None
self._fetch_source = None
return
else:
iter = self._find_item(jid, node)
if iter:
if name:
self.model[iter][2] = name
self.model[iter][3] = len(items) # The number of users
self.model[iter][4] = str(len(items)) # The number of users
self.model[iter][6] = True
self._fetch_source = None
self._query_visible()
def _add_item(self, jid, node, item, force):
self.model.append((jid, node, item.get('name', ''), -1, '', '', False))
if not self._fetch_source:
self._fetch_source = gobject.idle_add(self._start_info_query)
def _update_info(self, iter_, jid, node, identities, features, data):
name = identities[0].get('name', '')
for form in data:
typefield = form.getField('FORM_TYPE')
if typefield and typefield.getValue() == \
'http://jabber.org/protocol/muc#roominfo':
# Fill model row from the form's fields
users = form.getField('muc#roominfo_occupants')
descr = form.getField('muc#roominfo_description')
if users:
self.model[iter_][3] = int(users.getValue())
self.model[iter_][4] = users.getValue()
if descr:
self.model[iter_][5] = descr.getValue()
# Only set these when we find a form with additional info
# Some servers don't support forms and put extra info in
# the name attribute, so we preserve it in that case.
self.model[iter_][2] = name
self.model[iter_][6] = True
break
else:
# We didn't find a form, switch to alternate query mode
self.cache.get_items(jid, node, self._channel_altinfo, args = (name,))
return
# Continue with the next
self._fetch_source = None
self._query_visible()
def _update_error(self, iter_, jid, node):
# switch to alternate query mode
self.cache.get_items(jid, node, self._channel_altinfo)
def PubSubBrowser(account, jid, node):
''' Returns an AgentBrowser subclass that will display service discovery
for a particular pubsub service. Different pubsub services may need to
present different data during browsing. '''
# for now, only discussion groups are supported...
# TODO: check if it has appropriate features to be such kind of service
return DiscussionGroupsBrowser(account, jid, node)
class DiscussionGroupsBrowser(AgentBrowser):
''' For browsing pubsub-based discussion groups service. '''
def __init__(self, account, jid, node):
AgentBrowser.__init__(self, account, jid, node)
# this will become a set object once we get the subscriptions; None means
# we don't know yet which groups we are subscribed to
self.subscriptions = None
# these will become our action widgets when we create them; None means
# we don't have them yet (needed for the check in the callbacks)
self.post_button = None
self.subscribe_button = None
self.unsubscribe_button = None
gajim.connections[account].send_pb_subscription_query(jid, self._subscriptionsCB)
def _create_treemodel(self):
''' Create treemodel for the window. '''
# JID, node, name (with description) as Pango markup, don't have info?, subscribed?
self.model = gtk.TreeStore(str, str, str, bool, bool)
# sort by name
self.model.set_sort_column_id(2, gtk.SORT_ASCENDING)
self.window.services_treeview.set_model(self.model)
# Name column
# Pango markup for name and description, description printed with
# <small/> font
renderer = gtk.CellRendererText()
col = gtk.TreeViewColumn(_('Name'))
col.pack_start(renderer)
col.set_attributes(renderer, markup=2)
col.set_resizable(True)
self.window.services_treeview.insert_column(col, -1)
self.window.services_treeview.set_headers_visible(True)
# Subscription state
renderer = gtk.CellRendererToggle()
col = gtk.TreeViewColumn(_('Subscribed'))
col.pack_start(renderer)
col.set_attributes(renderer, inconsistent=3, active=4)
col.set_resizable(False)
self.window.services_treeview.insert_column(col, -1)
# Node Column
renderer = gtk.CellRendererText()
col = gtk.TreeViewColumn(_('Node'))
col.pack_start(renderer)
col.set_attributes(renderer, markup=1)
col.set_resizable(True)
self.window.services_treeview.insert_column(col, -1)
def _add_items(self, jid, node, items, force):
for item in items:
jid = item['jid']
node = item.get('node', '')
self._total_items += 1
self._add_item(jid, node, item, force)
def _in_list_foreach(self, model, path, iter_, node):
if model[path][1] == node:
self.in_list = True
def _in_list(self, node):
self.in_list = False
self.model.foreach(self._in_list_foreach, node)
return self.in_list
def _add_item(self, jid, node, item, force):
''' Called when we get basic information about a new node from the query.
Show the item. '''
name = item.get('name', '')
if self.subscriptions is not None:
dunno = False
subscribed = node in self.subscriptions
else:
dunno = True
subscribed = False
name = gobject.markup_escape_text(name)
name = '<b>%s</b>' % name
node_splitted = node.split('/')
parent_iter = None
while len(node_splitted) > 1:
parent_node = node_splitted.pop(0)
parent_iter = self._get_child_iter(parent_iter, parent_node)
node_splitted[0] = parent_node + '/' + node_splitted[0]
if not self._in_list(node):
self.model.append(parent_iter, (jid, node, name, dunno, subscribed))
self.cache.get_items(jid, node, self._add_items, force = force,
args = (force,))
def _get_child_iter(self, parent_iter, node):
child_iter = self.model.iter_children(parent_iter)
while child_iter:
if self.model[child_iter][1] == node:
return child_iter
child_iter = self.model.iter_next(child_iter)
return None
def _add_actions(self):
self.post_button = gtk.Button(label=_('New post'), use_underline=True)
self.post_button.set_sensitive(False)
self.post_button.connect('clicked', self.on_post_button_clicked)
self.window.action_buttonbox.add(self.post_button)
self.post_button.show_all()
self.subscribe_button = gtk.Button(label=_('_Subscribe'), use_underline=True)
self.subscribe_button.set_sensitive(False)
self.subscribe_button.connect('clicked', self.on_subscribe_button_clicked)
self.window.action_buttonbox.add(self.subscribe_button)
self.subscribe_button.show_all()
self.unsubscribe_button = gtk.Button(label=_('_Unsubscribe'), use_underline=True)
self.unsubscribe_button.set_sensitive(False)
self.unsubscribe_button.connect('clicked', self.on_unsubscribe_button_clicked)
self.window.action_buttonbox.add(self.unsubscribe_button)
self.unsubscribe_button.show_all()
def _clean_actions(self):
if self.post_button is not None:
self.post_button.destroy()
self.post_button = None
if self.subscribe_button is not None:
self.subscribe_button.destroy()
self.subscribe_button = None
if self.unsubscribe_button is not None:
self.unsubscribe_button.destroy()
self.unsubscribe_button = None
def update_actions(self):
'''Called when the user selects a row. Make the subscribe/unsubscribe
buttons sensitive appropriately.'''
# we have nothing to do if we don't have buttons...
if self.subscribe_button is None: return
model, iter = self.window.services_treeview.get_selection().get_selected()
if not iter or self.subscriptions is None:
# no item selected or no subscriptions info, all buttons are insensitive
self.post_button.set_sensitive(False)
self.subscribe_button.set_sensitive(False)
self.unsubscribe_button.set_sensitive(False)
else:
subscribed = model.get_value(iter, 4) # 4 = subscribed?
self.post_button.set_sensitive(subscribed)
self.subscribe_button.set_sensitive(not subscribed)
self.unsubscribe_button.set_sensitive(subscribed)
def on_post_button_clicked(self, widget):
'''Called when the 'post' button is pressed. Open a window to create a post.'''
model, iter = self.window.services_treeview.get_selection().get_selected()
if iter is None: return
groupnode = model.get_value(iter, 1) # 1 = groupnode
groups.GroupsPostWindow(self.account, self.jid, groupnode)
def on_subscribe_button_clicked(self, widget):
'''Called when the 'subscribe' button is pressed. Send a subscription request.'''
model, iter = self.window.services_treeview.get_selection().get_selected()
if iter is None: return
groupnode = model.get_value(iter, 1) # 1 = groupnode
gajim.connections[self.account].send_pb_subscribe(self.jid, groupnode, self._subscribeCB, groupnode)
def on_unsubscribe_button_clicked(self, widget):
'''Called when the 'unsubscribe' button is pressed. Send an unsubscription request.'''
model, iter = self.window.services_treeview.get_selection().get_selected()
if iter is None: return
groupnode = model.get_value(iter, 1) # 1 = groupnode
gajim.connections[self.account].send_pb_unsubscribe(self.jid, groupnode, self._unsubscribeCB, groupnode)
def _subscriptionsCB(self, conn, request):
''' We got the subscribed groups list stanza. Now, if we already
have items in the list, we should update them. '''
try:
subscriptions = request.getTag('pubsub').getTag('subscriptions')
except Exception:
return
groups = set()
for child in subscriptions.getTags('subscription'):
groups.add(child['node'])
self.subscriptions = groups
# try to setup existing items in model
model = self.window.services_treeview.get_model()
for row in model:
# 1 = group node
# 3 = insensitive checkbox for subscribed
# 4 = subscribed?
groupnode = row[1]
row[3] = False
row[4] = groupnode in groups
# we now know subscriptions, update button states
self.update_actions()
raise xmpp.NodeProcessed
def _subscribeCB(self, conn, request, groupnode):
'''We have just subscribed to a node. Update UI'''
self.subscriptions.add(groupnode)
model = self.window.services_treeview.get_model()
for row in model:
if row[1] == groupnode: # 1 = groupnode
row[4] = True
break
self.update_actions()
raise xmpp.NodeProcessed
def _unsubscribeCB(self, conn, request, groupnode):
'''We have just unsubscribed from a node. Update UI'''
self.subscriptions.remove(groupnode)
model = self.window.services_treeview.get_model()
for row in model:
if row[1] == groupnode: # 1 = groupnode
row[4] = False
break
self.update_actions()
raise xmpp.NodeProcessed
# Fill the global agent type info dictionary
_agent_type_info = _gen_agent_type_info()
# vim: se ts=3:
|
sgala/gajim
|
src/disco.py
|
Python
|
gpl-3.0
| 67,696
|
[
"VisIt"
] |
9ef668918b570fd875d4593939c9f388fa2936d88fb3aeeb255231e84a7deb57
|
# Copyright 2013 by Kamil Koziara. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
I/O operations for NeXO.
"""
import xml.sax
import collections
import ast
from Ontology.Data import OntologyGraph, OntologyTerm, TermAssociation, GeneAnnotation
from .Interfaces import OntoReader
_SKIP = 0
_NODE = 1
_EDGE = 2
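# Parser states for the SAX handler below: _SKIP ignores content (used while
# inside a <graphics> element), _NODE collects term attributes, and _EDGE
# collects relation attributes.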
class NexoContentHandler(xml.sax.ContentHandler):
def __init__(self, get_all_attrs, annotation_source):
xml.sax.ContentHandler.__init__(self)
self.get_all_attrs = get_all_attrs
self.annotation_source = annotation_source
self.state = _SKIP
self.old_state = _SKIP
self.nodes = {}
self.annotations = collections.defaultdict(list)
self.edges = []
self.current_term = None
self.current_edge = None
def _split_list(self, val):
if val.startswith('['):
return ast.literal_eval(val)
else:
return [val]
def startElement(self, name, attrs):
if name == "node":
self.state = _NODE
term_id = attrs["label"]
self.current_term = OntologyTerm(term_id, term_id, {})
self.nodes[attrs["id"]] = self.current_term
elif name == "edge":
self.state = _EDGE
self.current_edge = [attrs["source"], attrs["target"]]
self.edges.append(self.current_edge)
elif name == "graphics":
self.old_state = self.state
self.state = _SKIP
elif name == "att":
if self.state == _NODE:
if attrs["name"] == "Term":
val = attrs.get("value")
if val is not None:
self.current_term.name = val
elif (attrs["name"] == "Assigned Genes" and self.annotation_source == "genes")\
or (attrs["name"] == "Assigned Orfs" and self.annotation_source == "orfs"):
val = attrs.get("value")
if val is not None:
for gene in self._split_list(val):
self.annotations[gene].append(self.current_term.id)
elif self.get_all_attrs:
val = attrs.get("value")
if val is not None:
self.current_term.attrs[attrs["name"]] = val
elif self.state == _EDGE:
if attrs["name"] == "NeXO relation type":
self.current_edge.append(attrs.get("value"))
def endElement(self, name):
if name == "node" or name == "edge":
self.state = _SKIP
elif name == "graphics":
self.state = self.old_state
def characters(self, content):
pass
class NexoReader(OntoReader):
"""
Class for reading Nexo xgmml network.
"""
def __init__(self, file_handle, get_all_attrs=False, annotation_source="genes"):
self.handle = file_handle
self.get_all_attrs = get_all_attrs
self.annotation_source = annotation_source
def read(self):
"""
Returns gene annotation list and ontology graph read from nexo file.
"""
content_handler = NexoContentHandler(self.get_all_attrs, self.annotation_source)
xml.sax.parse(self.handle, content_handler)
annotations = []
for obj, assocs in content_handler.annotations.items():
annotations.append(GeneAnnotation(obj,
associations = [TermAssociation(x) for x in assocs]))
graph = OntologyGraph()
for _, node in content_handler.nodes.items():
graph.add_node(node.id, node)
edge_types = set()
for edge in content_handler.edges:
source = content_handler.nodes[edge[0]].id
target = content_handler.nodes[edge[1]].id
graph.add_edge(target, source, edge[2]) # in our representation it is inverted
edge_types.add(edge[2])
for edge_type in edge_types:
graph.typedefs[edge_type] = {"id" : edge_type}
return (annotations, graph)
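# Editor's usage sketch (not part of the original module); "nexo.xgmml" is a
# hypothetical local NeXO XGMML export.
with open("nexo.xgmml") as handle:
    annotations, graph = NexoReader(handle, annotation_source="genes").read()
print("%d annotated genes" % len(annotations))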
|
arkatebi/SwissProt-stats
|
Ontology/IO/NexoIO.py
|
Python
|
gpl-3.0
| 4,401
|
[
"Biopython"
] |
97dc1247a3d0343bd1a61ff37af2e94871af5e087750c858a09104d3e7dc330b
|
"""Tests for the ArchiveFiles Operation"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=protected-access, redefined-outer-name
import six
from functools import partial
import logging
import os
import pytest
from mock import MagicMock as Mock
from DIRAC import S_ERROR, S_OK
from DIRAC.Core.Utilities import DEncode
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.DataManagementSystem.Agent.RequestOperations import ArchiveFiles
logging.basicConfig(level=logging.WARNING, format='%(levelname)-5s - %(name)-8s: %(message)s')
LOG = logging.getLogger('TestArchiveFiles')
MODULE = 'DIRAC.DataManagementSystem.Agent.RequestOperations.ArchiveFiles'
FILE_NAME = 'fileName'
N_FILES = 10
DEST_DIR = '/Some/Local/Folder'
@pytest.fixture
def listOfLFNs():
"""Return a list of LFNs"""
lfns = []
for index, name in enumerate([FILE_NAME] * N_FILES):
lfns.append('/vo/%s_%d' % (name, index))
return lfns
@pytest.fixture
def _myMocker(mocker):
"""Mock call to external libraries."""
mocker.patch(MODULE + '.shutil.make_archive')
mocker.patch(MODULE + '.shutil.rmtree')
mocker.patch(MODULE + '.os.makedirs')
mocker.patch(MODULE + '.os.remove')
mocker.patch(MODULE + '.gMonitor')
return None
@pytest.fixture
def multiRetValOK(listOfLFNs):
"""Return a return structure for multiple values"""
retVal = {'OK': True, 'Value':
{'Failed': {},
'Successful': {},
}}
for lfn in listOfLFNs:
retVal['Value']['Successful'][lfn] = True
return retVal
def multiRetVal(*args, **kwargs):
"""Return a return structure for multiple values"""
retVal = {'OK': True, 'Value':
{'Failed': {},
'Successful': {},
}}
lfns = args[0]
if isinstance(lfns, six.string_types):
lfns = [lfns]
for _index, lfn in enumerate(lfns):
if str(kwargs.get('Index', 5)) in lfn:
retVal['Value']['Failed'][lfn] = kwargs.get('Error', 'Failed to do X')
LOG.error('Error for %s %s', lfn, retVal['Value']['Failed'][lfn])
else:
retVal['Value']['Successful'][lfn] = kwargs.get('Success', True)
LOG.info('Success for %s %s', lfn, retVal['Value']['Successful'])
return retVal
@pytest.fixture
def archiveRequestAndOp(listOfLFNs):
"""Return a tuple of the request and operation."""
req = Request()
req.RequestName = 'MyRequest'
op = Operation()
switches = {}
archiveLFN = '/vo/tars/myTar.tar'
op.Arguments = DEncode.encode({'SourceSE': switches.get('SourceSE', 'SOURCE-SE'),
'TarballSE': switches.get('TarballSE', 'TARBALL-SE'),
'RegisterDescendent': False,
'ArchiveLFN': archiveLFN})
op.Type = 'ArchiveFiles'
for index, lfn in enumerate(listOfLFNs):
oFile = File()
oFile.LFN = lfn
oFile.Size = index
oFile.Checksum = '01130a%0d' % index
oFile.ChecksumType = 'adler32'
op.addFile(oFile)
req.addOperation(op)
return req, op
@pytest.fixture
def archiveFiles(mocker, archiveRequestAndOp, multiRetValOK):
"""Return the ArchiveFiles operation instance."""
mocker.patch.dict(os.environ, {'AGENT_WORKDIRECTORY': DEST_DIR})
af = ArchiveFiles.ArchiveFiles(archiveRequestAndOp[1])
af.fc = mocker.MagicMock('FileCatalogMock')
af.fc.hasAccess = mocker.MagicMock()
af.fc.hasAccess.return_value = multiRetValOK
af.fc.getReplicas = mocker.MagicMock()
af.fc.getReplicas.side_effect = partial(multiRetVal, Success={'SOURCE-SE': 'PFN'}, Index=11)
af.fc.isFile = mocker.MagicMock()
archiveLFN = '/vo/tars/myTar.tar'
af.fc.isFile.return_value = S_OK({'Failed': {archiveLFN: 'no file'},
'Successful': {}})
af.dm = mocker.MagicMock('DataManagerMock')
af.dm.getFile = mocker.MagicMock(return_value=multiRetValOK)
af.dm.putAndRegister = mocker.MagicMock(return_value=multiRetValOK)
return af
def test_constructor(archiveFiles):
assert archiveFiles.parameterDict == {}
assert archiveFiles.lfns == []
assert archiveFiles.waitingFiles == []
assert archiveFiles.cacheFolder == '/Some/Local/Folder'
def test_run_OK(archiveFiles, _myMocker, listOfLFNs):
archiveFiles._run()
archiveFiles.dm.getFile.assert_called_with(listOfLFNs[9],
destinationDir=os.path.join(DEST_DIR, 'MyRequest', 'vo'),
sourceSE='SOURCE-SE')
for opFile in archiveFiles.operation:
assert opFile.Status == 'Done'
def test_run_Fail(archiveFiles, _myMocker, listOfLFNs):
archiveFiles.dm.getFile.side_effect = partial(multiRetVal, Index=5)
with pytest.raises(RuntimeError, match='Completely failed to download file'):
archiveFiles._run()
archiveFiles.dm.getFile.assert_called_with(listOfLFNs[5],
destinationDir=os.path.join(DEST_DIR, 'MyRequest', 'vo'),
sourceSE='SOURCE-SE')
for opFile in archiveFiles.operation:
assert opFile.Status == 'Waiting'
def test_run_IgnoreMissingFiles(archiveFiles, _myMocker, listOfLFNs):
archiveFiles.dm.getFile.side_effect = partial(multiRetVal, Index=5, Error='No such file or directory')
archiveFiles._run()
archiveFiles.dm.getFile.assert_called_with(listOfLFNs[9],
destinationDir=os.path.join(DEST_DIR, 'MyRequest', 'vo'),
sourceSE='SOURCE-SE')
for index, opFile in enumerate(archiveFiles.operation):
LOG.debug('%s', opFile) # lazy evaluation of the argument!
if index == 5:
assert opFile.Status == 'Done'
else:
assert opFile.Status == 'Done'
def test_checkFilePermissions(archiveFiles, _myMocker):
archiveFiles.waitingFiles = archiveFiles.getWaitingFilesList()
archiveFiles.lfns = [opFile.LFN for opFile in archiveFiles.waitingFiles]
assert len(archiveFiles.lfns) == N_FILES
archiveFiles.fc.hasAccess.side_effect = partial(multiRetVal, Index=3, Error='Permission denied')
with pytest.raises(RuntimeError, match='^Do not have sufficient permissions$'):
archiveFiles._checkFilePermissions()
for index, opFile in enumerate(archiveFiles.operation):
if index == 3:
assert opFile.Status == 'Failed'
else:
assert opFile.Status == 'Waiting'
def test_checkFilePermissions_breaks(archiveFiles, _myMocker):
archiveFiles.waitingFiles = archiveFiles.getWaitingFilesList()
archiveFiles.lfns = [opFile.LFN for opFile in archiveFiles.waitingFiles]
assert len(archiveFiles.lfns) == N_FILES
archiveFiles.fc.hasAccess.return_value = S_ERROR('Break')
with pytest.raises(RuntimeError, match='^Could not resolve permissions$'):
archiveFiles._checkFilePermissions()
for opFile in archiveFiles.operation:
assert opFile.Status == 'Waiting'
def test_uploadTarBall_breaks(archiveFiles, _myMocker, listOfLFNs):
archiveFiles.dm.putAndRegister.return_value = S_ERROR('Break')
with pytest.raises(RuntimeError, match='^Failed to upload tarball: Break$'):
archiveFiles._run()
for opFile in archiveFiles.operation:
assert opFile.Status == 'Waiting'
archiveFiles.dm.getFile.assert_called_with(listOfLFNs[9],
destinationDir=os.path.join(DEST_DIR, 'MyRequest', 'vo'),
sourceSE='SOURCE-SE')
archiveFiles.dm.putAndRegister.assert_called_with('/vo/tars/myTar.tar',
'myTar.tar',
'TARBALL-SE')
def test_call(archiveFiles, _myMocker, listOfLFNs):
archiveFiles()
for opFile in archiveFiles.operation:
assert opFile.Status == 'Done'
archiveFiles.dm.getFile.assert_called_with(listOfLFNs[9],
destinationDir=os.path.join(DEST_DIR, 'MyRequest', 'vo'),
sourceSE='SOURCE-SE')
archiveFiles.dm.putAndRegister.assert_called_with('/vo/tars/myTar.tar',
'myTar.tar',
'TARBALL-SE')
def test_call_withError(archiveFiles, _myMocker, listOfLFNs):
archiveFiles.dm.putAndRegister.return_value = S_ERROR('Break')
archiveFiles()
for opFile in archiveFiles.operation:
assert opFile.Status == 'Waiting'
archiveFiles.dm.getFile.assert_called_with(listOfLFNs[9],
destinationDir=os.path.join(DEST_DIR, 'MyRequest', 'vo'),
sourceSE='SOURCE-SE')
archiveFiles.dm.putAndRegister.assert_called_with('/vo/tars/myTar.tar',
'myTar.tar',
'TARBALL-SE')
def test_call_withUnexpectedError(archiveFiles, _myMocker):
archiveFiles.operation.Arguments = ''
res = archiveFiles()
assert not res['OK']
def test_cleanup(archiveFiles, mocker):
osMocker = mocker.patch(MODULE + '.os.remove', side_effect=OSError('No such file or directory'))
rmTreeMock = mocker.patch(MODULE + '.shutil.rmtree', side_effect=OSError('No such file or directory'))
archiveFiles.parameterDict = {'ArchiveLFN': '/vo.lfn/nofile.tar'}
archiveFiles._cleanup()
osMocker.assert_called_with('nofile.tar')
rmTreeMock.assert_called_with(archiveFiles.cacheFolder, ignore_errors=True)
def test_checkArchiveLFN(archiveFiles):
archiveLFN = '/vo/tars/myTar.tar'
archiveFiles.parameterDict = {'ArchiveLFN': archiveLFN}
# tarball does not exist
archiveFiles._checkArchiveLFN()
archiveFiles.fc.isFile.assert_called_with(archiveLFN)
def test_checkArchiveLFN_Fail(archiveFiles):
archiveLFN = '/vo/tars/myTar.tar'
archiveFiles.parameterDict = {'ArchiveLFN': archiveLFN}
# tarball already exists
archiveFiles.fc.isFile.side_effect = multiRetVal
with pytest.raises(RuntimeError, match='already exists$'):
archiveFiles._checkArchiveLFN()
def test_checkReplicas_success(archiveFiles):
archiveFiles.waitingFiles = archiveFiles.getWaitingFilesList()
archiveFiles.parameterDict = {'SourceSE': 'SOURCE-SE'}
archiveFiles.lfns = [opFile.LFN for opFile in archiveFiles.waitingFiles]
archiveFiles.fc.getReplicas.side_effect = partial(multiRetVal,
Index=11,
Success={'SOURCE-SE': 'PFN'})
assert archiveFiles._checkReplicas() is None
def test_checkReplicas_notAt(archiveFiles):
archiveFiles.waitingFiles = archiveFiles.getWaitingFilesList()
archiveFiles.parameterDict = {'SourceSE': 'SOURCE-SE'}
archiveFiles.lfns = [opFile.LFN for opFile in archiveFiles.waitingFiles]
archiveFiles.fc.getReplicas.side_effect = partial(multiRetVal,
Index=11,
Success={'Not-SOURCE-SE': 'PFN'})
with pytest.raises(RuntimeError, match='Some replicas are not at the source'):
archiveFiles._checkReplicas()
def test_checkReplicas_noSuchFile(archiveFiles):
archiveFiles.waitingFiles = archiveFiles.getWaitingFilesList()
archiveFiles.parameterDict = {'SourceSE': 'SOURCE-SE'}
archiveFiles.lfns = [opFile.LFN for opFile in archiveFiles.waitingFiles]
archiveFiles.fc.getReplicas.side_effect = partial(multiRetVal,
Index=7,
Success={'SOURCE-SE': 'PFN'},
Error='No such file or directory')
assert archiveFiles._checkReplicas() is None
def test_checkReplicas_somefailed(archiveFiles):
archiveFiles.waitingFiles = archiveFiles.getWaitingFilesList()
archiveFiles.parameterDict = {'SourceSE': 'SOURCE-SE'}
archiveFiles.lfns = [opFile.LFN for opFile in archiveFiles.waitingFiles]
archiveFiles.fc.getReplicas.side_effect = partial(multiRetVal,
Index=7,
Success={'SOURCE-SE': 'PFN'},
Error='some error')
with pytest.raises(RuntimeError, match='Failed to get some replica information'):
archiveFiles._checkReplicas()
def test_checkReplicas_failed(archiveFiles, mocker):
archiveFiles.waitingFiles = archiveFiles.getWaitingFilesList()
archiveFiles.parameterDict = {'SourceSE': 'SOURCE-SE'}
archiveFiles.lfns = [opFile.LFN for opFile in archiveFiles.waitingFiles]
archiveFiles.fc.getReplicas = mocker.MagicMock()
archiveFiles.fc.getReplicas.return_value = S_ERROR('some error')
with pytest.raises(RuntimeError, match='Failed to get replica information'):
archiveFiles._checkReplicas()
def test_registerDescendent_disabled(archiveFiles):
archiveFiles.parameterDict = {'RegisterDescendent': False}
archiveFiles.lfns = [opFile.LFN for opFile in archiveFiles.waitingFiles]
archiveFiles.fc.addFileAncestors = Mock(name='AFA')
assert archiveFiles._registerDescendent() is None
archiveFiles.fc.addFileAncestors.assert_not_called()
def test_registerDescendent_success(archiveFiles, listOfLFNs):
archiveFiles.lfns = listOfLFNs
archiveLFN = '/vo/tars/myTar.tar'
archiveFiles.parameterDict = {'RegisterDescendent': True, 'ArchiveLFN': archiveLFN}
archiveFiles.fc.addFileAncestors = Mock(name='AFA',
return_value=S_OK({'Failed': {},
'Successful': {archiveLFN: 'Done'}}))
assert archiveFiles._registerDescendent() is None
archiveFiles.fc.addFileAncestors.assert_called_with({archiveLFN: {'Ancestors': archiveFiles.lfns}})
def test_registerDescendent_PartialFailure(archiveFiles, listOfLFNs):
archiveFiles.lfns = listOfLFNs
archiveLFN = '/vo/tars/myTar.tar'
archiveFiles.parameterDict = {'RegisterDescendent': True, 'ArchiveLFN': archiveLFN}
archiveFiles.fc.addFileAncestors = Mock(name='AFA',
side_effect=(S_ERROR('Failure'),
S_OK({'Failed': {},
'Successful': {archiveLFN: 'Done'}})))
archiveFiles._registerDescendent()
archiveFiles.fc.addFileAncestors.assert_called_with({archiveLFN: {'Ancestors': archiveFiles.lfns}})
def test_registerDescendent_completeFailure(archiveFiles, listOfLFNs):
archiveFiles.lfns = listOfLFNs
archiveLFN = '/vo/tars/myTar.tar'
archiveFiles.parameterDict = {'RegisterDescendent': True, 'ArchiveLFN': archiveLFN}
archiveFiles.fc.addFileAncestors = Mock(name='AFA', return_value=S_ERROR('Failure'))
with pytest.raises(RuntimeError, match='Failed to register ancestors'):
archiveFiles._registerDescendent()
archiveFiles.fc.addFileAncestors.assert_called_with({archiveLFN: {'Ancestors': archiveFiles.lfns}})
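# Editor's sketch (not part of the original test module): the multiRetVal
# helpers above emulate DIRAC's standard return-value convention, shown here
# in miniature with an illustrative LFN.
from DIRAC import S_OK, S_ERROR

ok = S_OK({'Successful': {'/vo/fileName_0': True}, 'Failed': {}})
assert ok['OK'] and '/vo/fileName_0' in ok['Value']['Successful']

err = S_ERROR('Failed to do X')
assert not err['OK'] and err['Message'] == 'Failed to do X'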
|
yujikato/DIRAC
|
src/DIRAC/DataManagementSystem/Agent/RequestOperations/test/Test_ArchiveFiles.py
|
Python
|
gpl-3.0
| 15,159
|
[
"DIRAC"
] |
8b6b71957c5086b6bc19e294b339f3dae4dd13a156cc05ea9890989378965108
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Test that the user-facing API is as we expect...
"""
import importlib
import os
from pathlib import PurePath
import pytest
from numpy.testing import assert_equal
import MDAnalysis as mda
mda_dirname = os.path.dirname(mda.__file__)
def test_Universe():
assert mda.Universe is mda.core.universe.Universe
def test_fetch_mmtf():
assert mda.fetch_mmtf is mda.coordinates.MMTF.fetch_mmtf
def test_Writer():
assert mda.Writer is mda.coordinates.core.writer
def test_AtomGroup():
assert mda.AtomGroup is mda.core.groups.AtomGroup
def test_ResidueGroup():
assert mda.ResidueGroup is mda.core.groups.ResidueGroup
def test_SegmentGroup():
assert mda.SegmentGroup is mda.core.groups.SegmentGroup
def init_files():
"""A generator yielding all MDAnalysis __init__ files."""
os.chdir(mda_dirname)
for root, dirs, files in os.walk("."):
if "__init__.py" in files:
submodule = ".".join(PurePath(root).parts)
submodule = "."*(len(submodule) > 0) + submodule  # prefix a dot unless this is the package root
yield submodule
@pytest.mark.parametrize('submodule', init_files())
def test_all_import(submodule):
module = importlib.import_module("MDAnalysis" + submodule)
module_path = os.path.join(mda_dirname, *submodule.split("."))
if hasattr(module, "__all__"):
missing = [name for name in module.__all__
if name not in module.__dict__.keys()
and name not in [os.path.splitext(f)[0] for
f in os.listdir(module_path)]]
assert_equal(missing, [], err_msg="{}".format(submodule) +
" has errors in __all__ list: " +
"missing = {}".format(missing))
|
MDAnalysis/mdanalysis
|
testsuite/MDAnalysisTests/test_api.py
|
Python
|
gpl-2.0
| 2,813
|
[
"MDAnalysis"
] |
694c0ac52c9b334b41277f2ef3823c7152ebe4678b67919e5681f516b0d600c4
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from pyscf.pbc import gto as pbcgto
from pyscf.pbc import scf as pbcscf
import pyscf.pbc.mp
import pyscf.pbc.mp.kmp2
cell = pbcgto.Cell()
cell.unit = 'B'
L = 7
cell.atom.extend([['Be', (L/2., L/2., L/2.)]])
cell.a = 7 * np.identity(3)
cell.a[1,0] = 5.0
cell.basis = 'gth-szv'
cell.pseudo = 'gth-pade-q2'
cell.mesh = [12]*3
cell.verbose = 5
cell.output = '/dev/null'
cell.build()
def build_h_cell():
# Returns FCC H cell.
cell = pbcgto.Cell()
cell.atom = [['H', (0.000000000, 0.000000000, 0.000000000)],
['H', (0.000000000, 0.500000000, 0.250000000)],
['H', (0.500000000, 0.500000000, 0.500000000)],
['H', (0.500000000, 0.000000000, 0.750000000)]]
cell.unit = 'Bohr'
cell.a = [[1.,0.,0.],[0.,1.,0],[0,0,2.2]]
cell.verbose = 7
cell.spin = 0
cell.charge = 0
cell.basis = [[0, [1.0, 1]],]
cell.pseudo = 'gth-pade'
cell.output = '/dev/null'
cell.max_memory = 1000
for i in range(len(cell.atom)):
cell.atom[i][1] = tuple(np.dot(np.array(cell.atom[i][1]),np.array(cell.a)))
cell.build()
return cell
def run_kcell(cell, nk):
abs_kpts = cell.make_kpts(nk, wrap_around=True)
kmf = pbcscf.KRHF(cell, abs_kpts)
kmf.conv_tol = 1e-12
ekpt = kmf.scf()
mp = pyscf.pbc.mp.kmp2.KMP2(kmf).run()
return ekpt, mp.e_corr
def run_kcell_complex(cell, nk):
abs_kpts = cell.make_kpts(nk, wrap_around=True)
kmf = pbcscf.KRHF(cell, abs_kpts)
kmf.conv_tol = 1e-12
ekpt = kmf.scf()
kmf.mo_coeff = [kmf.mo_coeff[i].astype(np.complex128) for i in range(np.prod(nk))]
mp = pyscf.pbc.mp.kmp2.KMP2(kmf).run()
return ekpt, mp.e_corr
class KnownValues(unittest.TestCase):
def test_111(self):
nk = (1, 1, 1)
escf, emp = run_kcell(cell,nk)
self.assertAlmostEqual(escf, -1.2061049658473704, 9)
self.assertAlmostEqual(emp, -5.44597932944397e-06, 9)
escf, emp = run_kcell_complex(cell,nk)
self.assertAlmostEqual(emp, -5.44597932944397e-06, 9)
def test_311_high_cost(self):
nk = (3, 1, 1)
escf, emp = run_kcell(cell,nk)
self.assertAlmostEqual(escf, -1.0585001200928885, 9)
self.assertAlmostEqual(emp, -7.9832274354253814e-06, 9)
def test_h4_fcc_k2_frozen(self):
'''Metallic hydrogen fcc lattice with frozen lowest lying occupied
and highest lying virtual orbitals. Checks versus a corresponding
supercell calculation.
NOTE: different versions of the davidson may converge to a different
solution for the k-point IP/EA eom. If you're getting the wrong
root, check to see if it's contained in the supercell set of
eigenvalues.'''
cell = build_h_cell()
nmp = [2, 1, 1]
kmf = pbcscf.KRHF(cell)
kmf.kpts = cell.make_kpts(nmp, scaled_center=[0.0,0.0,0.0])
e = kmf.kernel()
frozen = [[0, 3], []]
mymp = pyscf.pbc.mp.kmp2.KMP2(kmf, frozen=frozen)
ekmp2, _ = mymp.kernel()
self.assertAlmostEqual(ekmp2, -0.022416773725207319, 6)
self.assertAlmostEqual(mymp.e_tot, 2.155470531550681, 6)
# Start of supercell calculations
from pyscf.pbc.tools.pbc import super_cell
supcell = super_cell(cell, nmp)
supcell.build()
mf = pbcscf.KRHF(supcell)
e = mf.kernel()
mysmp = pyscf.pbc.mp.kmp2.KMP2(mf, frozen=[0, 7])
emp2, _ = mysmp.kernel()
emp2 /= np.prod(nmp)
self.assertAlmostEqual(emp2, -0.022416773725207319, 6)
def test_h4_fcc_k2_frozen_df_nocc(self):
'''Metallic hydrogen fcc, test different nocc at k points.'''
cell = build_h_cell()
nmp = [2, 1, 1]
kmf = pbcscf.KRHF(cell).density_fit()
kmf.kpts = cell.make_kpts(nmp, scaled_center=[0.0,0.0,0.0])
e = kmf.kernel()
frozen = [[0, 3], []]
mymp = pyscf.pbc.mp.kmp2.KMP2(kmf, frozen=frozen)
ekmp2, _ = mymp.kernel()
self.assertAlmostEqual(ekmp2, -0.016333989667540873, 6)
self.assertAlmostEqual(mymp.e_tot, 2.329841282521279, 6)
def test_rdm1(self):
cell = pbcgto.Cell()
cell.atom = '''Al 0 0 0'''
cell.basis = 'gth-szv'
cell.pseudo = 'gth-pade'
cell.a = '''
2.47332919 0 1.42797728
0.82444306 2.33187713 1.42797728
0, 0, 2.85595455
'''
cell.unit = 'angstrom'
cell.spin = 1
cell.build()
cell.verbose = 4
cell.incore_anyway = True
abs_kpts = cell.make_kpts((2, 1, 1), scaled_center=(.1, .2, .3))
kmf = pbcscf.KRHF(cell, abs_kpts)
kmf.conv_tol = 1e-12
kmf.kernel()
mp = pyscf.pbc.mp.kmp2.KMP2(kmf)
mp.kernel(with_t2=True)
self.assertAlmostEqual(mp.e_corr, -0.00162057921874043, 6)
dm = mp.make_rdm1()
np.testing.assert_allclose(np.trace(dm[0]) + np.trace(dm[1]), 6)
for kdm in dm:
np.testing.assert_allclose(kdm, kdm.conj().T)
def test_kmp2_with_cderi(self):
nk = (1, 1, 1)
abs_kpts = cell.make_kpts(nk, wrap_around=True)
kmf = pbcscf.KRHF(cell, abs_kpts).density_fit()
kmf.conv_tol = 1e-12
ekpt = kmf.scf()
kmf2 = pbcscf.KRHF(cell, abs_kpts).density_fit()
kmf2.conv_tol = 1e-12
kmf2.with_df._cderi = kmf.with_df._cderi
ekpt2 = kmf2.scf()
mp = pyscf.pbc.mp.kmp2.KMP2(kmf2).run()
self.assertAlmostEqual(ekpt2, -1.2053666821021261, 9)
self.assertAlmostEqual(mp.e_corr, -6.9881475423322723e-06, 9)
if __name__ == '__main__':
print("Full kpoint test")
unittest.main()
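# Editor's sketch (not part of the original test module): the KRHF -> KMP2
# workflow exercised above, condensed. It reuses the module-level Be `cell`;
# the 2x1x1 k-mesh is illustrative.
kpts = cell.make_kpts([2, 1, 1], wrap_around=True)
kmf = pbcscf.KRHF(cell, kpts)
kmf.conv_tol = 1e-12
escf = kmf.kernel()
mp = pyscf.pbc.mp.kmp2.KMP2(kmf).run()
print(escf, mp.e_corr)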
|
sunqm/pyscf
|
pyscf/pbc/mp/test/test_kpoint.py
|
Python
|
apache-2.0
| 6,345
|
[
"PySCF"
] |
054887ccf6ac1a8ba1677e855ba0de747cde0d5c91ab05bc2c820af284beb462
|
"""Copyright 2012 Phidgets Inc.
This work is licensed under the Creative Commons Attribution 2.5 Canada License.
To view a copy of this license, visit http://creativecommons.org/licenses/by/2.5/ca/
"""
__author__ = 'Adam Stelmack'
__version__ = '2.1.8'
__date__ = 'May 17 2010'
import threading
from ctypes import *
from Phidgets.PhidgetLibrary import PhidgetLibrary
from Phidgets.Phidget import Phidget
from Phidgets.PhidgetException import PhidgetException
from Phidgets.Events.Events import CurrentChangeEventArgs, InputChangeEventArgs, VelocityChangeEventArgs
from Phidgets.Events.Events import EncoderPositionChangeEventArgs, BackEMFEventArgs, CurrentUpdateEventArgs
from Phidgets.Events.Events import EncoderPositionUpdateEventArgs, SensorUpdateEventArgs
import sys
class MotorControl(Phidget):
"""This class represents a Phidget Motor Controller. All methods to to control a motor controller and read back motor data are implemented in this class.
The Motor Control Phidget is able to control 1 or more DC motors and has 0 or more digital inputs.
Both speed and acceleration are controllable. Speed is controlled via PWM.
The size of the motors that can be driven depends on the motor controller.
See your device's User Guide for more specific API details, technical information, and revision details.
The User Guide, along with other resources, can be found on the product page for your device.
Extends:
Phidget
"""
def __init__(self):
"""The Constructor Method for the MotorControl Class
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
"""
Phidget.__init__(self)
self.__inputChange = None
self.__velocityChange = None
self.__currentChange = None
self.__currentUpdate = None
self.__positionChange = None
self.__positionUpdate = None
self.__sensorUpdate = None
self.__backEMFUpdate = None
self.__onInputChange = None
self.__onVelocityChange = None
self.__onCurrentChange = None
self.__onCurrentUpdate = None
self.__onPositionChange = None
self.__onPositionUpdate = None
self.__onSensorUpdate = None
self.__onBackEMFUpdate = None
try:
PhidgetLibrary.getDll().CPhidgetMotorControl_create(byref(self.handle))
except RuntimeError:
raise
if sys.platform == 'win32':
self.__INPUTCHANGEHANDLER = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_int)
self.__VELOCITYCHANGEHANDLER = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_double)
self.__CURRENTCHANGEHANDLER = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_double)
self.__CURRENTUPDATEHANDLER = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_double)
self.__POSITIONCHANGEHANDLER = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_int, c_int)
self.__POSITIONUPDATEHANDLER = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_int)
self.__SENSORUPDATEHANDLER = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_int)
self.__BACKEMFUPDATEHANDLER = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_double)
elif sys.platform == 'darwin' or sys.platform == 'linux2':
self.__INPUTCHANGEHANDLER = CFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_int)
self.__VELOCITYCHANGEHANDLER = CFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_double)
self.__CURRENTCHANGEHANDLER = CFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_double)
self.__CURRENTUPDATEHANDLER = CFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_double)
self.__POSITIONCHANGEHANDLER = CFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_int, c_int)
self.__POSITIONUPDATEHANDLER = CFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_int)
self.__SENSORUPDATEHANDLER = CFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_int)
self.__BACKEMFUPDATEHANDLER = CFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_double)
def __del__(self):
"""The Destructor Method for the MotorControl Class
"""
Phidget.dispose(self)
def getMotorCount(self):
"""Returns the number of motors supported by this Phidget.
This does not necessarily correspond to the number of motors actually attached to the board.
Returns:
The number of supported motors <int>.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached.
"""
motorCount = c_int()
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_getMotorCount(self.handle, byref(motorCount))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return motorCount.value
def getVelocity(self, index):
"""Returns a motor's velocity.
The valid range is -100 to 100, with 0 being stopped.
Parameters:
index<int>: index of the motor.
Returns:
The current velocity of the motor <double>.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached, or if the index is invalid.
"""
velocity = c_double()
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_getVelocity(self.handle, c_int(index), byref(velocity))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return velocity.value
def setVelocity(self, index, value):
"""Sets a motor's velocity.
The valid range is from -100 to 100, with 0 being stopped. -100 and 100 both correspond to full voltage,
with the value in between corresponding to different widths of PWM.
Parameters:
index<int>: index of the motor.
value<double>: requested velocity for the motor.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached, or if the index or velocity value are invalid.
"""
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_setVelocity(self.handle, c_int(index), c_double(value))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
def __nativeVelocityChangeEvent(self, handle, usrptr, index, value):
if self.__velocityChange != None:
self.__velocityChange(VelocityChangeEventArgs(self, index, value))
return 0
def setOnVelocityChangeHandler(self, velocityChangeHandler):
"""Sets the VelocityChange Event Handler.
The velocity change handler is a method that will be called when the velocity of a motor changes.
These velocity changes are reported back from the Motor Controller and so correspond to actual motor velocity over time.
Parameters:
velocityChangeHandler: hook to the velocityChangeHandler callback function.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException
"""
if velocityChangeHandler == None:
self.__velocityChange = None
self.__onVelocityChange = None
else:
self.__velocityChange = velocityChangeHandler
self.__onVelocityChange = self.__VELOCITYCHANGEHANDLER(self.__nativeVelocityChangeEvent)
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_set_OnVelocityChange_Handler(self.handle, self.__onVelocityChange, None)
except RuntimeError:
self.__velocityChange = None
self.__onVelocityChange = None
raise
if result > 0:
raise PhidgetException(result)
def getAcceleration(self, index):
"""Returns a motor's acceleration.
The valid range is between getAccelerationMin and getAccelerationMax,
and refers to how fast the Motor Controller will change the speed of a motor.
Parameters:
index<int>: index of motor.
Returns:
The acceleration of the motor <double>.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached, or if the index is invalid.
"""
accel = c_double()
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_getAcceleration(self.handle, c_int(index), byref(accel))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return accel.value
def setAcceleration(self, index, value):
"""Sets a motor's acceleration.
The valid range is between getAccelerationMin and getAccelerationMax.
This controls how fast the motor changes speed.
Parameters:
index<int>: index of the motor.
value<double>: requested acceleration for that motor.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached, or if the index or acceleration value are invalid.
"""
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_setAcceleration(self.handle, c_int(index), c_double(value))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
def getAccelerationMax(self, index):
"""Returns the maximum acceleration that a motor will accept, or return.
Parameters:
index<int>: Index of the motor.
Returns:
Maximum acceleration of the motor <double>.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached.
"""
accelMax = c_double()
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_getAccelerationMax(self.handle, c_int(index), byref(accelMax))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return accelMax.value
def getAccelerationMin(self, index):
"""Returns the minimum acceleration that a motor will accept, or return.
Parameters:
index<int>: Index of the motor.
Returns:
Minimum acceleration of the motor <double>.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached.
"""
accelMin = c_double()
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_getAccelerationMin(self.handle, c_int(index), byref(accelMin))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return accelMin.value
def getCurrent(self, index):
"""Returns a motor's current usage.
The valid range is 0 to 255. Note that this is not supported on all motor controllers.
Parameters:
index<int>: index of the motor.
Returns:
The current usage of the motor <double>.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached, or if the index is invalid.
"""
current = c_double()
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_getCurrent(self.handle, c_int(index), byref(current))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return current.value
def __nativeCurrentChangeEvent(self, handle, usrptr, index, value):
if self.__currentChange != None:
self.__currentChange(CurrentChangeEventArgs(self, index, value))
return 0
def setOnCurrentChangeHandler(self, currentChangeHandler):
"""Sets the CurrentCHange Event Handler.
The current change handler is a method that will be called when the current consumed by a motor changes.
Note that this event is not supported with the current motor controller, but will be supported in the future
Parameters:
currentChangeHandler: hook to the currentChangeHandler callback function.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException
"""
if currentChangeHandler == None:
self.__currentChange = None
self.__onCurrentChange = None
else:
self.__currentChange = currentChangeHandler
self.__onCurrentChange = self.__CURRENTCHANGEHANDLER(self.__nativeCurrentChangeEvent)
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_set_OnCurrentChange_Handler(self.handle, self.__onCurrentChange, None)
except RuntimeError:
self.__currentChange = None
self.__onCurrentChange = None
raise
if result > 0:
raise PhidgetException(result)
def __nativeCurrentUpdateEvent(self, handle, usrptr, index, current):
if self.__currentUpdate != None:
self.__currentUpdate(CurrentUpdateEventArgs(self, index, current))
return 0
def setOnCurrentUpdateHandler(self, currentUpdateHandler):
"""Sets the CurrentCHange Event Handler.
The current change handler is a method that will be called when the current consumed by a motor changes.
Note that this event is not supported with the current motor controller, but will be supported in the future
Parameters:
currentChangeHandler: hook to the currentChangeHandler callback function.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException
"""
if currentUpdateHandler == None:
self.__currentUpdate = None
self.__onCurrentUpdate = None
else:
self.__currentUpdate = currentUpdateHandler
self.__onCurrentUpdate = self.__CURRENTUPDATEHANDLER(self.__nativeCurrentUpdateEvent)
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_set_OnCurrentUpdate_Handler(self.handle, self.__onCurrentUpdate, None)
except RuntimeError:
self.__currentUpdate = None
self.__onCurrentUpdate = None
raise
if result > 0:
raise PhidgetException(result)
def getInputCount(self):
"""Returns the number of digital inputs.
Not all Motor Controllers have digital inputs.
Returns:
The number of digital Inputs <int>.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached.
"""
inputCount = c_int()
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_getInputCount(self.handle, byref(inputCount))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return inputCount.value
def getInputState(self, index):
"""Returns the state of a digital input.
True means that the input is activated, and False indicates the default state.
Parameters:
index<int> index of the input.
Returns:
The state of the input <boolean>.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached, or if the index is invalid.
"""
inputState = c_int()
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_getInputState(self.handle, c_int(index), byref(inputState))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
if inputState.value == 1:
return True
else:
return False
def __nativeInputChangeEvent(self, handle, usrptr, index, value):
if self.__inputChange != None:
if value == 1:
state = True
else:
state = False
self.__inputChange(InputChangeEventArgs(self, index, state))
return 0
def setOnInputChangeHandler(self, inputChangeHandler):
"""Sets the InputChange Event Handler.
The input change handler is a method that will be called when an input on this Motor Controller board has changed.
Parameters:
inputChangeHandler: hook to the inputChangeHandler callback function.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException
"""
if inputChangeHandler == None:
self.__inputChange = None
self.__onInputChange = None
else:
self.__inputChange = inputChangeHandler
self.__onInputChange = self.__INPUTCHANGEHANDLER(self.__nativeInputChangeEvent)
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_set_OnInputChange_Handler(self.handle, self.__onInputChange, None)
except RuntimeError:
self.__inputChange = None
self.__onInputChange = None
raise
if result > 0:
raise PhidgetException(result)
def getEncoderCount(self):
"""
"""
encoderCount = c_int()
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_getEncoderCount(self.handle, byref(encoderCount))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return encoderCount.value
def getEncoderPosition(self, index):
"""
"""
encoderPosition = c_int()
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_getEncoderPosition(self.handle, c_int(index), byref(encoderPosition))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return encoderPosition.value
def setEncoderPosition(self, index, encoderPosition):
"""
"""
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_setEncoderPosition(self.handle, c_int(index), c_int(encoderPosition))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
def __nativePositionChangeEvent(self, handle, usrptr, index, time, positionChange):
if self.__positionChange != None:
self.__positionChange(EncoderPositionChangeEventArgs(self, index, time, positionChange))
return 0
def setOnPositionChangeHandler(self, positionChangeHandler):
"""Sets the position change event handler.
The position change handler is a method that will be called when the position of an encoder changes.
The position change event provides data about how many ticks have occurred, and how much time has passed since the last position change event,
but does not contain an absolute position.
This can be obtained from getEncoderPosition.
Parameters:
positionChangeHandler: hook to the positionChangeHandler callback function.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException
"""
if positionChangeHandler == None:
self.__positionChange = None
self.__onPositionChange = None
else:
self.__positionChange = positionChangeHandler
self.__onPositionChange = self.__POSITIONCHANGEHANDLER(self.__nativePositionChangeEvent)
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_set_OnEncoderPositionChange_Handler(self.handle, self.__onPositionChange, None)
except RuntimeError:
self.__positionChange = None
self.__onPositionChange = None
raise
if result > 0:
raise PhidgetException(result)
def __nativePositionUpdateEvent(self, handle, usrptr, index, positionChange):
if self.__positionUpdate != None:
self.__positionUpdate(EncoderPositionUpdateEventArgs(self, index, positionChange))
return 0
def setOnPositionUpdateHandler(self, positionUpdateHandler):
"""Sets the position change event handler.
The position change handler is a method that will be called when the position of an encoder changes.
The position change event provides data about how many ticks have occured, and how much time has passed since the last position change event,
but does not contain an absolute position.
This can be obtained from getEncoderPosition.
Parameters:
positionChangeHandler: hook to the positionChangeHandler callback function.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException
"""
if positionUpdateHandler == None:
self.__positionUpdate = None
self.__onPositionUpdate = None
else:
self.__positionUpdate = positionUpdateHandler
self.__onPositionUpdate = self.__POSITIONUPDATEHANDLER(self.__nativePositionUpdateEvent)
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_set_OnEncoderPositionUpdate_Handler(self.handle, self.__onPositionUpdate, None)
except RuntimeError:
self.__positionUpdate = None
self.__onPositionUpdate = None
raise
if result > 0:
raise PhidgetException(result)
def getSensorCount(self):
"""Returns the number of analog inputs on the Motor Control.
Returns:
Number of analog inputs <int>.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached.
"""
sensorCount = c_int()
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_getSensorCount(self.handle, byref(sensorCount))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return sensorCount.value
def getSensorValue(self, index):
"""Returns the value of a analog input.
The analog inputs are where analog sensors are attached on the Motor Control.
The valid range is 0-1000. In the case of a sensor, this value can be converted to an actual sensor
value using the formulas provided in the sensor product manual.
Parameters:
index<int>: Index of the sensor.
Returns:
The Sensor value <int>
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached, or if the index is out of range.
"""
sensorValue = c_int()
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_getSensorValue(self.handle, c_int(index), byref(sensorValue))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return sensorValue.value
def getSensorRawValue(self, index):
"""Returns the raw value of a analog input.
This is a more accurate version of getSensorValue. The valid range is 0-4095.
Note however that the analog outputs on the Motor Control are only 10-bit values and this value represents an oversampling to 12-bit.
Parameters:
index<int>: Index of the sensor.
Returns:
The Raw Sensor value <int>
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached, or if the index is out of range.
"""
sensorValue = c_int()
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_getSensorRawValue(self.handle, c_int(index), byref(sensorValue))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return sensorValue.value
def __nativeSensorUpdateEvent(self, handle, usrptr, index, value):
if self.__sensorUpdate != None:
self.__sensorUpdate(SensorUpdateEventArgs(self, index, value))
return 0
def setOnSensorUpdateHandler(self, sensorUpdateHandler):
"""Set the SensorChange Event Handler.
The sensor change handler is a method that will be called when a sensor on
this Motor Controller has changed by at least the Trigger that has been set for this sensor.
Parameters:
sensorUpdateHandler: hook to the sensorUpdateHandler callback function.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException
"""
if sensorUpdateHandler == None:
self.__sensorUpdate = None
self.__onSensorUpdate = None
else:
self.__sensorUpdate = sensorUpdateHandler
self.__onSensorUpdate = self.__SENSORUPDATEHANDLER(self.__nativeSensorUpdateEvent)
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_set_OnSensorUpdate_Handler(self.handle, self.__onSensorUpdate, None)
except RuntimeError:
self.__sensorUpdate = None
self.__onSensorUpdate = None
raise
if result > 0:
raise PhidgetException(result)
def getRatiometric(self):
"""Gets the ratiometric state for the analog sensors
Returns:
State of the Ratiometric setting.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached, or if this phidget does not support ratiometric.
"""
ratiometricState = c_int()
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_getRatiometric(self.handle, byref(ratiometricState))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
if ratiometricState.value == 1:
return True
else:
return False
def setRatiometric(self, state):
"""Sets the ratiometric state for the analog inputs.
The default is for ratiometric to be set on and this is appropriate for most sensors.
False - off
True - on
Parameters:
state<boolean>: State of the ratiometric setting.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached, or if this Phidget does not support ratiometric.
"""
if state == True:
value = 1
else:
value = 0
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_setRatiometric(self.handle, c_int(value))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
def getBraking(self, index):
"""
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached, the supplied index is out of range, or if this motor controller does not support braking.
"""
braking = c_double()
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_getBraking(self.handle, c_int(index), byref(braking))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return braking.value
def setBraking(self, index, braking):
"""
Parameters:
braking<double>:
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached, the supplied index is out of range, or if this Motor Controller does not support braking.
"""
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_setBraking(self.handle, c_int(index), c_double(braking))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
def getSupplyVoltage(self):
"""
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached, or if this Phidget does not support this feature.
"""
supplyVoltage = c_double()
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_getSupplyVoltage(self.handle, byref(supplyVoltage))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return supplyVoltage.value
def getBackEMFSensingState(self, index):
"""
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached, the supplied index is out of range, or if this motor controller does not support back EMF sensing.
"""
state = c_int()
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_getBackEMFSensingState(self.handle, c_int(index), byref(state))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
if state.value == 1:
return True
else:
return False
def setBackEMFSensingState(self, index, state):
"""
Parameters:
state<boolean>:
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached, the supplied index is out of range, or if this Motor Controller does not support back EMF sensing.
"""
if state == True:
value = 1
else:
value = 0
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_setBackEMFSensingState(self.handle, c_int(index), c_int(value))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
def getBackEMF(self, index):
"""
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this Phidget is not opened and attached, or if this Phidget does not support this feature.
"""
voltage = c_double()
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_getBackEMF(self.handle, c_int(index), byref(voltage))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return voltage.value
def __nativeBackEMFUpdateEvent(self, handle, usrptr, index, voltage):
if self.__backEMFUpdate != None:
self.__backEMFUpdate(BackEMFEventArgs(self, index, voltage))
return 0
def setOnBackEMFUpdateHandler(self, backEMFUpdateHandler):
"""Set the BackEMF Update Event Handler.
Parameters:
backEMFUpdateHandler: hook to the backEMFUpdateHandler callback function.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException
"""
if backEMFUpdateHandler == None:
self.__backEMFUpdate = None
self.__onBackEMFUpdate = None
else:
self.__backEMFUpdate = backEMFUpdateHandler
self.__onBackEMFUpdate = self.__BACKEMFUPDATEHANDLER(self.__nativeBackEMFUpdateEvent)  # use the double-typed back EMF prototype, not the int-typed sensor one
try:
result = PhidgetLibrary.getDll().CPhidgetMotorControl_set_OnBackEMFUpdate_Handler(self.handle, self.__onBackEMFUpdate, None)
except RuntimeError:
self.__backEMFUpdate = None
self.__onBackEMFUpdate = None
raise
if result > 0:
raise PhidgetException(result)
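# Editor's usage sketch (not part of the original module): driving motor 0
# through the API above. openPhidget/waitForAttach/closePhidget come from the
# Phidget base class; physical hardware is required, and the timeout and
# velocity values are illustrative.
mc = MotorControl()
mc.openPhidget()
mc.waitForAttach(10000)  # milliseconds
mc.setAcceleration(0, mc.getAccelerationMax(0) / 2)
mc.setVelocity(0, 50.0)  # half duty cycle, forward
mc.closePhidget()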
|
danielsuo/mobot
|
src/move/PhidgetsPython/Phidgets/Devices/MotorControl.py
|
Python
|
mit
| 35,542
|
[
"VisIt"
] |
7ef7a122c43eaf49d9a8b955f6efb578d5d41cac46aafab5962cd68f61d6b99f
|
import numpy as np
from . import sql_query
# SDSS primtarget codes
TARGET_QSO_HIZ = int('0x00000001', 16)
TARGET_QSO_CAP = int('0x00000002', 16)
TARGET_QSO_SKIRT = int('0x00000004', 16)
TARGET_QSO_FIRST_CAP = int('0x00000008', 16)
TARGET_QSO_FIRST_SKIRT = int('0x00000010', 16)
TARGET_GALAXY_RED = int('0x00000020', 16)
TARGET_GALAXY = int('0x00000040', 16)
TARGET_GALAXY_BIG = int('0x00000080', 16)
TARGET_GALAXY_BRIGHT_CORE = int('0x00000100', 16)
TARGET_ROSAT_A = int('0x00000200', 16)
TARGET_ROSAT_B = int('0x00000400', 16)
TARGET_ROSAT_C = int('0x00000800', 16)
TARGET_ROSAT_D = int('0x00001000', 16)
TARGET_STAR_BHB = int('0x00002000', 16)
TARGET_STAR_CARBON = int('0x00004000', 16)
TARGET_STAR_BROWN_DWARF = int('0x00008000', 16)
TARGET_STAR_SUB_DWARF = int('0x00010000', 16)
TARGET_STAR_CATY_VAR = int('0x00020000', 16)
TARGET_STAR_RED_DWARF = int('0x00040000', 16)
TARGET_STAR_WHITE_DWARF = int('0x00080000', 16)
TARGET_SERENDIP_BLUE = int('0x00100000', 16)
TARGET_SERENDIP_FIRST = int('0x00200000', 16)
TARGET_SERENDIP_RED = int('0x00400000', 16)
TARGET_SERENDIP_DISTANT = int('0x00800000', 16)
TARGET_SERENDIP_MANUAL = int('0x01000000', 16)
TARGET_QSO_FAINT = int('0x02000000', 16)
TARGET_GALAXY_RED_II = int('0x04000000', 16)
TARGET_ROSAT_E = int('0x08000000', 16)
TARGET_STAR_PN = int('0x10000000', 16)
TARGET_QSO_REJECT = int('0x20000000', 16)
DEFAULT_TARGET = TARGET_GALAXY # main galaxy sample
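# Editor's note: primtarget codes are bit flags, and the query below tests
# them with a bitwise AND, so several samples can be selected at once, e.g.:
# primtarget = TARGET_GALAXY_RED | TARGET_GALAXY_RED_II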
def query_plate_mjd_fiber(n_spectra,
primtarget=DEFAULT_TARGET,
zmin=0, zmax=0.7):
"""Query the SDSS server for plate, mjd, and fiber numbers
Parameters
----------
n_spectra: int
number of spectra to query. Max is 100,000 (set by CAS server)
primtarget: int
prime target flag. See notes below
zmin, zmax: float
minimum and maximum redshift range for query
Returns
-------
plate, mjd, fiber : ndarrays, size=n_spectra
The plate numbers, MJDs, and fiber numbers of the spectra
Notes
-----
Primtarget flag values can be found at
http://cas.sdss.org/dr7/en/help/browser/enum.asp?n=PrimTarget
"""
query_text = '\n'.join(("SELECT TOP %(n_spectra)i ",
" plate, mjd, fiberid ",
"FROM specObj ",
"WHERE ((PrimTarget & %(primtarget)i) > 0) ",
" AND (z > %(zmin)f)",
" AND (z <= %(zmax)f) ")) % locals()
output = sql_query(query_text).readlines()
keys = output[0]
res = np.zeros((n_spectra, 3), dtype=int)
for i, line in enumerate(output[2:]):
try:
res[i] = line.decode().strip().split(',')
except BaseException:
raise ValueError(b'\n'.join(output))
ntot = i + 1
return res[:ntot].T
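# Editor's usage sketch (not part of the original module): fetch identifiers
# for five main-sample galaxies. This issues a live CAS query; the redshift
# window is illustrative.
plate, mjd, fiber = query_plate_mjd_fiber(5, primtarget=TARGET_GALAXY,
                                          zmin=0.0, zmax=0.3)
print(plate, mjd, fiber)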
|
astroML/astroML
|
astroML/datasets/tools/cas_query.py
|
Python
|
bsd-2-clause
| 3,040
|
[
"Galaxy"
] |
996a2288ad8765c36eacf7decc1aeae14e6f419852f5e22ae420a22e5358a896
|
#!/usr/bin/env python
""" update local cfg
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
Script.setUsageMessage("\n".join([__doc__.split("\n")[1], "Usage:", " %s [options] " % Script.scriptName]))
Script.parseCommandLine()
args = Script.getPositionalArgs()
csAPI = CSAPI()
# Setup the DFC
#
# DataManagement
# {
# Production
# {
# Services
# {
# FileCatalog
# {
# DirectoryManager = DirectoryClosure
# FileManager = FileManagerPS
# SecurityManager = FullSecurityManager
# }
# }
# Databases
# {
# FileCatalogDB
# {
# DBName = FileCatalogDB
# }
# }
# }
# }
for sct in [
"Systems/DataManagement",
"Systems/DataManagement/Production",
"Systems/DataManagement/Production/Databases",
"Systems/DataManagement/Production/Databases/FileCatalogDB",
"Systems/DataManagement/Production/Databases/MultiVOFileCatalogDB",
]:
res = csAPI.createSection(sct)
if not res["OK"]:
print(res["Message"])
exit(1)
dbHost = os.environ["DB_HOST"]
dbPort = os.environ["DB_PORT"]
csAPI.setOption("Systems/DataManagement/Production/Databases/FileCatalogDB/DBName", "FileCatalogDB")
csAPI.setOption("Systems/DataManagement/Production/Databases/FileCatalogDB/Host", dbHost)
csAPI.setOption("Systems/DataManagement/Production/Databases/FileCatalogDB/Port", dbPort)
csAPI.setOption("Systems/DataManagement/Production/Databases/MultiVOFileCatalogDB/DBName", "MultiVOFileCatalogDB")
csAPI.setOption("Systems/DataManagement/Production/Databases/MultiVOFileCatalogDB/Host", dbHost)
csAPI.setOption("Systems/DataManagement/Production/Databases/MultiVOFileCatalogDB/Port", dbPort)
# Setup other DBs (this is for LHCb - innocuous!)
#
# Bookkeeping
# {
# Production
# {
# Databases
# {
# BookkeepingDB
# {
# LHCbDIRACBookkeepingTNS =
# LHCbDIRACBookkeepingUser =
# LHCbDIRACBookkeepingPassword =
# LHCbDIRACBookkeepingServer =
# }
# }
# }
# }
for sct in [
"Systems/Bookkeeping",
"Systems/Bookkeeping/Production",
"Systems/Bookkeeping/Production/Databases",
"Systems/Bookkeeping/Production/Databases/BookkeepingDB",
]:
res = csAPI.createSection(sct)
if not res["OK"]:
print(res["Message"])
exit(1)
csAPI.setOption("Systems/Bookkeeping/Production/Databases/BookkeepingDB/LHCbDIRACBookkeepingTNS", "FILL_ME")
csAPI.setOption("Systems/Bookkeeping/Production/Databases/BookkeepingDB/LHCbDIRACBookkeepingUser", "FILL_ME")
csAPI.setOption("Systems/Bookkeeping/Production/Databases/BookkeepingDB/LHCbDIRACBookkeepingPassword", "FILL_ME")
csAPI.setOption("Systems/Bookkeeping/Production/Databases/BookkeepingDB/LHCbDIRACBookkeepingServer", "FILL_ME")
# Commit and verify the update was accepted
res = csAPI.commit()
if not res["OK"]:
    print(res["Message"])
    exit(1)
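# Added sketch (not part of the original script): once committed, the options
# can be read back through the configuration client, e.g.:
#
#   from DIRAC import gConfig
#   print(gConfig.getValue(
#       "Systems/DataManagement/Production/Databases/FileCatalogDB/Host"))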
|
ic-hep/DIRAC
|
tests/Jenkins/dirac-cfg-update-dbs.py
|
Python
|
gpl-3.0
| 2,992
|
[
"DIRAC"
] |
d98284082458ea32c2aef5de0a2a1d89c86e2e09e104cb9a8fa8aeb347d7491f
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""DirectRunner, executing on the local machine.
The DirectRunner is a runner implementation that executes the entire
graph of transformations belonging to a pipeline on the local machine.
"""
# pytype: skip-file
from __future__ import absolute_import
import itertools
import logging
import time
import typing
from google.protobuf import wrappers_pb2
import apache_beam as beam
from apache_beam import coders
from apache_beam import typehints
from apache_beam.internal.util import ArgumentPlaceholder
from apache_beam.options.pipeline_options import DirectOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.value_provider import RuntimeValueProvider
from apache_beam.pvalue import PCollection
from apache_beam.runners.direct.bundle_factory import BundleFactory
from apache_beam.runners.direct.clock import RealClock
from apache_beam.runners.direct.clock import TestClock
from apache_beam.runners.runner import PipelineResult
from apache_beam.runners.runner import PipelineRunner
from apache_beam.runners.runner import PipelineState
from apache_beam.transforms.core import CombinePerKey
from apache_beam.transforms.core import CombineValuesDoFn
from apache_beam.transforms.core import DoFn
from apache_beam.transforms.core import ParDo
from apache_beam.transforms.ptransform import PTransform
from apache_beam.typehints import trivial_inference
# Note that the BundleBasedDirectRunner and SwitchingDirectRunner names are
# experimental and have no backwards compatibility guarantees.
__all__ = ['BundleBasedDirectRunner', 'DirectRunner', 'SwitchingDirectRunner']
_LOGGER = logging.getLogger(__name__)
class SwitchingDirectRunner(PipelineRunner):
"""Executes a single pipeline on the local machine.
This implementation switches between using the FnApiRunner (which has
high throughput for batch jobs) and using the BundleBasedDirectRunner,
which supports streaming execution and certain primitives not yet
implemented in the FnApiRunner.
"""
def is_fnapi_compatible(self):
return BundleBasedDirectRunner.is_fnapi_compatible()
def run_pipeline(self, pipeline, options):
from apache_beam.pipeline import PipelineVisitor
from apache_beam.runners.dataflow.native_io.iobase import NativeSource
from apache_beam.runners.dataflow.native_io.iobase import _NativeWrite
from apache_beam.testing.test_stream import TestStream
class _FnApiRunnerSupportVisitor(PipelineVisitor):
"""Visitor determining if a Pipeline can be run on the FnApiRunner."""
def accept(self, pipeline):
self.supported_by_fnapi_runner = True
pipeline.visit(self)
return self.supported_by_fnapi_runner
def visit_transform(self, applied_ptransform):
transform = applied_ptransform.transform
# The FnApiRunner does not support streaming execution.
if isinstance(transform, TestStream):
self.supported_by_fnapi_runner = False
# The FnApiRunner does not support reads from NativeSources.
if (isinstance(transform, beam.io.Read) and
isinstance(transform.source, NativeSource)):
self.supported_by_fnapi_runner = False
# The FnApiRunner does not support the use of _NativeWrites.
if isinstance(transform, _NativeWrite):
self.supported_by_fnapi_runner = False
if isinstance(transform, beam.ParDo):
dofn = transform.dofn
# The FnApiRunner does not support execution of CombineFns with
# deferred side inputs.
if isinstance(dofn, CombineValuesDoFn):
args, kwargs = transform.raw_side_inputs
args_to_check = itertools.chain(args, kwargs.values())
if any(isinstance(arg, ArgumentPlaceholder)
for arg in args_to_check):
self.supported_by_fnapi_runner = False
# Check whether all transforms used in the pipeline are supported by the
# FnApiRunner, and the pipeline was not meant to be run as streaming.
if _FnApiRunnerSupportVisitor().accept(pipeline):
from apache_beam.runners.portability.fn_api_runner import FnApiRunner
runner = FnApiRunner()
else:
runner = BundleBasedDirectRunner()
return runner.run_pipeline(pipeline, options)
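# Added usage sketch (not part of the original module): how a user pipeline
# ends up on this runner; assumes a working apache_beam installation.
def _example_direct_pipeline():
  with beam.Pipeline(runner=SwitchingDirectRunner()) as p:
    _ = (p | beam.Create([1, 2, 3]) | beam.Map(lambda x: x * 2))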
# Type variables.
K = typing.TypeVar('K')
V = typing.TypeVar('V')
@typehints.with_input_types(typing.Tuple[K, V])
@typehints.with_output_types(typing.Tuple[K, typing.Iterable[V]])
class _GroupByKeyOnly(PTransform):
"""A group by key transform, ignoring windows."""
def infer_output_type(self, input_type):
key_type, value_type = trivial_inference.key_value_types(input_type)
return typehints.KV[key_type, typehints.Iterable[value_type]]
def expand(self, pcoll):
self._check_pcollection(pcoll)
return PCollection.from_(pcoll)
@typehints.with_input_types(typing.Tuple[K, typing.Iterable[V]])
@typehints.with_output_types(typing.Tuple[K, typing.Iterable[V]])
class _GroupAlsoByWindow(ParDo):
"""The GroupAlsoByWindow transform."""
def __init__(self, windowing):
super(_GroupAlsoByWindow, self).__init__(_GroupAlsoByWindowDoFn(windowing))
self.windowing = windowing
def expand(self, pcoll):
self._check_pcollection(pcoll)
return PCollection.from_(pcoll)
class _GroupAlsoByWindowDoFn(DoFn):
# TODO(robertwb): Support combiner lifting.
def __init__(self, windowing):
super(_GroupAlsoByWindowDoFn, self).__init__()
self.windowing = windowing
def infer_output_type(self, input_type):
key_type, windowed_value_iter_type = trivial_inference.key_value_types(
input_type)
value_type = windowed_value_iter_type.inner_type.inner_type
return typehints.Iterable[typehints.KV[key_type,
typehints.Iterable[value_type]]]
def start_bundle(self):
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.transforms.trigger import create_trigger_driver
# pylint: enable=wrong-import-order, wrong-import-position
self.driver = create_trigger_driver(self.windowing, True)
def process(self, element):
k, vs = element
return self.driver.process_entire_key(k, vs)
@typehints.with_input_types(typing.Tuple[K, V])
@typehints.with_output_types(typing.Tuple[K, typing.Iterable[V]])
class _StreamingGroupByKeyOnly(_GroupByKeyOnly):
"""Streaming GroupByKeyOnly placeholder for overriding in DirectRunner."""
urn = "direct_runner:streaming_gbko:v0.1"
# These are needed due to apply overloads.
def to_runner_api_parameter(self, unused_context):
return _StreamingGroupByKeyOnly.urn, None
@staticmethod
@PTransform.register_urn(urn, None)
def from_runner_api_parameter(
unused_ptransform, unused_payload, unused_context):
return _StreamingGroupByKeyOnly()
@typehints.with_input_types(typing.Tuple[K, typing.Iterable[V]])
@typehints.with_output_types(typing.Tuple[K, typing.Iterable[V]])
class _StreamingGroupAlsoByWindow(_GroupAlsoByWindow):
"""Streaming GroupAlsoByWindow placeholder for overriding in DirectRunner."""
urn = "direct_runner:streaming_gabw:v0.1"
# These are needed due to apply overloads.
def to_runner_api_parameter(self, context):
return (
_StreamingGroupAlsoByWindow.urn,
wrappers_pb2.BytesValue(
value=context.windowing_strategies.get_id(self.windowing)))
@staticmethod
@PTransform.register_urn(urn, wrappers_pb2.BytesValue)
def from_runner_api_parameter(unused_ptransform, payload, context):
return _StreamingGroupAlsoByWindow(
context.windowing_strategies.get_by_id(payload.value))
@typehints.with_input_types(typing.Tuple[K, typing.Iterable[V]])
@typehints.with_output_types(typing.Tuple[K, typing.Iterable[V]])
class _GroupByKey(PTransform):
"""The DirectRunner GroupByKey implementation."""
def expand(self, pcoll):
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.coders import typecoders
input_type = pcoll.element_type
if input_type is not None:
# Initialize type-hints used below to enforce type-checking and to
# pass downstream to further PTransforms.
key_type, value_type = trivial_inference.key_value_types(input_type)
# Enforce the input to a GBK has a KV element type.
pcoll.element_type = typehints.typehints.coerce_to_kv_type(
pcoll.element_type)
typecoders.registry.verify_deterministic(
typecoders.registry.get_coder(key_type),
'GroupByKey operation "%s"' % self.label)
reify_output_type = typehints.KV[
key_type, typehints.WindowedValue[value_type]] # type: ignore[misc]
gbk_input_type = (
typehints.KV[
key_type,
typehints.Iterable[typehints.WindowedValue[ # type: ignore[misc]
value_type]]])
gbk_output_type = typehints.KV[key_type, typehints.Iterable[value_type]]
# pylint: disable=bad-continuation
return (
pcoll
| 'ReifyWindows' >> (
ParDo(beam.GroupByKey.ReifyWindows()).with_output_types(
reify_output_type))
| 'GroupByKey' >> (
_GroupByKeyOnly().with_input_types(
reify_output_type).with_output_types(gbk_input_type))
| (
'GroupByWindow' >>
_GroupAlsoByWindow(pcoll.windowing).with_input_types(
gbk_input_type).with_output_types(gbk_output_type)))
else:
      # input_type is None: fall back to the default, untyped expansion.
return (
pcoll
| 'ReifyWindows' >> ParDo(beam.GroupByKey.ReifyWindows())
| 'GroupByKey' >> _GroupByKeyOnly()
| 'GroupByWindow' >> _GroupAlsoByWindow(pcoll.windowing))
def _get_transform_overrides(pipeline_options):
# A list of PTransformOverride objects to be applied before running a pipeline
# using DirectRunner.
# Currently this only works for overrides where the input and output types do
# not change.
# For internal use only; no backwards-compatibility guarantees.
# Importing following locally to avoid a circular dependency.
from apache_beam.pipeline import PTransformOverride
from apache_beam.runners.direct.helper_transforms import LiftedCombinePerKey
from apache_beam.runners.direct.sdf_direct_runner import ProcessKeyedElementsViaKeyedWorkItemsOverride
from apache_beam.runners.direct.sdf_direct_runner import SplittableParDoOverride
class CombinePerKeyOverride(PTransformOverride):
    def matches(self, applied_ptransform):
      if isinstance(applied_ptransform.transform, CombinePerKey):
        return applied_ptransform.inputs[0].windowing.is_default()
      return False
def get_replacement_transform(self, transform):
# TODO: Move imports to top. Pipeline <-> Runner dependency cause problems
# with resolving imports when they are at top.
# pylint: disable=wrong-import-position
try:
return LiftedCombinePerKey(
transform.fn, transform.args, transform.kwargs)
except NotImplementedError:
return transform
class StreamingGroupByKeyOverride(PTransformOverride):
def matches(self, applied_ptransform):
# Note: we match the exact class, since we replace it with a subclass.
return applied_ptransform.transform.__class__ == _GroupByKeyOnly
def get_replacement_transform(self, transform):
# Use specialized streaming implementation.
transform = _StreamingGroupByKeyOnly()
return transform
class StreamingGroupAlsoByWindowOverride(PTransformOverride):
def matches(self, applied_ptransform):
# Note: we match the exact class, since we replace it with a subclass.
transform = applied_ptransform.transform
return (
isinstance(applied_ptransform.transform, ParDo) and
isinstance(transform.dofn, _GroupAlsoByWindowDoFn) and
transform.__class__ != _StreamingGroupAlsoByWindow)
def get_replacement_transform(self, transform):
# Use specialized streaming implementation.
transform = _StreamingGroupAlsoByWindow(transform.dofn.windowing)
return transform
class TestStreamOverride(PTransformOverride):
def matches(self, applied_ptransform):
from apache_beam.testing.test_stream import TestStream
self.applied_ptransform = applied_ptransform
return isinstance(applied_ptransform.transform, TestStream)
def get_replacement_transform(self, transform):
from apache_beam.runners.direct.test_stream_impl import _ExpandableTestStream
return _ExpandableTestStream(transform)
class GroupByKeyPTransformOverride(PTransformOverride):
"""A ``PTransformOverride`` for ``GroupByKey``.
This replaces the Beam implementation as a primitive.
"""
def matches(self, applied_ptransform):
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.transforms.core import GroupByKey
return isinstance(applied_ptransform.transform, GroupByKey)
def get_replacement_transform(self, ptransform):
return _GroupByKey()
overrides = [
# This needs to be the first and the last override. Other overrides depend
# on the GroupByKey implementation to be composed of _GroupByKeyOnly and
# _GroupAlsoByWindow.
GroupByKeyPTransformOverride(),
SplittableParDoOverride(),
ProcessKeyedElementsViaKeyedWorkItemsOverride(),
CombinePerKeyOverride(),
TestStreamOverride(),
]
# Add streaming overrides, if necessary.
if pipeline_options.view_as(StandardOptions).streaming:
overrides.append(StreamingGroupByKeyOverride())
overrides.append(StreamingGroupAlsoByWindowOverride())
# Add PubSub overrides, if PubSub is available.
try:
from apache_beam.io.gcp import pubsub as unused_pubsub
overrides += _get_pubsub_transform_overrides(pipeline_options)
except ImportError:
pass
# This also needs to be last because other transforms apply GBKs which need to
# be translated into a DirectRunner-compatible transform.
overrides.append(GroupByKeyPTransformOverride())
return overrides
class _DirectReadFromPubSub(PTransform):
def __init__(self, source):
self._source = source
def _infer_output_coder(
self, unused_input_type=None, unused_input_coder=None):
# type: (...) -> typing.Optional[coders.Coder]
return coders.BytesCoder()
def get_windowing(self, inputs):
return beam.Windowing(beam.window.GlobalWindows())
def expand(self, pvalue):
# This is handled as a native transform.
return PCollection(self.pipeline, is_bounded=self._source.is_bounded())
class _DirectWriteToPubSubFn(DoFn):
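  # Added comment: buffer up to BUFFER_SIZE_ELEMENTS messages per bundle and
  # give each flushed batch BUFFER_SIZE_ELEMENTS * 0.5 = 50 seconds in total
  # to be acknowledged before _flush() raises.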
BUFFER_SIZE_ELEMENTS = 100
FLUSH_TIMEOUT_SECS = BUFFER_SIZE_ELEMENTS * 0.5
def __init__(self, sink):
self.project = sink.project
self.short_topic_name = sink.topic_name
self.id_label = sink.id_label
self.timestamp_attribute = sink.timestamp_attribute
self.with_attributes = sink.with_attributes
# TODO(BEAM-4275): Add support for id_label and timestamp_attribute.
if sink.id_label:
raise NotImplementedError(
'DirectRunner: id_label is not supported for '
'PubSub writes')
if sink.timestamp_attribute:
raise NotImplementedError(
'DirectRunner: timestamp_attribute is not '
'supported for PubSub writes')
def start_bundle(self):
self._buffer = []
def process(self, elem):
self._buffer.append(elem)
if len(self._buffer) >= self.BUFFER_SIZE_ELEMENTS:
self._flush()
def finish_bundle(self):
self._flush()
def _flush(self):
from google.cloud import pubsub
pub_client = pubsub.PublisherClient()
topic = pub_client.topic_path(self.project, self.short_topic_name)
if self.with_attributes:
futures = [
pub_client.publish(topic, elem.data, **elem.attributes)
for elem in self._buffer
]
else:
futures = [pub_client.publish(topic, elem) for elem in self._buffer]
timer_start = time.time()
for future in futures:
remaining = self.FLUSH_TIMEOUT_SECS - (time.time() - timer_start)
future.result(remaining)
self._buffer = []
def _get_pubsub_transform_overrides(pipeline_options):
from apache_beam.io.gcp import pubsub as beam_pubsub
from apache_beam.pipeline import PTransformOverride
class ReadFromPubSubOverride(PTransformOverride):
def matches(self, applied_ptransform):
return isinstance(
applied_ptransform.transform, beam_pubsub.ReadFromPubSub)
def get_replacement_transform(self, transform):
if not pipeline_options.view_as(StandardOptions).streaming:
raise Exception(
'PubSub I/O is only available in streaming mode '
'(use the --streaming flag).')
return _DirectReadFromPubSub(transform._source)
class WriteToPubSubOverride(PTransformOverride):
def matches(self, applied_ptransform):
return isinstance(
applied_ptransform.transform,
(beam_pubsub.WriteToPubSub, beam_pubsub._WriteStringsToPubSub))
def get_replacement_transform(self, transform):
if not pipeline_options.view_as(StandardOptions).streaming:
raise Exception(
'PubSub I/O is only available in streaming mode '
'(use the --streaming flag).')
return beam.ParDo(_DirectWriteToPubSubFn(transform._sink))
return [ReadFromPubSubOverride(), WriteToPubSubOverride()]
class BundleBasedDirectRunner(PipelineRunner):
"""Executes a single pipeline on the local machine."""
@staticmethod
def is_fnapi_compatible():
return False
def run_pipeline(self, pipeline, options):
"""Execute the entire pipeline and returns an DirectPipelineResult."""
# TODO: Move imports to top. Pipeline <-> Runner dependency cause problems
# with resolving imports when they are at top.
# pylint: disable=wrong-import-position
from apache_beam.pipeline import PipelineVisitor
from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import \
ConsumerTrackingPipelineVisitor
from apache_beam.runners.direct.evaluation_context import EvaluationContext
from apache_beam.runners.direct.executor import Executor
from apache_beam.runners.direct.transform_evaluator import \
TransformEvaluatorRegistry
from apache_beam.testing.test_stream import TestStream
# If the TestStream I/O is used, use a mock test clock.
class TestStreamUsageVisitor(PipelineVisitor):
"""Visitor determining whether a Pipeline uses a TestStream."""
def __init__(self):
self.uses_test_stream = False
def visit_transform(self, applied_ptransform):
if isinstance(applied_ptransform.transform, TestStream):
self.uses_test_stream = True
visitor = TestStreamUsageVisitor()
pipeline.visit(visitor)
clock = TestClock() if visitor.uses_test_stream else RealClock()
    # Perform the configured PTransform overrides.
pipeline.replace_all(_get_transform_overrides(options))
_LOGGER.info('Running pipeline with DirectRunner.')
self.consumer_tracking_visitor = ConsumerTrackingPipelineVisitor()
pipeline.visit(self.consumer_tracking_visitor)
evaluation_context = EvaluationContext(
options,
BundleFactory(
stacked=options.view_as(
DirectOptions).direct_runner_use_stacked_bundle),
self.consumer_tracking_visitor.root_transforms,
self.consumer_tracking_visitor.value_to_consumers,
self.consumer_tracking_visitor.step_names,
self.consumer_tracking_visitor.views,
clock)
executor = Executor(
self.consumer_tracking_visitor.value_to_consumers,
TransformEvaluatorRegistry(evaluation_context),
evaluation_context)
# DirectRunner does not support injecting
# PipelineOptions values at runtime
RuntimeValueProvider.set_runtime_options({})
# Start the executor. This is a non-blocking call, it will start the
# execution in background threads and return.
executor.start(self.consumer_tracking_visitor.root_transforms)
result = DirectPipelineResult(executor, evaluation_context)
return result
# Use the SwitchingDirectRunner as the default.
DirectRunner = SwitchingDirectRunner
class DirectPipelineResult(PipelineResult):
"""A DirectPipelineResult provides access to info about a pipeline."""
def __init__(self, executor, evaluation_context):
super(DirectPipelineResult, self).__init__(PipelineState.RUNNING)
self._executor = executor
self._evaluation_context = evaluation_context
def __del__(self):
if self._state == PipelineState.RUNNING:
_LOGGER.warning(
'The DirectPipelineResult is being garbage-collected while the '
'DirectRunner is still running the corresponding pipeline. This may '
'lead to incomplete execution of the pipeline if the main thread '
'exits before pipeline completion. Consider using '
'result.wait_until_finish() to wait for completion of pipeline '
'execution.')
def wait_until_finish(self, duration=None):
if not PipelineState.is_terminal(self.state):
if duration:
raise NotImplementedError(
'DirectRunner does not support duration argument.')
try:
self._executor.await_completion()
self._state = PipelineState.DONE
except: # pylint: disable=broad-except
self._state = PipelineState.FAILED
raise
return self._state
def aggregated_values(self, aggregator_or_name):
return self._evaluation_context.get_aggregator_values(aggregator_or_name)
def metrics(self):
return self._evaluation_context.metrics()
def cancel(self):
"""Shuts down pipeline workers.
For testing use only. Does not properly wait for pipeline workers to shut
down.
"""
self._state = PipelineState.CANCELLING
self._executor.shutdown()
self._state = PipelineState.CANCELLED
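# Added usage sketch (not part of the original module): typical handling of
# the result object produced by a local run.
def _example_wait_for_result():
  p = beam.Pipeline(runner=DirectRunner())
  _ = p | beam.Create(['a', 'b']) | beam.Map(str.upper)
  result = p.run()
  result.wait_until_finish()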
|
iemejia/incubator-beam
|
sdks/python/apache_beam/runners/direct/direct_runner.py
|
Python
|
apache-2.0
| 22,891
|
[
"VisIt"
] |
1d7c1ff2ee3b003591d4922774b6f044f1e5bec8bf448b89eb38ed6b33373a86
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-05-19 17:00:49
# @Last modified by: Brian Cherinka
# @Last Modified time: 2017-07-30 19:23:16
from __future__ import print_function, division, absolute_import
from marvin.tests.api.conftest import ApiPage
from marvin import config
import pytest
@pytest.mark.parametrize('page', [('api', 'getModelCube')], ids=['getModelCube'], indirect=True)
class TestGetModelCubes(object):
@pytest.mark.parametrize('reqtype', [('get'), ('post')])
def test_modelcube_success(self, galaxy, page, params, reqtype):
if galaxy.release == 'MPL-4':
pytest.skip('MPL-4 does not have modelcubes')
params.update({'name': galaxy.plateifu, 'bintype': galaxy.bintype.name,
'template': galaxy.template.name})
data = {'plateifu': galaxy.plateifu, 'mangaid': galaxy.mangaid,
'bintype': galaxy.bintype.name, 'template': galaxy.template.name,
'shape': galaxy.shape, 'redcorr': []}
page.load_page(reqtype, page.url.format(**params), params=params)
page.assert_success(data)
@pytest.mark.parametrize('name, missing, errmsg, bintype, template',
[(None, 'release', 'Missing data for required field.', None, None),
('badname', 'name', 'String does not match expected pattern.', None, None),
('84', 'name', 'Shorter than minimum length 4.', None, None),
('8485-1901', 'bintype', 'Not a valid choice.', 'SPVOR', 'GAU-MILESHC'),
('8485-1901', 'template', 'Not a valid choice.', 'SPX', 'MILESHC'),
('8485-1901', 'bintype', 'Not a valid choice.', 'STONY', 'GAU-MILESHC'),
('8485-1901', 'template', 'Not a valid choice.', 'SPX', 'MILES'),
('8485-1901', 'bintype', 'Field may not be null.', None, None),
('8485-1901', 'template', 'Field may not be null.', 'SPX', None)],
ids=['norelease', 'badname', 'shortname', 'badbintype', 'badtemplate',
'wrongmplbintype', 'wrongmpltemplate', 'nobintype', 'notemplate'])
def test_modelcube_failures(self, galaxy, page, params, name, missing, errmsg, bintype, template):
params.update({'name': name, 'bintype': bintype, 'template': template})
if name is None:
page.route_no_valid_params(page.url, missing, reqtype='post', errmsg=errmsg)
else:
url = page.url.format(**params)
url = url.replace('None/', '') if missing in ['bintype', 'template'] else url
page.route_no_valid_params(url, missing, reqtype='post', params=params, errmsg=errmsg)
|
albireox/marvin
|
python/marvin/tests/api/test_modelcube.py
|
Python
|
bsd-3-clause
| 2,900
|
[
"Brian",
"Galaxy"
] |
cd290efacd8223299c8af4e2380f0e61dffd320e2117f3a8a7de2821feb653ec
|
# -*- coding: utf-8 -*-
# @Author: YangZhou
# @Date: 2017-06-26 14:13:40
# @Last Modified by: YangZhou
# @Last Modified time: 2017-06-26 17:25:41
from aces.runners import Runner
from aces.runners.minimize import minimize as minimize_input
import aces.tools as tl
from aces.io.vasp import writevasp
import numpy as np
from ase import io
class runner(Runner):
def generate(self):
self.run()
def creatmini(self, dir):
cur = tl.pwd()
tl.mkdir(dir)
tl.cd(dir)
minimize_input(self.m)
tl.cd(cur)
def run_next(self, dir0, dir, lz):
m = self.m
atoms = io.read("POSCAR_" + dir0)
m.forceThick = True
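        # Added comment: impose the target c-axis length lz and rescale the
        # atomic positions with the cell (scale_atoms=True below).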
cell = atoms.get_cell()
cell[2][2] = lz
atoms.set_cell(cell, scale_atoms=True)
m.atoms = atoms
self.creatmini(dir)
atoms = m.atoms_from_dump('%s/range' % dir)
writevasp(atoms, "POSCAR_" + dir)
def run(self):
m = self.m
dir0 = 'minimize'
atoms = m.dump2POSCAR('%s/range' % dir0)
tl.cp("POSCAR", "POSCAR_minimize")
cell = atoms.get_cell()
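        # Added comment: scan the c-axis length from 100% down to ~51% of the
        # relaxed value in 1% steps, re-minimizing each strained cell in turn.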
r = np.arange(1.0, 0.5, -0.01) * cell[2][2]
for i, lz in enumerate(r):
dir = 'minimize%d' % i
self.run_next(dir0, dir, lz)
dir0 = dir
|
vanceeasleaf/aces
|
aces/runners/z_strain.py
|
Python
|
gpl-2.0
| 1,308
|
[
"ASE",
"VASP"
] |
15e4aa443ca15de46526b6b6868c3eaadf3162155af0b6a30852dc6bb8f0e7d7
|
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/24')
from data_24 import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
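    # NOTE (added): this subtraction relies on X being an np.matrix, whose
    # mean(axis=1) keeps a (num_data, 1) column shape; with a plain ndarray
    # the (num_data,) mean would not broadcast along rows.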
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
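# Added usage sketch (not part of the original script): calling pca() on a
# small random data matrix; names and sizes are illustrative.
#
#   X_demo = np.matrix(np.random.rand(10, 25))
#   vecs, vals, mu, B_demo, C_demo = pca(X_demo)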
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
    # Reduced Eigen-Vector Matrix according to highest Eigenvalues (keeping the first 4 components here)
W = eigvec_total[:,0:4]
m_W, n_W = np.shape(W)
print 'Reduced Dimension Eigenvector Shape:',m_W, n_W
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
print 'Z-Score Shape:', m_Z, n_Z
#Projected Data:
    Y = (W.T)*B # project the centered data B; use Z instead of B for variance-normalized projections
m_Y, n_Y = np.shape(Y.T)
print 'Transposed Projected Data Shape:', m_Y, n_Y
#Using PYMVPA
PCA_data = np.array(Y.T)
PCA_label_2 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=1)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
print ds1.samples.shape
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
print error
print cvterr.confusion.asstring(description=False)
figure(1)
cvterr.confusion.plot(numbers='True',numbers_alpha=2)
#show()
# Variances
figure(2)
title('Variances of PCs')
stem(range(len(perc_total)),perc_total,'--b')
axis([-0.3,130.3,0,1.2])
    grid(True)
show()
|
tapomayukh/projects_in_python
|
classification/Classification_with_kNN/Single_Contact_Classification/Final/results/objects/24/test10_cross_validate_objects_24_1200ms.py
|
Python
|
mit
| 4,249
|
[
"Mayavi"
] |
b96dfda4e92a77ee3b754ce2f53cbd4416c33a6fd18f078dfdf76a4699dfd5ea
|
"""
Support the ISY-994 controllers.
For configuration details please visit the documentation for this component at
https://home-assistant.io/components/isy994/
"""
import logging
from urllib.parse import urlparse
import voluptuous as vol
from homeassistant.core import HomeAssistant # noqa
from homeassistant.const import (
CONF_HOST, CONF_PASSWORD, CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers import discovery, config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType, Dict # noqa
DOMAIN = "isy994"
REQUIREMENTS = ['PyISY==1.0.7']
ISY = None
DEFAULT_SENSOR_STRING = 'sensor'
DEFAULT_HIDDEN_STRING = '{HIDE ME}'
CONF_TLS_VER = 'tls'
CONF_HIDDEN_STRING = 'hidden_string'
CONF_SENSOR_STRING = 'sensor_string'
KEY_MY_PROGRAMS = 'My Programs'
KEY_FOLDER = 'folder'
KEY_ACTIONS = 'actions'
KEY_STATUS = 'status'
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_HOST): cv.url,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_TLS_VER): vol.Coerce(float),
vol.Optional(CONF_HIDDEN_STRING,
default=DEFAULT_HIDDEN_STRING): cv.string,
vol.Optional(CONF_SENSOR_STRING,
default=DEFAULT_SENSOR_STRING): cv.string
})
}, extra=vol.ALLOW_EXTRA)
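# Added sketch (not part of the original module): example configuration.yaml
# entry accepted by CONFIG_SCHEMA above; host and credentials are
# placeholders.
#
#   isy994:
#     host: https://isy.example.com:443
#     username: admin
#     password: PASSWORD
#     tls: 1.2
#     sensor_string: sensor
#     hidden_string: "{HIDE ME}"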
SENSOR_NODES = []
NODES = []
GROUPS = []
PROGRAMS = {}
PYISY = None
HIDDEN_STRING = DEFAULT_HIDDEN_STRING
SUPPORTED_DOMAINS = ['binary_sensor', 'cover', 'fan', 'light', 'lock',
'sensor', 'switch']
def filter_nodes(nodes: list, units: list=None, states: list=None) -> list:
"""Filter a list of ISY nodes based on the units and states provided."""
filtered_nodes = []
units = units if units else []
states = states if states else []
for node in nodes:
match_unit = False
match_state = True
for uom in node.uom:
if uom in units:
match_unit = True
continue
elif uom not in states:
match_state = False
if match_unit:
continue
if match_unit or match_state:
filtered_nodes.append(node)
return filtered_nodes
def _categorize_nodes(hidden_identifier: str, sensor_identifier: str) -> None:
"""Categorize the ISY994 nodes."""
global SENSOR_NODES
global NODES
global GROUPS
SENSOR_NODES = []
NODES = []
GROUPS = []
for (path, node) in ISY.nodes:
hidden = hidden_identifier in path or hidden_identifier in node.name
if hidden:
node.name += hidden_identifier
if sensor_identifier in path or sensor_identifier in node.name:
SENSOR_NODES.append(node)
elif isinstance(node, PYISY.Nodes.Node): # pylint: disable=no-member
NODES.append(node)
elif isinstance(node, PYISY.Nodes.Group): # pylint: disable=no-member
GROUPS.append(node)
def _categorize_programs() -> None:
"""Categorize the ISY994 programs."""
global PROGRAMS
PROGRAMS = {}
for component in SUPPORTED_DOMAINS:
try:
folder = ISY.programs[KEY_MY_PROGRAMS]['HA.' + component]
except KeyError:
pass
else:
for dtype, _, node_id in folder.children:
if dtype is KEY_FOLDER:
program = folder[node_id]
try:
node = program[KEY_STATUS].leaf
assert node.dtype == 'program', 'Not a program'
except (KeyError, AssertionError):
pass
else:
if component not in PROGRAMS:
PROGRAMS[component] = []
PROGRAMS[component].append(program)
# pylint: disable=too-many-locals
def setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the ISY 994 platform."""
isy_config = config.get(DOMAIN)
user = isy_config.get(CONF_USERNAME)
password = isy_config.get(CONF_PASSWORD)
tls_version = isy_config.get(CONF_TLS_VER)
host = urlparse(isy_config.get(CONF_HOST))
port = host.port
addr = host.geturl()
hidden_identifier = isy_config.get(CONF_HIDDEN_STRING,
DEFAULT_HIDDEN_STRING)
sensor_identifier = isy_config.get(CONF_SENSOR_STRING,
DEFAULT_SENSOR_STRING)
global HIDDEN_STRING
HIDDEN_STRING = hidden_identifier
if host.scheme == 'http':
addr = addr.replace('http://', '')
https = False
elif host.scheme == 'https':
addr = addr.replace('https://', '')
https = True
else:
_LOGGER.error('isy994 host value in configuration is invalid.')
return False
addr = addr.replace(':{}'.format(port), '')
import PyISY
global PYISY
PYISY = PyISY
# Connect to ISY controller.
global ISY
ISY = PyISY.ISY(addr, port, username=user, password=password,
use_https=https, tls_ver=tls_version, log=_LOGGER)
if not ISY.connected:
return False
_categorize_nodes(hidden_identifier, sensor_identifier)
_categorize_programs()
# Listen for HA stop to disconnect.
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop)
# Load platforms for the devices in the ISY controller that we support.
for component in SUPPORTED_DOMAINS:
discovery.load_platform(hass, component, DOMAIN, {}, config)
ISY.auto_update = True
return True
# pylint: disable=unused-argument
def stop(event: object) -> None:
"""Stop ISY auto updates."""
ISY.auto_update = False
class ISYDevice(Entity):
"""Representation of an ISY994 device."""
_attrs = {}
_domain = None # type: str
_name = None # type: str
def __init__(self, node) -> None:
"""Initialize the insteon device."""
self._node = node
self._change_handler = self._node.status.subscribe('changed',
self.on_update)
def __del__(self) -> None:
"""Cleanup the subscriptions."""
self._change_handler.unsubscribe()
# pylint: disable=unused-argument
def on_update(self, event: object) -> None:
"""Handle the update event from the ISY994 Node."""
self.update_ha_state()
@property
def domain(self) -> str:
"""Get the domain of the device."""
return self._domain
@property
def unique_id(self) -> str:
"""Get the unique identifier of the device."""
# pylint: disable=protected-access
return self._node._id
@property
def raw_name(self) -> str:
"""Get the raw name of the device."""
return str(self._name) \
if self._name is not None else str(self._node.name)
@property
def name(self) -> str:
"""Get the name of the device."""
return self.raw_name.replace(HIDDEN_STRING, '').strip() \
.replace('_', ' ')
@property
def should_poll(self) -> bool:
"""No polling required since we're using the subscription."""
return False
@property
def value(self) -> object:
"""Get the current value of the device."""
# pylint: disable=protected-access
return self._node.status._val
@property
def state_attributes(self) -> Dict:
"""Get the state attributes for the device."""
attr = {}
if hasattr(self._node, 'aux_properties'):
for name, val in self._node.aux_properties.items():
attr[name] = '{} {}'.format(val.get('value'), val.get('uom'))
return attr
@property
def hidden(self) -> bool:
"""Get whether the device should be hidden from the UI."""
return HIDDEN_STRING in self.raw_name
@property
def unit_of_measurement(self) -> str:
"""Get the device unit of measure."""
return None
def _attr_filter(self, attr: str) -> str:
"""Filter the attribute."""
# pylint: disable=no-self-use
return attr
def update(self) -> None:
"""Perform an update for the device."""
pass
|
Smart-Torvy/torvy-home-assistant
|
homeassistant/components/isy994.py
|
Python
|
mit
| 8,368
|
[
"VisIt"
] |
379ca9adc5775895421afb0702e7bc377262f96f85cfb6ceee708afe692aa9b6
|
##############################################################################
##
## nymp - a graphical xmms2 cli frontend
## Copyright 2010 Thammi
##
## nymp is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## nymp is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with nymp. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
import urwid
import logging
import xmmsclient.collections as coll
from nymp.events import EventEmitter
from nymp.gui.loop import update
from nymp.gui.widgets import SelectableText, ScrollableList
from nymp.gui.buffer import put_buffer
def _node_cmp_key(item):
"""Calculate the key used to sort nodes, ignores case"""
key = item.data[0]
if isinstance(key, basestring):
return key.lower()
else:
return key
def _create_collection(data, attributes, base):
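    # Added comment: intersect `base` with an Equals match for each
    # (value, attribute) pair; a value of None instead requires the
    # property to be absent.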
# create the new collection
new_coll = base
for value, attr in zip(data, attributes):
        if value is None:
# special case if the property doesn't exist
new_coll = ~coll.Has(coll.Universe(), field=attr) & new_coll
else:
# SRSLY? why do I have to care about encoding here????
if isinstance(value, basestring):
value = value.encode("utf-8")
else:
value = unicode(value)
new_coll = coll.Equals(new_coll, field=attr, value=value)
return new_coll
class CollectionTree(EventEmitter):
MODIFY_EVENT = "tree_modified"
def __init__(self, xc, steps, data=None, parent=None):
EventEmitter.__init__(self)
self.xc = xc
self.steps = steps
self.data = data
self.parent = parent
self.register(self.MODIFY_EVENT)
if parent:
self.collection = _create_collection(data, parent.steps[0]['sort'],
parent.collection)
else:
self.collection = coll.Universe()
self.requested = False
self.childs = None
self.expanded = False
self.is_leaf = len(self.steps) == 0
def add_to_playlist(self):
# flatten the steps
order = sum((step['sort'] for step in self.steps), [])
self.xc.playlist.add_collection(self.collection, order)
logging.info("Added a collection to the playlist")
def save_to_buffer(self):
order = sum((step['sort'] for step in self.steps), [])
put_buffer((self.collection, order))
logging.info("Yanked collection to buffer")
def toggle_exp(self):
if self.expanded:
self.fold()
else:
self.expand()
def expand(self):
if not self.requested:
self.request()
self.expanded = True
self._modified()
def fold(self):
self.expanded = False
self._modified()
def request(self, cb=None):
if not self.is_leaf:
def acc_cb(value):
self._coll_cb(value)
if cb:
cb()
# TODO: build a wrapper
xmms = self.xc.xmms
xmms.coll_query_infos(self.collection, self.steps[0]['sort'], cb=acc_cb)
self.requested = True
def _build_child(self, item):
"""Turn a collection item into a node"""
steps = self.steps
# turn the dictionary into a list
data = [item[attr] for attr in steps[0]['sort']]
# actually create the node
return CollectionTree(self.xc, steps[1:], data, self)
def _coll_cb(self, value):
raw = value.value()
# turn it into a sorted list
self.childs = sorted((self._build_child(item) for item in raw),
key=_node_cmp_key)
self._modified()
def _modified(self, tree=None):
if self.parent:
self.parent._modified()
        if tree is None:
tree = self
self.emit(self.MODIFY_EVENT, tree)
def _format_child(self, data):
cur_step = self.steps[0]
if 'format' in cur_step:
return cur_step['format'].format(*data)
else:
return ' - '.join(unicode(item) for item in data)
def format(self):
# parents know best ...
return self.parent._format_child(self.data)
class CollTreeWalker(urwid.ListWalker):
def __init__(self, tree):
self.tree = tree
self._focus = [0]
#tree.listen(tree.MODIFY_EVENT, lambda t: self._modified())
tree.listen(tree.MODIFY_EVENT, lambda t: self.update())
def _find_node(self, pos, cur=None):
        if cur is None:
cur = self.tree
        if cur.childs is None or len(cur.childs) <= pos[0]:
return None
else:
found = cur.childs[pos[0]]
if len(pos) == 1:
return found
else:
return self._find_node(pos[1:], found)
def _build_widget(self, node, pos):
spacer = " " * ((len(pos) - 1) * 2)
if node.is_leaf:
icon = ""
        elif node.expanded and node.childs is None:
icon = "~ "
elif node.expanded:
icon = "- "
else:
icon = "+ "
text = ''.join((spacer, icon, node.format()))
# TODO: caching
return urwid.AttrMap(SelectableText(text, wrap='clip'), 'normal', 'focus')
def focus_node(self):
focus = self._focus
return self._find_node(focus)
def focus_pos(self):
return self._focus
def get_focus(self):
# TODO: remove when new root is finished
        if self.tree.childs is None:
return (urwid.Text("Loading ..."), None)
focus = self._focus
node = self._find_node(focus)
if node:
widget = self._build_widget(node, focus)
return (widget, focus)
else:
return (None, None)
def get_next(self, pos, force_forward=False):
# TODO: remove when new root is finished
        if pos is None:
return (None, None)
# get current node
cur = self._find_node(pos)
# where to go?
if cur.expanded and cur.childs and not force_forward:
pos = list(pos) + [0]
else:
pos = list(pos)
pos[-1] += 1
node = self._find_node(pos)
if node:
widget = self._build_widget(node, pos)
return (widget, pos)
else:
if len(pos) > 1:
return self.get_next(pos[:-1], True)
else:
return (None, None)
def get_prev(self, pos):
# TODO: remove when new root is finished
        if pos is None:
return (None, None)
pos = list(pos)
pos[-1] -= 1
if pos[-1] < 0:
if len(pos) > 1:
pos = pos[:-1]
node = self._find_node(pos)
widget = self._build_widget(node, pos)
return (widget, pos)
else:
return (None, None)
else:
while True:
node = self._find_node(pos)
if not node.expanded or not node.childs:
widget = self._build_widget(node, pos)
return (widget, pos)
else:
pos += [len(node.childs)-1]
def set_focus(self, pos):
self._focus = pos
self._modified()
def update(self):
self._modified()
update()
class BrowserWidget(ScrollableList):
def __init__(self, xc):
self.xc = xc
steps = [
# 1: artist
{
'sort': ['artist'],
},
# 2: album
{
'sort': ['date', 'album'],
'format': u'{0:>4} - {1}',
},
# 3: title
{
'sort': ['partofset', 'tracknr', 'title', 'id'],
'format': u'{1:>2}. {2}',
},
]
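        # Added comment: each step is one level of the browser tree
        # (artist -> album -> title); 'sort' names the xmms2 properties used
        # to group and order that level, 'format' renders its node labels.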
self.coll_tree = coll_tree = CollectionTree(xc, steps)
self.walker = walker = CollTreeWalker(coll_tree)
urwid.ListBox.__init__(self, walker)
if xc.connected:
self._connect()
xc.listen(xc.CONNECT_EVENT, self._connect)
def _connect(self):
# TODO: doesn't work with reconnects
self.coll_tree.request()
def widget_id(self):
return "browser"
def mouse_event(self, size, event, button, col, row, focus):
        if event == 'mouse press' and (button == 3 or button == 11):
actions = {
# left button
3: lambda node: node.toggle_exp(),
# double click (right)
11: lambda node: node.add_to_playlist(),
}
# select item under the mouse
offset, inset = self.get_focus_offset_inset(size)
self.move_focus(size, row - offset)
node = self.walker.focus_node()
if node:
actions[button](node)
else:
ScrollableList.mouse_event(self, size, event, button, col, row, focus)
def command(self, size, command, args):
def deep_fold():
walker = self.walker
node = walker.focus_node()
if node.expanded:
# we should fold the current node
node.fold()
else:
# let's visit the parent
pos = walker.focus_pos()
if len(pos) > 1:
walker.set_focus(pos[:-1])
else:
# TODO: tell the user?
pass
commands = {
'activate': self.walker.focus_node().toggle_exp,
'expand': self.walker.focus_node().expand,
'fold': deep_fold,
'add': self.walker.focus_node().add_to_playlist,
'yank': self.walker.focus_node().save_to_buffer,
}
if command in commands:
if self.walker.focus_node():
commands[command]()
return True
else:
return ScrollableList.command(self, size, command, args)
|
thammi/nymp
|
src/nymp/gui/browser.py
|
Python
|
gpl-3.0
| 10,793
|
[
"VisIt"
] |
bce7294c8ac4fea2561b89b32c578b4a6690da194aacf83e447deba908073ae8
|
import logging
from cStringIO import StringIO
from math import exp
from lxml import etree
from path import path # NOTE (THK): Only used for detecting presence of syllabus
import requests
from datetime import datetime
import dateutil.parser
from xmodule.modulestore import Location
from xmodule.seq_module import SequenceDescriptor, SequenceModule
from xmodule.util.decorators import lazyproperty
from xmodule.graders import grader_from_conf
import json
from xblock.core import Scope, List, String, Dict, Boolean
from .fields import Date
from xmodule.modulestore.locator import CourseLocator
from django.utils.timezone import UTC
from xmodule.util import date_utils
log = logging.getLogger(__name__)
class StringOrDate(Date):
def from_json(self, value):
"""
        Parse an optional metadata key that may hold a date or an arbitrary
        string: if the value does not parse as a date, return it unchanged.
"""
try:
result = super(StringOrDate, self).from_json(value)
except ValueError:
return value
if result is None:
return value
else:
return result
def to_json(self, value):
"""
Convert a time struct or string to a string.
"""
try:
result = super(StringOrDate, self).to_json(value)
        except Exception:
return value
if result is None:
return value
else:
return result
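# Added note (not part of the original module): StringOrDate lets a field
# hold either a parseable date ("2013-01-01T00:00") or a free-form display
# string ("TBD"), falling back to the raw string when parsing fails.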
edx_xml_parser = etree.XMLParser(dtd_validation=False, load_dtd=False,
remove_comments=True, remove_blank_text=True)
_cached_toc = {}
class Textbook(object):
def __init__(self, title, book_url):
self.title = title
self.book_url = book_url
self.start_page = int(self.table_of_contents[0].attrib['page'])
# The last page should be the last element in the table of contents,
# but it may be nested. So recurse all the way down the last element
last_el = self.table_of_contents[-1]
while last_el.getchildren():
last_el = last_el[-1]
self.end_page = int(last_el.attrib['page'])
@lazyproperty
def table_of_contents(self):
"""
Accesses the textbook's table of contents (default name "toc.xml") at the URL self.book_url
Returns XML tree representation of the table of contents
"""
toc_url = self.book_url + 'toc.xml'
# cdodge: I've added this caching of TOC because in Mongo-backed instances (but not Filesystem stores)
# course modules have a very short lifespan and are constantly being created and torn down.
# Since this module in the __init__() method does a synchronous call to AWS to get the TOC
# this is causing a big performance problem. So let's be a bit smarter about this and cache
# each fetch and store in-mem for 10 minutes.
# NOTE: I have to get this onto sandbox ASAP as we're having runtime failures. I'd like to swing back and
# rewrite to use the traditional Django in-memory cache.
try:
# see if we already fetched this
if toc_url in _cached_toc:
(table_of_contents, timestamp) = _cached_toc[toc_url]
age = datetime.now(UTC) - timestamp
# expire every 10 minutes
if age.seconds < 600:
return table_of_contents
except Exception as err:
pass
# Get the table of contents from S3
log.info("Retrieving textbook table of contents from %s" % toc_url)
try:
r = requests.get(toc_url)
except Exception as err:
msg = 'Error %s: Unable to retrieve textbook table of contents at %s' % (err, toc_url)
log.error(msg)
raise Exception(msg)
# TOC is XML. Parse it
try:
table_of_contents = etree.fromstring(r.text)
except Exception as err:
msg = 'Error %s: Unable to parse XML for textbook table of contents at %s' % (err, toc_url)
log.error(msg)
raise Exception(msg)
return table_of_contents
class TextbookList(List):
def from_json(self, values):
textbooks = []
for title, book_url in values:
try:
textbooks.append(Textbook(title, book_url))
            except Exception:
# If we can't get to S3 (e.g. on a train with no internet), don't break
# the rest of the courseware.
log.exception("Couldn't load textbook ({0}, {1})".format(title, book_url))
continue
return textbooks
def to_json(self, values):
json_data = []
for val in values:
if isinstance(val, Textbook):
json_data.append((val.title, val.book_url))
elif isinstance(val, tuple):
json_data.append(val)
else:
continue
return json_data
class CourseFields(object):
textbooks = TextbookList(help="List of pairs of (title, url) for textbooks used in this course",
default=[], scope=Scope.content)
wiki_slug = String(help="Slug that points to the wiki for this course", scope=Scope.content)
enrollment_start = Date(help="Date that enrollment for this class is opened", scope=Scope.settings)
enrollment_end = Date(help="Date that enrollment for this class is closed", scope=Scope.settings)
start = Date(help="Start time when this module is visible",
# using now(UTC()) resulted in fractional seconds which screwed up comparisons and anyway w/b the
# time of first invocation of this stmt on the server
default=datetime.fromtimestamp(0, UTC()),
scope=Scope.settings)
end = Date(help="Date that this class ends", scope=Scope.settings)
advertised_start = String(help="Date that this course is advertised to start", scope=Scope.settings)
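    # Added comment: the default assignment weights below sum to 1.0
    # (0.15 + 0.15 + 0.3 + 0.4) and a single "Pass" cutoff of 50% is applied.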
grading_policy = Dict(help="Grading policy definition for this class",
default={"GRADER": [
{
"type": "Homework",
"min_count": 12,
"drop_count": 2,
"short_label": "HW",
"weight": 0.15
},
{
"type": "Lab",
"min_count": 12,
"drop_count": 2,
"weight": 0.15
},
{
"type": "Midterm Exam",
"short_label": "Midterm",
"min_count": 1,
"drop_count": 0,
"weight": 0.3
},
{
"type": "Final Exam",
"short_label": "Final",
"min_count": 1,
"drop_count": 0,
"weight": 0.4
}
],
"GRADE_CUTOFFS": {
"Pass": 0.5
}},
scope=Scope.content)
show_calculator = Boolean(help="Whether to show the calculator in this course", default=False, scope=Scope.settings)
display_name = String(help="Display name for this module", default="Empty", display_name="Display Name", scope=Scope.settings)
show_chat = Boolean(help="Whether to show the chat widget in this course", default=False, scope=Scope.settings)
tabs = List(help="List of tabs to enable in this course", scope=Scope.settings)
end_of_course_survey_url = String(help="Url for the end-of-course survey", scope=Scope.settings)
discussion_blackouts = List(help="List of pairs of start/end dates for discussion blackouts", scope=Scope.settings)
discussion_topics = Dict(
help="Map of topics names to ids",
scope=Scope.settings
)
testcenter_info = Dict(help="Dictionary of Test Center info", scope=Scope.settings)
announcement = Date(help="Date this course is announced", scope=Scope.settings)
cohort_config = Dict(help="Dictionary defining cohort configuration", scope=Scope.settings)
is_new = Boolean(help="Whether this course should be flagged as new", scope=Scope.settings)
no_grade = Boolean(help="True if this course isn't graded", default=False, scope=Scope.settings)
disable_progress_graph = Boolean(help="True if this course shouldn't display the progress graph", default=False, scope=Scope.settings)
pdf_textbooks = List(help="List of dictionaries containing pdf_textbook configuration", scope=Scope.settings)
html_textbooks = List(help="List of dictionaries containing html_textbook configuration", scope=Scope.settings)
remote_gradebook = Dict(scope=Scope.settings)
allow_anonymous = Boolean(scope=Scope.settings, default=True)
allow_anonymous_to_peers = Boolean(scope=Scope.settings, default=False)
advanced_modules = List(help="Beta modules used in your course", scope=Scope.settings)
has_children = True
checklists = List(scope=Scope.settings,
default=[
{"short_description" : "Getting Started With Studio",
"items" : [{"short_description": "Add Course Team Members",
"long_description": "Grant your collaborators permission to edit your course so you can work together.",
"is_checked": False,
"action_url": "ManageUsers",
"action_text": "Edit Course Team",
"action_external": False},
{"short_description": "Set Important Dates for Your Course",
"long_description": "Establish your course's student enrollment and launch dates on the Schedule and Details page.",
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": "Edit Course Details & Schedule",
"action_external": False},
{"short_description": "Draft Your Course's Grading Policy",
"long_description": "Set up your assignment types and grading policy even if you haven't created all your assignments.",
"is_checked": False,
"action_url": "SettingsGrading",
"action_text": "Edit Grading Settings",
"action_external": False},
{"short_description": "Explore the Other Studio Checklists",
"long_description": "Discover other available course authoring tools, and find help when you need it.",
"is_checked": False,
"action_url": "",
"action_text": "",
"action_external": False}]
},
{"short_description" : "Draft a Rough Course Outline",
"items" : [{"short_description": "Create Your First Section and Subsection",
"long_description": "Use your course outline to build your first Section and Subsection.",
"is_checked": False,
"action_url": "CourseOutline",
"action_text": "Edit Course Outline",
"action_external": False},
{"short_description": "Set Section Release Dates",
"long_description": "Specify the release dates for each Section in your course. Sections become visible to students on their release dates.",
"is_checked": False,
"action_url": "CourseOutline",
"action_text": "Edit Course Outline",
"action_external": False},
{"short_description": "Designate a Subsection as Graded",
"long_description": "Set a Subsection to be graded as a specific assignment type. Assignments within graded Subsections count toward a student's final grade.",
"is_checked": False,
"action_url": "CourseOutline",
"action_text": "Edit Course Outline",
"action_external": False},
{"short_description": "Reordering Course Content",
"long_description": "Use drag and drop to reorder the content in your course.",
"is_checked": False,
"action_url": "CourseOutline",
"action_text": "Edit Course Outline",
"action_external": False},
{"short_description": "Renaming Sections",
"long_description": "Rename Sections by clicking the Section name from the Course Outline.",
"is_checked": False,
"action_url": "CourseOutline",
"action_text": "Edit Course Outline",
"action_external": False},
{"short_description": "Deleting Course Content",
"long_description": "Delete Sections, Subsections, or Units you don't need anymore. Be careful, as there is no Undo function.",
"is_checked": False,
"action_url": "CourseOutline",
"action_text": "Edit Course Outline",
"action_external": False},
{"short_description": "Add an Instructor-Only Section to Your Outline",
"long_description": "Some course authors find using a section for unsorted, in-progress work useful. To do this, create a section and set the release date to the distant future.",
"is_checked": False,
"action_url": "CourseOutline",
"action_text": "Edit Course Outline",
"action_external": False}]
},
{"short_description" : "Explore edX's Support Tools",
"items" : [{"short_description": "Explore the Studio Help Forum",
"long_description": "Access the Studio Help forum from the menu that appears when you click your user name in the top right corner of Studio.",
"is_checked": False,
"action_url": "http://help.edge.edx.org/",
"action_text": "Visit Studio Help",
"action_external": True},
{"short_description": "Enroll in edX 101",
"long_description": "Register for edX 101, edX's primer for course creation.",
"is_checked": False,
"action_url": "https://edge.edx.org/courses/edX/edX101/How_to_Create_an_edX_Course/about",
"action_text": "Register for edX 101",
"action_external": True},
{"short_description": "Download the Studio Documentation",
"long_description": "Download the searchable Studio reference documentation in PDF form.",
"is_checked": False,
"action_url": "http://files.edx.org/Getting_Started_with_Studio.pdf",
"action_text": "Download Documentation",
"action_external": True}]
},
{"short_description" : "Draft Your Course About Page",
"items" : [{"short_description": "Draft a Course Description",
"long_description": "Courses on edX have an About page that includes a course video, description, and more. Draft the text students will read before deciding to enroll in your course.",
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": "Edit Course Schedule & Details",
"action_external": False},
{"short_description": "Add Staff Bios",
"long_description": "Showing prospective students who their instructor will be is helpful. Include staff bios on the course About page.",
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": "Edit Course Schedule & Details",
"action_external": False},
{"short_description": "Add Course FAQs",
"long_description": "Include a short list of frequently asked questions about your course.",
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": "Edit Course Schedule & Details",
"action_external": False},
{"short_description": "Add Course Prerequisites",
"long_description": "Let students know what knowledge and/or skills they should have before they enroll in your course.",
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": "Edit Course Schedule & Details",
"action_external": False}]
}
])
info_sidebar_name = String(scope=Scope.settings, default='Course Handouts')
show_timezone = Boolean(help="True if timezones should be shown on dates in the courseware", scope=Scope.settings, default=True)
enrollment_domain = String(help="External login method associated with user accounts allowed to register in course",
scope=Scope.settings)
# An extra property is used rather than the wiki_slug/number because
# there are courses that change the number for different runs. This allows
# courses to share the same css_class across runs even if they have
# different numbers.
#
# TODO get rid of this as soon as possible or potentially build in a robust
# way to add in course-specific styling. There needs to be a discussion
# about the right way to do this, but arjun will address this ASAP. Also
# note that the courseware template needs to change when this is removed.
css_class = String(help="DO NOT USE THIS", scope=Scope.settings)
# TODO: This is a quick kludge to allow CS50 (and other courses) to
# specify their own discussion forums as external links by specifying a
# "discussion_link" in their policy JSON file. This should later get
# folded in with Syllabus, Course Info, and additional Custom tabs in a
# more sensible framework later.
discussion_link = String(help="DO NOT USE THIS", scope=Scope.settings)
# TODO: same as above, intended to let internal CS50 hide the progress tab
# until we get grade integration set up.
# Explicit comparison to True because we always want to return a bool.
hide_progress_tab = Boolean(help="DO NOT USE THIS", scope=Scope.settings)
class CourseDescriptor(CourseFields, SequenceDescriptor):
module_class = SequenceModule
def __init__(self, *args, **kwargs):
"""
Expects the same arguments as XModuleDescriptor.__init__
"""
super(CourseDescriptor, self).__init__(*args, **kwargs)
if self.wiki_slug is None:
if isinstance(self.location, Location):
self.wiki_slug = self.location.course
elif isinstance(self.location, CourseLocator):
self.wiki_slug = self.location.course_id or self.display_name
msg = None
# NOTE: relies on the modulestore to call set_grading_policy() right after
# init. (Modulestore is in charge of figuring out where to load the policy from)
# NOTE (THK): This is a last-minute addition for Fall 2012 launch to dynamically
# disable the syllabus content for courses that do not provide a syllabus
if self.system.resources_fs is None:
self.syllabus_present = False
else:
self.syllabus_present = self.system.resources_fs.exists(path('syllabus'))
self._grading_policy = {}
self.set_grading_policy(self.grading_policy)
if self.discussion_topics == {}:
self.discussion_topics = {'General': {'id': self.location.html_id()}}
self.test_center_exams = []
test_center_info = self.testcenter_info
if test_center_info is not None:
for exam_name in test_center_info:
try:
exam_info = test_center_info[exam_name]
self.test_center_exams.append(self.TestCenterExam(self.id, exam_name, exam_info))
except Exception as err:
# If we can't parse the test center exam info, don't break
# the rest of the courseware.
msg = 'Error %s: Unable to load test-center exam info for exam "%s" of course "%s"' % (err, exam_name, self.id)
log.error(msg)
continue
        # TODO check that this is still needed here and can't be handled by defaults.
if not self.tabs:
# When calling the various _tab methods, can omit the 'type':'blah' from the
# first arg, since that's only used for dispatch
tabs = []
tabs.append({'type': 'courseware'})
tabs.append({'type': 'course_info', 'name': 'Course Info'})
if self.syllabus_present:
tabs.append({'type': 'syllabus'})
tabs.append({'type': 'textbooks'})
# # If they have a discussion link specified, use that even if we feature
# # flag discussions off. Disabling that is mostly a server safety feature
# # at this point, and we don't need to worry about external sites.
if self.discussion_link:
tabs.append({'type': 'external_discussion', 'link': self.discussion_link})
else:
tabs.append({'type': 'discussion', 'name': 'Discussion'})
tabs.append({'type': 'wiki', 'name': 'Wiki'})
if not self.hide_progress_tab:
tabs.append({'type': 'progress', 'name': 'Progress'})
self.tabs = tabs
def set_grading_policy(self, course_policy):
"""
The JSON object can have the keys GRADER and GRADE_CUTOFFS. If either is
missing, it reverts to the default.
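        A minimal, illustrative policy (the field names follow the defaults
        in CourseFields.grading_policy; the values here are made up for the
        example):
            {
                "GRADER": [{"type": "Homework", "min_count": 12,
                            "drop_count": 2, "weight": 0.15}],
                "GRADE_CUTOFFS": {"Pass": 0.5}
            }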
"""
if course_policy is None:
course_policy = {}
# Load the global settings as a dictionary
grading_policy = self.grading_policy
# BOY DO I HATE THIS grading_policy CODE ACROBATICS YET HERE I ADD MORE (dhm)--this fixes things persisted w/
# defective grading policy values (but not None)
if 'GRADER' not in grading_policy:
grading_policy['GRADER'] = CourseFields.grading_policy.default['GRADER']
if 'GRADE_CUTOFFS' not in grading_policy:
grading_policy['GRADE_CUTOFFS'] = CourseFields.grading_policy.default['GRADE_CUTOFFS']
# Override any global settings with the course settings
grading_policy.update(course_policy)
# Here is where we should parse any configurations, so that we can fail early
# Use setters so that side effecting to .definitions works
self.raw_grader = grading_policy['GRADER'] # used for cms access
self.grade_cutoffs = grading_policy['GRADE_CUTOFFS']
@classmethod
def read_grading_policy(cls, paths, system):
"""Load a grading policy from the specified paths, in order, if it exists."""
# Default to a blank policy dict
policy_str = '{}'
for policy_path in paths:
if not system.resources_fs.exists(policy_path):
continue
log.debug("Loading grading policy from {0}".format(policy_path))
try:
with system.resources_fs.open(policy_path) as grading_policy_file:
policy_str = grading_policy_file.read()
# if we successfully read the file, stop looking at backups
break
            except IOError:
msg = "Unable to load course settings file from '{0}'".format(policy_path)
log.warning(msg)
return policy_str
@classmethod
def from_xml(cls, xml_data, system, org=None, course=None):
instance = super(CourseDescriptor, cls).from_xml(xml_data, system, org, course)
        # bleh, have to parse the XML here just to pull out the url_name
        # attribute; I don't think it's stored anywhere in the instance.
course_file = StringIO(xml_data.encode('ascii', 'ignore'))
xml_obj = etree.parse(course_file, parser=edx_xml_parser).getroot()
policy_dir = None
url_name = xml_obj.get('url_name', xml_obj.get('slug'))
if url_name:
policy_dir = 'policies/' + url_name
# Try to load grading policy
paths = ['grading_policy.json']
if policy_dir:
paths = [policy_dir + '/grading_policy.json'] + paths
try:
policy = json.loads(cls.read_grading_policy(paths, system))
except ValueError:
system.error_tracker("Unable to decode grading policy as json")
policy = {}
# now set the current instance. set_grading_policy() will apply some inheritance rules
instance.set_grading_policy(policy)
return instance
@classmethod
def definition_from_xml(cls, xml_object, system):
textbooks = []
for textbook in xml_object.findall("textbook"):
textbooks.append((textbook.get('title'), textbook.get('book_url')))
xml_object.remove(textbook)
# Load the wiki tag if it exists
wiki_slug = None
wiki_tag = xml_object.find("wiki")
if wiki_tag is not None:
wiki_slug = wiki_tag.attrib.get("slug", default=None)
xml_object.remove(wiki_tag)
definition, children = super(CourseDescriptor, cls).definition_from_xml(xml_object, system)
definition['textbooks'] = textbooks
definition['wiki_slug'] = wiki_slug
return definition, children
def definition_to_xml(self, resource_fs):
xml_object = super(CourseDescriptor, self).definition_to_xml(resource_fs)
if len(self.textbooks) > 0:
textbook_xml_object = etree.Element('textbook')
for textbook in self.textbooks:
textbook_xml_object.set('title', textbook.title)
textbook_xml_object.set('book_url', textbook.book_url)
xml_object.append(textbook_xml_object)
return xml_object
def has_ended(self):
"""
Returns True if the current time is after the specified course end date.
Returns False if there is no end date specified.
"""
if self.end is None:
return False
return datetime.now(UTC()) > self.end
def has_started(self):
return datetime.now(UTC()) > self.start
@property
def grader(self):
return grader_from_conf(self.raw_grader)
@property
def raw_grader(self):
return self._grading_policy['RAW_GRADER']
@raw_grader.setter
def raw_grader(self, value):
# NOTE WELL: this change will not update the processed graders. If we need that, this needs to call grader_from_conf
self._grading_policy['RAW_GRADER'] = value
self.grading_policy['GRADER'] = value
@property
def grade_cutoffs(self):
return self._grading_policy['GRADE_CUTOFFS']
@grade_cutoffs.setter
def grade_cutoffs(self, value):
self._grading_policy['GRADE_CUTOFFS'] = value
# XBlock fields don't update after mutation
policy = self.grading_policy
policy['GRADE_CUTOFFS'] = value
self.grading_policy = policy
@property
def lowest_passing_grade(self):
return min(self._grading_policy['GRADE_CUTOFFS'].values())
@property
def is_cohorted(self):
"""
Return whether the course is cohorted.
"""
config = self.cohort_config
if config is None:
return False
return bool(config.get("cohorted"))
@property
def auto_cohort(self):
"""
Return whether the course is auto-cohorted.
"""
if not self.is_cohorted:
return False
return bool(self.cohort_config.get(
"auto_cohort", False))
@property
def auto_cohort_groups(self):
"""
Return the list of groups to put students into. Returns [] if not
specified. Returns specified list even if is_cohorted and/or auto_cohort are
false.
"""
if self.cohort_config is None:
return []
else:
return self.cohort_config.get("auto_cohort_groups", [])
@property
def top_level_discussion_topic_ids(self):
"""
Return list of topic ids defined in course policy.
"""
topics = self.discussion_topics
return [d["id"] for d in topics.values()]
@property
def cohorted_discussions(self):
"""
        Return the set of discussions that are explicitly cohorted. It may be
the empty set. Note that all inline discussions are automatically
cohorted based on the course's is_cohorted setting.
"""
config = self.cohort_config
if config is None:
return set()
return set(config.get("cohorted_discussions", []))
@property
def is_newish(self):
"""
        Returns whether the course has been flagged as new. If
        there is no flag, returns a heuristic value based on the
        announcement and start dates.
"""
flag = self.is_new
if flag is None:
# Use a heuristic if the course has not been flagged
announcement, start, now = self._sorting_dates()
if announcement and (now - announcement).days < 30:
                # The course has been announced for less than a month
return True
elif (now - start).days < 1:
                # The course has not started yet (or started within the last day)
return True
else:
return False
elif isinstance(flag, basestring):
return flag.lower() in ['true', 'yes', 'y']
else:
return bool(flag)
@property
def sorting_score(self):
"""
        Returns a number that can be used to sort the courses by how
        "new" they are. The "newness" score is computed using a
        heuristic that takes into account the announcement and
        (advertised) start dates of the course, if available.
        The lower the number, the "newer" the course.
        """
        # Give courses that have an announcement date a lower score
        # than courses that don't; older courses should have a
        # higher score.
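        # Worked example (from the formulas below): a course announced 30
        # days ago scores -exp(-30/300) ~= -0.905, while an unannounced
        # course that started 30 days ago scores exp(30/300) ~= 1.105.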
announcement, start, now = self._sorting_dates()
scale = 300.0 # about a year
if announcement:
days = (now - announcement).days
score = -exp(-days / scale)
else:
days = (now - start).days
score = exp(days / scale)
return score
def _sorting_dates(self):
# utility function to get datetime objects for dates used to
# compute the is_new flag and the sorting_score
        announcement = self.announcement
try:
start = dateutil.parser.parse(self.advertised_start)
if start.tzinfo is None:
start = start.replace(tzinfo=UTC())
except (ValueError, AttributeError):
start = self.start
now = datetime.now(UTC())
return announcement, start, now
@lazyproperty
def grading_context(self):
"""
This returns a dictionary with keys necessary for quickly grading
a student. They are used by grades.grade()
The grading context has two keys:
graded_sections - This contains the sections that are graded, as
well as all possible children modules that can affect the
grading. This allows some sections to be skipped if the student
hasn't seen any part of it.
The format is a dictionary keyed by section-type. The values are
arrays of dictionaries containing
"section_descriptor" : The section descriptor
"xmoduledescriptors" : An array of xmoduledescriptors that
could possibly be in the section, for any student
all_descriptors - This contains a list of all xmodules that can
            affect a student's grade. This is used to efficiently fetch
all the xmodule state for a ModelDataCache without walking
the descriptor tree again.
"""
all_descriptors = []
graded_sections = {}
def yield_descriptor_descendents(module_descriptor):
for child in module_descriptor.get_children():
yield child
for module_descriptor in yield_descriptor_descendents(child):
yield module_descriptor
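        # (i.e. a depth-first, pre-order traversal of every descendant)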
for c in self.get_children():
for s in c.get_children():
if s.lms.graded:
xmoduledescriptors = list(yield_descriptor_descendents(s))
xmoduledescriptors.append(s)
# The xmoduledescriptors included here are only the ones that have scores.
section_description = {'section_descriptor': s, 'xmoduledescriptors': filter(lambda child: child.has_score, xmoduledescriptors)}
section_format = s.lms.format if s.lms.format is not None else ''
graded_sections[section_format] = graded_sections.get(section_format, []) + [section_description]
all_descriptors.extend(xmoduledescriptors)
all_descriptors.append(s)
return {'graded_sections': graded_sections,
'all_descriptors': all_descriptors, }
@staticmethod
def make_id(org, course, url_name):
return '/'.join([org, course, url_name])
@staticmethod
def id_to_location(course_id):
'''Convert the given course_id (org/course/name) to a location object.
Throws ValueError if course_id is of the wrong format.
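        For example, 'MITx/6.002x/2012_Fall' maps to
        Location('i4x', 'MITx', '6.002x', 'course', '2012_Fall').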
'''
org, course, name = course_id.split('/')
return Location('i4x', org, course, 'course', name)
@staticmethod
def location_to_id(location):
'''Convert a location of a course to a course_id. If location category
is not "course", raise a ValueError.
location: something that can be passed to Location
'''
loc = Location(location)
if loc.category != "course":
raise ValueError("{0} is not a course location".format(loc))
return "/".join([loc.org, loc.course, loc.name])
@property
def id(self):
"""Return the course_id for this course"""
return self.location_to_id(self.location)
@property
def start_date_text(self):
def try_parse_iso_8601(text):
try:
result = Date().from_json(text)
if result is None:
result = text.title()
else:
result = result.strftime("%b %d, %Y")
except ValueError:
result = text.title()
return result
if isinstance(self.advertised_start, basestring):
return try_parse_iso_8601(self.advertised_start)
elif self.advertised_start is None and self.start is None:
# TODO this is an impossible state since the init function forces start to have a value
return 'TBD'
else:
return (self.advertised_start or self.start).strftime("%b %d, %Y")
@property
def end_date_text(self):
"""
Returns the end date for the course formatted as a string.
If the course does not have an end date set (course.end is None), an empty string will be returned.
"""
return '' if self.end is None else self.end.strftime("%b %d, %Y")
@property
def forum_posts_allowed(self):
date_proxy = Date()
try:
blackout_periods = [(date_proxy.from_json(start),
date_proxy.from_json(end))
for start, end
in self.discussion_blackouts]
now = datetime.now(UTC())
for start, end in blackout_periods:
if start <= now <= end:
return False
        except Exception:
log.exception("Error parsing discussion_blackouts for course {0}".format(self.id))
return True
class TestCenterExam(object):
def __init__(self, course_id, exam_name, exam_info):
self.course_id = course_id
self.exam_name = exam_name
self.exam_info = exam_info
self.exam_series_code = exam_info.get('Exam_Series_Code') or exam_name
self.display_name = exam_info.get('Exam_Display_Name') or self.exam_series_code
self.first_eligible_appointment_date = self._try_parse_time('First_Eligible_Appointment_Date')
if self.first_eligible_appointment_date is None:
raise ValueError("First appointment date must be specified")
# TODO: If defaulting the last appointment date, it should be the
# *end* of the same day, not the same time. It's going to be used as the
# end of the exam overall, so we don't want the exam to disappear too soon.
# It's also used optionally as the registration end date, so time matters there too.
self.last_eligible_appointment_date = self._try_parse_time('Last_Eligible_Appointment_Date') # or self.first_eligible_appointment_date
if self.last_eligible_appointment_date is None:
raise ValueError("Last appointment date must be specified")
self.registration_start_date = (self._try_parse_time('Registration_Start_Date') or
datetime.fromtimestamp(0, UTC()))
self.registration_end_date = self._try_parse_time('Registration_End_Date') or self.last_eligible_appointment_date
# do validation within the exam info:
if self.registration_start_date > self.registration_end_date:
raise ValueError("Registration start date must be before registration end date")
if self.first_eligible_appointment_date > self.last_eligible_appointment_date:
raise ValueError("First appointment date must be before last appointment date")
if self.registration_end_date > self.last_eligible_appointment_date:
raise ValueError("Registration end date must be before last appointment date")
self.exam_url = exam_info.get('Exam_URL')
def _try_parse_time(self, key):
"""
Parse an optional metadata key containing a time: if present, complain
if it doesn't parse.
Return None if not present or invalid.
"""
if key in self.exam_info:
try:
return Date().from_json(self.exam_info[key])
except ValueError as e:
msg = "Exam {0} in course {1} loaded with a bad exam_info key '{2}': '{3}'".format(self.exam_name, self.course_id, self.exam_info[key], e)
log.warning(msg)
return None
def has_started(self):
return datetime.now(UTC()) > self.first_eligible_appointment_date
def has_ended(self):
return datetime.now(UTC()) > self.last_eligible_appointment_date
def has_started_registration(self):
return datetime.now(UTC()) > self.registration_start_date
def has_ended_registration(self):
return datetime.now(UTC()) > self.registration_end_date
def is_registering(self):
now = datetime.now(UTC())
return now >= self.registration_start_date and now <= self.registration_end_date
@property
def first_eligible_appointment_date_text(self):
return self.first_eligible_appointment_date.strftime("%b %d, %Y")
@property
def last_eligible_appointment_date_text(self):
return self.last_eligible_appointment_date.strftime("%b %d, %Y")
@property
def registration_end_date_text(self):
return date_utils.get_default_time_display(self.registration_end_date)
@property
def current_test_center_exam(self):
exams = [exam for exam in self.test_center_exams if exam.has_started_registration() and not exam.has_ended()]
if len(exams) > 1:
# TODO: output some kind of warning. This should already be
# caught if we decide to do validation at load time.
return exams[0]
elif len(exams) == 1:
return exams[0]
else:
return None
def get_test_center_exam(self, exam_series_code):
exams = [exam for exam in self.test_center_exams if exam.exam_series_code == exam_series_code]
return exams[0] if len(exams) == 1 else None
@property
def number(self):
return self.location.course
@property
def org(self):
return self.location.org
|
rationalAgent/edx-platform-custom
|
common/lib/xmodule/xmodule/course_module.py
|
Python
|
agpl-3.0
| 41,972
|
[
"VisIt"
] |
1daca23c553b9fa3007c458f3215e2dcaeaca382f9fc7514583684bfeb0c77b5
|
#! /usr/bin/env python
#import __main__
#__main__.pymol_argv = ['pymol','-qc']
##__main__.pymol_argv = ['pymol','']
#import sys,time,os
#import pymol
#
#pymol.finish_launching()
#pymol.cmd.feedback("disable","all","actions")
#pymol.cmd.feedback("disable","all","results")
#sys.path.append("/home/scratch/software/Pymol-script-repo-master")
import MDAnalysis
from MDAnalysis import *
from MDAnalysis.analysis.distances import *
import numpy
import math
import sys
my_traj = sys.argv[1]
#pymol.cmd.load(my_traj,my_traj[:-4])
#pymol.cmd.split_states(my_traj[:-4])
#states = pymol.cmd.get_object_list()
#
#
#for state in states:
#
# #dih_angle = pymol.cmd.get_dihedral("%s//A/2/N1"%state, "%s//A/2/C6"%state, "%s//A/2/N6"%state, "%s//A/2/H61"%state)
# dih_angle = pymol.cmd.get_dihedral("%s//A/2/N1"%state, "%s//A/2/C6"%state, "%s//A/2/N6"%state, "%s//A/2/H61"%state)
# print dih_angle
#u = Universe("init.pdb",my_traj)
#v = Universe("init.pdb")
u = Universe(my_traj,my_traj)
v = Universe(my_traj)
end = my_traj.find('.pdb')
fout_name = my_traj[0:end] + '_dist.dat'
##acc_1 = u.selectAtoms("segid A and resid 5 and name P")
##acc_2 = u.selectAtoms("segid A and resid 4 and name N7")
##acc_3 = u.selectAtoms("segid A and resid 4 and name N1")
#acc_1 = u.selectAtoms("segid A and resid 4 and name O4")
#acc_2 = u.selectAtoms("segid B and resid 10 and name O4")
#acc_5 = u.selectAtoms("segid B and resid 17 and name N3")
#acc_7 = u.selectAtoms("segid B and resid 15 and name N1")
ahm_h2 = u.selectAtoms("segid B and resid 9 and name HO")
bp_base11 = u.selectAtoms("segid A and resid 4 and name O4")
bp_base12 = u.selectAtoms("segid B and resid 9 and name H62")
bp_base13 = u.selectAtoms("segid A and resid 4 and name O2")
bp_base14 = u.selectAtoms("segid B and resid 9 and name H2")
bp_base21 = u.selectAtoms("segid B and resid 10 and name O4")
bp_base22 = u.selectAtoms("segid A and resid 3 and name H62")
bp_base23 = u.selectAtoms("segid B and resid 10 and name O2")
bp_base24 = u.selectAtoms("segid A and resid 3 and name H2")
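# The selections above appear to pick hydrogen-bond donor/acceptor atoms
# across base pairs (O4...H62 and O2...H2 look like Watson-Crick contacts);
# that reading is inferred from the atom names, not stated in the script.
# Note: `v` above is created but never used.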
f = open(fout_name,'w')
for ts in u.trajectory:
distance2 = numpy.linalg.norm(ahm_h2.centerOfMass() - bp_base21.centerOfMass())
distance3 = numpy.linalg.norm(bp_base11.centerOfMass() - bp_base12.centerOfMass())
distance4 = numpy.linalg.norm(bp_base13.centerOfMass() - bp_base14.centerOfMass())
distance5 = numpy.linalg.norm(bp_base21.centerOfMass() - bp_base22.centerOfMass())
distance6 = numpy.linalg.norm(bp_base23.centerOfMass() - bp_base24.centerOfMass())
#f.write('%7.3f\t%7.3f\t%7.3f\t%7.3f\n' % (distance3,distance4,distance5,distance6))
#f.write('%7.3f\t%7.3f\t%7.3f\t%7.3f\n' % (distance3,distance4,distance1,distance2))
f.write('%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\n' % (distance3,distance4,distance5,distance6,distance2))
f.close()
|
demharters/git_scripts
|
dist_dna_4dnb_fragment_hme2.py
|
Python
|
apache-2.0
| 2,828
|
[
"MDAnalysis",
"PyMOL"
] |
8d48be84e0b82d66d1e9e486628f52d32daaea6ee5249e3b643e00250bd9119b
|
# -*- coding: utf-8 -*-
import numpy as np
import scipy.special
from bokeh.plotting import *
from bokeh.objects import Range1d
mu, sigma = 0, 0.5
measured = np.random.normal(mu, sigma, 1000)
hist, edges = np.histogram(measured, density=True, bins=50)
x = np.linspace(-2, 2, 1000)
pdf = 1/(sigma * np.sqrt(2*np.pi)) * np.exp(-(x-mu)**2 / (2*sigma**2))
cdf = (1+scipy.special.erf((x-mu)/np.sqrt(2*sigma**2)))/2
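# Closed forms used above: pdf(x) = exp(-(x-mu)^2 / (2*sigma^2)) / (sigma*sqrt(2*pi))
# and cdf(x) = (1 + erf((x-mu)/(sigma*sqrt(2)))) / 2, the standard normal results.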
output_file('histogram.html')
hold()
figure(title="Normal Distribution (μ=0, σ=0.5)",tools="previewsave",
background_fill="#E8DDCB")
quad(top=hist, bottom=np.zeros(len(hist)), left=edges[:-1], right=edges[1:],
     fill_color="#036564", line_color="#033649")
# Use `line` renderers to display the PDF and CDF
line(x, pdf, line_color="#D95B43", line_width=8, alpha=0.7, legend="PDF")
line(x, cdf, line_color="white", line_width=2, alpha=0.7, legend="CDF")
legend().orientation = "top_left"
figure(title="Log Normal Distribution (μ=0, σ=0.5)", tools="previewsave",
background_fill="#E8DDCB")
mu, sigma = 0, 0.5 # NOTE: you can tinker with these values if you like
measured = np.random.lognormal(mu, sigma, 1000)
hist, edges = np.histogram(measured, density=True, bins=50)
x = np.linspace(0, 8.0, 1000)
pdf = 1/(x* sigma * np.sqrt(2*np.pi)) * np.exp(-(np.log(x)-mu)**2 / (2*sigma**2))
cdf = (1+scipy.special.erf((np.log(x)-mu)/(np.sqrt(2)*sigma)))/2
quad(top=hist, bottom=np.zeros(len(hist)), left=edges[:-1], right=edges[1:],
fill_color="#036564", line_color="#033649"
)
line(x, pdf, line_color="#D95B43", line_width=8, alpha=0.7, legend="PDF")
line(x, cdf, line_color="white", line_width=2, alpha=0.7, legend="CDF")
legend().orientation = "bottom_right"
figure(title="Gamma Distribution (k=1, θ=2)", tools="previewsave",
background_fill="#E8DDCB")
k, theta = 1.0, 2.0
measured = np.random.gamma(k, theta, 1000)
hist, edges = np.histogram(measured, density=True, bins=50)
# compute ideal values
x = np.linspace(0, 20.0, 1000)
pdf = x**(k-1) * np.exp(-x/theta) / (theta**k * scipy.special.gamma(k))
cdf = scipy.special.gammainc(k, x/theta)  # gammainc is already regularized; no extra 1/gamma(k) factor needed
quad(top=hist, bottom=np.zeros(len(hist)), left=edges[:-1], right=edges[1:],
fill_color="#036564", line_color="#033649"
)
line(x, pdf, line_color="#D95B43", line_width=8, alpha=0.7, legend="PDF")
line(x, cdf, line_color="white", line_width=2, alpha=0.7, legend="CDF")
legend().orientation = "top_left"
figure(title="Beta Distribution (α=2, β=2)", tools="previewsave",
background_fill="#E8DDCB")
alpha, beta = 2.0, 2.0
measured = np.random.beta(alpha, beta, 1000)
hist, edges = np.histogram(measured, density=True, bins=50)
x = np.linspace(0, 1, 1000)
pdf = x**(alpha-1) * (1-x)**(beta-1) / scipy.special.beta(alpha, beta)
cdf = scipy.special.btdtr(alpha, beta, x)
quad(top=hist, bottom=np.zeros(len(hist)), left=edges[:-1], right=edges[1:],
fill_color="#036564", line_color="#033649"
)
line(x, pdf, line_color="#D95B43", line_width=8, alpha=0.7, legend="PDF")
line(x, cdf, line_color="white", line_width=2, alpha=0.7, legend="CDF")
figure(title="Weibull Distribution (λ=1, k=1.25)", tools="previewsave",
background_fill="#E8DDCB")
lam, k = 1, 1.25
measured = lam*(-np.log(np.random.uniform(0, 1, 1000)))**(1/k)
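# Inverse-CDF sampling: for U ~ Uniform(0, 1), lam*(-ln U)**(1/k) follows a
# Weibull(lam, k) distribution, consistent with the cdf defined below.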
hist, edges = np.histogram(measured, density=True, bins=50)
x = np.linspace(0, 8, 1000)
pdf = (k/lam)*(x/lam)**(k-1) * np.exp(-(x/lam)**k)
cdf = 1 - np.exp(-(x/lam)**k)
quad(top=hist, bottom=np.zeros(len(hist)), left=edges[:-1], right=edges[1:],
fill_color="#036564", line_color="#033649"
)
line(x, pdf, line_color="#D95B43", line_width=8, alpha=0.7, legend="PDF")
line(x, cdf, line_color="white", line_width=2, alpha=0.7, legend="CDF")
legend().orientation = "top_left"
show()
|
sahat/bokeh
|
examples/plotting/file/histogram.py
|
Python
|
bsd-3-clause
| 3,786
|
[
"TINKER"
] |
683cd7771eed450a517014f1aad00009f97865c1b2ec4a739e6ede6398837b50
|
"""
Unit tests for the Voronoi renderer
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
import numpy as np
import vtk
from .. import voronoiRenderer
from ... import utils
from ....filtering import voronoi
from ....system import lattice
from six.moves import range
# required unless ColouringOptionsWindow is rewritten to have a non-GUI-dependent settings object
class DummyColouringOpts(object):
def __init__(self):
self.colourBy = "Species"
self.heightAxis = 1
self.minVal = 0.0
self.maxVal = 1.0
self.solidColourRGB = (1.0, 0.0, 0.0)
self.scalarBarText = "Height in Y (A)"
# required unless VoronoiOptionsWindow is rewritten to have a non-GUI-dependent settings object
class DummyVoronoiOpts(object):
def __init__(self):
self.dispersion = 10.0
self.displayVoronoi = False
self.useRadii = False
self.opacity = 0.8
self.outputToFile = False
self.outputFilename = "voronoi.csv"
self.faceAreaThreshold = 0.1
class TestVoronoiRenderer(unittest.TestCase):
"""
Test the Voronoi renderer
"""
def setUp(self):
"""
Called before each test
"""
# generate lattice
self.lattice = lattice.Lattice()
self.lattice.addAtom("He", [0, 0, 0], 0)
self.lattice.addAtom("Fe", [2, 0, 0], 0)
self.lattice.addAtom("He", [0, 2, 0], 0)
self.lattice.addAtom("Fe", [0, 0, 2], 0)
self.lattice.addAtom("He", [5, 5, 5], 0)
self.lattice.addAtom("He", [2, 2, 0], 0)
self.lattice.addAtom("He", [2, 0, 2], 0)
self.lattice.addAtom("He", [0, 2, 2], 0)
self.lattice.addAtom("Fe", [2, 2, 2], 0)
# visible atoms and scalars
self.visAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
self.scalars = np.asarray([0, 1, 0, 1, 0, 0, 0, 0, 1], dtype=np.float64)
# lut
self.nspecies = 2
self.lut = vtk.vtkLookupTable()
self.lut.SetNumberOfColors(self.nspecies)
self.lut.SetNumberOfTableValues(self.nspecies)
self.lut.SetTableRange(0, self.nspecies - 1)
self.lut.SetRange(0, self.nspecies - 1)
for i in range(self.nspecies):
self.lut.SetTableValue(i, 1, 0, 0, 1.0)
# voronoi options
self.voroOpts = DummyVoronoiOpts()
# colouring options
self.colOpts = DummyColouringOpts()
# calc voronoi
calc = voronoi.VoronoiAtomsCalculator(self.voroOpts)
self.voro = calc.getVoronoi(self.lattice)
def tearDown(self):
"""
Called after each test
"""
# remove refs
self.lattice = None
self.nspecies = None
self.visAtoms = None
self.scalars = None
self.lut = None
self.voroOpts = None
self.colOpts = None
self.voro = None
def test_voronoiRenderer(self):
"""
Voronoi renderer
"""
# run the renderer
renderer = voronoiRenderer.VoronoiRenderer()
renderer.render(self.lattice, self.visAtoms, self.scalars, self.lut, self.voro, self.voroOpts, self.colOpts)
# check result is correct type
self.assertIsInstance(renderer.getActor(), utils.ActorObject)
|
chrisdjscott/Atoman
|
atoman/rendering/renderers/tests/test_voronoiRenderer.py
|
Python
|
mit
| 3,377
|
[
"VTK"
] |
f03e08eb7cdfd75bb05f3478fdbe413171f3c0c7ec07b852a0fcb4012d7f0054
|
"""Weight initializer."""
from __future__ import absolute_import, print_function
import re
import logging
import warnings
import json
import numpy as np
from .base import string_types
from .ndarray import NDArray, load
from . import random
# inherit str for backward compatibility
class InitDesc(str):
"""Descriptor for the initialization pattern.
    Parameters
    ----------
name : str
Name of variable.
attrs : dict of str to str
Attributes of this variable taken from ``Symbol.attr_dict``.
global_init : Initializer
Global initializer to fallback to.
"""
def __new__(cls, name, attrs=None, global_init=None):
ret = super(InitDesc, cls).__new__(cls, name)
ret.attrs = attrs or {}
ret.global_init = global_init
return ret
_INITIALIZER_REGISTRY = {}
def register(klass):
"""Registers a custom initializer.
Custom initializers can be created by extending `mx.init.Initializer` and implementing the
required functions like `_init_weight` and `_init_bias`. The created initializer must be
registered using `mx.init.register` before it can be used.
Parameters
----------
klass : class
A subclass of `mx.init.Initializer` that needs to be registered as a custom initializer.
Example
-------
>>> # Create and register a custom initializer that
... # initializes weights to 0.1 and biases to 1.
...
>>> @mx.init.register
... class CustomInit(mx.init.Initializer):
... def __init__(self):
... super(CustomInit, self).__init__()
... def _init_weight(self, _, arr):
... arr[:] = 0.1
... def _init_bias(self, _, arr):
... arr[:] = 1
...
>>> # Module is an instance of 'mxnet.module.Module'
...
>>> module.init_params(CustomInit())
"""
assert issubclass(klass, Initializer), "Can only register subclass of Initializer"
name = klass.__name__.lower()
if name in _INITIALIZER_REGISTRY:
warnings.warn(
"\033[91mNew initializer %s.%s is overriding existing initializer %s.%s\033[0m"%(
klass.__module__, klass.__name__,
_INITIALIZER_REGISTRY[name].__module__,
_INITIALIZER_REGISTRY[name].__name__),
UserWarning, stacklevel=2)
_INITIALIZER_REGISTRY[name] = klass
return klass
class Initializer(object):
"""The base class of an initializer."""
def __init__(self, **kwargs):
self.kwargs = kwargs
def dumps(self):
"""Saves the initializer to string
Returns
-------
str
JSON formatted string that describes the initializer.
Examples
--------
>>> # Create initializer and retrieve its parameters
...
>>> init = mx.init.Normal(0.5)
>>> init.dumps()
'["normal", {"sigma": 0.5}]'
>>> init = mx.init.Xavier(factor_type="in", magnitude=2.34)
>>> init.dumps()
'["xavier", {"rnd_type": "uniform", "magnitude": 2.34, "factor_type": "in"}]'
"""
return json.dumps([self.__class__.__name__.lower(), self.kwargs])
def __call__(self, desc, arr):
"""Initialize an array
Parameters
----------
desc : InitDesc
Initialization pattern descriptor.
arr : NDArray
The array to be initialized.
"""
if not isinstance(desc, InitDesc):
self._legacy_init(desc, arr)
return
if desc.global_init is None:
desc.global_init = self
init = desc.attrs.get('__init__', "")
if init:
# when calling Variable initializer
klass, kwargs = json.loads(init)
_INITIALIZER_REGISTRY[klass.lower()](**kwargs)._init_weight(desc, arr)
else:
# register nnvm::FSetInputVariableAttrs in the backend for new patterns
# don't add new cases here.
if desc.endswith('weight'):
self._init_weight(desc, arr)
elif desc.endswith('bias'):
self._init_bias(desc, arr)
elif desc.endswith('gamma'):
self._init_gamma(desc, arr)
elif desc.endswith('beta'):
self._init_beta(desc, arr)
else:
self._init_default(desc, arr)
def _legacy_init(self, name, arr):
"""Legacy initialization method.
Parameters
----------
name : str
            Name of the corresponding NDArray.
arr : NDArray
NDArray to be initialized.
"""
warnings.warn(
"\033[91mCalling initializer with init(str, NDArray) has been deprecated." \
"please use init(mx.init.InitDesc(...), NDArray) instead.\033[0m",
DeprecationWarning, stacklevel=3)
if not isinstance(name, string_types):
raise TypeError('name must be string')
if not isinstance(arr, NDArray):
raise TypeError('arr must be NDArray')
if name.startswith('upsampling'):
self._init_bilinear(name, arr)
elif name.startswith('stn_loc') and name.endswith('weight'):
self._init_zero(name, arr)
elif name.startswith('stn_loc') and name.endswith('bias'):
self._init_loc_bias(name, arr)
elif name.endswith('bias'):
self._init_bias(name, arr)
elif name.endswith('gamma'):
self._init_gamma(name, arr)
elif name.endswith('beta'):
self._init_beta(name, arr)
elif name.endswith('weight'):
self._init_weight(name, arr)
elif name.endswith("moving_mean"):
self._init_zero(name, arr)
elif name.endswith("moving_var"):
self._init_one(name, arr)
elif name.endswith("moving_inv_var"):
self._init_zero(name, arr)
elif name.endswith("moving_avg"):
self._init_zero(name, arr)
else:
self._init_default(name, arr)
def _init_bilinear(self, _, arr):
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
f = np.ceil(shape[3] / 2.)
c = (2 * f - 1 - f % 2) / (2. * f)
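        # Worked example: shape[3] == 4 gives f = 2 and c = 0.75, so the 1-D
        # weights along x are 0.25, 0.75, 0.75, 0.25 (a bilinear ramp).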
for i in range(np.prod(shape)):
x = i % shape[3]
            y = (i // shape[3]) % shape[2]  # floor division, so this also works under Python 3
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
def _init_loc_bias(self, _, arr):
shape = arr.shape
assert(shape[0] == 6)
arr[:] = np.array([1.0, 0, 0, 0, 1.0, 0])
def _init_zero(self, _, arr):
arr[:] = 0.0
def _init_one(self, _, arr):
arr[:] = 1.0
def _init_bias(self, _, arr):
arr[:] = 0.0
def _init_gamma(self, _, arr):
arr[:] = 1.0
def _init_beta(self, _, arr):
arr[:] = 0.0
def _init_weight(self, name, arr):
"""Abstract method to Initialize weight."""
raise NotImplementedError("Must override it")
def _init_default(self, name, _):
        raise ValueError(
            'Unknown initialization pattern for %s. '
            'Default initialization is now limited to '
            '"weight", "bias", "gamma" (1.0), and "beta" (0.0). '
            'Please use mx.sym.Variable(init=mx.init.*) to set the initialization pattern' % name)
class Load(object):
"""Initializes variables by loading data from file or dict.
    **Note** Load will drop ``arg:`` or ``aux:`` from a name and
    initialize the variables whose names match once the prefix is dropped.
Parameters
----------
param: str or dict of str->`NDArray`
Parameter file or dict mapping name to NDArray.
default_init: Initializer
Default initializer when name is not found in `param`.
verbose: bool
Flag for enabling logging of source when initializing.
"""
def __init__(self, param, default_init=None, verbose=False):
if isinstance(param, str):
param = load(param)
assert isinstance(param, dict)
self.param = {}
for name, arr in param.items():
if name.startswith('arg:') or name.startswith('aux:'):
self.param[name[4:]] = arr
else:
self.param[name] = arr
self.default_init = default_init
self.verbose = verbose
def __call__(self, name, arr):
if name in self.param:
assert arr.shape == self.param[name].shape, \
'Parameter %s cannot be initialized from loading. '%name + \
'Shape mismatch, target %s vs loaded %s'%(str(arr.shape),
self.param[name].shape)
arr[:] = self.param[name]
if self.verbose:
logging.info('Initialized %s by loading', name)
else:
assert self.default_init is not None, \
"Cannot Initialize %s. Not found in loaded param "%name + \
"and no default Initializer is provided."
self.default_init(name, arr)
if self.verbose:
logging.info('Initialized %s by default', name)
class Mixed(object):
"""Initialize parameters using multiple initializers.
Parameters
----------
patterns: list of str
List of regular expressions matching parameter names.
initializers: list of Initializer
List of initializers corresponding to `patterns`.
Example
-------
>>> # Given 'module', an instance of 'mxnet.module.Module', initialize biases to zero
... # and every other parameter to random values with uniform distribution.
...
>>> init = mx.initializer.Mixed(['bias', '.*'], [mx.init.Zero(), mx.init.Uniform(0.1)])
>>> module.init_params(init)
>>>
>>> for dictionary in module.get_params():
... for key in dictionary:
... print(key)
... print(dictionary[key].asnumpy())
...
fullyconnected1_weight
[[ 0.0097627 0.01856892 0.04303787]]
fullyconnected1_bias
[ 0.]
"""
def __init__(self, patterns, initializers):
assert len(patterns) == len(initializers)
self.map = list(zip([re.compile(p) for p in patterns], initializers))
def __call__(self, name, arr):
for prog, init in self.map:
if prog.match(name):
init(name, arr)
return
        raise ValueError('Parameter name %s did not match any pattern. Consider '
                         'adding a ".*" pattern at the end with a default Initializer.' % name)
@register
class Zero(Initializer):
"""Initializes weights to zero.
Example
-------
>>> # Given 'module', an instance of 'mxnet.module.Module', initialize weights to zero.
...
>>> init = mx.initializer.Zero()
>>> module.init_params(init)
>>> for dictionary in module.get_params():
... for key in dictionary:
... print(key)
... print(dictionary[key].asnumpy())
...
fullyconnected0_weight
[[ 0. 0. 0.]]
"""
def __init__(self):
super(Zero, self).__init__()
def _init_weight(self, _, arr):
arr[:] = 0
@register
class One(Initializer):
"""Initializes weights to one.
Example
-------
>>> # Given 'module', an instance of 'mxnet.module.Module', initialize weights to one.
...
>>> init = mx.initializer.One()
>>> module.init_params(init)
>>> for dictionary in module.get_params():
... for key in dictionary:
... print(key)
... print(dictionary[key].asnumpy())
...
fullyconnected0_weight
[[ 1. 1. 1.]]
"""
def __init__(self):
super(One, self).__init__()
def _init_weight(self, _, arr):
arr[:] = 1
@register
class Constant(Initializer):
"""Initializes the weights to a scalar value.
Parameters
----------
value : float
Fill value.
"""
def __init__(self, value):
super(Constant, self).__init__(value=value)
self.value = value
def _init_weight(self, _, arr):
arr[:] = self.value
@register
class Uniform(Initializer):
"""Initializes weights with random values uniformly sampled from a given range.
Parameters
----------
scale : float, optional
The bound on the range of the generated random values.
Values are generated from the range [-`scale`, `scale`].
Default scale is 0.07.
Example
-------
>>> # Given 'module', an instance of 'mxnet.module.Module', initialize weights
>>> # to random values uniformly sampled between -0.1 and 0.1.
...
>>> init = mx.init.Uniform(0.1)
>>> module.init_params(init)
>>> for dictionary in module.get_params():
... for key in dictionary:
... print(key)
... print(dictionary[key].asnumpy())
...
fullyconnected0_weight
[[ 0.01360891 -0.02144304 0.08511933]]
"""
def __init__(self, scale=0.07):
super(Uniform, self).__init__(scale=scale)
self.scale = scale
def _init_weight(self, _, arr):
random.uniform(-self.scale, self.scale, out=arr)
@register
class Normal(Initializer):
"""Initializes weights with random values sampled from a normal distribution
with a mean of zero and standard deviation of `sigma`.
Parameters
----------
sigma : float, optional
Standard deviation of the normal distribution.
Default standard deviation is 0.01.
Example
-------
>>> # Given 'module', an instance of 'mxnet.module.Module', initialize weights
>>> # to random values sampled from a normal distribution.
...
>>> init = mx.init.Normal(0.5)
>>> module.init_params(init)
>>> for dictionary in module.get_params():
... for key in dictionary:
... print(key)
... print(dictionary[key].asnumpy())
...
fullyconnected0_weight
[[-0.3214761 -0.12660924 0.53789419]]
"""
def __init__(self, sigma=0.01):
super(Normal, self).__init__(sigma=sigma)
self.sigma = sigma
def _init_weight(self, _, arr):
random.normal(0, self.sigma, out=arr)
@register
class Orthogonal(Initializer):
"""Initialize weight as orthogonal matrix.
This initializer implements *Exact solutions to the nonlinear dynamics of
learning in deep linear neural networks*, available at
https://arxiv.org/abs/1312.6120.
Parameters
----------
    scale : float, optional
        Scaling factor of the weight.
    rand_type : str, optional
        Use "uniform" or "normal" random numbers to initialize the weight.
"""
def __init__(self, scale=1.414, rand_type="uniform"):
super(Orthogonal, self).__init__(scale=scale, rand_type=rand_type)
self.scale = scale
self.rand_type = rand_type
def _init_weight(self, _, arr):
nout = arr.shape[0]
nin = np.prod(arr.shape[1:])
if self.rand_type == "uniform":
tmp = np.random.uniform(-1.0, 1.0, (nout, nin))
elif self.rand_type == "normal":
tmp = np.random.normal(0.0, 1.0, (nout, nin))
u, _, v = np.linalg.svd(tmp, full_matrices=False) # pylint: disable=invalid-name
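        # With full_matrices=False, u has shape (nout, k) and v has shape
        # (k, nin), where k = min(nout, nin); the branch below picks whichever
        # matches tmp's shape so the result has orthonormal rows or columns.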
if u.shape == tmp.shape:
res = u
else:
res = v
res = self.scale * res.reshape(arr.shape)
arr[:] = res
@register
class Xavier(Initializer):
"""Returns an initializer performing "Xavier" initialization for weights.
This initializer is designed to keep the scale of gradients roughly the same
in all layers.
By default, `rnd_type` is ``'uniform'`` and `factor_type` is ``'avg'``,
the initializer fills the weights with random numbers in the range
of :math:`[-c, c]`, where :math:`c = \\sqrt{\\frac{3.}{0.5 * (n_{in} + n_{out})}}`.
:math:`n_{in}` is the number of neurons feeding into weights, and :math:`n_{out}` is
the number of neurons the result is fed to.
If `rnd_type` is ``'uniform'`` and `factor_type` is ``'in'``,
the :math:`c = \\sqrt{\\frac{3.}{n_{in}}}`.
Similarly when `factor_type` is ``'out'``, the :math:`c = \\sqrt{\\frac{3.}{n_{out}}}`.
If `rnd_type` is ``'gaussian'`` and `factor_type` is ``'avg'``,
the initializer fills the weights with numbers from normal distribution with
a standard deviation of :math:`\\sqrt{\\frac{3.}{0.5 * (n_{in} + n_{out})}}`.
Parameters
----------
rnd_type: str, optional
Random generator type, can be ``'gaussian'`` or ``'uniform'``.
factor_type: str, optional
Can be ``'avg'``, ``'in'``, or ``'out'``.
magnitude: float, optional
Scale of random number.
"""
def __init__(self, rnd_type="uniform", factor_type="avg", magnitude=3):
super(Xavier, self).__init__(rnd_type=rnd_type, factor_type=factor_type,
magnitude=magnitude)
self.rnd_type = rnd_type
self.factor_type = factor_type
self.magnitude = float(magnitude)
def _init_weight(self, _, arr):
shape = arr.shape
hw_scale = 1.
if len(shape) > 2:
hw_scale = np.prod(shape[2:])
fan_in, fan_out = shape[1] * hw_scale, shape[0] * hw_scale
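        # Example: a conv weight of shape (64, 32, 3, 3) gives hw_scale = 9,
        # fan_in = 32 * 9 = 288 and fan_out = 64 * 9 = 576.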
factor = 1.
if self.factor_type == "avg":
factor = (fan_in + fan_out) / 2.0
elif self.factor_type == "in":
factor = fan_in
elif self.factor_type == "out":
factor = fan_out
else:
raise ValueError("Incorrect factor type")
scale = np.sqrt(self.magnitude / factor)
if self.rnd_type == "uniform":
random.uniform(-scale, scale, out=arr)
elif self.rnd_type == "gaussian":
random.normal(0, scale, out=arr)
else:
raise ValueError("Unknown random type")
@register
class MSRAPrelu(Xavier):
"""Initialize the weight according to a MSRA paper.
This initializer implements *Delving Deep into Rectifiers: Surpassing
Human-Level Performance on ImageNet Classification*, available at
https://arxiv.org/abs/1502.01852.
    This initializer is proposed for initialization related to ReLU activations;
    it makes some changes on top of the Xavier method.
Parameters
----------
factor_type: str, optional
Can be ``'avg'``, ``'in'``, or ``'out'``.
slope: float, optional
initial slope of any PReLU (or similar) nonlinearities.
"""
def __init__(self, factor_type="avg", slope=0.25):
self.kwargs = {'factor_type': factor_type, 'slope': slope}
magnitude = 2. / (1 + slope ** 2)
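        # e.g. the default slope of 0.25 gives magnitude = 2 / 1.0625 ~= 1.88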
super(MSRAPrelu, self).__init__("gaussian", factor_type, magnitude)
@register
class Bilinear(Initializer):
"""Initialize weight for upsampling layers."""
def __init__(self):
super(Bilinear, self).__init__()
def _init_weight(self, _, arr):
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
f = np.ceil(shape[3] / 2.)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % shape[3]
            y = (i // shape[3]) % shape[2]  # floor division, so this also works under Python 3
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
@register
class LSTMBias(Initializer):
"""Initialize all bias of an LSTMCell to 0.0 except for
    the forget gate, whose bias is set to a custom value.
Parameters
----------
forget_bias: float, bias for the forget gate.
Jozefowicz et al. 2015 recommends setting this to 1.0.
"""
def __init__(self, forget_bias):
super(LSTMBias, self).__init__(forget_bias=forget_bias)
self.forget_bias = forget_bias
def _init_weight(self, name, arr):
arr[:] = 0.0
# in the case of LSTMCell the forget gate is the second
        # gate of the 4 LSTM gates; we modify the corresponding values.
num_hidden = int(arr.shape[0] / 4)
arr[num_hidden:2*num_hidden] = self.forget_bias
@register
class FusedRNN(Initializer):
"""Initialize parameters for fused rnn layers.
Parameters
----------
init : Initializer
        initializer applied to unpacked weights. Falls back to the global
        initializer if None.
    num_hidden : int
        should be the same as the arguments passed to FusedRNNCell.
    num_layers : int
        should be the same as the arguments passed to FusedRNNCell.
    mode : str
        should be the same as the arguments passed to FusedRNNCell.
    bidirectional : bool
        should be the same as the arguments passed to FusedRNNCell.
    forget_bias : float
        should be the same as the arguments passed to FusedRNNCell.
"""
def __init__(self, init, num_hidden, num_layers, mode, bidirectional=False, forget_bias=1.0):
if isinstance(init, string_types):
klass, kwargs = json.loads(init)
init = _INITIALIZER_REGISTRY[klass.lower()](**kwargs)
super(FusedRNN, self).__init__(init=init.dumps() if init is not None else None,
num_hidden=num_hidden, num_layers=num_layers, mode=mode,
bidirectional=bidirectional, forget_bias=forget_bias)
self._init = init
self._num_hidden = num_hidden
self._num_layers = num_layers
self._mode = mode
self._bidirectional = bidirectional
self._forget_bias = forget_bias
def _init_weight(self, desc, arr):
from .rnn import rnn_cell
cell = rnn_cell.FusedRNNCell(self._num_hidden, self._num_layers,
self._mode, self._bidirectional,
forget_bias=self._forget_bias, prefix='')
args = cell.unpack_weights({'parameters': arr})
for name in args:
arg_desc = InitDesc(name, global_init=desc.global_init)
# for lstm bias, we use a custom initializer
# which adds a bias to the forget gate
if self._mode == 'lstm' and name.endswith("_f_bias"):
args[name][:] = self._forget_bias
elif self._init is None:
desc.global_init(arg_desc, args[name])
else:
self._init(arg_desc, args[name])
arr[:] = cell.pack_weights(args)['parameters']
|
rishita/mxnet
|
python/mxnet/initializer.py
|
Python
|
apache-2.0
| 22,395
|
[
"Gaussian"
] |
c15973d0f07687f83e64d8b32a6c093cece422ff0b8eead6ca42dace1d461885
|
# -*- coding: utf-8 -*-
"""Some utility functions."""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import atexit
from collections import Iterable
from contextlib import contextmanager
from distutils.version import LooseVersion
from functools import wraps
from functools import partial
import hashlib
import inspect
import json
import logging
from math import log, ceil
import multiprocessing
import operator
import os
import os.path as op
import platform
import shutil
from shutil import rmtree
from string import Formatter
import subprocess
import sys
import tempfile
import time
import traceback
from unittest import SkipTest
import warnings
import webbrowser
import numpy as np
from scipy import linalg, sparse
from .externals.six.moves import urllib
from .externals.six import string_types, StringIO, BytesIO, integer_types
from .externals.decorator import decorator
from .fixes import _get_args
logger = logging.getLogger('mne') # one selection here used across mne-python
logger.propagate = False # don't propagate (in case of multiple imports)
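# Fallback stub used when memory_profiler is not installed (see the try/except below).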
def _memory_usage(*args, **kwargs):
if isinstance(args[0], tuple):
args[0][0](*args[0][1], **args[0][2])
elif not isinstance(args[0], int): # can be -1 for current use
args[0]()
return [-1]
try:
from memory_profiler import memory_usage
except ImportError:
memory_usage = _memory_usage
def nottest(f):
"""Mark a function as not a test (decorator)."""
f.__test__ = False
return f
# # # WARNING # # #
# This list must also be updated in doc/_templates/class.rst if it is
# changed here!
_doc_special_members = ('__contains__', '__getitem__', '__iter__', '__len__',
'__call__', '__add__', '__sub__', '__mul__', '__div__',
'__neg__', '__hash__')
###############################################################################
# RANDOM UTILITIES
def _ensure_int(x, name='unknown', must_be='an int'):
"""Ensure a variable is an integer."""
# This is preferred over numbers.Integral, see:
# https://github.com/scipy/scipy/pull/7351#issuecomment-299713159
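    # e.g. operator.index(np.int64(3)) returns 3, while operator.index(3.0)
    # raises TypeError, so floats are rejected here.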
try:
x = int(operator.index(x))
except TypeError:
raise TypeError('%s must be %s, got %s' % (name, must_be, type(x)))
return x
def _pl(x, non_pl=''):
"""Determine if plural should be used."""
len_x = x if isinstance(x, (integer_types, np.generic)) else len(x)
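    # e.g. _pl(1) == '' and _pl([1, 2]) == 's'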
return non_pl if len_x == 1 else 's'
def _explain_exception(start=-1, stop=None, prefix='> '):
"""Explain an exception."""
# start=-1 means "only the most recent caller"
etype, value, tb = sys.exc_info()
string = traceback.format_list(traceback.extract_tb(tb)[start:stop])
string = (''.join(string).split('\n') +
traceback.format_exception_only(etype, value))
string = ':\n' + prefix + ('\n' + prefix).join(string)
return string
def _get_call_line(in_verbose=False):
"""Get the call line from within a function."""
# XXX Eventually we could auto-triage whether in a `verbose` decorated
# function or not.
# NB This probably only works for functions that are undecorated,
# or decorated by `verbose`.
back = 2 if not in_verbose else 4
call_frame = inspect.getouterframes(inspect.currentframe())[back][0]
context = inspect.getframeinfo(call_frame).code_context
context = 'unknown' if context is None else context[0].strip()
return context
def _sort_keys(x):
"""Sort and return keys of dict."""
keys = list(x.keys()) # note: not thread-safe
idx = np.argsort([str(k) for k in keys])
keys = [keys[ii] for ii in idx]
return keys
def object_hash(x, h=None):
"""Hash a reasonable python object.
Parameters
----------
x : object
Object to hash. Can be anything comprised of nested versions of:
{dict, list, tuple, ndarray, str, bytes, float, int, None}.
h : hashlib HASH object | None
Optional, object to add the hash to. None creates an MD5 hash.
Returns
-------
digest : int
The digest resulting from the hash.
"""
if h is None:
h = hashlib.md5()
if hasattr(x, 'keys'):
# dict-like types
keys = _sort_keys(x)
for key in keys:
object_hash(key, h)
object_hash(x[key], h)
elif isinstance(x, bytes):
# must come before "str" below
h.update(x)
elif isinstance(x, (string_types, float, int, type(None))):
h.update(str(type(x)).encode('utf-8'))
h.update(str(x).encode('utf-8'))
elif isinstance(x, (np.ndarray, np.number, np.bool_)):
x = np.asarray(x)
h.update(str(x.shape).encode('utf-8'))
h.update(str(x.dtype).encode('utf-8'))
h.update(x.tostring())
elif hasattr(x, '__len__'):
# all other list-like types
h.update(str(type(x)).encode('utf-8'))
for xx in x:
object_hash(xx, h)
else:
raise RuntimeError('unsupported type: %s (%s)' % (type(x), x))
return int(h.hexdigest(), 16)
def object_size(x):
"""Estimate the size of a reasonable python object.
Parameters
----------
x : object
Object to approximate the size of.
Can be anything comprised of nested versions of:
{dict, list, tuple, ndarray, str, bytes, float, int, None}.
Returns
-------
size : int
The estimated size in bytes of the object.
"""
    # Note: this will not process object arrays properly, since those
    # only hold references
if isinstance(x, (bytes, string_types, int, float, type(None))):
size = sys.getsizeof(x)
elif isinstance(x, np.ndarray):
# On newer versions of NumPy, just doing sys.getsizeof(x) works,
# but on older ones you always get something small :(
size = sys.getsizeof(np.array([])) + x.nbytes
elif isinstance(x, np.generic):
size = x.nbytes
elif isinstance(x, dict):
size = sys.getsizeof(x)
for key, value in x.items():
size += object_size(key)
size += object_size(value)
elif isinstance(x, (list, tuple)):
size = sys.getsizeof(x) + sum(object_size(xx) for xx in x)
elif sparse.isspmatrix_csc(x) or sparse.isspmatrix_csr(x):
size = sum(sys.getsizeof(xx)
for xx in [x, x.data, x.indices, x.indptr])
else:
raise RuntimeError('unsupported type: %s (%s)' % (type(x), x))
return size
def object_diff(a, b, pre=''):
"""Compute all differences between two python variables.
Parameters
----------
a : object
Currently supported: dict, list, tuple, ndarray, int, str, bytes,
float, StringIO, BytesIO.
b : object
        Must be same type as ``a``.
pre : str
String to prepend to each line.
Returns
-------
diffs : str
A string representation of the differences.
"""
out = ''
if type(a) != type(b):
out += pre + ' type mismatch (%s, %s)\n' % (type(a), type(b))
elif isinstance(a, dict):
k1s = _sort_keys(a)
k2s = _sort_keys(b)
m1 = set(k2s) - set(k1s)
if len(m1):
out += pre + ' left missing keys %s\n' % (m1)
for key in k1s:
if key not in k2s:
out += pre + ' right missing key %s\n' % key
else:
out += object_diff(a[key], b[key], pre + '[%s]' % repr(key))
elif isinstance(a, (list, tuple)):
if len(a) != len(b):
out += pre + ' length mismatch (%s, %s)\n' % (len(a), len(b))
else:
for ii, (xx1, xx2) in enumerate(zip(a, b)):
out += object_diff(xx1, xx2, pre + '[%s]' % ii)
elif isinstance(a, (string_types, int, float, bytes)):
if a != b:
out += pre + ' value mismatch (%s, %s)\n' % (a, b)
elif a is None:
if b is not None:
out += pre + ' left is None, right is not (%s)\n' % (b)
elif isinstance(a, np.ndarray):
if not np.array_equal(a, b):
out += pre + ' array mismatch\n'
elif isinstance(a, (StringIO, BytesIO)):
if a.getvalue() != b.getvalue():
out += pre + ' StringIO mismatch\n'
elif sparse.isspmatrix(a):
# sparsity and sparse type of b vs a already checked above by type()
if b.shape != a.shape:
            out += pre + (' sparse matrix a and b shape mismatch '
                          '(%s vs %s)' % (a.shape, b.shape))
else:
c = a - b
c.eliminate_zeros()
if c.nnz > 0:
out += pre + (' sparse matrix a and b differ on %s '
'elements' % c.nnz)
else:
raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a))
return out
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance.
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (int, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
def split_list(l, n):
"""Split list in n (approx) equal pieces."""
n = int(n)
sz = len(l) // n
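    # e.g. n = 3 and len(l) == 10 gives sz = 3: pieces of sizes 3, 3 and 4
    # (the final yield takes the remainder).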
for i in range(n - 1):
yield l[i * sz:(i + 1) * sz]
yield l[(n - 1) * sz:]
def create_chunks(sequence, size):
"""Generate chunks from a sequence.
Parameters
----------
sequence : iterable
Any iterable object
size : int
The chunksize to be returned
"""
return (sequence[p:p + size] for p in range(0, len(sequence), size))
def sum_squared(X):
"""Compute norm of an array.
Parameters
----------
X : array
Data whose norm must be found
Returns
-------
value : float
Sum of squares of the input array X
"""
X_flat = X.ravel(order='F' if np.isfortran(X) else 'C')
return np.dot(X_flat, X_flat)
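# Illustrative check (a sketch): sum_squared is equivalent to np.sum(X ** 2)
# but avoids materializing the intermediate squared array:
# >>> X = np.arange(6.).reshape(2, 3)
# >>> sum_squared(X) == np.sum(X ** 2)
# True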
def warn(message, category=RuntimeWarning):
"""Emit a warning with trace outside the mne namespace.
This function takes arguments like warnings.warn, and sends messages
using both ``warnings.warn`` and ``logger.warn``. Warnings can be
generated deep within nested function calls. In order to provide a
more helpful warning, this function traverses the stack until it
reaches the first frame outside the ``mne`` namespace, and reports that
frame as the location of the warning.
Parameters
----------
message : str
Warning message.
category : instance of Warning
The warning class. Defaults to ``RuntimeWarning``.
"""
import mne
root_dir = op.dirname(mne.__file__)
frame = None
if logger.level <= logging.WARN:
last_fname = ''
frame = inspect.currentframe()
while frame:
fname = frame.f_code.co_filename
lineno = frame.f_lineno
# skip the frame compiled from a string inside the verbose decorator
if fname == '<string>' and last_fname == 'utils.py':
last_fname = fname
frame = frame.f_back
continue
# treat tests as scripts
# and don't capture unittest/case.py (assert_raises)
if not (fname.startswith(root_dir) or
('unittest' in fname and 'case' in fname)) or \
op.basename(op.dirname(fname)) == 'tests':
break
last_fname = op.basename(fname)
frame = frame.f_back
del frame
# We need to use this instead of warn(message, category, stacklevel)
# because we move out of the MNE stack, so warnings won't properly
# recognize the module name (and our warnings.simplefilter will fail)
warnings.warn_explicit(message, category, fname, lineno,
'mne', globals().get('__warningregistry__', {}))
logger.warning(message)
def check_fname(fname, filetype, endings, endings_err=()):
"""Enforce MNE filename conventions.
Parameters
----------
fname : str
Name of the file.
filetype : str
Type of file. e.g., ICA, Epochs etc.
endings : tuple
Acceptable endings for the filename.
endings_err : tuple
Endings that the filename is required to have; an IOError is raised
otherwise.
"""
if len(endings_err) > 0 and not fname.endswith(endings_err):
print_endings = ' or '.join([', '.join(endings_err[:-1]),
endings_err[-1]])
raise IOError('The filename (%s) for file type %s must end with %s'
% (fname, filetype, print_endings))
print_endings = ' or '.join([', '.join(endings[:-1]), endings[-1]])
if not fname.endswith(endings):
warn('This filename (%s) does not conform to MNE naming conventions. '
'All %s files should end with %s'
% (fname, filetype, print_endings))
class WrapStdOut(object):
"""Dynamically wrap to sys.stdout.
This makes packages that monkey-patch sys.stdout (e.g., doctest,
sphinx-gallery) work properly.
"""
def __getattr__(self, name): # noqa: D105
# Even more ridiculous than this class, this must be sys.stdout (not
# just stdout) in order for this to work (tested on OSX and Linux)
if hasattr(sys.stdout, name):
return getattr(sys.stdout, name)
else:
raise AttributeError("'file' object has not attribute '%s'" % name)
class _TempDir(str):
"""Create and auto-destroy temp dir.
This is designed to be used with testing modules. Instances should be
defined inside test functions. Instances defined at module level can not
guarantee proper destruction of the temporary directory.
When used at module level, the current use of the __del__() method for
cleanup can fail because the rmtree function may be cleaned up before this
object (an alternative could be using the atexit module instead).
"""
def __new__(self): # noqa: D105
new = str.__new__(self, tempfile.mkdtemp(prefix='tmp_mne_tempdir_'))
return new
def __init__(self): # noqa: D102
self._path = self.__str__()
def __del__(self): # noqa: D105
rmtree(self._path, ignore_errors=True)
def estimate_rank(data, tol='auto', return_singular=False, norm=True):
"""Estimate the rank of data.
This function will normalize the rows of the data (typically
channels or vertices) such that non-zero singular values
should be close to one.
Parameters
----------
data : array
Data to estimate the rank of (should be 2-dimensional).
tol : float | str
Tolerance for singular values to consider non-zero in
calculating the rank. The singular values are calculated
in this method such that independent data are expected to
have singular value around one. Can be 'auto' to use the
same thresholding as ``scipy.linalg.orth``.
return_singular : bool
If True, also return the singular values that were used
to determine the rank.
norm : bool
If True, data will be scaled by their estimated row-wise norm.
Else data are assumed to be scaled. Defaults to True.
Returns
-------
rank : int
Estimated rank of the data.
s : array
If return_singular is True, the singular values that were
thresholded to determine the rank are also returned.
"""
data = data.copy() # operate on a copy
if norm is True:
norms = _compute_row_norms(data)
data /= norms[:, np.newaxis]
s = linalg.svd(data, compute_uv=False, overwrite_a=True)
if isinstance(tol, string_types):
if tol != 'auto':
raise ValueError('tol must be "auto" or float')
eps = np.finfo(float).eps
tol = np.max(data.shape) * np.amax(s) * eps
tol = float(tol)
rank = np.sum(s > tol)
if return_singular is True:
return rank, s
else:
return rank
def _compute_row_norms(data):
"""Compute scaling based on estimated norm."""
norms = np.sqrt(np.sum(data ** 2, axis=1))
norms[norms == 0] = 1.0
return norms
def _reject_data_segments(data, reject, flat, decim, info, tstep):
"""Reject data segments using peak-to-peak amplitude."""
from .epochs import _is_good
from .io.pick import channel_indices_by_type
data_clean = np.empty_like(data)
idx_by_type = channel_indices_by_type(info)
step = int(ceil(tstep * info['sfreq']))
if decim is not None:
step = int(ceil(step / float(decim)))
this_start = 0
this_stop = 0
drop_inds = []
for first in range(0, data.shape[1], step):
last = first + step
data_buffer = data[:, first:last]
if data_buffer.shape[1] < (last - first):
break # end of the time segment
if _is_good(data_buffer, info['ch_names'], idx_by_type, reject,
flat, ignore_chs=info['bads']):
this_stop = this_start + data_buffer.shape[1]
data_clean[:, this_start:this_stop] = data_buffer
this_start += data_buffer.shape[1]
else:
logger.info("Artifact detected in [%d, %d]" % (first, last))
drop_inds.append((first, last))
data = data_clean[:, :this_stop]
if not data.any():
raise RuntimeError('No clean segment found. Please '
'consider updating your rejection '
'thresholds.')
return data, drop_inds
def _get_inst_data(inst):
"""Get data view from MNE object instance like Raw, Epochs or Evoked."""
from .io.base import BaseRaw
from .epochs import BaseEpochs
from . import Evoked
from .time_frequency.tfr import _BaseTFR
_validate_type(inst, (BaseRaw, BaseEpochs, Evoked, _BaseTFR), "Instance")
if not inst.preload:
inst.load_data()
return inst._data
class _FormatDict(dict):
"""Help pformat() work properly."""
def __missing__(self, key):
return "{" + key + "}"
def pformat(temp, **fmt):
"""Format a template string partially.
Examples
--------
>>> pformat("{a}_{b}", a='x')
'x_{b}'
"""
formatter = Formatter()
mapping = _FormatDict(fmt)
return formatter.vformat(temp, (), mapping)
###############################################################################
# DECORATORS
# Following deprecated class copied from scikit-learn
# force show of DeprecationWarning even on python 2.7
warnings.filterwarnings('always', category=DeprecationWarning, module='mne')
class deprecated(object):
"""Mark a function or class as deprecated (decorator).
Issue a warning when the function is called/the class is instantiated, and
add a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
in an empty set of parentheses::
>>> from mne.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<mne.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
Parameters
----------
extra: string
To be added to the deprecation messages.
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
# scikit-learn will not import on all platforms b/c it can be
# sklearn or scikits.learn, so a self-contained example is used above
def __init__(self, extra=''): # noqa: D102
self.extra = extra
def __call__(self, obj): # noqa: D105
"""Call.
Parameters
----------
obj : object
Object to call.
"""
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def deprecation_wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = deprecation_wrapped
deprecation_wrapped.__name__ = '__init__'
deprecation_wrapped.__doc__ = self._update_doc(init.__doc__)
deprecation_wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun."""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def deprecation_wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
deprecation_wrapped.__name__ = fun.__name__
deprecation_wrapped.__dict__ = fun.__dict__
deprecation_wrapped.__doc__ = self._update_doc(fun.__doc__)
return deprecation_wrapped
def _update_doc(self, olddoc):
newdoc = ".. warning:: DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n %s" % (newdoc, olddoc)
return newdoc
@decorator
def verbose(function, *args, **kwargs):
"""Verbose decorator to allow functions to override log-level.
This decorator is used to set the verbose level during a function or method
call, such as :func:`mne.compute_covariance`. The `verbose` keyword
argument can be 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL', True (an
alias for 'INFO'), or False (an alias for 'WARNING'). To set the global
verbosity level for all functions, use :func:`mne.set_log_level`.
Parameters
----------
function : function
Function to be decorated by setting the verbosity level.
Returns
-------
dec : function
The decorated function
Examples
--------
You can use the ``verbose`` argument to set the verbose level on the fly::
>>> import mne
>>> cov = mne.compute_raw_covariance(raw, verbose='WARNING') # doctest: +SKIP
>>> cov = mne.compute_raw_covariance(raw, verbose='INFO') # doctest: +SKIP
Using up to 49 segments
Number of samples used : 5880
[done]
See Also
--------
set_log_level
set_config
""" # noqa: E501
arg_names = _get_args(function)
default_level = verbose_level = None
if len(arg_names) > 0 and arg_names[0] == 'self':
default_level = getattr(args[0], 'verbose', None)
if 'verbose' in arg_names:
verbose_level = args[arg_names.index('verbose')]
elif 'verbose' in kwargs:
verbose_level = kwargs.pop('verbose')
# This ensures that object.method(verbose=None) will use object.verbose
verbose_level = default_level if verbose_level is None else verbose_level
if verbose_level is not None:
# set it back if we get an exception
with use_log_level(verbose_level):
return function(*args, **kwargs)
return function(*args, **kwargs)
class use_log_level(object):
"""Context handler for logging level.
Parameters
----------
level : int
The level to use.
"""
def __init__(self, level): # noqa: D102
self.level = level
def __enter__(self): # noqa: D105
self.old_level = set_log_level(self.level, True)
def __exit__(self, *args): # noqa: D105
set_log_level(self.old_level)
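# Illustrative usage of use_log_level (a sketch; ``mne.io.read_raw_fif`` and
# ``fname`` are stand-ins for any chatty call):
# >>> with use_log_level('WARNING'):  # doctest: +SKIP
# ...     raw = mne.io.read_raw_fif(fname)  # INFO messages are suppressed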
def has_nibabel(vox2ras_tkr=False):
"""Determine if nibabel is installed.
Parameters
----------
vox2ras_tkr : bool
If True, require nibabel has vox2ras_tkr support.
Returns
-------
has : bool
True if the user has nibabel.
"""
try:
import nibabel
out = True
if vox2ras_tkr: # we need MGHHeader to have vox2ras_tkr param
out = (getattr(getattr(getattr(nibabel, 'MGHImage', 0),
'header_class', 0),
'get_vox2ras_tkr', None) is not None)
return out
except ImportError:
return False
def has_mne_c():
"""Check for MNE-C."""
return 'MNE_ROOT' in os.environ
def has_freesurfer():
"""Check for Freesurfer."""
return 'FREESURFER_HOME' in os.environ
def requires_nibabel(vox2ras_tkr=False):
"""Check for nibabel."""
import pytest
extra = ' with vox2ras_tkr support' if vox2ras_tkr else ''
return pytest.mark.skipif(not has_nibabel(vox2ras_tkr),
reason='Requires nibabel%s' % extra)
def buggy_mkl_svd(function):
"""Decorate tests that make calls to SVD and intermittently fail."""
@wraps(function)
def dec(*args, **kwargs):
try:
return function(*args, **kwargs)
except np.linalg.LinAlgError as exp:
if 'SVD did not converge' in str(exp):
msg = 'Intel MKL SVD convergence error detected, skipping test'
warn(msg)
raise SkipTest(msg)
raise
return dec
def requires_version(library, min_version='0.0'):
"""Check for a library version."""
import pytest
return pytest.mark.skipif(not check_version(library, min_version),
reason=('Requires %s version >= %s'
% (library, min_version)))
def requires_module(function, name, call=None):
"""Skip a test if package is not available (decorator)."""
import pytest
call = ('import %s' % name) if call is None else call
reason = 'Test %s skipped, requires %s.' % (function.__name__, name)
try:
exec(call, globals(), locals())
except Exception as exc:
if len(str(exc)) > 0 and str(exc) != 'No module named %s' % name:
reason += ' Got exception (%s)' % (exc,)
skip = True
else:
skip = False
return pytest.mark.skipif(skip, reason=reason)(function)
def copy_doc(source):
"""Copy the docstring from another function (decorator).
The docstring of the source function is prepended to the docstring of the
function wrapped by this decorator.
This is useful when inheriting from a class and overloading a method. This
decorator can be used to copy the docstring of the original method.
Parameters
----------
source : function
Function to copy the docstring from
Returns
-------
wrapper : function
The decorated function
Examples
--------
>>> class A:
... def m1():
... '''Docstring for m1'''
... pass
>>> class B (A):
... @copy_doc(A.m1)
... def m1():
... ''' this gets appended'''
... pass
>>> print(B.m1.__doc__)
Docstring for m1 this gets appended
"""
def wrapper(func):
if source.__doc__ is None or len(source.__doc__) == 0:
raise ValueError('Cannot copy docstring: docstring was empty.')
doc = source.__doc__
if func.__doc__ is not None:
doc += func.__doc__
func.__doc__ = doc
return func
return wrapper
def copy_function_doc_to_method_doc(source):
"""Use the docstring from a function as docstring for a method.
The docstring of the source function is prepended to the docstring of the
function wrapped by this decorator. Additionally, the first parameter
specified in the docstring of the source function is removed in the new
docstring.
This decorator is useful when implementing a method that just calls a
function. This pattern is prevalent in, for example, the plotting functions
of MNE.
Parameters
----------
source : function
Function to copy the docstring from
Returns
-------
wrapper : function
The decorated method
Examples
--------
>>> def plot_function(object, a, b):
... '''Docstring for plotting function.
...
... Parameters
... ----------
... object : instance of object
... The object to plot
... a : int
... Some parameter
... b : int
... Some parameter
... '''
... pass
...
>>> class A:
... @copy_function_doc_to_method_doc(plot_function)
... def plot(self, a, b):
... '''
... Notes
... -----
... .. versionadded:: 0.13.0
... '''
... plot_function(self, a, b)
>>> print(A.plot.__doc__)
Docstring for plotting function.
<BLANKLINE>
Parameters
----------
a : int
Some parameter
b : int
Some parameter
<BLANKLINE>
Notes
-----
.. versionadded:: 0.13.0
<BLANKLINE>
Notes
-----
The parsing performed is very basic and will break easily on docstrings
that are not formatted exactly according to the ``numpydoc`` standard.
Always inspect the resulting docstring when using this decorator.
"""
def wrapper(func):
doc = source.__doc__.split('\n')
# Find parameter block
for line, text in enumerate(doc[:-2]):
if (text.strip() == 'Parameters' and
doc[line + 1].strip() == '----------'):
parameter_block = line
break
else:
# No parameter block found
raise ValueError('Cannot copy function docstring: no parameter '
'block found. To simply copy the docstring, use '
'the @copy_doc decorator instead.')
# Find first parameter
for line, text in enumerate(doc[parameter_block:], parameter_block):
if ':' in text:
first_parameter = line
parameter_indentation = len(text) - len(text.lstrip(' '))
break
else:
raise ValueError('Cannot copy function docstring: no parameters '
'found. To simply copy the docstring, use the '
'@copy_doc decorator instead.')
# Find end of first parameter
for line, text in enumerate(doc[first_parameter + 1:],
first_parameter + 1):
# Ignore empty lines
if len(text.strip()) == 0:
continue
line_indentation = len(text) - len(text.lstrip(' '))
if line_indentation <= parameter_indentation:
# Reach end of first parameter
first_parameter_end = line
# If only one parameter is defined, remove the Parameters
# heading as well
if ':' not in text:
first_parameter = parameter_block
break
else:
# End of docstring reached
first_parameter_end = line
first_parameter = parameter_block
# Copy the docstring, but remove the first parameter
doc = ('\n'.join(doc[:first_parameter]) + '\n' +
'\n'.join(doc[first_parameter_end:]))
if func.__doc__ is not None:
doc += func.__doc__
func.__doc__ = doc
return func
return wrapper
_pandas_call = """
import pandas
version = LooseVersion(pandas.__version__)
if version < '0.8.0':
raise ImportError
"""
_sklearn_call = """
required_version = '0.14'
import sklearn
version = LooseVersion(sklearn.__version__)
if version < required_version:
raise ImportError
"""
_mayavi_call = """
with warnings.catch_warnings(record=True): # traits
from mayavi import mlab
mlab.options.backend = 'test'
"""
_mne_call = """
if not has_mne_c():
raise ImportError
"""
_fs_call = """
if not has_freesurfer():
raise ImportError
"""
_n2ft_call = """
if 'NEUROMAG2FT_ROOT' not in os.environ:
raise ImportError
"""
_fs_or_ni_call = """
if not has_nibabel() and not has_freesurfer():
raise ImportError
"""
requires_pandas = partial(requires_module, name='pandas', call=_pandas_call)
requires_sklearn = partial(requires_module, name='sklearn', call=_sklearn_call)
requires_mayavi = partial(requires_module, name='mayavi', call=_mayavi_call)
requires_mne = partial(requires_module, name='MNE-C', call=_mne_call)
requires_freesurfer = partial(requires_module, name='Freesurfer',
call=_fs_call)
requires_neuromag2ft = partial(requires_module, name='neuromag2ft',
call=_n2ft_call)
requires_fs_or_nibabel = partial(requires_module, name='nibabel or Freesurfer',
call=_fs_or_ni_call)
requires_tvtk = partial(requires_module, name='TVTK',
call='from tvtk.api import tvtk')
requires_pysurfer = partial(requires_module, name='PySurfer',
call="""import warnings
with warnings.catch_warnings(record=True):
from surfer import Brain""")
requires_good_network = partial(
requires_module, name='good network connection',
call='if int(os.environ.get("MNE_SKIP_NETWORK_TESTS", 0)):\n'
' raise ImportError')
requires_nitime = partial(requires_module, name='nitime')
requires_h5py = partial(requires_module, name='h5py')
requires_numpydoc = partial(requires_module, name='numpydoc')
def check_version(library, min_version):
r"""Check minimum library version required.
Parameters
----------
library : str
The library name to import. Must have a ``__version__`` property.
min_version : str
The minimum version string. Anything that matches
``'(\d+ | [a-z]+ | \.)'``. Can also be empty to skip version
check (just check for library presence).
Returns
-------
ok : bool
True if the library exists with at least the specified version.
"""
ok = True
try:
library = __import__(library)
except ImportError:
ok = False
else:
if min_version:
this_version = LooseVersion(library.__version__)
if this_version < min_version:
ok = False
return ok
def _check_mayavi_version(min_version='4.3.0'):
"""Check mayavi version."""
if not check_version('mayavi', min_version):
raise RuntimeError("Need mayavi >= %s" % min_version)
def _check_pyface_backend():
"""Check the currently selected Pyface backend.
Returns
-------
backend : str
Name of the backend.
result : 0 | 1 | 2
0: the backend has been tested and works.
1: the backend has not been tested.
2: the backend could not be determined (importing traits failed).
Notes
-----
See also http://docs.enthought.com/pyface/.
"""
try:
from traits.trait_base import ETSConfig
except ImportError:
return None, 2
backend = ETSConfig.toolkit
if backend == 'qt4':
status = 0
else:
status = 1
return backend, status
def _import_mlab():
"""Quietly import mlab."""
with warnings.catch_warnings(record=True):
from mayavi import mlab
return mlab
@contextmanager
def traits_test_context():
"""Context to raise errors in trait handlers."""
from traits.api import push_exception_handler
push_exception_handler(reraise_exceptions=True)
yield
push_exception_handler(reraise_exceptions=False)
def traits_test(test_func):
"""Raise errors in trait handlers (decorator)."""
@wraps(test_func)
def dec(*args, **kwargs):
with traits_test_context():
return test_func(*args, **kwargs)
return dec
@verbose
def run_subprocess(command, verbose=None, *args, **kwargs):
"""Run command using subprocess.Popen.
Run command and wait for command to complete. If the return code was zero
then return, otherwise raise CalledProcessError.
By default, this will also add stdout= and stderr=subprocess.PIPE
to the call to Popen to suppress printing to the terminal.
Parameters
----------
command : list of str | str
Command to run as subprocess (see subprocess.Popen documentation).
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more). Defaults to
self.verbose.
*args, **kwargs : arguments
Additional arguments to pass to subprocess.Popen.
Returns
-------
stdout : str
Stdout returned by the process.
stderr : str
Stderr returned by the process.
"""
for stdxxx, sys_stdxxx, thresh in (
['stderr', sys.stderr, logging.ERROR],
['stdout', sys.stdout, logging.WARNING]):
if stdxxx not in kwargs and logger.level >= thresh:
kwargs[stdxxx] = subprocess.PIPE
elif kwargs.get(stdxxx, sys_stdxxx) is sys_stdxxx:
if isinstance(sys_stdxxx, StringIO):
# nose monkey patches sys.stderr and sys.stdout to StringIO
kwargs[stdxxx] = subprocess.PIPE
else:
kwargs[stdxxx] = sys_stdxxx
# Check the PATH environment variable. If run_subprocess() is to be called
# frequently this should be refactored so as to only check the path once.
env = kwargs.get('env', os.environ)
if any(p.startswith('~') for p in env['PATH'].split(os.pathsep)):
warn('Your PATH environment variable contains at least one path '
'starting with a tilde ("~") character. Such paths are not '
'interpreted correctly from within Python. It is recommended '
'that you use "$HOME" instead of "~".')
if isinstance(command, string_types):
command_str = command
else:
command_str = ' '.join(command)
logger.info("Running subprocess: %s" % command_str)
try:
p = subprocess.Popen(command, *args, **kwargs)
except Exception:
if isinstance(command, string_types):
command_name = command.split()[0]
else:
command_name = command[0]
logger.error('Command not found: %s' % command_name)
raise
stdout_, stderr = p.communicate()
stdout_ = u'' if stdout_ is None else stdout_.decode('utf-8')
stderr = u'' if stderr is None else stderr.decode('utf-8')
output = (stdout_, stderr)
if p.returncode:
print(output)
err_fun = subprocess.CalledProcessError.__init__
if 'output' in _get_args(err_fun):
raise subprocess.CalledProcessError(p.returncode, command, output)
else:
raise subprocess.CalledProcessError(p.returncode, command)
return output
###############################################################################
# LOGGING
def set_log_level(verbose=None, return_old_level=False):
"""Set the logging level.
Parameters
----------
verbose : bool, str, int, or None
The verbosity of messages to print. If a str, it can be either DEBUG,
INFO, WARNING, ERROR, or CRITICAL. Note that these are for
convenience and are equivalent to passing in logging.DEBUG, etc.
For bool, True is the same as 'INFO', False is the same as 'WARNING'.
If None, the environment variable MNE_LOGGING_LEVEL is read, and if
it doesn't exist, defaults to INFO.
return_old_level : bool
If True, return the old verbosity level.
"""
if verbose is None:
verbose = get_config('MNE_LOGGING_LEVEL', 'INFO')
elif isinstance(verbose, bool):
if verbose is True:
verbose = 'INFO'
else:
verbose = 'WARNING'
if isinstance(verbose, string_types):
verbose = verbose.upper()
logging_types = dict(DEBUG=logging.DEBUG, INFO=logging.INFO,
WARNING=logging.WARNING, ERROR=logging.ERROR,
CRITICAL=logging.CRITICAL)
if verbose not in logging_types:
raise ValueError('verbose must be of a valid type')
verbose = logging_types[verbose]
logger = logging.getLogger('mne')
old_verbose = logger.level
logger.setLevel(verbose)
return (old_verbose if return_old_level else None)
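# Illustrative usage of set_log_level (a sketch): capture the old level so
# it can be restored afterwards:
# >>> old_level = set_log_level('WARNING', return_old_level=True)  # doctest: +SKIP
# >>> set_log_level(old_level)  # doctest: +SKIP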
def set_log_file(fname=None, output_format='%(message)s', overwrite=None):
"""Set the log to print to a file.
Parameters
----------
fname : str, or None
Filename of the log to print to. If None, stdout is used.
To suppress log outputs, use set_log_level('WARNING').
output_format : str
Format of the output messages. See the following for examples:
https://docs.python.org/dev/howto/logging.html
e.g., "%(asctime)s - %(levelname)s - %(message)s".
overwrite : bool | None
Overwrite the log file (if it exists). Otherwise, statements
will be appended to the log (default). None is the same as False,
but additionally raises a warning to notify the user that log
entries will be appended.
"""
logger = logging.getLogger('mne')
handlers = logger.handlers
for h in handlers:
# only remove our handlers (get along nicely with nose)
if isinstance(h, (logging.FileHandler, logging.StreamHandler)):
if isinstance(h, logging.FileHandler):
h.close()
logger.removeHandler(h)
if fname is not None:
if op.isfile(fname) and overwrite is None:
# Don't use warn() here because we just want to
# emit a warnings.warn here (not logger.warn)
warnings.warn('Log entries will be appended to the file. Use '
'overwrite=False to avoid this message in the '
'future.', RuntimeWarning, stacklevel=2)
overwrite = False
mode = 'w' if overwrite else 'a'
lh = logging.FileHandler(fname, mode=mode)
else:
""" we should just be able to do:
lh = logging.StreamHandler(sys.stdout)
but because doctests uses some magic on stdout, we have to do this:
"""
lh = logging.StreamHandler(WrapStdOut())
lh.setFormatter(logging.Formatter(output_format))
# actually add the stream handler
logger.addHandler(lh)
class catch_logging(object):
"""Store logging.
This will remove all other logging handlers, and return the handler to
stdout when complete.
"""
def __enter__(self): # noqa: D105
self._data = StringIO()
self._lh = logging.StreamHandler(self._data)
self._lh.setFormatter(logging.Formatter('%(message)s'))
for lh in logger.handlers:
logger.removeHandler(lh)
logger.addHandler(self._lh)
return self._data
def __exit__(self, *args): # noqa: D105
logger.removeHandler(self._lh)
set_log_file(None)
###############################################################################
# CONFIG / PREFS
def get_subjects_dir(subjects_dir=None, raise_error=False):
"""Safely use subjects_dir input to return SUBJECTS_DIR.
Parameters
----------
subjects_dir : str | None
If a value is provided, return subjects_dir. Otherwise, look for
SUBJECTS_DIR config and return the result.
raise_error : bool
If True, raise a KeyError if no value for SUBJECTS_DIR can be found
(instead of returning None).
Returns
-------
value : str | None
The SUBJECTS_DIR value.
"""
if subjects_dir is None:
subjects_dir = get_config('SUBJECTS_DIR', raise_error=raise_error)
return subjects_dir
_temp_home_dir = None
def _get_extra_data_path(home_dir=None):
"""Get path to extra data (config, tables, etc.)."""
global _temp_home_dir
if home_dir is None:
home_dir = os.environ.get('_MNE_FAKE_HOME_DIR')
if home_dir is None:
# this has been checked on OSX64, Linux64, and Win32
if 'nt' == os.name.lower():
if op.isdir(op.join(os.getenv('APPDATA'), '.mne')):
home_dir = os.getenv('APPDATA')
else:
home_dir = os.getenv('USERPROFILE')
else:
# This is a more robust way of getting the user's home folder on
# Linux platforms (not sure about OSX, Unix or BSD) than checking
# the HOME environment variable. If the user is running some sort
# of script that isn't launched via the command line (e.g. a script
# launched via Upstart) then the HOME environment variable will
# not be set.
if os.getenv('MNE_DONTWRITE_HOME', '') == 'true':
if _temp_home_dir is None:
_temp_home_dir = tempfile.mkdtemp()
atexit.register(partial(shutil.rmtree, _temp_home_dir,
ignore_errors=True))
home_dir = _temp_home_dir
else:
home_dir = os.path.expanduser('~')
if home_dir is None:
raise ValueError('mne-python config file path could '
'not be determined, please report this '
'error to mne-python developers')
return op.join(home_dir, '.mne')
def get_config_path(home_dir=None):
r"""Get path to standard mne-python config file.
Parameters
----------
home_dir : str | None
The folder that contains the .mne config folder.
If None, it is found automatically.
Returns
-------
config_path : str
The path to the mne-python configuration file. On windows, this
will be '%USERPROFILE%\.mne\mne-python.json'. On every other
system, this will be ~/.mne/mne-python.json.
"""
val = op.join(_get_extra_data_path(home_dir=home_dir),
'mne-python.json')
return val
def set_cache_dir(cache_dir):
"""Set the directory to be used for temporary file storage.
This directory is used by joblib to store memmapped arrays,
which reduces memory requirements and speeds up parallel
computation.
Parameters
----------
cache_dir: str or None
Directory to use for temporary file storage. None disables
temporary file storage.
"""
if cache_dir is not None and not op.exists(cache_dir):
raise IOError('Directory %s does not exist' % cache_dir)
set_config('MNE_CACHE_DIR', cache_dir, set_env=False)
def set_memmap_min_size(memmap_min_size):
"""Set the minimum size for memmaping of arrays for parallel processing.
Parameters
----------
memmap_min_size: str or None
Threshold on the minimum size of arrays that triggers automated memory
mapping for parallel processing, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
"""
if memmap_min_size is not None:
if not isinstance(memmap_min_size, string_types):
raise ValueError('\'memmap_min_size\' has to be a string.')
if memmap_min_size[-1] not in ['K', 'M', 'G']:
raise ValueError('The size has to be given in kilo-, mega-, or '
'gigabytes, e.g., 100K, 500M, 1G.')
set_config('MNE_MEMMAP_MIN_SIZE', memmap_min_size, set_env=False)
# List the known configuration values
known_config_types = (
'MNE_BROWSE_RAW_SIZE',
'MNE_CACHE_DIR',
'MNE_COREG_COPY_ANNOT',
'MNE_COREG_GUESS_MRI_SUBJECT',
'MNE_COREG_HEAD_HIGH_RES',
'MNE_COREG_HEAD_OPACITY',
'MNE_COREG_INTERACTION',
'MNE_COREG_MARK_INSIDE',
'MNE_COREG_PREPARE_BEM',
'MNE_COREG_PROJECT_EEG',
'MNE_COREG_ORIENT_TO_SURFACE',
'MNE_COREG_SCALE_LABELS',
'MNE_COREG_SCALE_BY_DISTANCE',
'MNE_COREG_SCENE_SCALE',
'MNE_COREG_WINDOW_HEIGHT',
'MNE_COREG_WINDOW_WIDTH',
'MNE_COREG_SUBJECTS_DIR',
'MNE_CUDA_IGNORE_PRECISION',
'MNE_DATA',
'MNE_DATASETS_BRAINSTORM_PATH',
'MNE_DATASETS_EEGBCI_PATH',
'MNE_DATASETS_HF_SEF_PATH',
'MNE_DATASETS_MEGSIM_PATH',
'MNE_DATASETS_MISC_PATH',
'MNE_DATASETS_MTRF_PATH',
'MNE_DATASETS_SAMPLE_PATH',
'MNE_DATASETS_SOMATO_PATH',
'MNE_DATASETS_MULTIMODAL_PATH',
'MNE_DATASETS_SPM_FACE_DATASETS_TESTS',
'MNE_DATASETS_SPM_FACE_PATH',
'MNE_DATASETS_TESTING_PATH',
'MNE_DATASETS_VISUAL_92_CATEGORIES_PATH',
'MNE_DATASETS_KILOWORD_PATH',
'MNE_DATASETS_FIELDTRIP_CMC_PATH',
'MNE_DATASETS_PHANTOM_4DBTI_PATH',
'MNE_FORCE_SERIAL',
'MNE_KIT2FIFF_STIM_CHANNELS',
'MNE_KIT2FIFF_STIM_CHANNEL_CODING',
'MNE_KIT2FIFF_STIM_CHANNEL_SLOPE',
'MNE_KIT2FIFF_STIM_CHANNEL_THRESHOLD',
'MNE_LOGGING_LEVEL',
'MNE_MEMMAP_MIN_SIZE',
'MNE_SKIP_FTP_TESTS',
'MNE_SKIP_NETWORK_TESTS',
'MNE_SKIP_TESTING_DATASET_TESTS',
'MNE_STIM_CHANNEL',
'MNE_USE_CUDA',
'MNE_SKIP_FS_FLASH_CALL',
'SUBJECTS_DIR',
)
# These allow for partial matches, e.g. 'MNE_STIM_CHANNEL_1' is okay key
known_config_wildcards = (
'MNE_STIM_CHANNEL',
)
def _load_config(config_path, raise_error=False):
"""Safely load a config file."""
with open(config_path, 'r') as fid:
try:
config = json.load(fid)
except ValueError:
# No JSON object could be decoded --> corrupt file?
msg = ('The MNE-Python config file (%s) is not a valid JSON '
'file and might be corrupted' % config_path)
if raise_error:
raise RuntimeError(msg)
warn(msg)
config = dict()
return config
def get_config(key=None, default=None, raise_error=False, home_dir=None):
"""Read MNE-Python preferences from environment or config file.
Parameters
----------
key : None | str
The preference key to look for. The os environment is searched first,
then the mne-python config file is parsed.
If None, all the config parameters present in environment variables or
the path are returned.
default : str | None
Value to return if the key is not found.
raise_error : bool
If True, raise an error if the key is not found (instead of returning
default).
home_dir : str | None
The folder that contains the .mne config folder.
If None, it is found automatically.
Returns
-------
value : dict | str | None
The preference key value.
See Also
--------
set_config
"""
_validate_type(key, (string_types, type(None)), "key", 'string or None')
# first, check to see if key is in env
if key is not None and key in os.environ:
return os.environ[key]
# second, look for it in mne-python config file
config_path = get_config_path(home_dir=home_dir)
if not op.isfile(config_path):
config = {}
else:
config = _load_config(config_path)
if key is None:
# update config with environment variables
env_keys = (set(config).union(known_config_types).
intersection(os.environ))
config.update({key: os.environ[key] for key in env_keys})
return config
elif raise_error is True and key not in config:
meth_1 = 'os.environ["%s"] = VALUE' % key
meth_2 = 'mne.utils.set_config("%s", VALUE, set_env=True)' % key
raise KeyError('Key "%s" not found in environment or in the '
'mne-python config file: %s '
'Try either:'
' %s for a temporary solution, or:'
' %s for a permanent one. You can also '
'set the environment variable before '
'running python.'
% (key, config_path, meth_1, meth_2))
else:
return config.get(key, default)
def set_config(key, value, home_dir=None, set_env=True):
"""Set a MNE-Python preference key in the config file and environment.
Parameters
----------
key : str | None
The preference key to set. If None, a tuple of the valid
keys is returned, and ``value`` and ``home_dir`` are ignored.
value : str | None
The value to assign to the preference key. If None, the key is
deleted.
home_dir : str | None
The folder that contains the .mne config folder.
If None, it is found automatically.
set_env : bool
If True (default), update :data:`os.environ` in addition to
updating the MNE-Python config file.
See Also
--------
get_config
"""
if key is None:
return known_config_types
_validate_type(key, 'str', "key")
# While JSON allow non-string types, we allow users to override config
# settings using env, which are strings, so we enforce that here
_validate_type(value, (string_types, type(None)), "value",
"None or string")
if key not in known_config_types and not \
any(k in key for k in known_config_wildcards):
warn('Setting non-standard config type: "%s"' % key)
# Read all previous values
config_path = get_config_path(home_dir=home_dir)
if op.isfile(config_path):
config = _load_config(config_path, raise_error=True)
else:
config = dict()
logger.info('Attempting to create new mne-python configuration '
'file:\n%s' % config_path)
if value is None:
config.pop(key, None)
if set_env and key in os.environ:
del os.environ[key]
else:
config[key] = value
if set_env:
os.environ[key] = value
# Write all values. This may fail if the default directory is not
# writeable.
directory = op.dirname(config_path)
if not op.isdir(directory):
os.mkdir(directory)
with open(config_path, 'w') as fid:
json.dump(config, fid, sort_keys=True, indent=0)
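# Illustrative round-trip through the config file (a sketch; this writes to
# the user's mne-python.json, hence skipped):
# >>> set_config('MNE_LOGGING_LEVEL', 'INFO')  # doctest: +SKIP
# >>> get_config('MNE_LOGGING_LEVEL')  # doctest: +SKIP
# 'INFO'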
class ProgressBar(object):
"""Generate a command-line progressbar.
Parameters
----------
max_value : int | iterable
Maximum value of process (e.g. number of samples to process, bytes to
download, etc.). If an iterable is given, then `max_value` will be set
to the length of this iterable.
initial_value : int
Initial value of process, useful when resuming process from a specific
value, defaults to 0.
mesg : str
Message to include at end of progress bar.
max_chars : int | str
Number of characters to use for progress bar itself.
This does not include characters used for the message or percent
complete. Can be "auto" (default) to try to set a sane value based
on the terminal width.
progress_character : char
Character in the progress bar that indicates the portion completed.
spinner : bool
Show a spinner. Useful for long-running processes that may not
increment the progress bar very often. This provides the user with
feedback that the progress has not stalled.
max_total_width : int | str
Maximum total message width. Can use "auto" (default) to try to set
a sane value based on the current terminal width.
verbose_bool : bool
If True, show progress.
Example
-------
>>> progress = ProgressBar(13000)
>>> progress.update(3000) # doctest: +SKIP
[......... ] 23.07692 |
>>> progress.update(6000) # doctest: +SKIP
[.................. ] 46.15385 |
>>> progress = ProgressBar(13000, spinner=True)
>>> progress.update(3000) # doctest: +SKIP
[......... ] 23.07692 |
>>> progress.update(6000) # doctest: +SKIP
[.................. ] 46.15385 /
"""
spinner_symbols = ['|', '/', '-', '\\']
template = '\r[{0}{1}] {2:.02f}% {4} {3} '
def __init__(self, max_value, initial_value=0, mesg='', max_chars='auto',
progress_character='.', spinner=False,
max_total_width='auto', verbose_bool=True): # noqa: D102
self.cur_value = initial_value
if isinstance(max_value, Iterable):
self.max_value = len(max_value)
self.iterable = max_value
else:
self.max_value = float(max_value)
self.iterable = None
self.mesg = mesg
self.progress_character = progress_character
self.spinner = spinner
self.spinner_index = 0
self.n_spinner = len(self.spinner_symbols)
self._do_print = verbose_bool
self.cur_time = time.time()
if max_total_width == 'auto':
max_total_width = _get_terminal_width()
self.max_total_width = int(max_total_width)
if max_chars == 'auto':
max_chars = min(max(max_total_width - 40, 10), 60)
self.max_chars = int(max_chars)
self.cur_rate = 0
def update(self, cur_value, mesg=None):
"""Update progressbar with current value of process.
Parameters
----------
cur_value : number
Current value of process. Should be <= max_value (but this is not
enforced). The percent of the progressbar will be computed as
(cur_value / max_value) * 100
mesg : str
Message to display to the right of the progressbar. If None, the
last message provided will be used. To clear the current message,
pass a null string, ''.
"""
cur_time = time.time()
cur_rate = ((cur_value - self.cur_value) /
max(float(cur_time - self.cur_time), 1e-6))
# Smooth the estimate a bit
cur_rate = 0.1 * cur_rate + 0.9 * self.cur_rate
# Ensure floating-point division so we can get fractions of a percent
# for the progressbar.
self.cur_time = cur_time
self.cur_value = cur_value
self.cur_rate = cur_rate
progress = min(float(self.cur_value) / self.max_value, 1.)
num_chars = int(progress * self.max_chars)
num_left = self.max_chars - num_chars
# Update the message
if mesg is not None:
if mesg == 'file_sizes':
mesg = '(%s, %s/s)' % (
sizeof_fmt(self.cur_value).rjust(8),
sizeof_fmt(cur_rate).rjust(8))
self.mesg = mesg
# The \r tells the cursor to return to the beginning of the line rather
# than starting a new line. This allows us to have a progressbar-style
# display in the console window.
bar = self.template.format(self.progress_character * num_chars,
' ' * num_left,
progress * 100,
self.spinner_symbols[self.spinner_index],
self.mesg)
bar = bar[:self.max_total_width]
# Force a flush because sometimes when using bash scripts and pipes,
# the output is not printed until after the program exits.
if self._do_print:
sys.stdout.write(bar)
sys.stdout.flush()
# Increment the spinner
if self.spinner:
self.spinner_index = (self.spinner_index + 1) % self.n_spinner
def update_with_increment_value(self, increment_value, mesg=None):
"""Update progressbar with an increment.
Parameters
----------
increment_value : int
Value of the increment of process. The percent of the progressbar
will be computed as
((self.cur_value + increment_value) / max_value) * 100
mesg : str
Message to display to the right of the progressbar. If None, the
last message provided will be used. To clear the current message,
pass a null string, ''.
"""
self.update(self.cur_value + increment_value, mesg)
def __iter__(self):
"""Iterate to auto-increment the pbar with 1."""
if self.iterable is None:
raise ValueError("Must give an iterable to be used in a loop.")
for obj in self.iterable:
yield obj
self.update_with_increment_value(1)
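# Illustrative iterator usage of ProgressBar (a sketch; ``process`` is a
# stand-in for any per-item work):
# >>> for item in ProgressBar(range(100), mesg='working'):  # doctest: +SKIP
# ...     process(item)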
def _get_terminal_width():
"""Get the terminal width."""
if sys.version[0] == '2':
return 80
else:
return shutil.get_terminal_size((80, 20)).columns
def _get_http(url, temp_file_name, initial_size, file_size, timeout,
verbose_bool):
"""Safely (resume a) download to a file from http(s)."""
# Actually do the reading
req = urllib.request.Request(url)
if initial_size > 0:
req.headers['Range'] = 'bytes=%s-' % (initial_size,)
try:
response = urllib.request.urlopen(req, timeout=timeout)
except Exception:
# There is a problem that may be due to resuming, some
# servers may not support the "Range" header. Switch
# back to complete download method
logger.info('Resuming download failed (server '
'rejected the request). Attempting to '
'restart downloading the entire file.')
del req.headers['Range']
response = urllib.request.urlopen(req, timeout=timeout)
total_size = int(response.headers.get('Content-Length', '1').strip())
if initial_size > 0 and file_size == total_size:
logger.info('Resuming download failed (resume file size '
'mismatch). Attempting to restart downloading the '
'entire file.')
initial_size = 0
total_size += initial_size
if total_size != file_size:
raise RuntimeError('URL could not be parsed properly '
'(total size %s != file size %s)'
% (total_size, file_size))
mode = 'ab' if initial_size > 0 else 'wb'
progress = ProgressBar(total_size, initial_value=initial_size,
spinner=True, mesg='file_sizes',
verbose_bool=verbose_bool)
chunk_size = 8192 # 2 ** 13
with open(temp_file_name, mode) as local_file:
while True:
t0 = time.time()
chunk = response.read(chunk_size)
dt = time.time() - t0
if dt < 0.005:
chunk_size *= 2
elif dt > 0.1 and chunk_size > 8192:
chunk_size = chunk_size // 2
if not chunk:
if verbose_bool:
sys.stdout.write('\n')
sys.stdout.flush()
break
local_file.write(chunk)
progress.update_with_increment_value(len(chunk),
mesg='file_sizes')
def _chunk_write(chunk, local_file, progress):
"""Write a chunk to file and update the progress bar."""
local_file.write(chunk)
progress.update_with_increment_value(len(chunk))
@verbose
def _fetch_file(url, file_name, print_destination=True, resume=True,
hash_=None, timeout=30., verbose=None):
"""Load requested file, downloading it if needed or requested.
Parameters
----------
url: string
The url of file to be downloaded.
file_name: string
Name, along with the path, of where downloaded file will be saved.
print_destination: bool, optional
If true, destination of where file was saved will be printed after
download finishes.
resume: bool, optional
If true, try to resume partially downloaded files.
hash_ : str | None
The hash of the file to check. If None, no checking is
performed.
timeout : float
The URL open timeout.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
"""
# Adapted from NISL:
# https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
if hash_ is not None and (not isinstance(hash_, string_types) or
len(hash_) != 32):
raise ValueError('Bad hash value given, should be a 32-character '
'string:\n%s' % (hash_,))
temp_file_name = file_name + ".part"
verbose_bool = (logger.level <= 20) # 20 is info
try:
# Check file size and displaying it alongside the download url
u = urllib.request.urlopen(url, timeout=timeout)
u.close()
# this is necessary to follow any redirects
url = u.geturl()
u = urllib.request.urlopen(url, timeout=timeout)
try:
file_size = int(u.headers.get('Content-Length', '1').strip())
finally:
u.close()
del u
logger.info('Downloading %s (%s)' % (url, sizeof_fmt(file_size)))
# Triage resume
if not os.path.exists(temp_file_name):
resume = False
if resume:
with open(temp_file_name, 'rb', buffering=0) as local_file:
local_file.seek(0, 2)
initial_size = local_file.tell()
del local_file
else:
initial_size = 0
# This should never happen if our functions work properly
if initial_size > file_size:
raise RuntimeError('Local file (%s) is larger than remote '
'file (%s), cannot resume download'
% (sizeof_fmt(initial_size),
sizeof_fmt(file_size)))
elif initial_size == file_size:
# This should really only happen when a hash is wrong
# during dev updating
warn('Local file appears to be complete (file_size == '
'initial_size == %s)' % (file_size,))
else:
# Need to resume or start over
scheme = urllib.parse.urlparse(url).scheme
if scheme not in ('http', 'https'):
raise NotImplementedError('Cannot use %s' % (scheme,))
_get_http(url, temp_file_name, initial_size, file_size, timeout,
verbose_bool)
# check md5sum
if hash_ is not None:
logger.info('Verifying hash %s.' % (hash_,))
md5 = md5sum(temp_file_name)
if hash_ != md5:
raise RuntimeError('Hash mismatch for downloaded file %s, '
'expected %s but got %s'
% (temp_file_name, hash_, md5))
shutil.move(temp_file_name, file_name)
if print_destination is True:
logger.info('File saved as %s.\n' % file_name)
except Exception:
logger.error('Error while fetching file %s.'
' Dataset fetching aborted.' % url)
raise
def sizeof_fmt(num):
"""Turn number of bytes into human-readable str.
Parameters
----------
num : int
The number of bytes.
Returns
-------
size : str
The size in human-readable format.
"""
units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB']
decimals = [0, 0, 1, 2, 2, 2]
if num > 1:
exponent = min(int(log(num, 1024)), len(units) - 1)
quotient = float(num) / 1024 ** exponent
unit = units[exponent]
num_decimals = decimals[exponent]
format_string = '{0:.%sf} {1}' % (num_decimals)
return format_string.format(quotient, unit)
if num == 0:
return '0 bytes'
if num == 1:
return '1 byte'
class SizeMixin(object):
"""Estimate MNE object sizes."""
@property
def _size(self):
"""Estimate the object size."""
try:
size = object_size(self.info)
except Exception:
warn('Could not get size for self.info')
return -1
if hasattr(self, 'data'):
size += object_size(self.data)
elif hasattr(self, '_data'):
size += object_size(self._data)
return size
def __hash__(self):
"""Hash the object.
Returns
-------
hash : int
The hash
"""
from .evoked import Evoked
from .epochs import BaseEpochs
from .io.base import BaseRaw
if isinstance(self, Evoked):
return object_hash(dict(info=self.info, data=self.data))
elif isinstance(self, (BaseEpochs, BaseRaw)):
_check_preload(self, "Hashing ")
return object_hash(dict(info=self.info, data=self._data))
else:
raise RuntimeError('Hashing unknown object type: %s' % type(self))
def _url_to_local_path(url, path):
"""Mirror a url path in a local destination (keeping folder structure)."""
destination = urllib.parse.urlparse(url).path
# First char should be '/', and it needs to be discarded
if len(destination) < 2 or destination[0] != '/':
raise ValueError('Invalid URL')
destination = os.path.join(path,
urllib.request.url2pathname(destination)[1:])
return destination
def _get_stim_channel(stim_channel, info, raise_error=True):
"""Determine the appropriate stim_channel.
First, 'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2', etc.
are read. If these are not found, it will fall back to 'STI 014' if
present, then fall back to the first channel of type 'stim', if present.
Parameters
----------
stim_channel : str | list of str | None
The stim channel selected by the user.
info : instance of Info
An information structure containing information about the channels.
Returns
-------
stim_channel : str | list of str
The name of the stim channel(s) to use
"""
if stim_channel is not None:
if not isinstance(stim_channel, list):
_validate_type(stim_channel, 'str', "Stim channel")
stim_channel = [stim_channel]
for channel in stim_channel:
_validate_type(channel, 'str', "Each provided stim channel")
return stim_channel
stim_channel = list()
ch_count = 0
ch = get_config('MNE_STIM_CHANNEL')
while ch is not None and ch in info['ch_names']:
stim_channel.append(ch)
ch_count += 1
ch = get_config('MNE_STIM_CHANNEL_%d' % ch_count)
if ch_count > 0:
return stim_channel
if 'STI101' in info['ch_names']: # combination channel for newer systems
return ['STI101']
if 'STI 014' in info['ch_names']: # for older systems
return ['STI 014']
from .io.pick import pick_types
stim_channel = pick_types(info, meg=False, ref_meg=False, stim=True)
if len(stim_channel) > 0:
stim_channel = [info['ch_names'][ch_] for ch_ in stim_channel]
elif raise_error:
raise ValueError("No stim channels found. Consider specifying them "
"manually using the 'stim_channel' parameter.")
return stim_channel
def _check_fname(fname, overwrite=False, must_exist=False):
"""Check for file existence."""
_validate_type(fname, 'str', 'fname')
if must_exist and not op.isfile(fname):
raise IOError('File "%s" does not exist' % fname)
if op.isfile(fname):
if not overwrite:
raise IOError('Destination file exists. Please use option '
'"overwrite=True" to force overwriting.')
elif overwrite != 'read':
logger.info('Overwriting existing file.')
def _check_subject(class_subject, input_subject, raise_error=True):
"""Get subject name from class."""
if input_subject is not None:
_validate_type(input_subject, 'str', "subject input")
return input_subject
elif class_subject is not None:
_validate_type(class_subject, 'str',
"Either subject input or class subject attribute")
return class_subject
else:
if raise_error is True:
raise ValueError('Neither subject input nor class subject '
'attribute was a string')
return None
def _check_preload(inst, msg):
"""Ensure data are preloaded."""
from .epochs import BaseEpochs
from .evoked import Evoked
from .time_frequency import _BaseTFR
if isinstance(inst, (_BaseTFR, Evoked)):
pass
else:
name = "epochs" if isinstance(inst, BaseEpochs) else 'raw'
if not inst.preload:
raise RuntimeError(
"By default, MNE does not load data into main memory to "
"conserve resources. " + msg + ' requires %s data to be '
'loaded. Use preload=True (or string) in the constructor or '
'%s.load_data().' % (name, name))
def _check_compensation_grade(inst, inst2, name, name2, ch_names=None):
"""Ensure that objects have same compensation_grade."""
from .io.pick import pick_channels, pick_info
from .io.compensator import get_current_comp
if None in [inst.info, inst2.info]:
return
if ch_names is None:
grade = inst.compensation_grade
grade2 = inst2.compensation_grade
else:
info = inst.info.copy()
info2 = inst2.info.copy()
# pick channels
for t_info in [info, info2]:
if t_info['comps']:
t_info['comps'] = []
picks = pick_channels(t_info['ch_names'], ch_names)
pick_info(t_info, picks, copy=False)
# get compensation grades
grade = get_current_comp(info)
grade2 = get_current_comp(info2)
# perform check
if grade != grade2:
msg = 'Compensation grade of %s (%d) and %s (%d) don\'t match'
raise RuntimeError(msg % (name, inst.compensation_grade,
name2, inst2.compensation_grade))
def _check_pandas_installed(strict=True):
"""Aux function."""
try:
import pandas
return pandas
except ImportError:
if strict is True:
raise RuntimeError('For this functionality to work, the Pandas '
'library is required.')
else:
return False
def _check_pandas_index_arguments(index, defaults):
"""Check pandas index arguments."""
if not any(isinstance(index, k) for k in (list, tuple)):
index = [index]
invalid_choices = [e for e in index if e not in defaults]
if invalid_choices:
options = [', '.join(e) for e in [invalid_choices, defaults]]
raise ValueError('[%s] is not a valid option. Valid index '
'values are \'None\' or %s' % tuple(options))
def _clean_names(names, remove_whitespace=False, before_dash=True):
"""Remove white-space on topo matching.
This function handles different naming
conventions for old vs. new VectorView systems (`remove_whitespace`).
It also allows removing system-specific parts of CTF channel names
(`before_dash`).
Usage
-----
# for new VectorView (only inside layout)
ch_names = _clean_names(epochs.ch_names, remove_whitespace=True)
# for CTF
ch_names = _clean_names(epochs.ch_names, before_dash=True)
"""
cleaned = []
for name in names:
if ' ' in name and remove_whitespace:
name = name.replace(' ', '')
if '-' in name and before_dash:
name = name.split('-')[0]
if name.endswith('_v'):
name = name[:-2]
cleaned.append(name)
return cleaned
def _check_type_picks(picks):
"""Guarantee type integrity of picks."""
err_msg = 'picks must be None, a list or an array of integers'
if picks is None:
pass
elif isinstance(picks, list):
for pick in picks:
_validate_type(pick, 'int', 'Each pick')
picks = np.array(picks)
elif isinstance(picks, np.ndarray):
if not picks.dtype.kind == 'i':
raise TypeError(err_msg)
else:
raise TypeError(err_msg)
return picks
@nottest
def run_tests_if_main(measure_mem=False):
"""Run tests in a given file if it is run as a script."""
local_vars = inspect.currentframe().f_back.f_locals
if not local_vars.get('__name__', '') == '__main__':
return
# we are in a "__main__"
try:
import faulthandler
faulthandler.enable()
except Exception:
pass
with warnings.catch_warnings(record=True): # memory_usage internal dep.
mem = int(round(max(memory_usage(-1)))) if measure_mem else -1
if mem >= 0:
print('Memory consumption after import: %s' % mem)
t0 = time.time()
peak_mem, peak_name = mem, 'import'
max_elapsed, elapsed_name = 0, 'N/A'
count = 0
for name in sorted(list(local_vars.keys()), key=lambda x: x.lower()):
val = local_vars[name]
if name.startswith('_'):
continue
elif callable(val) and name.startswith('test'):
count += 1
doc = val.__doc__.strip() if val.__doc__ else name
sys.stdout.write('%s ... ' % doc)
sys.stdout.flush()
try:
t1 = time.time()
if measure_mem:
with warnings.catch_warnings(record=True): # dep warn
mem = int(round(max(memory_usage((val, (), {})))))
else:
val()
mem = -1
if mem >= peak_mem:
peak_mem, peak_name = mem, name
mem = (', mem: %s MB' % mem) if mem >= 0 else ''
elapsed = int(round(time.time() - t1))
if elapsed >= max_elapsed:
max_elapsed, elapsed_name = elapsed, name
sys.stdout.write('time: %0.3f sec%s\n' % (elapsed, mem))
sys.stdout.flush()
except Exception as err:
if 'skiptest' in err.__class__.__name__.lower():
sys.stdout.write('SKIP (%s)\n' % str(err))
sys.stdout.flush()
else:
raise
elapsed = int(round(time.time() - t0))
sys.stdout.write('Total: %s tests\n• %0.3f sec (%0.3f sec for %s)\n• '
'Peak memory %s MB (%s)\n'
% (count, elapsed, max_elapsed, elapsed_name, peak_mem,
peak_name))
class ArgvSetter(object):
"""Temporarily set sys.argv."""
def __init__(self, args=(), disable_stdout=True,
disable_stderr=True): # noqa: D102
self.argv = list(('python',) + args)
self.stdout = StringIO() if disable_stdout else sys.stdout
self.stderr = StringIO() if disable_stderr else sys.stderr
def __enter__(self): # noqa: D105
self.orig_argv = sys.argv
sys.argv = self.argv
self.orig_stdout = sys.stdout
sys.stdout = self.stdout
self.orig_stderr = sys.stderr
sys.stderr = self.stderr
return self
def __exit__(self, *args): # noqa: D105
sys.argv = self.orig_argv
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
class SilenceStdout(object):
"""Silence stdout."""
def __enter__(self): # noqa: D105
self.stdout = sys.stdout
sys.stdout = StringIO()
return self
def __exit__(self, *args): # noqa: D105
sys.stdout = self.stdout
def md5sum(fname, block_size=1048576): # 2 ** 20
"""Calculate the md5sum for a file.
Parameters
----------
fname : str
Filename.
block_size : int
Block size to use when reading.
Returns
-------
hash_ : str
The hexadecimal digest of the hash.
"""
md5 = hashlib.md5()
with open(fname, 'rb') as fid:
while True:
data = fid.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
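# Illustrative usage of md5sum (a sketch; ``fname`` and ``expected_hash``
# are placeholders for a real file and its known digest):
# >>> md5sum(fname) == expected_hash  # doctest: +SKIP
# True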
def create_slices(start, stop, step=None, length=1):
"""Generate slices of time indexes.
Parameters
----------
start : int
Index where first slice should start.
stop : int
Index where last slice should maximally end.
length : int
Number of time sample included in a given slice.
step: int | None
Number of time samples separating two slices.
If step = None, step = length.
Returns
-------
slices : list
List of slice objects.
"""
# default parameters
if step is None:
step = length
# slicing
slices = [slice(t, t + length, 1) for t in
range(start, stop - length + 1, step)]
return slices
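# A minimal usage sketch for create_slices: with the default step equal to
# length, the windows are contiguous and non-overlapping.
def _demo_create_slices():
    """Illustrative only."""
    return create_slices(0, 30, length=10)
    # -> [slice(0, 10, 1), slice(10, 20, 1), slice(20, 30, 1)]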
def _time_mask(times, tmin=None, tmax=None, sfreq=None, raise_error=True):
"""Safely find sample boundaries."""
orig_tmin = tmin
orig_tmax = tmax
tmin = -np.inf if tmin is None else tmin
tmax = np.inf if tmax is None else tmax
if not np.isfinite(tmin):
tmin = times[0]
if not np.isfinite(tmax):
tmax = times[-1]
if sfreq is not None:
# Push to a bit past the nearest sample boundary first
sfreq = float(sfreq)
tmin = int(round(tmin * sfreq)) / sfreq - 0.5 / sfreq
tmax = int(round(tmax * sfreq)) / sfreq + 0.5 / sfreq
if raise_error and tmin > tmax:
raise ValueError('tmin (%s) must be less than or equal to tmax (%s)'
% (orig_tmin, orig_tmax))
mask = (times >= tmin)
mask &= (times <= tmax)
if raise_error and not mask.any():
raise ValueError('No samples remain when using tmin=%s and tmax=%s '
'(original time bounds are [%s, %s])'
% (orig_tmin, orig_tmax, times[0], times[-1]))
return mask
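# A minimal usage sketch for _time_mask, assuming only NumPy: select the
# samples of a 1 kHz time axis that fall between 1 and 3 seconds.
def _demo_time_mask():
    """Illustrative only."""
    times = np.arange(0, 5, 0.001)  # 5 s sampled at 1 kHz
    mask = _time_mask(times, tmin=1., tmax=3., sfreq=1000.)
    return times[mask]  # ~2001 samples spanning 1.0 to 3.0 s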
def random_permutation(n_samples, random_state=None):
"""Emulate the randperm matlab function.
It returns a vector containing a random permutation of the
    integers between 0 and n_samples-1. It returns the same random numbers
    as the randperm MATLAB function whenever random_state matches
    MATLAB's random seed.
    This function is useful for comparing against MATLAB scripts
    that use the randperm function.
    Note: the randperm(n_samples) MATLAB function generates a random
    sequence between 1 and n_samples, whereas
    random_permutation(n_samples, random_state) generates
    a random sequence between 0 and n_samples-1, that is:
    randperm(n_samples) = random_permutation(n_samples, random_state) + 1
Parameters
----------
n_samples : int
        Number of elements to permute; the permuted sequence contains the
        integers 0 to n_samples - 1 inclusive.
random_state : int | None
Random seed for initializing the pseudo-random number generator.
Returns
-------
randperm : ndarray, int
        Randomly permuted sequence between 0 and n_samples - 1.
"""
rng = check_random_state(random_state)
idx = rng.rand(n_samples)
randperm = np.argsort(idx)
return randperm
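# A minimal usage sketch for random_permutation: a fixed random_state gives
# a reproducible permutation of the integers 0..n_samples-1.
def _demo_random_permutation():
    """Illustrative only."""
    perm = random_permutation(5, random_state=0)
    assert sorted(perm) == [0, 1, 2, 3, 4]
    return perm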
def compute_corr(x, y):
"""Compute pearson correlations between a vector and a matrix."""
if len(x) == 0 or len(y) == 0:
raise ValueError('x or y has zero length')
X = np.array(x, float)
Y = np.array(y, float)
X -= X.mean(0)
Y -= Y.mean(0)
x_sd = X.std(0, ddof=1)
    # If X and Y have the same shape, a full correlation matrix is computed,
    # so y_sd needs a trailing axis to broadcast against it; otherwise the
    # 1-D y_sd already aligns with the result.
y_sd = Y.std(0, ddof=1)[:, None if X.shape == Y.shape else Ellipsis]
return (np.dot(X.T, Y) / float(len(X) - 1)) / (x_sd * y_sd)
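# A quick consistency sketch for compute_corr, assuming only NumPy: for a
# single pair of vectors it should agree with np.corrcoef.
def _demo_compute_corr():
    """Illustrative only."""
    rng = np.random.RandomState(0)
    x = rng.randn(100)
    y = rng.randn(100, 1)
    r = compute_corr(x, y)
    assert np.allclose(r, np.corrcoef(x, y[:, 0])[0, 1])
    return r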
def grand_average(all_inst, interpolate_bads=True, drop_bads=True):
"""Make grand average of a list evoked or AverageTFR data.
For evoked data, the function interpolates bad channels based on
`interpolate_bads` parameter. If `interpolate_bads` is True, the grand
average file will contain good channels and the bad channels interpolated
from the good MEG/EEG channels.
For AverageTFR data, the function takes the subset of channels not marked
as bad in any of the instances.
The grand_average.nave attribute will be equal to the number
of evoked datasets used to calculate the grand average.
    Note: A grand average of Evoked data should not be used for source
    localization.
Parameters
----------
all_inst : list of Evoked or AverageTFR data
The evoked datasets.
interpolate_bads : bool
If True, bad MEG and EEG channels are interpolated. Ignored for
AverageTFR.
drop_bads : bool
If True, drop all bad channels marked as bad in any data set.
        If neither interpolate_bads nor drop_bads is True, every channel
        marked as bad in at least one of the input files will be marked as
        bad in the output, but no interpolation or dropping is performed.
Returns
-------
grand_average : Evoked | AverageTFR
The grand average data. Same type as input.
Notes
-----
.. versionadded:: 0.11.0
"""
# check if all elements in the given list are evoked data
from .evoked import Evoked
from .time_frequency import AverageTFR
from .channels.channels import equalize_channels
assert len(all_inst) > 1
inst_type = type(all_inst[0])
_validate_type(all_inst[0], (Evoked, AverageTFR), 'All elements')
for inst in all_inst:
_validate_type(inst, inst_type, 'All elements', 'of the same type')
# Copy channels to leave the original evoked datasets intact.
all_inst = [inst.copy() for inst in all_inst]
# Interpolates if necessary
if isinstance(all_inst[0], Evoked):
if interpolate_bads:
all_inst = [inst.interpolate_bads() if len(inst.info['bads']) > 0
else inst for inst in all_inst]
equalize_channels(all_inst) # apply equalize_channels
from .evoked import combine_evoked as combine
else: # isinstance(all_inst[0], AverageTFR):
from .time_frequency.tfr import combine_tfr as combine
if drop_bads:
bads = list(set((b for inst in all_inst for b in inst.info['bads'])))
if bads:
for inst in all_inst:
inst.drop_channels(bads)
# make grand_average object using combine_[evoked/tfr]
grand_average = combine(all_inst, weights='equal')
# change the grand_average.nave to the number of Evokeds
grand_average.nave = len(all_inst)
# change comment field
grand_average.comment = "Grand average (n = %d)" % grand_average.nave
return grand_average
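# Typical use of grand_average (an illustrative sketch; ``evokeds`` stands
# in for a list of Evoked objects, e.g. one per subject):
#
#     grand_ave = grand_average(evokeds, interpolate_bads=True)
#     print(grand_ave.nave)  # number of datasets that were averaged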
def _get_root_dir():
"""Get as close to the repo root as possible."""
root_dir = op.abspath(op.dirname(__file__))
up_dir = op.join(root_dir, '..')
if op.isfile(op.join(up_dir, 'setup.py')) and all(
op.isdir(op.join(up_dir, x)) for x in ('mne', 'examples', 'doc')):
root_dir = op.abspath(up_dir)
return root_dir
def sys_info(fid=None, show_paths=False):
"""Print the system information for debugging.
This function is useful for printing system information
to help triage bugs.
Parameters
----------
fid : file-like | None
The file to write to. Will be passed to :func:`print()`.
Can be None to use :data:`sys.stdout`.
show_paths : bool
If True, print paths for each module.
Examples
--------
Running this function with no arguments prints an output that is
useful when submitting bug reports::
>>> import mne
>>> mne.sys_info() # doctest: +SKIP
Platform: Linux-4.2.0-27-generic-x86_64-with-Ubuntu-15.10-wily
Python: 2.7.10 (default, Oct 14 2015, 16:09:02) [GCC 5.2.1 20151010]
Executable: /usr/bin/python
mne: 0.12.dev0
numpy: 1.12.0.dev0+ec5bd81 {lapack=mkl_rt, blas=mkl_rt}
scipy: 0.18.0.dev0+3deede3
matplotlib: 1.5.1+1107.g1fa2697
sklearn: 0.18.dev0
nibabel: 2.1.0dev
mayavi: 4.3.1
pycuda: 2015.1.3
skcuda: 0.5.2
pandas: 0.17.1+25.g547750a
""" # noqa: E501
ljust = 15
out = 'Platform:'.ljust(ljust) + platform.platform() + '\n'
out += 'Python:'.ljust(ljust) + str(sys.version).replace('\n', ' ') + '\n'
out += 'Executable:'.ljust(ljust) + sys.executable + '\n'
out += 'CPU:'.ljust(ljust) + ('%s: %s cores\n' %
(platform.processor(),
multiprocessing.cpu_count()))
out += 'Memory:'.ljust(ljust)
try:
import psutil
except ImportError:
out += 'Unavailable (requires "psutil" package)'
else:
out += '%0.1f GB\n' % (psutil.virtual_memory().total / float(2 ** 30),)
out += '\n'
old_stdout = sys.stdout
capture = StringIO()
try:
sys.stdout = capture
np.show_config()
finally:
sys.stdout = old_stdout
lines = capture.getvalue().split('\n')
libs = []
for li, line in enumerate(lines):
for key in ('lapack', 'blas'):
if line.startswith('%s_opt_info' % key):
lib = lines[li + 1]
if 'NOT AVAILABLE' in lib:
lib = 'unknown'
else:
lib = lib.split('[')[1].split("'")[1]
libs += ['%s=%s' % (key, lib)]
libs = ', '.join(libs)
version_texts = dict(pycuda='VERSION_TEXT')
for mod_name in ('mne', 'numpy', 'scipy', 'matplotlib', '', 'sklearn',
'nibabel', 'mayavi', 'pycuda', 'skcuda', 'pandas'):
if mod_name == '':
out += '\n'
continue
out += ('%s:' % mod_name).ljust(ljust)
try:
mod = __import__(mod_name)
if mod_name == 'mayavi':
# the real test
from mayavi import mlab # noqa, analysis:ignore
except Exception:
out += 'Not found\n'
else:
version = getattr(mod, version_texts.get(mod_name, '__version__'))
extra = (' (%s)' % op.dirname(mod.__file__)) if show_paths else ''
if mod_name == 'numpy':
extra = ' {%s}%s' % (libs, extra)
elif mod_name == 'matplotlib':
extra = ' {backend=%s}%s' % (mod.get_backend(), extra)
elif mod_name == 'mayavi':
try:
from pyface.qt import qt_api
except Exception:
qt_api = 'unknown'
extra = ' {qt_api=%s}%s' % (qt_api, extra)
out += '%s%s\n' % (version, extra)
print(out, end='', file=fid)
class ETSContext(object):
"""Add more meaningful message to errors generated by ETS Toolkit."""
def __enter__(self): # noqa: D105
pass
def __exit__(self, type, value, traceback): # noqa: D105
if isinstance(value, SystemExit) and value.code.\
startswith("This program needs access to the screen"):
value.code += ("\nThis can probably be solved by setting "
"ETS_TOOLKIT=qt4. On bash, type\n\n $ export "
"ETS_TOOLKIT=qt4\n\nand run the command again.")
def open_docs(kind=None, version=None):
"""Launch a new web browser tab with the MNE documentation.
Parameters
----------
kind : str | None
Can be "api" (default), "tutorials", or "examples".
The default can be changed by setting the configuration value
MNE_DOCS_KIND.
version : str | None
Can be "stable" (default) or "dev".
The default can be changed by setting the configuration value
MNE_DOCS_VERSION.
"""
if kind is None:
kind = get_config('MNE_DOCS_KIND', 'api')
help_dict = dict(api='python_reference.html', tutorials='tutorials.html',
examples='auto_examples/index.html')
if kind not in help_dict:
raise ValueError('kind must be one of %s, got %s'
% (sorted(help_dict.keys()), kind))
kind = help_dict[kind]
if version is None:
version = get_config('MNE_DOCS_VERSION', 'stable')
versions = ('stable', 'dev')
if version not in versions:
        raise ValueError('version must be one of %s, got %s'
                         % (versions, version))
webbrowser.open_new_tab('https://martinos.org/mne/%s/%s' % (version, kind))
def _is_numeric(n):
return isinstance(n, (np.integer, np.floating, int, float))
def _validate_type(item, types=None, item_name=None, type_name=None):
"""Validate that `item` is an instance of `types`.
Parameters
----------
item : obj
The thing to be checked.
types : type | tuple of types | str
The types to be checked against. If str, must be one of 'str', 'int',
'numeric'.
"""
if types == "int":
_ensure_int(item, name=item_name)
return # terminate prematurely
elif types == "str":
types = string_types
type_name = "str" if type_name is None else type_name
elif types == "numeric":
types = (np.integer, np.floating, int, float)
type_name = "numeric" if type_name is None else type_name
elif types == "info":
from mne.io import Info as types
type_name = "Info" if type_name is None else type_name
item_name = "Info" if item_name is None else item_name
if type_name is None:
iter_types = ([types] if not isinstance(types, (list, tuple))
else types)
type_name = ', '.join(cls.__name__ for cls in iter_types)
if not isinstance(item, types):
        raise TypeError('%s must be an instance of %s, got %s instead.'
                        % (item_name, type_name, type(item)))
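# A minimal usage sketch for _validate_type: the string aliases expand to
# the matching type checks.
def _demo_validate_type():
    """Illustrative only."""
    _validate_type(3.5, 'numeric', 'alpha')  # passes silently
    try:
        _validate_type('a', 'numeric', 'alpha')
    except TypeError:
        pass  # raised as expected for a non-numeric value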
def linkcode_resolve(domain, info):
"""Determine the URL corresponding to a Python object.
Parameters
----------
domain : str
Only useful when 'py'.
info : dict
With keys "module" and "fullname".
Returns
-------
url : str
The code URL.
Notes
-----
This has been adapted to deal with our "verbose" decorator.
Adapted from SciPy (doc/source/conf.py).
"""
import mne
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except Exception:
return None
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except Exception:
fn = None
if not fn:
return None
if fn == '<string>': # verbose decorator
fn = inspect.getmodule(obj).__file__
fn = op.relpath(fn, start=op.dirname(mne.__file__))
fn = '/'.join(op.normpath(fn).split(os.sep)) # in case on Windows
try:
source, lineno = inspect.getsourcelines(obj)
except Exception:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
if 'dev' in mne.__version__:
kind = 'master'
else:
kind = 'maint/%s' % ('.'.join(mne.__version__.split('.')[:2]))
return "http://github.com/mne-tools/mne-python/blob/%s/mne/%s%s" % ( # noqa
kind, fn, linespec)
def _check_if_nan(data, msg=" to be plotted"):
"""Raise if any of the values are NaN."""
if not np.isfinite(data).all():
raise ValueError("Some of the values {} are NaN.".format(msg))
|
teonlamont/mne-python
|
mne/utils.py
|
Python
|
bsd-3-clause
| 96,393
|
[
"Mayavi"
] |
ecf16f45ad300336ef68f456289c90f7ce154f6089318be91aa7cacde7e878a6
|
try: paraview.simple
except: from paraview.simple import *
def RequestDataDescription(datadescription):
"Callback to populate the request for current timestep"
if datadescription.GetForceOutput() == True:
for i in range(datadescription.GetNumberOfInputDescriptions()):
datadescription.GetInputDescription(i).AllFieldsOn()
datadescription.GetInputDescription(i).GenerateMeshOn()
return
timestep = datadescription.GetTimeStep()
input_name = 'input'
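    # The output frequency here is 1, so the condition below is always true
    # and all fields plus the mesh are requested at every timestep.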
if (timestep % 1 == 0) :
datadescription.GetInputDescriptionByName(input_name).AllFieldsOn()
datadescription.GetInputDescriptionByName(input_name).GenerateMeshOn()
else:
datadescription.GetInputDescriptionByName(input_name).AllFieldsOff()
datadescription.GetInputDescriptionByName(input_name).GenerateMeshOff()
def DoCoProcessing(datadescription):
"Callback to do co-processing for current timestep"
cp_writers = []
cp_views = []
timestep = datadescription.GetTimeStep()
RenderView1 = CreateView( CreateRenderView, "image_%t.png", 1, 0, 1, cp_views )
RenderView1.LightSpecularColor = [1.0, 1.0, 1.0]
RenderView1.ViewSize = [800,600]
RenderView1.KeyLightAzimuth = 10.0
RenderView1.UseTexturedBackground = 0
RenderView1.UseLight = 1
RenderView1.CameraPosition = [1.7376650215555172, 2.8315915091611896, 2.0982689626686302]
RenderView1.FillLightKFRatio = 3.0
RenderView1.Background2 = [0.0, 0.0, 0.16470588235294117]
RenderView1.FillLightAzimuth = -10.0
RenderView1.LODResolution = 50.0
RenderView1.BackgroundTexture = []
RenderView1.InteractionMode = '3D'
RenderView1.StencilCapable = 1
RenderView1.LightIntensity = 1.0
RenderView1.CameraFocalPoint = [0.34157201718136226, 0.38772622127097012, 0.2886835956608988]
RenderView1.ImageReductionFactor = 2
RenderView1.CameraViewAngle = 30.0
RenderView1.CameraParallelScale = 0.8660254037844386
RenderView1.EyeAngle = 2.0
RenderView1.HeadLightKHRatio = 3.0
RenderView1.StereoRender = 0
RenderView1.KeyLightIntensity = 0.75
RenderView1.BackLightAzimuth = 110.0
RenderView1.OrientationAxesInteractivity = 0
RenderView1.UseInteractiveRenderingForSceenshots = 0
RenderView1.UseOffscreenRendering = 0
RenderView1.Background = [0.0, 0.0, 0.0]
RenderView1.UseOffscreenRenderingForScreenshots = 0
RenderView1.NonInteractiveRenderDelay = 2
RenderView1.CenterOfRotation = [0.5, 0.5, 0.5]
RenderView1.CameraParallelProjection = 0
RenderView1.CompressorConfig = 'vtkSquirtCompressor 0 3'
RenderView1.HeadLightWarmth = 0.40000000000000002
RenderView1.MaximumNumberOfPeels = 4
RenderView1.LightDiffuseColor = [1.0, 1.0, 1.0]
RenderView1.StereoType = 'Red-Blue'
RenderView1.DepthPeeling = 1
RenderView1.BackLightKBRatio = 3.5
RenderView1.StereoCapableWindow = 1
RenderView1.CameraViewUp = [-0.28742692126348202, -0.45848482521721556, 0.84093842222753479]
RenderView1.LightType = 'HeadLight'
RenderView1.LightAmbientColor = [1.0, 1.0, 1.0]
RenderView1.RemoteRenderThreshold = 3.0
RenderView1.CacheKey = 0.0
RenderView1.UseCache = 0
RenderView1.KeyLightElevation = 50.0
RenderView1.CenterAxesVisibility = 0
RenderView1.MaintainLuminance = 1
RenderView1.StillRenderImageReductionFactor = 1
RenderView1.BackLightWarmth = 0.5
RenderView1.FillLightElevation = -75.0
RenderView1.MultiSamples = 0
RenderView1.FillLightWarmth = 0.40000000000000002
RenderView1.AlphaBitPlanes = 1
RenderView1.LightSwitch = 0
RenderView1.OrientationAxesVisibility = 1
RenderView1.CameraClippingRange = [1.3728714095026497, 5.2446740854754132]
RenderView1.BackLightElevation = 0.0
RenderView1.ViewTime = 0.0
RenderView1.OrientationAxesOutlineColor = [1.0, 1.0, 1.0]
RenderView1.LODThreshold = 18.199999999999999
RenderView1.CollectGeometryThreshold = 100.0
RenderView1.UseGradientBackground = 0
RenderView1.KeyLightWarmth = 0.59999999999999998
RenderView1.OrientationAxesLabelColor = [1.0, 1.0, 1.0]
test2_ = CreateProducer( datadescription, "input" )
CellDatatoPointData1 = CellDatatoPointData( guiName="CellDatatoPointData1", PieceInvariant=0, PassCellData=1 )
Calculator1 = Calculator( guiName="Calculator1", Function='mag(realtempx*iHat+realtempy*jHat+realtempz*kHat)', ReplacementValue=0.0, ResultArrayName='Result', ReplaceInvalidResults=1, AttributeMode='point_data', CoordinateResults=0 )
a1_realtempx_PiecewiseFunction = CreatePiecewiseFunction( Points=[-1.49819372840493, 0.0, 0.5, 0.0, 1.4981937284049314, 1.0, 0.5, 0.0] )
a1_Result_PiecewiseFunction = CreatePiecewiseFunction( Points=[2.1146959137836494e-14, 0.99499994516372681, 0.5, 0.0, 0.88075295366304562, 0.0, 0.61720967292785645, 0.39072844386100769, 1.7310076951980591, 0.42666664719581604, 0.5, 0.0] )
a1_realtempx_PVLookupTable = GetLookupTableForArray( "realtempx", 1, Discretize=1, RGBPoints=[-1.49819372840493, 0.23000000000000001, 0.29899999999999999, 0.754, 1.4981937284049311, 0.70599999999999996, 0.016, 0.14999999999999999], UseLogScale=0, VectorComponent=0, NanColor=[0.25, 0.0, 0.0], NumberOfTableValues=256, ColorSpace='Diverging', VectorMode='Magnitude', HSVWrap=0, ScalarRangeInitialized=1.0, LockScalarRange=0 )
a1_Result_PVLookupTable = GetLookupTableForArray( "Result", 1, Discretize=1, RGBPoints=[2.1146959137836494e-14, 0.0, 1.0, 1.0, 0.77895343891915225, 0.0, 0.0, 0.95294117647058818, 0.86550382102127787, 0.0, 0.0, 0.60392156862745094, 0.95205420312340361, 0.88235294117647056, 0.0, 0.070588235294117646, 1.7310076420425347, 1.0, 1.0, 0.0], UseLogScale=0, VectorComponent=0, NanColor=[1.0, 1.0, 0.0], NumberOfTableValues=256, ColorSpace='RGB', VectorMode='Magnitude', HSVWrap=0, ScalarRangeInitialized=1.0, LockScalarRange=0 )
SetActiveSource(test2_)
DataRepresentation1 = Show()
DataRepresentation1.CubeAxesZAxisVisibility = 1
DataRepresentation1.SelectionPointLabelColor = [0.5, 0.5, 0.5]
DataRepresentation1.SelectionPointFieldDataArrayName = 'vtkOriginalPointIds'
DataRepresentation1.SuppressLOD = 0
DataRepresentation1.CubeAxesXGridLines = 0
DataRepresentation1.CubeAxesYAxisTickVisibility = 1
DataRepresentation1.CubeAxesColor = [1.0, 1.0, 1.0]
DataRepresentation1.Position = [0.0, 0.0, 0.0]
DataRepresentation1.BackfaceRepresentation = 'Follow Frontface'
DataRepresentation1.SelectionOpacity = 1.0
DataRepresentation1.SelectionPointLabelShadow = 0
DataRepresentation1.CubeAxesYGridLines = 0
DataRepresentation1.CubeAxesZAxisRange = [0.0, 1.0]
DataRepresentation1.OrientationMode = 'Direction'
DataRepresentation1.Source.TipResolution = 6
DataRepresentation1.ScaleMode = 'No Data Scaling Off'
DataRepresentation1.Diffuse = 1.0
DataRepresentation1.SelectionUseOutline = 0
DataRepresentation1.CubeAxesZTitle = 'Z-Axis'
DataRepresentation1.Specular = 0.10000000000000001
DataRepresentation1.SelectionVisibility = 1
DataRepresentation1.InterpolateScalarsBeforeMapping = 1
DataRepresentation1.CubeAxesZAxisTickVisibility = 1
DataRepresentation1.Origin = [0.0, 0.0, 0.0]
DataRepresentation1.CubeAxesVisibility = 0
DataRepresentation1.Scale = [1.0, 1.0, 1.0]
DataRepresentation1.SelectionCellLabelJustification = 'Left'
DataRepresentation1.DiffuseColor = [1.0, 1.0, 1.0]
DataRepresentation1.Shade = 0
DataRepresentation1.SelectionCellLabelOpacity = 1.0
DataRepresentation1.CubeAxesInertia = 1
DataRepresentation1.Source = "Arrow"
DataRepresentation1.Source.Invert = 0
DataRepresentation1.Masking = 0
DataRepresentation1.Opacity = 1.0
DataRepresentation1.LineWidth = 1.0
DataRepresentation1.MeshVisibility = 0
DataRepresentation1.Visibility = 0
DataRepresentation1.SelectionCellLabelFontSize = 18
DataRepresentation1.CubeAxesCornerOffset = 0.0
DataRepresentation1.SelectionPointLabelJustification = 'Left'
DataRepresentation1.SelectionPointLabelVisibility = 0
DataRepresentation1.SelectOrientationVectors = ''
DataRepresentation1.CubeAxesTickLocation = 'Inside'
DataRepresentation1.BackfaceDiffuseColor = [1.0, 1.0, 1.0]
DataRepresentation1.CubeAxesYAxisVisibility = 1
DataRepresentation1.SelectionPointLabelFontFamily = 'Arial'
DataRepresentation1.Source.ShaftResolution = 6
DataRepresentation1.CubeAxesFlyMode = 'Closest Triad'
DataRepresentation1.SelectScaleArray = ''
DataRepresentation1.CubeAxesYTitle = 'Y-Axis'
DataRepresentation1.ColorAttributeType = 'POINT_DATA'
DataRepresentation1.SpecularPower = 100.0
DataRepresentation1.Texture = []
DataRepresentation1.SelectionCellLabelShadow = 0
DataRepresentation1.AmbientColor = [1.0, 1.0, 1.0]
DataRepresentation1.MapScalars = 1
DataRepresentation1.PointSize = 2.0
DataRepresentation1.Source.TipLength = 0.34999999999999998
DataRepresentation1.SelectionCellLabelFormat = ''
DataRepresentation1.Scaling = 0
DataRepresentation1.StaticMode = 0
DataRepresentation1.SelectionCellLabelColor = [0.0, 1.0, 0.0]
DataRepresentation1.SliceMode = 'XY Plane'
DataRepresentation1.Source.TipRadius = 0.10000000000000001
DataRepresentation1.EdgeColor = [0.0, 0.0, 0.50000762951094835]
DataRepresentation1.CubeAxesXAxisTickVisibility = 1
DataRepresentation1.SelectionCellLabelVisibility = 0
DataRepresentation1.NonlinearSubdivisionLevel = 1
DataRepresentation1.CubeAxesXAxisRange = [0.0, 1.0]
DataRepresentation1.Representation = 'Outline'
DataRepresentation1.CubeAxesYAxisRange = [0.0, 1.0]
DataRepresentation1.CustomBounds = [0.0, 1.0, 0.0, 1.0, 0.0, 1.0]
DataRepresentation1.Orientation = [0.0, 0.0, 0.0]
DataRepresentation1.CubeAxesEnableCustomAxisRange = 0
DataRepresentation1.CubeAxesXTitle = 'X-Axis'
DataRepresentation1.ScalarOpacityUnitDistance = 0.013531646934131857
DataRepresentation1.BackfaceOpacity = 1.0
DataRepresentation1.SelectionPointLabelFontSize = 18
DataRepresentation1.SelectionCellFieldDataArrayName = 'realtempx'
DataRepresentation1.SelectionColor = [1.0, 0.0, 1.0]
DataRepresentation1.Ambient = 0.0
DataRepresentation1.VolumeRenderingMode = 'Smart'
DataRepresentation1.CubeAxesXAxisMinorTickVisibility = 1
DataRepresentation1.ScaleFactor = 0.10000000000000001
DataRepresentation1.BackfaceAmbientColor = [1.0, 1.0, 1.0]
DataRepresentation1.Slice = 0
DataRepresentation1.Source.ShaftRadius = 0.029999999999999999
DataRepresentation1.ScalarOpacityFunction = []
DataRepresentation1.SelectMaskArray = ''
DataRepresentation1.SelectionLineWidth = 2.0
DataRepresentation1.CubeAxesZAxisMinorTickVisibility = 1
DataRepresentation1.CubeAxesXAxisVisibility = 1
DataRepresentation1.Interpolation = 'Gouraud'
DataRepresentation1.SelectionCellLabelFontFamily = 'Arial'
DataRepresentation1.SelectionCellLabelItalic = 0
DataRepresentation1.CubeAxesYAxisMinorTickVisibility = 1
DataRepresentation1.InterpolationType = 'Linear'
DataRepresentation1.CubeAxesZGridLines = 0
DataRepresentation1.SelectionPointLabelFormat = ''
DataRepresentation1.SelectionPointLabelOpacity = 1.0
DataRepresentation1.Pickable = 1
DataRepresentation1.CustomBoundsActive = [0, 0, 0]
DataRepresentation1.SelectionRepresentation = 'Wireframe'
DataRepresentation1.SelectionPointLabelBold = 0
DataRepresentation1.ColorArrayName = ''
DataRepresentation1.SelectionPointLabelItalic = 0
DataRepresentation1.AllowSpecularHighlightingWithScalarColoring = 0
DataRepresentation1.SpecularColor = [1.0, 1.0, 1.0]
DataRepresentation1.LookupTable = []
DataRepresentation1.SelectionPointSize = 5.0
DataRepresentation1.SelectionCellLabelBold = 0
DataRepresentation1.Orient = 0
SetActiveSource(CellDatatoPointData1)
DataRepresentation2 = Show()
DataRepresentation2.CubeAxesZAxisVisibility = 1
DataRepresentation2.SelectionPointLabelColor = [0.5, 0.5, 0.5]
DataRepresentation2.SelectionPointFieldDataArrayName = 'realtempx'
DataRepresentation2.SuppressLOD = 0
DataRepresentation2.CubeAxesXGridLines = 0
DataRepresentation2.CubeAxesYAxisTickVisibility = 1
DataRepresentation2.CubeAxesColor = [1.0, 1.0, 1.0]
DataRepresentation2.Position = [0.0, 0.0, 0.0]
DataRepresentation2.BackfaceRepresentation = 'Follow Frontface'
DataRepresentation2.SelectionOpacity = 1.0
DataRepresentation2.SelectionPointLabelShadow = 0
DataRepresentation2.CubeAxesYGridLines = 0
DataRepresentation2.CubeAxesZAxisRange = [0.0, 1.0]
DataRepresentation2.OrientationMode = 'Direction'
DataRepresentation2.Source.TipResolution = 6
DataRepresentation2.ScaleMode = 'No Data Scaling Off'
DataRepresentation2.Diffuse = 1.0
DataRepresentation2.SelectionUseOutline = 0
DataRepresentation2.CubeAxesZTitle = 'Z-Axis'
DataRepresentation2.Specular = 0.10000000000000001
DataRepresentation2.SelectionVisibility = 1
DataRepresentation2.InterpolateScalarsBeforeMapping = 1
DataRepresentation2.CubeAxesZAxisTickVisibility = 1
DataRepresentation2.Origin = [0.0, 0.0, 0.0]
DataRepresentation2.CubeAxesVisibility = 0
DataRepresentation2.Scale = [1.0, 1.0, 1.0]
DataRepresentation2.SelectionCellLabelJustification = 'Left'
DataRepresentation2.DiffuseColor = [1.0, 1.0, 1.0]
DataRepresentation2.Shade = 0
DataRepresentation2.SelectionCellLabelOpacity = 1.0
DataRepresentation2.CubeAxesInertia = 1
DataRepresentation2.Source = "Arrow"
DataRepresentation2.Source.Invert = 0
DataRepresentation2.Masking = 0
DataRepresentation2.Opacity = 1.0
DataRepresentation2.LineWidth = 1.0
DataRepresentation2.MeshVisibility = 0
DataRepresentation2.Visibility = 0
DataRepresentation2.SelectionCellLabelFontSize = 18
DataRepresentation2.CubeAxesCornerOffset = 0.0
DataRepresentation2.SelectionPointLabelJustification = 'Left'
DataRepresentation2.SelectionPointLabelVisibility = 0
DataRepresentation2.SelectOrientationVectors = ''
DataRepresentation2.CubeAxesTickLocation = 'Inside'
DataRepresentation2.BackfaceDiffuseColor = [1.0, 1.0, 1.0]
DataRepresentation2.CubeAxesYAxisVisibility = 1
DataRepresentation2.SelectionPointLabelFontFamily = 'Arial'
DataRepresentation2.Source.ShaftResolution = 6
DataRepresentation2.CubeAxesFlyMode = 'Closest Triad'
DataRepresentation2.SelectScaleArray = ''
DataRepresentation2.CubeAxesYTitle = 'Y-Axis'
DataRepresentation2.ColorAttributeType = 'POINT_DATA'
DataRepresentation2.SpecularPower = 100.0
DataRepresentation2.Texture = []
DataRepresentation2.SelectionCellLabelShadow = 0
DataRepresentation2.AmbientColor = [1.0, 1.0, 1.0]
DataRepresentation2.MapScalars = 1
DataRepresentation2.PointSize = 2.0
DataRepresentation2.Source.TipLength = 0.34999999999999998
DataRepresentation2.SelectionCellLabelFormat = ''
DataRepresentation2.Scaling = 0
DataRepresentation2.StaticMode = 0
DataRepresentation2.SelectionCellLabelColor = [0.0, 1.0, 0.0]
DataRepresentation2.SliceMode = 'XY Plane'
DataRepresentation2.Source.TipRadius = 0.10000000000000001
DataRepresentation2.EdgeColor = [0.0, 0.0, 0.50000762951094835]
DataRepresentation2.CubeAxesXAxisTickVisibility = 1
DataRepresentation2.SelectionCellLabelVisibility = 0
DataRepresentation2.NonlinearSubdivisionLevel = 1
DataRepresentation2.CubeAxesXAxisRange = [0.0, 1.0]
DataRepresentation2.Representation = 'Outline'
DataRepresentation2.CubeAxesYAxisRange = [0.0, 1.0]
DataRepresentation2.CustomBounds = [0.0, 1.0, 0.0, 1.0, 0.0, 1.0]
DataRepresentation2.Orientation = [0.0, 0.0, 0.0]
DataRepresentation2.CubeAxesEnableCustomAxisRange = 0
DataRepresentation2.CubeAxesXTitle = 'X-Axis'
DataRepresentation2.ScalarOpacityUnitDistance = 0.013531646934131857
DataRepresentation2.BackfaceOpacity = 1.0
DataRepresentation2.SelectionPointLabelFontSize = 18
DataRepresentation2.SelectionCellFieldDataArrayName = 'realtempx'
DataRepresentation2.SelectionColor = [1.0, 0.0, 1.0]
DataRepresentation2.Ambient = 0.0
DataRepresentation2.VolumeRenderingMode = 'Smart'
DataRepresentation2.CubeAxesXAxisMinorTickVisibility = 1
DataRepresentation2.ScaleFactor = 0.10000000000000001
DataRepresentation2.BackfaceAmbientColor = [1.0, 1.0, 1.0]
DataRepresentation2.Slice = 0
DataRepresentation2.Source.ShaftRadius = 0.029999999999999999
DataRepresentation2.ScalarOpacityFunction = []
DataRepresentation2.SelectMaskArray = ''
DataRepresentation2.SelectionLineWidth = 2.0
DataRepresentation2.CubeAxesZAxisMinorTickVisibility = 1
DataRepresentation2.CubeAxesXAxisVisibility = 1
DataRepresentation2.Interpolation = 'Gouraud'
DataRepresentation2.SelectionCellLabelFontFamily = 'Arial'
DataRepresentation2.SelectionCellLabelItalic = 0
DataRepresentation2.CubeAxesYAxisMinorTickVisibility = 1
DataRepresentation2.InterpolationType = 'Linear'
DataRepresentation2.CubeAxesZGridLines = 0
DataRepresentation2.SelectionPointLabelFormat = ''
DataRepresentation2.SelectionPointLabelOpacity = 1.0
DataRepresentation2.Pickable = 1
DataRepresentation2.CustomBoundsActive = [0, 0, 0]
DataRepresentation2.SelectionRepresentation = 'Wireframe'
DataRepresentation2.SelectionPointLabelBold = 0
DataRepresentation2.ColorArrayName = ''
DataRepresentation2.SelectionPointLabelItalic = 0
DataRepresentation2.AllowSpecularHighlightingWithScalarColoring = 0
DataRepresentation2.SpecularColor = [1.0, 1.0, 1.0]
DataRepresentation2.LookupTable = []
DataRepresentation2.SelectionPointSize = 5.0
DataRepresentation2.SelectionCellLabelBold = 0
DataRepresentation2.Orient = 0
SetActiveSource(Calculator1)
DataRepresentation3 = Show()
DataRepresentation3.CubeAxesZAxisVisibility = 1
DataRepresentation3.SelectionPointLabelColor = [0.5, 0.5, 0.5]
DataRepresentation3.SelectionPointFieldDataArrayName = 'Result'
DataRepresentation3.SuppressLOD = 0
DataRepresentation3.CubeAxesXGridLines = 0
DataRepresentation3.CubeAxesYAxisTickVisibility = 1
DataRepresentation3.CubeAxesColor = [1.0, 1.0, 1.0]
DataRepresentation3.Position = [0.0, 0.0, 0.0]
DataRepresentation3.BackfaceRepresentation = 'Follow Frontface'
DataRepresentation3.SelectionOpacity = 1.0
DataRepresentation3.SelectionPointLabelShadow = 0
DataRepresentation3.CubeAxesYGridLines = 0
DataRepresentation3.CubeAxesZAxisRange = [0.0, 1.0]
DataRepresentation3.OrientationMode = 'Direction'
DataRepresentation3.Source.TipResolution = 6
DataRepresentation3.ScaleMode = 'No Data Scaling Off'
DataRepresentation3.Diffuse = 1.0
DataRepresentation3.SelectionUseOutline = 0
DataRepresentation3.CubeAxesZTitle = 'Z-Axis'
DataRepresentation3.Specular = 0.10000000000000001
DataRepresentation3.SelectionVisibility = 1
DataRepresentation3.InterpolateScalarsBeforeMapping = 1
DataRepresentation3.CubeAxesZAxisTickVisibility = 1
DataRepresentation3.Origin = [0.0, 0.0, 0.0]
DataRepresentation3.CubeAxesVisibility = 0
DataRepresentation3.Scale = [1.0, 1.0, 1.0]
DataRepresentation3.SelectionCellLabelJustification = 'Left'
DataRepresentation3.DiffuseColor = [1.0, 1.0, 1.0]
DataRepresentation3.Shade = 0
DataRepresentation3.SelectionCellLabelOpacity = 1.0
DataRepresentation3.CubeAxesInertia = 1
DataRepresentation3.Source = "Arrow"
DataRepresentation3.Source.Invert = 0
DataRepresentation3.Masking = 0
DataRepresentation3.Opacity = 1.0
DataRepresentation3.LineWidth = 1.0
DataRepresentation3.MeshVisibility = 0
DataRepresentation3.Visibility = 1
DataRepresentation3.SelectionCellLabelFontSize = 18
DataRepresentation3.CubeAxesCornerOffset = 0.0
DataRepresentation3.SelectionPointLabelJustification = 'Left'
DataRepresentation3.SelectionPointLabelVisibility = 0
DataRepresentation3.SelectOrientationVectors = ''
DataRepresentation3.CubeAxesTickLocation = 'Inside'
DataRepresentation3.BackfaceDiffuseColor = [1.0, 1.0, 1.0]
DataRepresentation3.CubeAxesYAxisVisibility = 1
DataRepresentation3.SelectionPointLabelFontFamily = 'Arial'
DataRepresentation3.Source.ShaftResolution = 6
DataRepresentation3.CubeAxesFlyMode = 'Closest Triad'
DataRepresentation3.SelectScaleArray = ''
DataRepresentation3.CubeAxesYTitle = 'Y-Axis'
DataRepresentation3.ColorAttributeType = 'POINT_DATA'
DataRepresentation3.SpecularPower = 100.0
DataRepresentation3.Texture = []
DataRepresentation3.SelectionCellLabelShadow = 0
DataRepresentation3.AmbientColor = [1.0, 1.0, 1.0]
DataRepresentation3.MapScalars = 1
DataRepresentation3.PointSize = 2.0
DataRepresentation3.Source.TipLength = 0.34999999999999998
DataRepresentation3.SelectionCellLabelFormat = ''
DataRepresentation3.Scaling = 0
DataRepresentation3.StaticMode = 0
DataRepresentation3.SelectionCellLabelColor = [0.0, 1.0, 0.0]
DataRepresentation3.SliceMode = 'XY Plane'
DataRepresentation3.Source.TipRadius = 0.10000000000000001
DataRepresentation3.EdgeColor = [0.0, 0.0, 0.50000762951094835]
DataRepresentation3.CubeAxesXAxisTickVisibility = 1
DataRepresentation3.SelectionCellLabelVisibility = 0
DataRepresentation3.NonlinearSubdivisionLevel = 1
DataRepresentation3.CubeAxesXAxisRange = [0.0, 1.0]
DataRepresentation3.Representation = 'Volume'
DataRepresentation3.CubeAxesYAxisRange = [0.0, 1.0]
DataRepresentation3.CustomBounds = [0.0, 1.0, 0.0, 1.0, 0.0, 1.0]
DataRepresentation3.Orientation = [0.0, 0.0, 0.0]
DataRepresentation3.CubeAxesEnableCustomAxisRange = 0
DataRepresentation3.CubeAxesXTitle = 'X-Axis'
DataRepresentation3.ScalarOpacityUnitDistance = 0.013531646934131857
DataRepresentation3.BackfaceOpacity = 1.0
DataRepresentation3.SelectionPointLabelFontSize = 18
DataRepresentation3.SelectionCellFieldDataArrayName = 'realtempx'
DataRepresentation3.SelectionColor = [1.0, 0.0, 1.0]
DataRepresentation3.Ambient = 0.0
DataRepresentation3.VolumeRenderingMode = 'Smart'
DataRepresentation3.CubeAxesXAxisMinorTickVisibility = 1
DataRepresentation3.ScaleFactor = 0.10000000000000001
DataRepresentation3.BackfaceAmbientColor = [1.0, 1.0, 1.0]
DataRepresentation3.Slice = 0
DataRepresentation3.Source.ShaftRadius = 0.029999999999999999
DataRepresentation3.ScalarOpacityFunction = a1_Result_PiecewiseFunction
DataRepresentation3.SelectMaskArray = ''
DataRepresentation3.SelectionLineWidth = 2.0
DataRepresentation3.CubeAxesZAxisMinorTickVisibility = 1
DataRepresentation3.CubeAxesXAxisVisibility = 1
DataRepresentation3.Interpolation = 'Gouraud'
DataRepresentation3.SelectionCellLabelFontFamily = 'Arial'
DataRepresentation3.SelectionCellLabelItalic = 0
DataRepresentation3.CubeAxesYAxisMinorTickVisibility = 1
DataRepresentation3.InterpolationType = 'Linear'
DataRepresentation3.CubeAxesZGridLines = 0
DataRepresentation3.SelectionPointLabelFormat = ''
DataRepresentation3.SelectionPointLabelOpacity = 1.0
DataRepresentation3.Pickable = 1
DataRepresentation3.CustomBoundsActive = [0, 0, 0]
DataRepresentation3.SelectionRepresentation = 'Wireframe'
DataRepresentation3.SelectionPointLabelBold = 0
DataRepresentation3.ColorArrayName = 'Result'
DataRepresentation3.SelectionPointLabelItalic = 0
DataRepresentation3.AllowSpecularHighlightingWithScalarColoring = 0
DataRepresentation3.SpecularColor = [1.0, 1.0, 1.0]
DataRepresentation3.LookupTable = a1_Result_PVLookupTable
DataRepresentation3.SelectionPointSize = 5.0
DataRepresentation3.SelectionCellLabelBold = 0
DataRepresentation3.Orient = 0
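    # No writers were created in this pipeline (cp_writers stays empty), so
    # the loop below is a no-op retained by the script generator.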
for writer in cp_writers:
if timestep % writer.cpFrequency == 0 or datadescription.GetForceOutput() == True:
writer.FileName = writer.cpFileName.replace("%t", str(timestep))
writer.UpdatePipeline()
if False : # rescale data range
import math
for view in cp_views:
if timestep % view.cpFrequency == 0 or datadescription.GetForceOutput() == True:
reps = view.Representations
for rep in reps:
if hasattr(rep, 'Visibility') and rep.Visibility == 1 and hasattr(rep, 'MapScalars') and rep.MapScalars != '':
input = rep.Input
input.UpdatePipeline() #make sure range is up-to-date
lut = rep.LookupTable
if lut == None:
continue
if rep.ColorAttributeType == 'POINT_DATA':
datainformation = input.GetPointDataInformation()
elif rep.ColorAttributeType == 'CELL_DATA':
datainformation = input.GetCellDataInformation()
                        else:
                            print 'something strange with color attribute type', rep.ColorAttributeType
                            continue  # no data information to rescale with
if lut.VectorMode != 'Magnitude' or datainformation.GetArray(rep.ColorArrayName).GetNumberOfComponents() == 1:
datarange = datainformation.GetArray(rep.ColorArrayName).GetRange(lut.VectorComponent)
else:
datarange = [0,0]
for i in range(datainformation.GetArray(rep.ColorArrayName).GetNumberOfComponents()):
for j in range(2):
datarange[j] += datainformation.GetArray(rep.ColorArrayName).GetRange(i)[j]*datainformation.GetArray(rep.ColorArrayName).GetRange(i)[j]
datarange[0] = math.sqrt(datarange[0])
datarange[1] = math.sqrt(datarange[1])
rgbpoints = lut.RGBPoints.GetData()
numpts = len(rgbpoints)/4
minvalue = min(datarange[0], rgbpoints[0])
maxvalue = max(datarange[1], rgbpoints[(numpts-1)*4])
if minvalue != rgbpoints[0] or maxvalue != rgbpoints[(numpts-1)*4]:
# rescale all of the points
oldrange = rgbpoints[(numpts-1)*4] - rgbpoints[0]
newrange = maxvalue - minvalue
newrgbpoints = list(rgbpoints)
for v in range(numpts):
newrgbpoints[v*4] = minvalue+(rgbpoints[v*4] - rgbpoints[0])*newrange/oldrange
lut.RGBPoints.SetData(newrgbpoints)
for view in cp_views:
if timestep % view.cpFrequency == 0 or datadescription.GetForceOutput() == True:
fname = view.cpFileName
fname = fname.replace("%t", str(timestep))
if view.cpFitToScreen != 0:
if view.IsA("vtkSMRenderViewProxy") == True:
view.ResetCamera()
elif view.IsA("vtkSMContextViewProxy") == True:
view.ResetDisplay()
else:
print ' do not know what to do with a ', view.GetClassName()
WriteImage(fname, view, Magnification=view.cpMagnification)
# explicitly delete the proxies -- we do it this way to avoid problems with prototypes
tobedeleted = GetNextProxyToDelete()
while tobedeleted != None:
Delete(tobedeleted)
tobedeleted = GetNextProxyToDelete()
def GetNextProxyToDelete():
proxyiterator = servermanager.ProxyIterator()
for proxy in proxyiterator:
group = proxyiterator.GetGroup()
if group.find("prototypes") != -1:
continue
if group != 'timekeeper' and group.find("pq_helper_proxies") == -1 :
return proxy
return None
def CreateProducer(datadescription, gridname):
"Creates a producer proxy for the grid"
if not datadescription.GetInputDescriptionByName(gridname):
raise RuntimeError, "Simulation input name '%s' does not exist" % gridname
grid = datadescription.GetInputDescriptionByName(gridname).GetGrid()
producer = PVTrivialProducer()
producer.GetClientSideObject().SetOutput(grid)
if grid.IsA("vtkImageData") == True or grid.IsA("vtkStructuredGrid") == True or grid.IsA("vtkRectilinearGrid") == True:
extent = datadescription.GetInputDescriptionByName(gridname).GetWholeExtent()
producer.WholeExtent= [ extent[0], extent[1], extent[2], extent[3], extent[4], extent[5] ]
producer.UpdatePipeline()
return producer
def CreateWriter(proxy_ctor, filename, freq, cp_writers):
writer = proxy_ctor()
writer.FileName = filename
writer.add_attribute("cpFrequency", freq)
writer.add_attribute("cpFileName", filename)
cp_writers.append(writer)
return writer
def CreateView(proxy_ctor, filename, freq, fittoscreen, magnification, cp_views):
view = proxy_ctor()
view.add_attribute("cpFileName", filename)
view.add_attribute("cpFrequency", freq)
view.add_attribute("cpFileName", filename)
view.add_attribute("cpFitToScreen", fittoscreen)
view.add_attribute("cpMagnification", magnification)
cp_views.append(view)
return view
|
openmichigan/PSNM
|
NavierStokes/Programs/NavierStokes3dFortranMPIParaView/cell-based/pipeline_images.py
|
Python
|
bsd-2-clause
| 29,563
|
[
"ParaView"
] |
4df40dc3e22400c37eab9614af4095a1f7b9d188dc73c02d630cb9431a168e78
|
""" Activations
A collection of activation functions and modules with a common interface so
that they can easily be swapped. All have an `inplace` arg even if not used.
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch import nn as nn
from torch.nn import functional as F
def swish(x, inplace: bool = False):
"""Swish - Described in: https://arxiv.org/abs/1710.05941
"""
return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid())
class Swish(nn.Module):
def __init__(self, inplace: bool = False):
super(Swish, self).__init__()
self.inplace = inplace
def forward(self, x):
return swish(x, self.inplace)
def mish(x, inplace: bool = False):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
NOTE: I don't have a working inplace variant
"""
return x.mul(F.softplus(x).tanh())
class Mish(nn.Module):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
"""
def __init__(self, inplace: bool = False):
super(Mish, self).__init__()
def forward(self, x):
return mish(x)
def sigmoid(x, inplace: bool = False):
return x.sigmoid_() if inplace else x.sigmoid()
# PyTorch has this, but not with a consistent inplace argument interface
class Sigmoid(nn.Module):
def __init__(self, inplace: bool = False):
super(Sigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return x.sigmoid_() if self.inplace else x.sigmoid()
def tanh(x, inplace: bool = False):
return x.tanh_() if inplace else x.tanh()
# PyTorch has this, but not with a consistent inplace argument interface
class Tanh(nn.Module):
def __init__(self, inplace: bool = False):
super(Tanh, self).__init__()
self.inplace = inplace
def forward(self, x):
return x.tanh_() if self.inplace else x.tanh()
def hard_swish(x, inplace: bool = False):
inner = F.relu6(x + 3.).div_(6.)
return x.mul_(inner) if inplace else x.mul(inner)
class HardSwish(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSwish, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_swish(x, self.inplace)
def hard_sigmoid(x, inplace: bool = False):
if inplace:
return x.add_(3.).clamp_(0., 6.).div_(6.)
else:
return F.relu6(x + 3.) / 6.
class HardSigmoid(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_sigmoid(x, self.inplace)
def hard_mish(x, inplace: bool = False):
""" Hard Mish
Experimental, based on notes by Mish author Diganta Misra at
https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
"""
if inplace:
return x.mul_(0.5 * (x + 2).clamp(min=0, max=2))
else:
return 0.5 * x * (x + 2).clamp(min=0, max=2)
class HardMish(nn.Module):
def __init__(self, inplace: bool = False):
super(HardMish, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_mish(x, self.inplace)
class PReLU(nn.PReLU):
"""Applies PReLU (w/ dummy inplace arg)
"""
def __init__(self, num_parameters: int = 1, init: float = 0.25, inplace: bool = False) -> None:
super(PReLU, self).__init__(num_parameters=num_parameters, init=init)
def forward(self, input: torch.Tensor) -> torch.Tensor:
return F.prelu(input, self.weight)
def gelu(x: torch.Tensor, inplace: bool = False) -> torch.Tensor:
return F.gelu(x)
class GELU(nn.Module):
"""Applies the Gaussian Error Linear Units function (w/ dummy inplace arg)
"""
def __init__(self, inplace: bool = False):
super(GELU, self).__init__()
def forward(self, input: torch.Tensor) -> torch.Tensor:
return F.gelu(input)
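# A minimal usage sketch (illustrative; ``_ACT_LAYERS`` and ``get_act`` are
# not part of this module): the shared ``inplace`` constructor argument is
# what lets these modules be swapped behind a single factory.
_ACT_LAYERS = dict(swish=Swish, mish=Mish, sigmoid=Sigmoid, tanh=Tanh,
                   hard_swish=HardSwish, hard_sigmoid=HardSigmoid,
                   hard_mish=HardMish, prelu=PReLU, gelu=GELU)

def get_act(name: str, inplace: bool = False) -> nn.Module:
    """Illustrative factory: look up an activation module by name."""
    return _ACT_LAYERS[name](inplace=inplace)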
|
rwightman/pytorch-image-models
|
timm/models/layers/activations.py
|
Python
|
apache-2.0
| 4,040
|
[
"Gaussian"
] |
893fd64b078ad41d787c06a67c9385720a90e43a0123a16fb7e5f87d1b98b123
|
#! test QC_JSON Schema for response properties
import psi4
import numpy as np
import json
import os
from distutils import dir_util
# Generate JSON input
json_data = {
"schema_name": "qcschema_input",
"schema_version": 1,
"molecule": {
"geometry": [
0.0, 0.0, -0.1294769411935893, 0.0, -1.494187339479985, 1.0274465079245698, 0.0, 1.494187339479985,
1.0274465079245698
],
"symbols": ["O", "H", "H"],
"fix_com":
True,
"fix_orientation":
True
},
"driver": "properties",
"model": {
"method": "CC2",
"basis": "6-31G",
},
"keywords": {
"scf_type": "df",
"mp2_type": "df",
"e_convergence": 9,
"omega": [355, 439, 'nm'],
"gauge": "velocity",
"function_kwargs": {
"properties": ["dipole", "polarizability", "rotation", "roa_tensor"]
}
}
}
# Load expected output (dipole & quadrupole in au)
expected_response = {
"CC2 DIPOLE POLARIZABILITY @ 355NM": 5.175390333179149,
"CC2 DIPOLE POLARIZABILITY @ 439NM": 5.063282247200858,
"CC2 DIPOLE X": 0.0,
"CC2 DIPOLE Y": 0.0,
"CC2 DIPOLE Z": 2.5171476216528084,
"CC2 QUADRUPOLE XX": -7.393278028527795,
"CC2 QUADRUPOLE XY": -1.8387569175060344e-16,
"CC2 QUADRUPOLE XZ": 0.0,
"CC2 QUADRUPOLE YY": -4.3142280005261755,
"CC2 QUADRUPOLE YZ": 0.0,
"CC2 QUADRUPOLE ZZ": -5.9972667929848065,
"CC2 SPECIFIC ROTATION (MVG) @ 355NM": -0.0,
"CC2 SPECIFIC ROTATION (MVG) @ 439NM": -0.0,
"CC2 SPECIFIC ROTATION (VEL) @ 355NM": -0.0,
"CC2 SPECIFIC ROTATION (VEL) @ 439NM": -0.0,
"CC2 DIPOLE": [
0,
0,
0.99032208
],
"CC2 QUADRUPOLE": [
[
-5.49672082,
-1.36707065e-16,
0.0
],
[
-1.36707065e-16,
-3.20752267,
0.0
],
[
0.0,
0.0,
-4.45882072
]
],
"CC2 DIPOLE POLARIZABILITY TENSOR @ 355NM": [
[
1.53032264,
0.0,
0.0
],
[
0.0,
8.37432166,
0.0
],
[
0.0,
0.0,
5.62152674
]
],
"CC2 DIPOLE POLARIZABILITY TENSOR @ 439NM": [
[
1.49010149,
0.0,
0.0
],
[
0.0,
8.21869196,
0.0
],
[
0.0,
0.0,
5.48105331
]
],
"CC2 OPTICAL ROTATION TENSOR (MVG) @ 355NM": [
[
0.0,
-0.02923246,
0.0
],
[
-0.01267525,
0.0,
0.0
],
[
0.0,
0.0,
0.0
]
],
"CC2 OPTICAL ROTATION TENSOR (MVG) @ 439NM": [
[
0.0,
-0.01758964,
0.0
],
[
-0.00805598,
0.0,
0.0
],
[
0.0,
0.0,
0.0
]
],
"CC2 OPTICAL ROTATION TENSOR (VEL) @ 0NM": [
[
0.0,
0.0124954,
0.0
],
[
-0.25085903,
0.0,
0.0
],
[
0.0,
0.0,
0.0
]
],
"CC2 OPTICAL ROTATION TENSOR (VEL) @ 355NM": [
[
0.0,
-0.01673706,
0.0
],
[
-0.26353427,
0.0,
0.0
],
[
0.0,
0.0,
0.0
]
],
"CC2 OPTICAL ROTATION TENSOR (VEL) @ 439NM": [
[
0.0,
-0.00509424,
0.0
],
[
-0.25891501,
0.0,
0.0
],
[
0.0,
0.0,
0.0
]
],
"CC2 QUADRUPOLE POLARIZABILITY TENSOR COMPONENT 0 @ 355NM": [
[
0.0,
0.0,
0.38619772
],
[
0.0,
0.0,
0.0
],
[
0.38619772,
0.0,
0.0
]
],
"CC2 QUADRUPOLE POLARIZABILITY TENSOR COMPONENT 0 @ 439NM": [
[
0.0,
0.0,
0.37061096
],
[
0.0,
0.0,
0.0
],
[
0.37061096,
0.0,
0.0
]
],
"CC2 QUADRUPOLE POLARIZABILITY TENSOR COMPONENT 1 @ 355NM": [
[
0.0,
0.0,
0.0
],
[
0.0,
0.0,
10.97015701
],
[
0.0,
10.97015701,
0.0
]
],
"CC2 QUADRUPOLE POLARIZABILITY TENSOR COMPONENT 1 @ 439NM": [
[
0.0,
0.0,
0.0
],
[
0.0,
0.0,
10.74076995
],
[
0.0,
10.74076995,
0.0
]
],
"CC2 QUADRUPOLE POLARIZABILITY TENSOR COMPONENT 2 @ 355NM": [
[
-5.65001748,
0.0,
0.0
],
[
0.0,
5.62557634,
0.0
],
[
0.0,
0.0,
0.02444115
]
],
"CC2 QUADRUPOLE POLARIZABILITY TENSOR COMPONENT 2 @ 439NM": [
[
-5.51540341,
0.0,
0.0
],
[
0.0,
5.49921728,
0.0
],
[
0.0,
0.0,
0.01618613
]
]
}
expected_response = {k: (np.asarray(v) if isinstance(v, list) else v) for k, v in expected_response.items()}
json_ret = psi4.schema_wrapper.run_qcschema(json_data).dict()
psi4.compare_integers(True, json_ret["success"], "JSON Success") #TEST
psi4.compare_strings("qcschema_output", json_ret["schema_name"], "Schema Name") #TEST
for k in expected_response.keys(): #TEST
psi4.compare_values(expected_response[k], json_ret["extras"]["qcvars"][k], 5, "Result: " + k.upper()) #TEST
|
jturney/psi4
|
tests/json/schema-1-response/input.py
|
Python
|
lgpl-3.0
| 5,134
|
[
"Psi4"
] |
70923317cca4fede723d53c2477a4f817bd0e0f325f9a79cf84b3dd126a2b486
|
#!/usr/bin/env python
import os, sys
import re
if len(sys.argv) != 3:
print """
Usage: WhatModulesVTK.py vtkSourceTree applicationFile|applicationFolder
Generate a FindPackage(VTK COMPONENTS) that lists all modules
referenced by a set of files.
Additionally, two extra find_package( VTK COMPONENTS) lists of modules
are produced. One is a minimal set and the other chases down all the
dependencies to produce a maximal set of modules. This is done by
parsing the module.cmake files.
For example:
Running from the VTK source,
./Utilities/Maintenance/WhatModulesVTK.py . Filters/Modeling/Testing/Cxx/TestRotationalExtrusion.cxx
Produces
Modules and their dependencies:
find_package(VTK COMPONENTS
vtkCommonComputationalGeometry
vtkCommonCore
vtkCommonDataModel
vtkCommonExecutionModel
vtkCommonMath
vtkCommonMisc
vtkCommonSystem
vtkCommonTransforms
vtkFiltersCore
vtkFiltersGeneral
vtkFiltersModeling
vtkFiltersSources
vtkImagingCore
vtkRenderingCore
vtkRenderingOpenGL
vtkTestingCore
vtkTestingRendering
)
Your application code includes 17 of 170 vtk modules.
All modules referenced in the files:
find_package(VTK COMPONENTS
vtkCommonCore
vtkFiltersCore
vtkFiltersModeling
vtkFiltersSources
vtkRenderingCore
vtkRenderingOpenGL
vtkTestingCore
vtkTestingRendering
)
Your application code includes 8 of 170 vtk modules.
Minimal set of modules:
find_package(VTK COMPONENTS
vtkCommonCore
vtkFiltersCore
vtkFiltersModeling
vtkRenderingOpenGL
vtkTestingRendering
)
Your application code includes 5 of 170 vtk modules.
"""
exit(0)
def IncludesToPaths(path):
'''
Build a dict that maps include files to paths.
'''
includeToPath = dict()
prog = re.compile(r"(vtk.*\.h)")
for root, dirs, files in os.walk(path):
for f in files:
if prog.match(f):
includeFile = prog.findall(f)[0]
parts = root.split("/")
module = parts[len(parts)-2] + parts[len(parts)-1]
includeToPath[includeFile] = module
return includeToPath
def FindModules(path):
'''
Build a dict that maps paths to modules.
'''
pathToModule = dict()
fileProg = re.compile(r"module.cmake")
moduleProg = re.compile(r".*module[^(]*\(\s*(\w*)",re.S)
for root, dirs, files in os.walk(path):
for f in files:
if fileProg.match(f):
fid = open(os.path.join(root, f), "r")
contents = fid.read()
m = moduleProg.match(contents)
if m:
moduleName = m.group(1)
parts = root.split("/")
pathToModule[parts[len(parts)-2] + parts[len(parts)-1]] = moduleName
fid.close()
return pathToModule
def FindIncludes(path):
'''
Build a set that contains vtk includes.
'''
includes = set()
includeProg = re.compile(r"(vtk.*\.h)")
fid = open(path, "r")
contents = fid.read()
incs = includeProg.findall(contents)
includes.update(incs)
fid.close()
return includes
def FindModuleFiles(path):
'''
Get a list of module files in the VTK directory.
'''
moduleFiles = [os.path.join(root, name)
for root, dirs, files in os.walk(path)
for name in files
if name == ("module.cmake")]
return moduleFiles
def ParseModuleFile(fileName):
'''
Read each module file returning the module name and what
it depends on or implements.
'''
fh = open(fileName, 'rb')
lines = []
for line in fh:
line = line.strip()
if line.startswith('$'): # Skip CMake variable names
continue
if line.startswith('#'):
continue
line = line.split('#')[0].strip() # inline comments
if line == "":
continue
line = line.split(')')[0].strip() # closing brace with no space
if line == "":
continue
for l in line.split(" "):
lines.append(l)
languages = ['PYTHON', 'TCL', 'JAVA']
keywords = ['BACKEND', 'COMPILE_DEPENDS', 'DEPENDS', 'EXCLUDE_FROM_ALL',
'EXCLUDE_FROM_WRAPPING', 'GROUPS', 'IMPLEMENTS', 'KIT',
'PRIVATE_DEPENDS', 'TEST_DEPENDS'] + \
map(lambda l: 'EXCLUDE_FROM_%s_WRAPPING' % l, languages)
moduleName = ""
depends = []
implements = []
state = "START";
for item in lines:
if state == "START" and item.startswith("vtk_module("):
moduleName = item.split("(")[1]
continue
if item in keywords:
state = item
continue
if state == 'DEPENDS' and item != ')':
depends.append(item)
continue
if state == 'IMPLEMENTS' and item != ')':
implements.append(item)
continue
return [moduleName, depends + implements]
def FindMinimalSetOfModules(modules, moduleDepencencies):
'''
Find the minimal set of modules needed.
'''
dependencies = set()
for m in modules:
dependencies = dependencies | set(moduleDepencencies[m]) # Set union
return modules - dependencies # Set difference
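# A tiny worked sketch (illustrative, not used by the script): if module B
# depends on A, then requesting {A, B} reduces to {B}, because A is implied
# by B's dependency list.
def _demoMinimalSet():
    deps = {'A': [], 'B': ['A']}
    return FindMinimalSetOfModules(set(['A', 'B']), deps)  # -> set(['B'])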
def FindAllNeededModules(modules, foundModules, moduleDepencencies):
'''
Recursively search moduleDependencies finding all modules.
'''
if modules != None and len(modules) > 0:
for m in modules:
foundModules.add(m)
foundModules = foundModules | set(moduleDepencencies[m]) # Set union
foundModules = FindAllNeededModules(moduleDepencencies[m],
foundModules,moduleDepencencies)
return foundModules
def MakeFindPackage(modules):
'''
Make a useful find_package command.
'''
# Print a useful cmake command
res = "find_package(VTK COMPONENTS\n"
for module in sorted(modules):
res += " " + module + "\n"
res += ")"
return res
def main():
'''
Start the program
'''
# Generate dict's for mapping includes to modules
includesToPaths = IncludesToPaths(sys.argv[1] + "/")
pathsToModules = FindModules(sys.argv[1] + "/")
# Test to see if VTK source is provided
if len(pathsToModules) == 0:
print sys.argv[1] +\
" is not a VTK source directory. It does not contain any module.cmake files."
exit(1)
# Parse the module files making a dictionary of each module and its
# dependencies or what it implements.
moduleDepencencies = dict()
moduleFiles = FindModuleFiles(sys.argv[1] + "/")
for fname in moduleFiles:
m = ParseModuleFile(fname)
moduleDepencencies[m[0]] = m[1]
# Build a set of includes for all command line files
allIncludes = set()
program = sys.argv[0]
sys.argv.pop(0) # remove program name
sys.argv.pop(0) # remove vtk source tree
for f in sys.argv:
if os.path.isfile(f):
allIncludes.update(FindIncludes(f))
else:
# We have a folder so look through all the files.
for path, dirs, files in os.walk(f):
for fn in files:
allIncludes.update(FindIncludes(os.path.join(path,fn)))
if len(allIncludes) == 0:
print program + ": " + f + " does not exist"
exit(1)
# Build a set that contains all modules referenced in command line files
allModules = set()
for inc in allIncludes:
if inc in includesToPaths:
module = includesToPaths[inc]
if module in pathsToModules:
allModules.add(pathsToModules[includesToPaths[inc]])
# Add vtkInteractionStyle if required.
if "vtkRenderWindowInteractor.h" in allIncludes:
allModules.add("vtkInteractionStyle")
# Add OpenGL factory classes if required.
if "vtkRenderingFreeType" in allModules:
allModules.add("vtkRenderingFreeTypeFontConfig")
allModules.add("vtkRenderingFreeTypeOpenGL")
if "vtkRenderingCore" in allModules:
allModules.add("vtkRenderingOpenGL")
if "vtkRenderingVolume" in allModules:
allModules.add("vtkRenderingVolumeOpenGL")
# Find the minimal set of modules.
minimalSetOfModules =\
FindMinimalSetOfModules(allModules, moduleDepencencies)
# Find all the modules, chasing down all the dependencies.
allNeededModules =\
FindAllNeededModules(minimalSetOfModules, set(), moduleDepencencies)
modules = {'All modules referenced in the files:': allModules,
'Minimal set of modules:': minimalSetOfModules,
'Modules and their dependencies:': allNeededModules
}
for k, v in modules.iteritems():
print k
print MakeFindPackage(v)
print "Your application code includes " + str(len(v)) +\
" of " + str(len(pathsToModules)) + " vtk modules.\n"
print
if __name__ == '__main__':
main()
|
timkrentz/SunTracker
|
IMU/VTK-6.2.0/Utilities/Maintenance/WhatModulesVTK.py
|
Python
|
mit
| 9,635
|
[
"VTK"
] |
a59ef0de60282f95e67fb8885c289919e087aaea3a879808fef9400973aa7506
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import numpy as np
from pyscf.nao.log_mesh import log_mesh
from pyscf.nao.m_libnao import libnao
from ctypes import POINTER, c_double, c_int, byref
# phia,la,ra,phib,lb,rb,rcen,lbdmxa,rhotb,rr,nr,jtb,clbdtb,lbdtb,nterm,ord,pcs,rho_min_jt,dr_jt
"""
Reduction of the products of two atomic orbitals placed at some distance
[1] Talman JD. Multipole Expansions for Numerical Orbital Products, Int. J. Quant. Chem. 107, 1578--1584 (2007)
ngl : order of Gauss-Legendre quadrature
"""
libnao.prdred.argtypes = (
POINTER(c_double), # phia(nr)
POINTER(c_int), # la
POINTER(c_double), # ra(3)
POINTER(c_double), # phib(nr)
POINTER(c_int), # lb
POINTER(c_double), # rb(3)
POINTER(c_double), # rcen(3)
POINTER(c_int), # lbdmxa
POINTER(c_double), # rhotb(nr,nterm)
POINTER(c_double), # rr(nr)
POINTER(c_int), # nr
POINTER(c_int), # jtb(nterm)
POINTER(c_int), # clbdtb(nterm)
POINTER(c_int), # lbdtb(nterm)
POINTER(c_int), # nterm
POINTER(c_int), # ord
POINTER(c_int), # pcs
POINTER(c_double), # rho_min_jt
POINTER(c_double)) # dr_jt
#
#
#
class prod_talman_c(log_mesh):
def __init__(self, lm=None, jmx=7, ngl=96, lbdmx=14):
"""
Expansion of the products of two atomic orbitals placed at given locations and around a center between these locations
[1] Talman JD. Multipole Expansions for Numerical Orbital Products, Int. J. Quant. Chem. 107, 1578--1584 (2007)
ngl : order of Gauss-Legendre quadrature
log_mesh : instance of log_mesh_c defining the logarithmic mesh (rr and pp arrays)
jmx : maximal angular momentum quantum number of each atomic orbital in a product
lbdmx : maximal angular momentum quantum number used for the expansion of the product phia*phib
"""
from numpy.polynomial.legendre import leggauss
from pyscf.nao.m_log_interp import log_interp_c
from pyscf.nao.m_csphar_talman_libnao import csphar_talman_libnao as csphar_jt
assert ngl>2
assert jmx>-1
assert hasattr(lm, 'rr')
assert hasattr(lm, 'pp')
self.ngl = ngl
self.lbdmx = lbdmx
self.xx, self.ww = leggauss(ngl)
log_mesh.__init__(self, rr=lm.rr, pp=lm.pp)
self.plval=np.zeros([2*(self.lbdmx+jmx+1), ngl])
self.plval[0,:] = 1.0
self.plval[1,:] = self.xx
for kappa in range(1,2*(self.lbdmx+jmx)+1):
self.plval[kappa+1, :]= ((2*kappa+1)*self.xx*self.plval[kappa, :]-kappa*self.plval[kappa-1, :])/(kappa+1)
self.log_interp = log_interp_c(self.rr)
self.ylm_cr = csphar_jt([0.0,0.0,1.0], self.lbdmx+2*jmx)
return
def prdred(self,phia,la,ra, phib,lb,rb,rcen):
""" Reduce two atomic orbitals given by their radial functions phia, phib,
angular momentum quantum numbers la, lb and their centers ra,rb.
The expansion is done around a center rcen."""
from numpy import sqrt
from pyscf.nao.m_thrj import thrj
from pyscf.nao.m_fact import fact as fac, sgn
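    # NOTE: the loops below also call sixj(...) and ninej(...) (Wigner 6j/9j
    # symbols); their import is assumed to be provided by the surrounding
    # package, alongside thrj and fact above.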
assert la>-1
assert lb>-1
assert len(rcen)==3
assert len(ra)==3
assert len(rb)==3
jtb,clbdtb,lbdtb=self.prdred_terms(la,lb)
nterm = len(jtb)
ya = phia/self.rr**la
yb = phib/self.rr**lb
raa,rbb=sqrt(sum((ra-rcen)**2)),sqrt(sum((rb-rcen)**2))
ijmx=la+lb
    fval=np.zeros([2*self.lbdmx+ijmx+1, self.nr])
yz = np.zeros(self.ngl)
kpmax = 0
for ir,r in enumerate(self.rr):
for igl,x in enumerate(self.xx):
a1 = sqrt(r*r-2*raa*r*x+raa**2)
a2 = sqrt(r*r+2*rbb*r*x+rbb**2)
yz[igl]=self.log_interp(ya,a1)*self.log_interp(yb,a2)
      kpmax = 2*self.lbdmx+ijmx if raa+rbb>1.0e-5 else 0
for kappa in range(kpmax+1):
fval[kappa,ir]=0.5*(self.plval[kappa,:]*yz*self.ww).sum()
rhotb=np.zeros([nterm,self.nr])
for ix,[ij,clbd,clbdp] in enumerate(zip(jtb, clbdtb, lbdtb)):
for lbd1 in range(la+1):
lbdp1 = la-lbd1
aa = thrj(lbd1,lbdp1,la,0,0,0)*fac[lbd1]*fac[lbdp1]*fac[2*la+1] / (fac[2*lbd1]*fac[2*lbdp1]*fac[la])
for lbd2 in range(lb+1):
lbdp2=lb-lbd2
bb=thrj(lbd2,lbdp2,lb,0,0,0)*fac[lbd2]*fac[lbdp2]*fac[2*lb+1] / (fac[2*lbd2]*fac[2*lbdp2]*fac[lb])
bb=aa*bb
for kappa in range(kpmax+1):
sumb=0.0
lcmin=max(abs(lbd1-lbd2),abs(clbd-kappa))
lcmax=min(lbd1+lbd2,clbd+kappa)
for lc in range(lcmin,lcmax+1,2):
lcpmin=max(abs(lbdp1-lbdp2),abs(clbdp-kappa))
lcpmax=min(lbdp1+lbdp2,clbdp+kappa)
for lcp in range(lcpmin,lcpmax+1,2):
if abs(lc-ij)<=lcp and lcp<=lc+ij:
sumb = sumb+(2*lc+1)*(2*lcp+1) * \
thrj(lbd1,lbd2,lc,0,0,0) * \
thrj(lbdp1,lbdp2,lcp,0,0,0) * \
thrj(lc,clbd,kappa,0,0,0) * \
thrj(lcp,clbdp,kappa,0,0,0) * \
sixj(clbd,clbdp,ij,lcp,lc,kappa) * \
ninej(la,lb,ij,lbd1,lbd2,lc,lbdp1,lbdp2,lcp)
cc=sgn(lbd1+kappa+lb)*(2*ij+1)*(2*kappa+1) * (2*clbd+1)*(2*clbdp+1)*bb*sumb
if cc != 0.0:
lbd1_p_lbd2 = lbd1 + lbd2
rhotb[ix,:] = rhotb[ix,:] + cc*self.rr[:]**(lbd1_p_lbd2) *(raa**lbdp1)* (rbb**lbdp2)* fval[kappa,:]
return jtb,clbdtb,lbdtb,rhotb
def prdred_terms(self,la,lb):
""" Compute term-> Lambda,Lambda',j correspondence """
nterm=0
ijmx=la+lb
for ij in range(abs(la-lb),ijmx+1):
for clbd in range(self.lbdmx+1):
nterm=nterm+ (clbd+ij+1 - abs(clbd-ij))
jtb = np.zeros(nterm, dtype=np.int32)
clbdtb = np.zeros(nterm, dtype=np.int32)
lbdtb = np.zeros(nterm, dtype=np.int32)
ix=-1
for ij in range(abs(la-lb),ijmx+1):
for clbd in range(self.lbdmx+1):
for lbd in range(abs(clbd-ij),clbd+ij+1):
ix=ix+1
jtb[ix]=ij
clbdtb[ix]=clbd
lbdtb[ix]=lbd
return jtb,clbdtb,lbdtb
def prdred_libnao(self,phia,la,ra, phib,lb,rb,rcen):
""" By calling a subroutine """
assert len(phia)==self.nr
assert len(phib)==self.nr
jtb,clbdtb,lbdtb=self.prdred_terms(la,lb)
nterm = len(jtb)
jtb_cp = np.require(jtb, dtype=c_int, requirements='C')
clbdtb_cp = np.require(clbdtb, dtype=c_int, requirements='C')
lbdtb_cp = np.require(lbdtb, dtype=c_int, requirements='C')
rhotb_cp = np.require( np.zeros([nterm, self.nr]), dtype=c_double, requirements='CW')
rr_cp = np.require(self.rr,dtype=c_double, requirements='C')
phia_cp = np.require(phia,dtype=c_double, requirements='C')
phib_cp = np.require(phib,dtype=c_double, requirements='C')
ra_cp = np.require(ra,dtype=c_double, requirements='C')
rb_cp = np.require(rb,dtype=c_double, requirements='C')
rcen_cp = np.require(rcen,dtype=c_double, requirements='C')
libnao.prdred(phia_cp.ctypes.data_as(POINTER(c_double)), c_int(la), ra_cp.ctypes.data_as(POINTER(c_double)),
phib_cp.ctypes.data_as(POINTER(c_double)), c_int(lb), rb_cp.ctypes.data_as(POINTER(c_double)),
rcen_cp.ctypes.data_as(POINTER(c_double)),
c_int(self.lbdmx),
rhotb_cp.ctypes.data_as(POINTER(c_double)),
rr_cp.ctypes.data_as(POINTER(c_double)),
c_int(self.nr),
jtb_cp.ctypes.data_as(POINTER(c_int)),
clbdtb_cp.ctypes.data_as(POINTER(c_int)),
lbdtb_cp.ctypes.data_as(POINTER(c_int)),
c_int(nterm),
c_int(self.ngl),
c_int(1),
c_double(self.log_interp.gammin_jt),
c_double(self.log_interp.dg_jt) )
rhotb = rhotb_cp
return jtb,clbdtb,lbdtb,rhotb
def prdred_further(self, ja,ma,jb,mb,rcen,jtb,clbdtb,lbdtb,rhotb):
""" Evaluate the Talman's expansion at given Cartesian coordinates"""
from pyscf.nao.m_thrj import thrj
from pyscf.nao.m_fact import sgn
from pyscf.nao.m_csphar_talman_libnao import csphar_talman_libnao as csphar_jt
from numpy import zeros, sqrt, pi, array
assert all(rcen == zeros(3)) # this works only when center is at the origin
nterm = len(jtb)
assert nterm == len(clbdtb)
assert nterm == len(lbdtb)
assert nterm == rhotb.shape[0]
assert self.nr == rhotb.shape[1]
ffr = zeros([self.lbdmx+1,self.nr], np.complex128)
m = mb + ma
ylm_cr = csphar_jt([0.0,0.0,1.0], lbdtb.max())
for j,clbd,lbd,rho in zip(jtb,clbdtb,lbdtb,rhotb):
ffr[clbd,:]=ffr[clbd,:] + thrj(ja,jb,j,ma,mb,-m)*thrj(j,clbd,lbd,-m,m,0)*rho*ylm_cr[lbd*(lbd+1)]
return ffr,m
def prdred_further_scalar(self, ja,ma,jb,mb,rcen,jtb,clbdtb,lbdtb,rhotb):
""" Evaluate the Talman's expansion at given Cartesian coordinates"""
from pyscf.nao.m_thrj import thrj
from pyscf.nao.m_csphar_talman_libnao import csphar_talman_libnao as csphar_jt
from numpy import zeros, sqrt, pi, array
assert all(rcen == zeros(3)) # this works only when center is at the origin
nterm = len(jtb)
assert nterm == len(clbdtb)
assert nterm == len(lbdtb)
assert nterm == len(rhotb)
ffr = zeros([self.lbdmx+1], np.complex128)
m = mb + ma
for j,clbd,lbd,rho in zip(jtb,clbdtb,lbdtb,rhotb):
ffr[clbd]=ffr[clbd] + thrj(ja,jb,j,ma,mb,-m)*thrj(j,clbd,lbd,-m,m,0)*rho*self.ylm_cr[lbd*(lbd+1)]
return ffr,m
#
#
#
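# Typical call sequence (a sketch, assuming a log_mesh instance lm and radial
# orbitals phia, phib tabulated on lm.rr, with rcen at the origin for the
# *_further variants):
#   pt = prod_talman_c(lm)
#   jtb, clbdtb, lbdtb, rhotb = pt.prdred(phia, la, ra, phib, lb, rb, rcen)
#   ffr, m = pt.prdred_further_scalar(ja, ma, jb, mb, rcen,
#                                     jtb, clbdtb, lbdtb, rhotb[:, ir])
# prdred_libnao() computes the same rhotb through the Fortran routine.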
if __name__=='__main__':
from pyscf.nao import prod_basis_c, system_vars_c
from pyscf import gto
import numpy as np
|
gkc1000/pyscf
|
pyscf/nao/m_prod_talman.py
|
Python
|
apache-2.0
| 10,232
|
[
"PySCF"
] |
6e441300ad69805eb061b33a3b92bc8c71e1714feeaf1ccb185401ba556f2beb
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Parameters for debugging
res = 1000
# create pipeline
#
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName(VTK_DATA_ROOT + "/Data/combxyz.bin")
pl3d.SetQFileName(VTK_DATA_ROOT + "/Data/combq.bin")
pl3d.SetScalarFunctionNumber(100)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
output = pl3d.GetOutput().GetBlock(0)
# Create a probe plane
center = output.GetCenter()
plane = vtk.vtkPlaneSource()
plane.SetResolution(res,res)
plane.SetOrigin(0,0,0)
plane.SetPoint1(10,0,0)
plane.SetPoint2(0,10,0)
plane.SetCenter(center)
plane.SetNormal(0,1,0)
# Reuse the locator
locator = vtk.vtkStaticPointLocator()
locator.SetDataSet(output)
locator.BuildLocator()
# Voronoi kernel------------------------------------------------
voronoiKernel = vtk.vtkVoronoiKernel()
interpolator = vtk.vtkPointInterpolator()
interpolator.SetInputConnection(plane.GetOutputPort())
interpolator.SetSourceData(output)
interpolator.SetKernel(voronoiKernel)
interpolator.SetLocator(locator)
# Time execution
timer = vtk.vtkTimerLog()
timer.StartTimer()
interpolator.Update()
timer.StopTimer()
time = timer.GetElapsedTime()
print("Interpolate Points (Voronoi): {0}".format(time))
intMapper = vtk.vtkPolyDataMapper()
intMapper.SetInputConnection(interpolator.GetOutputPort())
intActor = vtk.vtkActor()
intActor.SetMapper(intMapper)
# Create an outline
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputData(output)
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
# Gaussian kernel-------------------------------------------------------
gaussianKernel = vtk.vtkGaussianKernel()
#gaussianKernel = vtk.vtkEllipsoidalGaussianKernel()
#gaussianKernel.UseScalarsOn()
#gaussianKernel.UseNormalsOn()
gaussianKernel.SetSharpness(4)
gaussianKernel.SetRadius(0.5)
interpolator1 = vtk.vtkPointInterpolator()
interpolator1.SetInputConnection(plane.GetOutputPort())
interpolator1.SetSourceData(output)
interpolator1.SetKernel(gaussianKernel)
interpolator1.SetLocator(locator)
interpolator1.SetNullPointsStrategyToNullValue()
# Time execution
timer.StartTimer()
interpolator1.Update()
timer.StopTimer()
time = timer.GetElapsedTime()
print("Interpolate Points (Gaussian): {0}".format(time))
intMapper1 = vtk.vtkPolyDataMapper()
intMapper1.SetInputConnection(interpolator1.GetOutputPort())
intActor1 = vtk.vtkActor()
intActor1.SetMapper(intMapper1)
# Create an outline
outline1 = vtk.vtkStructuredGridOutlineFilter()
outline1.SetInputData(output)
outlineMapper1 = vtk.vtkPolyDataMapper()
outlineMapper1.SetInputConnection(outline1.GetOutputPort())
outlineActor1 = vtk.vtkActor()
outlineActor1.SetMapper(outlineMapper1)
# Shepard kernel-------------------------------------------------------
shepardKernel = vtk.vtkShepardKernel()
shepardKernel.SetPowerParameter(2)
shepardKernel.SetRadius(0.5)
interpolator2 = vtk.vtkPointInterpolator()
interpolator2.SetInputConnection(plane.GetOutputPort())
interpolator2.SetSourceData(output)
interpolator2.SetKernel(shepardKernel)
interpolator2.SetLocator(locator)
interpolator2.SetNullPointsStrategyToMaskPoints()
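# Shepard interpolation weights each source point by 1/d**p, with p set by
# SetPowerParameter() above, blending values inside the 0.5 search radius;
# the MaskPoints null strategy flags output points that found no source
# points (via a validity mask) instead of writing a null value.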
# Time execution
timer.StartTimer()
interpolator2.Update()
timer.StopTimer()
time = timer.GetElapsedTime()
print("Interpolate Points (Shepard): {0}".format(time))
intMapper2 = vtk.vtkPolyDataMapper()
intMapper2.SetInputConnection(interpolator2.GetOutputPort())
intActor2 = vtk.vtkActor()
intActor2.SetMapper(intMapper2)
# Create an outline
outline2 = vtk.vtkStructuredGridOutlineFilter()
outline2.SetInputData(output)
outlineMapper2 = vtk.vtkPolyDataMapper()
outlineMapper2.SetInputConnection(outline2.GetOutputPort())
outlineActor2 = vtk.vtkActor()
outlineActor2.SetMapper(outlineMapper2)
# SPH kernel-------------------------------------------------------
SPHKernel = vtk.vtkSPHKernel()
interpolator3 = vtk.vtkPointInterpolator()
interpolator3.SetInputConnection(plane.GetOutputPort())
interpolator3.SetSourceData(output)
interpolator3.SetKernel(voronoiKernel)
#interpolator3.SetKernel(SPHKernel)
interpolator3.SetLocator(locator)
# Time execution
timer.StartTimer()
interpolator3.Update()
timer.StopTimer()
time = timer.GetElapsedTime()
print("Interpolate Points (SPH): {0}".format(time))
intMapper3 = vtk.vtkPolyDataMapper()
intMapper3.SetInputConnection(interpolator3.GetOutputPort())
intActor3 = vtk.vtkActor()
intActor3.SetMapper(intMapper3)
# Create an outline
outline3 = vtk.vtkStructuredGridOutlineFilter()
outline3.SetInputData(output)
outlineMapper3 = vtk.vtkPolyDataMapper()
outlineMapper3.SetInputConnection(outline3.GetOutputPort())
outlineActor3 = vtk.vtkActor()
outlineActor3.SetMapper(outlineMapper3)
# Create the RenderWindow, Renderer and both Actors
#
ren0 = vtk.vtkRenderer()
ren0.SetViewport(0,0,.5,.5)
ren1 = vtk.vtkRenderer()
ren1.SetViewport(0.5,0,1,.5)
ren2 = vtk.vtkRenderer()
ren2.SetViewport(0,0.5,.5,1)
ren3 = vtk.vtkRenderer()
ren3.SetViewport(0.5,0.5,1,1)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren0)
renWin.AddRenderer(ren1)
renWin.AddRenderer(ren2)
renWin.AddRenderer(ren3)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren0.AddActor(intActor)
ren0.AddActor(outlineActor)
ren0.SetBackground(0.1, 0.2, 0.4)
ren1.AddActor(intActor1)
ren1.AddActor(outlineActor1)
ren1.SetBackground(0.1, 0.2, 0.4)
ren2.AddActor(intActor2)
ren2.AddActor(outlineActor2)
ren2.SetBackground(0.1, 0.2, 0.4)
ren3.AddActor(intActor3)
ren3.AddActor(outlineActor3)
ren3.SetBackground(0.1, 0.2, 0.4)
renWin.SetSize(500, 500)
cam = ren0.GetActiveCamera()
cam.SetClippingRange(3.95297, 50)
cam.SetFocalPoint(8.88908, 0.595038, 29.3342)
cam.SetPosition(-12.3332, 31.7479, 41.2387)
cam.SetViewUp(0.060772, -0.319905, 0.945498)
ren1.SetActiveCamera(cam)
ren2.SetActiveCamera(cam)
ren3.SetActiveCamera(cam)
iren.Initialize()
# render the image
#
renWin.Render()
#iren.Start()
|
sumedhasingla/VTK
|
Filters/Points/Testing/Python/TestPointInterpolator.py
|
Python
|
bsd-3-clause
| 6,099
|
[
"Gaussian",
"VTK"
] |
bd2c5b0286dc890f655cecbf74519c7c2c0d9873db8b0a3afc9b6c2696f295bc
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os, common, sys, traceback
from proton import *
from threading import Thread, Event
from time import sleep, time
from common import Skipped
class Test(common.Test):
def setup(self):
self.server_credit = 10
self.server_received = 0
self.server_finite_credit = False
self.server = Messenger("server")
self.server.timeout = self.timeout
self.server.start()
self.server.subscribe("amqp://~0.0.0.0:12345")
self.server_thread = Thread(name="server-thread", target=self.run_server)
self.server_thread.daemon = True
self.server_is_running_event = Event()
self.running = True
self.server_thread_started = False
self.client = Messenger("client")
self.client.timeout = self.timeout
def start(self):
self.server_thread_started = True
self.server_thread.start()
self.server_is_running_event.wait(self.timeout)
self.client.start()
def _safelyStopClient(self):
self.server.interrupt()
self.client.stop()
self.client = None
def teardown(self):
try:
if self.running:
if not self.server_thread_started: self.start()
# send a message to cause the server to promptly exit
self.running = False
self._safelyStopClient()
finally:
self.server_thread.join(self.timeout)
self.server = None
REJECT_ME = "*REJECT-ME*"
class MessengerTest(Test):
def run_server(self):
if self.server_finite_credit:
self._run_server_finite_credit()
else:
self._run_server_recv()
def _run_server_recv(self):
""" Use recv() to replenish credit each time the server waits
"""
msg = Message()
try:
while self.running:
self.server_is_running_event.set()
try:
self.server.recv(self.server_credit)
self.process_incoming(msg)
except Interrupt:
pass
finally:
self.server.stop()
self.running = False
def _run_server_finite_credit(self):
""" Grant credit once, process until credit runs out
"""
msg = Message()
self.server_is_running_event.set()
try:
self.server.recv(self.server_credit)
while self.running:
try:
# do not grant additional credit (eg. call recv())
self.process_incoming(msg)
self.server.work()
except Interrupt:
break
finally:
self.server.stop()
self.running = False
def process_incoming(self, msg):
while self.server.incoming:
self.server.get(msg)
self.server_received += 1
if msg.body == REJECT_ME:
self.server.reject()
else:
self.server.accept()
self.dispatch(msg)
def dispatch(self, msg):
if msg.reply_to:
msg.address = msg.reply_to
self.server.put(msg)
self.server.settle()
def testSendReceive(self, size=None, address_size=None):
self.start()
msg = Message()
if address_size:
msg.address="amqp://0.0.0.0:12345/%s" % ("x"*address_size)
else:
msg.address="amqp://0.0.0.0:12345"
msg.reply_to = "~"
msg.subject="Hello World!"
body = "First the world, then the galaxy!"
if size is not None:
while len(body) < size:
body = 2*body
body = body[:size]
msg.body = body
self.client.put(msg)
self.client.send()
reply = Message()
self.client.recv(1)
assert self.client.incoming == 1, self.client.incoming
self.client.get(reply)
assert reply.subject == "Hello World!"
rbod = reply.body
assert rbod == body, (rbod, body)
def testSendReceive1K(self):
self.testSendReceive(1024)
def testSendReceive2K(self):
self.testSendReceive(2*1024)
def testSendReceive4K(self):
self.testSendReceive(4*1024)
def testSendReceive10K(self):
self.testSendReceive(10*1024)
def testSendReceive100K(self):
self.testSendReceive(100*1024)
def testSendReceive1M(self):
self.testSendReceive(1024*1024)
def testSendReceiveLargeAddress(self):
self.testSendReceive(address_size=2048)
# PROTON-285 - prevent continually failing test
def xtestSendBogus(self):
self.start()
msg = Message()
msg.address="totally-bogus-address"
try:
self.client.put(msg)
assert False, "Expecting MessengerException"
except MessengerException, exc:
err = str(exc)
assert "unable to send to address: totally-bogus-address" in err, err
def testOutgoingWindow(self):
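    # Exercises the outgoing window: with no window set, a settled delivery's
    # tracker status is None; once outgoing_window is cut to 5, only the 5
    # newest puts stay inside the window (PENDING until sent, then ACCEPTED)
    # while the older trackers fall outside it and report None.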
self.server.incoming_window = 10
self.start()
msg = Message()
msg.address="amqp://0.0.0.0:12345"
msg.subject="Hello World!"
trackers = []
for i in range(10):
trackers.append(self.client.put(msg))
self.client.send()
for t in trackers:
assert self.client.status(t) is None
# reduce outgoing_window to 5 and then try to send 10 messages
self.client.outgoing_window = 5
trackers = []
for i in range(10):
trackers.append(self.client.put(msg))
for i in range(5):
t = trackers[i]
assert self.client.status(t) is None, (t, self.client.status(t))
for i in range(5, 10):
t = trackers[i]
assert self.client.status(t) is PENDING, (t, self.client.status(t))
self.client.send()
for i in range(5):
t = trackers[i]
assert self.client.status(t) is None
for i in range(5, 10):
t = trackers[i]
assert self.client.status(t) is ACCEPTED
def testReject(self, process_incoming=None):
if process_incoming:
self.process_incoming = process_incoming
self.server.incoming_window = 10
self.start()
msg = Message()
msg.address="amqp://0.0.0.0:12345"
msg.subject="Hello World!"
self.client.outgoing_window = 10
trackers = []
rejected = []
for i in range(10):
if i == 5:
msg.body = REJECT_ME
else:
msg.body = "Yay!"
trackers.append(self.client.put(msg))
if msg.body == REJECT_ME:
rejected.append(trackers[-1])
self.client.send()
for t in trackers:
if t in rejected:
assert self.client.status(t) is REJECTED, (t, self.client.status(t))
else:
assert self.client.status(t) is ACCEPTED, (t, self.client.status(t))
def testRejectIndividual(self):
self.testReject(self.reject_individual)
def reject_individual(self, msg):
if self.server.incoming < 10:
self.server.work(0)
return
while self.server.incoming:
t = self.server.get(msg)
if msg.body == REJECT_ME:
self.server.reject(t)
self.dispatch(msg)
self.server.accept()
def testIncomingWindow(self):
self.server.incoming_window = 10
self.server.outgoing_window = 10
self.start()
msg = Message()
msg.address="amqp://0.0.0.0:12345"
msg.reply_to = "~"
msg.subject="Hello World!"
self.client.outgoing_window = 10
trackers = []
for i in range(10):
trackers.append(self.client.put(msg))
self.client.send()
for t in trackers:
assert self.client.status(t) is ACCEPTED, (t, self.client.status(t))
self.client.incoming_window = 10
remaining = 10
trackers = []
while remaining:
self.client.recv(remaining)
while self.client.incoming:
t = self.client.get()
trackers.append(t)
self.client.accept(t)
remaining -= 1
for t in trackers:
assert self.client.status(t) is ACCEPTED, (t, self.client.status(t))
def testIncomingQueueBiggerThanWindow(self, size=10):
self.server.outgoing_window = size
self.client.incoming_window = size
self.start()
msg = Message()
msg.address = "amqp://0.0.0.0:12345"
msg.reply_to = "~"
msg.subject = "Hello World!"
for i in range(2*size):
self.client.put(msg)
trackers = []
while len(trackers) < 2*size:
self.client.recv(2*size - len(trackers))
while self.client.incoming:
t = self.client.get(msg)
assert self.client.status(t) is SETTLED, (t, self.client.status(t))
trackers.append(t)
for t in trackers[:size]:
assert self.client.status(t) is None, (t, self.client.status(t))
for t in trackers[size:]:
assert self.client.status(t) is SETTLED, (t, self.client.status(t))
self.client.accept()
for t in trackers[:size]:
assert self.client.status(t) is None, (t, self.client.status(t))
for t in trackers[size:]:
assert self.client.status(t) is ACCEPTED, (t, self.client.status(t))
def testIncomingQueueBiggerThanSessionWindow(self):
self.testIncomingQueueBiggerThanWindow(2048)
def testBuffered(self):
self.client.outgoing_window = 1000
self.client.incoming_window = 1000
    self.start()
assert self.server_received == 0
buffering = 0
count = 100
for i in range(count):
msg = Message()
msg.address="amqp://0.0.0.0:12345"
msg.subject="Hello World!"
msg.body = "First the world, then the galaxy!"
t = self.client.put(msg)
buffered = self.client.buffered(t)
# allow transition from False to True, but not back
if buffered:
buffering += 1
else:
assert not buffering, ("saw %s buffered deliveries before?" % buffering)
while self.client.outgoing:
last = self.client.outgoing
self.client.send()
#print "sent ", last - self.client.outgoing
assert self.server_received == count
def test_proton222(self):
self.start()
msg = Message()
msg.address="amqp://0.0.0.0:12345"
msg.subject="Hello World!"
msg.body = "First the world, then the galaxy!"
assert self.server_received == 0
self.client.put(msg)
self.client.send()
# ensure the server got the message without requiring client to stop first
deadline = time() + 10
while self.server_received == 0:
assert time() < deadline, "Server did not receive message!"
sleep(.1)
assert self.server_received == 1
def testUnlimitedCredit(self):
""" Bring up two links. Verify credit is granted to each link by
transferring a message over each.
"""
self.server_credit = -1
self.start()
msg = Message()
msg.address="amqp://0.0.0.0:12345/XXX"
msg.reply_to = "~"
msg.subject="Hello World!"
body = "First the world, then the galaxy!"
msg.body = body
self.client.put(msg)
self.client.send()
reply = Message()
self.client.recv(1)
assert self.client.incoming == 1
self.client.get(reply)
assert reply.subject == "Hello World!"
rbod = reply.body
assert rbod == body, (rbod, body)
msg = Message()
msg.address="amqp://0.0.0.0:12345/YYY"
msg.reply_to = "~"
msg.subject="Hello World!"
body = "First the world, then the galaxy!"
msg.body = body
self.client.put(msg)
self.client.send()
reply = Message()
self.client.recv(1)
assert self.client.incoming == 1
self.client.get(reply)
assert reply.subject == "Hello World!"
rbod = reply.body
assert rbod == body, (rbod, body)
def _DISABLE_test_proton268(self):
""" Reproducer for JIRA Proton-268 """
""" DISABLED: Causes failure on Jenkins, appears to be unrelated to fix """
self.server_credit = 2048
self.start()
msg = Message()
msg.address="amqp://0.0.0.0:12345"
msg.body = "X" * 1024
for x in range( 100 ):
self.client.put( msg )
self.client.send()
try:
self.client.stop()
except Timeout:
assert False, "Timeout waiting for client stop()"
# need to restart client, as teardown() uses it to stop server
self.client.start()
def testRoute(self):
if not common.isSSLPresent():
domain = "amqp"
else:
domain = "amqps"
self.server.subscribe(domain + "://~0.0.0.0:12346")
self.start()
self.client.route("route1", "amqp://0.0.0.0:12345")
self.client.route("route2", domain + "://0.0.0.0:12346")
msg = Message()
msg.address = "route1"
msg.reply_to = "~"
msg.body = "test"
self.client.put(msg)
self.client.recv(1)
reply = Message()
self.client.get(reply)
msg = Message()
msg.address = "route2"
msg.reply_to = "~"
msg.body = "test"
self.client.put(msg)
self.client.recv(1)
self.client.get(reply)
assert reply.body == "test"
def testDefaultRoute(self):
self.start()
self.client.route("*", "amqp://0.0.0.0:12345")
msg = Message()
msg.address = "asdf"
msg.body = "test"
msg.reply_to = "~"
self.client.put(msg)
self.client.recv(1)
reply = Message()
self.client.get(reply)
assert reply.body == "test"
def testDefaultRouteSubstitution(self):
self.start()
self.client.route("*", "amqp://0.0.0.0:12345/$1")
msg = Message()
msg.address = "asdf"
msg.body = "test"
msg.reply_to = "~"
self.client.put(msg)
self.client.recv(1)
reply = Message()
self.client.get(reply)
assert reply.body == "test"
def testIncomingRoute(self):
self.start()
self.client.route("in", "amqp://~0.0.0.0:12346")
self.client.subscribe("in")
msg = Message()
msg.address = "amqp://0.0.0.0:12345"
msg.reply_to = "amqp://0.0.0.0:12346"
msg.body = "test"
self.client.put(msg)
self.client.recv(1)
reply = Message()
self.client.get(reply)
assert reply.body == "test"
def echo_address(self, msg):
while self.server.incoming:
self.server.get(msg)
msg.body = msg.address
self.dispatch(msg)
def _testRewrite(self, original, rewritten):
self.start()
self.process_incoming = self.echo_address
self.client.route("*", "amqp://0.0.0.0:12345")
msg = Message()
msg.address = original
msg.body = "test"
msg.reply_to = "~"
self.client.put(msg)
assert msg.address == original
self.client.recv(1)
assert self.client.incoming == 1
echo = Message()
self.client.get(echo)
assert echo.body == rewritten, (echo.body, rewritten)
assert msg.address == original
def testDefaultRewriteH(self):
self._testRewrite("original", "original")
def testDefaultRewriteUH(self):
self._testRewrite("user@original", "original")
def testDefaultRewriteUPH(self):
self._testRewrite("user:pass@original", "original")
def testDefaultRewriteHP(self):
self._testRewrite("original:123", "original:123")
def testDefaultRewriteUHP(self):
self._testRewrite("user@original:123", "original:123")
def testDefaultRewriteUPHP(self):
self._testRewrite("user:pass@original:123", "original:123")
def testDefaultRewriteHN(self):
self._testRewrite("original/name", "original/name")
def testDefaultRewriteUHN(self):
self._testRewrite("user@original/name", "original/name")
def testDefaultRewriteUPHN(self):
self._testRewrite("user:pass@original/name", "original/name")
def testDefaultRewriteHPN(self):
self._testRewrite("original:123/name", "original:123/name")
def testDefaultRewriteUHPN(self):
self._testRewrite("user@original:123/name", "original:123/name")
def testDefaultRewriteUPHPN(self):
self._testRewrite("user:pass@original:123/name", "original:123/name")
def testDefaultRewriteSH(self):
self._testRewrite("amqp://original", "amqp://original")
def testDefaultRewriteSUH(self):
self._testRewrite("amqp://user@original", "amqp://original")
def testDefaultRewriteSUPH(self):
self._testRewrite("amqp://user:pass@original", "amqp://original")
def testDefaultRewriteSHP(self):
self._testRewrite("amqp://original:123", "amqp://original:123")
def testDefaultRewriteSUHP(self):
self._testRewrite("amqp://user@original:123", "amqp://original:123")
def testDefaultRewriteSUPHP(self):
self._testRewrite("amqp://user:pass@original:123", "amqp://original:123")
def testDefaultRewriteSHN(self):
self._testRewrite("amqp://original/name", "amqp://original/name")
def testDefaultRewriteSUHN(self):
self._testRewrite("amqp://user@original/name", "amqp://original/name")
def testDefaultRewriteSUPHN(self):
self._testRewrite("amqp://user:pass@original/name", "amqp://original/name")
def testDefaultRewriteSHPN(self):
self._testRewrite("amqp://original:123/name", "amqp://original:123/name")
def testDefaultRewriteSUHPN(self):
self._testRewrite("amqp://user@original:123/name", "amqp://original:123/name")
def testDefaultRewriteSUPHPN(self):
self._testRewrite("amqp://user:pass@original:123/name", "amqp://original:123/name")
  def testRewriteSuppress(self):
self.client.rewrite("*", None)
self._testRewrite("asdf", None)
def testRewrite(self):
self.client.rewrite("a", "b")
self._testRewrite("a", "b")
def testRewritePattern(self):
self.client.rewrite("amqp://%@*", "amqp://$2")
self._testRewrite("amqp://foo@bar", "amqp://bar")
def testRewriteToAt(self):
self.client.rewrite("amqp://%/*", "$2@$1")
self._testRewrite("amqp://domain/name", "name@domain")
def testRewriteOverrideDefault(self):
self.client.rewrite("*", "$1")
self._testRewrite("amqp://user:pass@host", "amqp://user:pass@host")
def testCreditBlockingRebalance(self):
""" The server is given a fixed amount of credit, and runs until that
credit is exhausted.
"""
self.server_finite_credit = True
self.server_credit = 11
self.start()
# put one message out on "Link1" - since there are no other links, it
# should get all the credit (10 after sending)
msg = Message()
msg.address="amqp://0.0.0.0:12345/Link1"
msg.subject="Hello World!"
body = "First the world, then the galaxy!"
msg.body = body
msg.reply_to = "~"
self.client.put(msg)
self.client.send()
self.client.recv(1)
assert self.client.incoming == 1
# Now attempt to exhaust credit using a different link
for i in range(10):
msg.address="amqp://0.0.0.0:12345/Link2"
self.client.put(msg)
self.client.send()
deadline = time() + self.timeout
count = 0
while count < 11 and time() < deadline:
self.client.recv(-1)
while self.client.incoming:
self.client.get(msg)
count += 1
assert count == 11, count
# now attempt to send one more. There isn't enough credit, so it should
# not be sent
self.client.timeout = 1
msg.address="amqp://0.0.0.0:12345/Link2"
self.client.put(msg)
try:
self.client.send()
assert False, "expected client to time out in send()"
except Timeout:
pass
assert self.client.outgoing == 1
class NBMessengerTest(common.Test):
def setup(self):
self.client = Messenger("client")
self.client2 = Messenger("client2")
self.server = Messenger("server")
self.messengers = [self.client, self.client2, self.server]
self.client.blocking = False
self.client2.blocking = False
self.server.blocking = False
self.server.start()
self.client.start()
self.client2.start()
self.address = "amqp://0.0.0.0:12345"
self.server.subscribe("amqp://~0.0.0.0:12345")
def _pump(self, timeout, work_triggers_exit):
for msgr in self.messengers:
if msgr.work(timeout) and work_triggers_exit:
return True
return False
def pump(self, timeout=0):
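    # Drain all pending work, block once for up to `timeout`, then drain
    # again - one call both flushes I/O and waits for new events.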
while self._pump(0, True): pass
self._pump(timeout, False)
while self._pump(0, True): pass
def teardown(self):
self.server.stop()
self.client.stop()
self.client2.stop()
self.pump()
assert self.server.stopped
assert self.client.stopped
assert self.client2.stopped
def testSmoke(self, count=1):
self.server.recv()
msg = Message()
msg.address = self.address
for i in range(count):
msg.body = "Hello %s" % i
self.client.put(msg)
msg2 = Message()
for i in range(count):
if self.server.incoming == 0:
self.pump()
assert self.server.incoming > 0, self.server.incoming
self.server.get(msg2)
assert msg2.body == "Hello %s" % i, (msg2.body, i)
assert self.client.outgoing == 0, self.client.outgoing
assert self.server.incoming == 0, self.client.incoming
def testSmoke1024(self):
self.testSmoke(1024)
def testSmoke4096(self):
self.testSmoke(4096)
def testPushback(self):
self.server.recv()
msg = Message()
msg.address = self.address
    for _ in xrange(16):
      for _ in xrange(1024):
self.client.put(msg)
self.pump()
if self.client.outgoing > 0:
break
assert self.client.outgoing > 0
def testRecvBeforeSubscribe(self):
self.client.recv()
self.client.subscribe(self.address + "/foo")
self.pump()
msg = Message()
msg.address = "amqp://client/foo"
msg.body = "Hello World!"
self.server.put(msg)
assert self.client.incoming == 0
self.pump(self.delay)
assert self.client.incoming == 1
msg2 = Message()
self.client.get(msg2)
assert msg2.address == msg.address
assert msg2.body == msg.body
def testCreditAutoBackpressure(self):
""" Verify that use of automatic credit (pn_messenger_recv(-1)) does not
fill the incoming queue indefinitely. If the receiver does not 'get' the
message, eventually the sender will block. See PROTON-350 """
self.server.recv()
msg = Message()
msg.address = self.address
deadline = time() + self.timeout
while time() < deadline:
old = self.server.incoming
for j in xrange(1001):
self.client.put(msg)
self.pump()
if old == self.server.incoming:
        break
assert old == self.server.incoming, "Backpressure not active!"
def testCreditRedistribution(self):
""" Verify that a fixed amount of credit will redistribute to new
links.
"""
self.server.recv( 5 )
# first link will get all credit
msg1 = Message()
msg1.address = self.address + "/msg1"
self.client.put(msg1)
self.pump()
assert self.server.incoming == 1, self.server.incoming
assert self.server.receiving == 4, self.server.receiving
# no credit left over for this link
msg2 = Message()
msg2.address = self.address + "/msg2"
self.client.put(msg2)
self.pump()
assert self.server.incoming == 1, self.server.incoming
assert self.server.receiving == 4, self.server.receiving
# eventually, credit will rebalance and the new link will send
deadline = time() + self.timeout
while time() < deadline:
sleep(.1)
self.pump()
if self.server.incoming == 2:
        break
assert self.server.incoming == 2, self.server.incoming
assert self.server.receiving == 3, self.server.receiving
def testCreditReclaim(self):
""" Verify that credit is reclaimed when a link with outstanding credit is
torn down.
"""
self.server.recv( 9 )
# first link will get all credit
msg1 = Message()
msg1.address = self.address + "/msg1"
self.client.put(msg1)
self.pump()
assert self.server.incoming == 1, self.server.incoming
assert self.server.receiving == 8, self.server.receiving
# no credit left over for this link
msg2 = Message()
msg2.address = self.address + "/msg2"
self.client.put(msg2)
self.pump()
assert self.server.incoming == 1, self.server.incoming
assert self.server.receiving == 8, self.server.receiving
# and none for this new client
msg3 = Message()
msg3.address = self.address + "/msg3"
self.client2.put(msg3)
self.pump()
# eventually, credit will rebalance and all links will
# send a message
deadline = time() + self.timeout
while time() < deadline:
sleep(.1)
self.pump()
if self.server.incoming == 3:
        break
assert self.server.incoming == 3, self.server.incoming
assert self.server.receiving == 6, self.server.receiving
# now tear down client two, this should cause its outstanding credit to be
# made available to the other links
self.client2.stop()
self.pump()
for i in range(4):
self.client.put(msg1)
self.client.put(msg2)
# should exhaust all credit
deadline = time() + self.timeout
while time() < deadline:
sleep(.1)
self.pump()
if self.server.incoming == 9:
        break
assert self.server.incoming == 9, self.server.incoming
assert self.server.receiving == 0, self.server.receiving
def testCreditReplenish(self):
""" When extra credit is available it should be granted to the first
link that can use it.
"""
# create three links
msg = Message()
for i in range(3):
msg.address = self.address + "/%d" % i
self.client.put(msg)
self.server.recv( 50 ) # 50/3 = 16 per link + 2 extra
self.pump()
assert self.server.incoming == 3, self.server.incoming
assert self.server.receiving == 47, self.server.receiving
# 47/3 = 15 per link, + 2 extra
# verify one link can send 15 + the two extra (17)
for i in range(17):
msg.address = self.address + "/0"
self.client.put(msg)
self.pump()
assert self.server.incoming == 20, self.server.incoming
assert self.server.receiving == 30, self.server.receiving
# now verify that the remaining credit (30) will eventually rebalance
# across all links (10 per link)
for j in range(10):
for i in range(3):
msg.address = self.address + "/%d" % i
self.client.put(msg)
deadline = time() + self.timeout
while time() < deadline:
sleep(.1)
self.pump()
if self.server.incoming == 50:
break
assert self.server.incoming == 50, self.server.incoming
assert self.server.receiving == 0, self.server.receiving
from select import select
class Pump:
def __init__(self, *messengers):
self.messengers = messengers
self.selectables = []
def pump_once(self):
for m in self.messengers:
while True:
sel = m.selectable()
if sel:
self.selectables.append(sel)
else:
break
reading = []
writing = []
for sel in self.selectables[:]:
if sel.is_terminal:
sel.free()
self.selectables.remove(sel)
else:
if sel.capacity > 0:
reading.append(sel)
if sel.pending > 0:
writing.append(sel)
readable, writable, _ = select(reading, writing, [], 0)
count = 0
for s in readable:
s.readable()
count += 1
for s in writable:
s.writable()
count += 1
return count
def pump(self):
while self.pump_once(): pass
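# Pump drives Messenger I/O directly through its selectables: each pass
# collects newly created selectables, frees terminal ones, select()s on those
# with read capacity or pending writes, and fires readable()/writable()
# callbacks until no file descriptor makes progress.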
class SelectableMessengerTest(common.Test):
def testSelectable(self, count = 1):
if os.name=="nt":
# Conflict between native OS select() in Pump and IOCP based pn_selector_t
# makes this fail on Windows (see PROTON-668).
raise Skipped("Invalid test on Windows with IOCP.")
mrcv = Messenger()
mrcv.passive = True
mrcv.subscribe("amqp://~0.0.0.0:1234")
msnd = Messenger()
msnd.passive = True
m = Message()
m.address = "amqp://0.0.0.0:1234"
for i in range(count):
m.body = u"Hello World! %s" % i
msnd.put(m)
p = Pump(msnd, mrcv)
p.pump()
assert msnd.outgoing == count
assert mrcv.incoming == 0
mrcv.recv()
mc = Message()
for i in range(count):
if mrcv.incoming == 0:
p.pump()
assert mrcv.incoming > 0
mrcv.get(mc)
assert mc.body == u"Hello World! %s" % i, (i, mc.body)
mrcv.stop()
assert not mrcv.stopped
p.pump()
assert mrcv.stopped
def testSelectable16(self):
self.testSelectable(count=16)
def testSelectable1024(self):
self.testSelectable(count=1024)
def testSelectable4096(self):
self.testSelectable(count=4096)
class IdleTimeoutTest(common.Test):
def testIdleTimeout(self):
"""
Verify that a Messenger connection is kept alive using empty idle frames
    when an idle_timeout is advertised by the remote peer.
"""
if "java" in sys.platform:
raise Skipped()
idle_timeout_secs = self.delay
try:
idle_server = common.TestServerDrain(idle_timeout=idle_timeout_secs)
idle_server.timeout = self.timeout
idle_server.start()
idle_client = Messenger("idle_client")
idle_client.timeout = self.timeout
idle_client.start()
idle_client.subscribe("amqp://%s:%s/foo" %
(idle_server.host, idle_server.port))
idle_client.work(idle_timeout_secs/10)
# wait up to 3x the idle timeout and hence verify that everything stays
# connected during that time by virtue of no Exception being raised
duration = 3 * idle_timeout_secs
deadline = time() + duration
while time() <= deadline:
        idle_client.work(idle_timeout_secs/10)
# confirm link is still active
cxtr = idle_server.driver.head_connector()
assert not cxtr.closed, "Connector has unexpectedly been closed"
conn = cxtr.connection
assert conn.state == (Endpoint.LOCAL_ACTIVE
| Endpoint.REMOTE_ACTIVE
), "Connection has unexpectedly terminated"
link = conn.link_head(0)
while link:
assert link.state != (Endpoint.REMOTE_CLOSED
), "Link unexpectedly closed"
link = link.next(0)
finally:
try:
idle_client.stop()
except:
pass
try:
idle_server.stop()
except:
pass
|
astitcher/qpid-proton-old
|
tests/python/proton_tests/messenger.py
|
Python
|
apache-2.0
| 30,342
|
[
"Galaxy"
] |
19984be246d43ea3259f734d7f375a2c855a6168caec4f8ede9a9d5e5be60b50
|
"""
:mod:`Mutators` -- mutation methods module
=====================================================================
In this module we have the genetic operators of mutation for each chromosome representation.
"""
import Util
from random import randint as rand_randint, gauss as rand_gauss, uniform as rand_uniform
from random import choice as rand_choice
import Consts
import GTree
#############################
## 1D Binary String ##
#############################
def G1DBinaryStringMutatorSwap(genome, **args):
""" The 1D Binary String Swap Mutator """
if args["pmut"] <= 0.0: return 0
stringLength = len(genome)
mutations = args["pmut"] * (stringLength)
if mutations < 1.0:
mutations = 0
for it in xrange(stringLength):
if Util.randomFlipCoin(args["pmut"]):
Util.listSwapElement(genome, it, rand_randint(0, stringLength-1))
mutations+=1
else:
for it in xrange(int(round(mutations))):
Util.listSwapElement(genome, rand_randint(0, stringLength-1),
rand_randint(0, stringLength-1))
return int(mutations)
def G1DBinaryStringMutatorFlip(genome, **args):
""" The classical flip mutator for binary strings """
if args["pmut"] <= 0.0: return 0
stringLength = len(genome)
mutations = args["pmut"] * (stringLength)
if mutations < 1.0:
mutations = 0
for it in xrange(stringLength):
if Util.randomFlipCoin(args["pmut"]):
if genome[it] == 0: genome[it] = 1
else: genome[it] = 0
mutations+=1
else:
for it in xrange(int(round(mutations))):
which = rand_randint(0, stringLength-1)
if genome[which] == 0: genome[which] = 1
else: genome[which] = 0
return int(mutations)
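# The pmut bookkeeping above is shared by every mutator in this module: when
# the expected mutation count pmut*length is below 1.0, each gene gets an
# independent coin flip; otherwise a fixed round(pmut*length) number of random
# genes is mutated. A standalone sketch of that pattern (plain stdlib random,
# no Pyevolve types; the helper name is hypothetical and illustrative only):
def _expected_count_flip_sketch(bits, pmut):
   """ Flip mutator over a plain list of 0/1 ints, mirroring the logic above """
   from random import random as _rand, randint as _randint
   mutations = pmut * len(bits)
   if mutations < 1.0:
      mutations = 0
      for i in xrange(len(bits)):
         if _rand() <= pmut:
            bits[i] = 1 - bits[i]
            mutations += 1
   else:
      for _ in xrange(int(round(mutations))):
         which = _randint(0, len(bits) - 1)
         bits[which] = 1 - bits[which]
   return int(mutations)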
####################
## 1D List ##
####################
def G1DListMutatorSwap(genome, **args):
""" The mutator of G1DList, Swap Mutator
.. note:: this mutator is :term:`Data Type Independent`
"""
if args["pmut"] <= 0.0: return 0
listSize = len(genome) - 1
mutations = args["pmut"] * (listSize+1)
if mutations < 1.0:
mutations = 0
for it in xrange(listSize+1):
if Util.randomFlipCoin(args["pmut"]):
Util.listSwapElement(genome, it, rand_randint(0, listSize))
mutations+=1
else:
for it in xrange(int(round(mutations))):
Util.listSwapElement(genome, rand_randint(0, listSize), rand_randint(0, listSize))
return int(mutations)
def G1DListModifiedMutator(genome, **args):
""" The mutator of G1DList, Swap Mutator
.. note:: this mutator is :term:`Data Type Independent`
"""
if args["pmut"] <= 0.0:
return 0
listSize = len(genome) - 1
mutations = args["pmut"] * (listSize+1)
if mutations < 1.0:
mutations = 0
for it in xrange(listSize+1):
if Util.randomFlipCoin(args["pmut"]):
Util.listFrameShiftElement(genome, it, rand_randint(0,listSize))
mutations+=1
else:
for it in xrange(int(round(mutations))):
Util.listFrameShiftElement(genome, rand_randint(0,listSize), rand_randint(0,listSize))
return int(mutations)
def G1DListMutatorSIM(genome, **args):
""" The mutator of G1DList, Simple Inversion Mutation
.. note:: this mutator is :term:`Data Type Independent`
"""
mutations = 0
if args["pmut"] <= 0.0: return 0
cuts = [rand_randint(0, len(genome)), rand_randint(0, len(genome))]
if cuts[0] > cuts[1]:
Util.listSwapElement(cuts, 0, 1)
if (cuts[1]-cuts[0]) <= 0:
cuts[1] = rand_randint(cuts[0], len(genome))
if Util.randomFlipCoin(args["pmut"]):
part = genome[cuts[0]:cuts[1]]
if len(part) == 0: return 0
part.reverse()
genome[cuts[0]:cuts[1]] = part
mutations += 1
return mutations
def G1DListMutatorIntegerRange(genome, **args):
""" Simple integer range mutator for G1DList
Accepts the *rangemin* and *rangemax* genome parameters, both optional.
"""
if args["pmut"] <= 0.0: return 0
listSize = len(genome)
mutations = args["pmut"] * listSize
if mutations < 1.0:
mutations = 0
for it in xrange(listSize):
if Util.randomFlipCoin(args["pmut"]):
genome[it] = rand_randint(genome.getParam("rangemin", Consts.CDefRangeMin),
genome.getParam("rangemax", Consts.CDefRangeMax))
mutations += 1
else:
for it in xrange(int(round(mutations))):
which_gene = rand_randint(0, listSize-1)
genome[which_gene] = rand_randint(genome.getParam("rangemin", Consts.CDefRangeMin),
genome.getParam("rangemax", Consts.CDefRangeMax))
return int(mutations)
def G1DListMutatorRealRange(genome, **args):
""" Simple real range mutator for G1DList
Accepts the *rangemin* and *rangemax* genome parameters, both optional.
"""
if args["pmut"] <= 0.0: return 0
listSize = len(genome)
mutations = args["pmut"] * (listSize)
if mutations < 1.0:
mutations = 0
for it in xrange(listSize):
if Util.randomFlipCoin(args["pmut"]):
genome[it] = rand_uniform(genome.getParam("rangemin", Consts.CDefRangeMin),
genome.getParam("rangemax", Consts.CDefRangeMax))
mutations += 1
else:
for it in xrange(int(round(mutations))):
which_gene = rand_randint(0, listSize-1)
genome[which_gene] = rand_uniform(genome.getParam("rangemin", Consts.CDefRangeMin),
genome.getParam("rangemax", Consts.CDefRangeMax))
return int(mutations)
def G1DListMutatorIntegerGaussian(genome, **args):
""" A gaussian mutator for G1DList of Integers
Accepts the *rangemin* and *rangemax* genome parameters, both optional. Also
accepts the parameter *gauss_mu* and the *gauss_sigma* which respectively
represents the mean and the std. dev. of the random distribution.
"""
if args["pmut"] <= 0.0: return 0
listSize = len(genome)
mutations = args["pmut"] * (listSize)
mu = genome.getParam("gauss_mu")
sigma = genome.getParam("gauss_sigma")
if mu is None:
mu = Consts.CDefG1DListMutIntMU
if sigma is None:
sigma = Consts.CDefG1DListMutIntSIGMA
if mutations < 1.0:
mutations = 0
for it in xrange(listSize):
if Util.randomFlipCoin(args["pmut"]):
final_value = genome[it] + int(rand_gauss(mu, sigma))
final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
genome[it] = final_value
mutations += 1
else:
for it in xrange(int(round(mutations))):
which_gene = rand_randint(0, listSize-1)
final_value = genome[which_gene] + int(rand_gauss(mu, sigma))
final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
genome[which_gene] = final_value
return int(mutations)
def G1DListMutatorRealGaussian(genome, **args):
""" The mutator of G1DList, Gaussian Mutator
Accepts the *rangemin* and *rangemax* genome parameters, both optional. Also
accepts the parameter *gauss_mu* and the *gauss_sigma* which respectively
represents the mean and the std. dev. of the random distribution.
"""
if args["pmut"] <= 0.0: return 0
listSize = len(genome)
mutations = args["pmut"] * (listSize)
mu = genome.getParam("gauss_mu")
sigma = genome.getParam("gauss_sigma")
if mu is None:
mu = Consts.CDefG1DListMutRealMU
if sigma is None:
sigma = Consts.CDefG1DListMutRealSIGMA
if mutations < 1.0:
mutations = 0
for it in xrange(listSize):
if Util.randomFlipCoin(args["pmut"]):
final_value = genome[it] + rand_gauss(mu, sigma)
final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
genome[it] = final_value
mutations += 1
else:
for it in xrange(int(round(mutations))):
which_gene = rand_randint(0, listSize-1)
final_value = genome[which_gene] + rand_gauss(mu, sigma)
final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
genome[which_gene] = final_value
return int(mutations)
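# The Gaussian mutators above all share the same perturb-then-clamp step; a
# standalone equivalent (hypothetical helper, stdlib only):
def _clamped_gauss_sketch(value, mu, sigma, lo, hi):
   """ Perturb value by a N(mu, sigma) draw and clamp the result into [lo, hi] """
   from random import gauss as _gauss
   return max(lo, min(hi, value + _gauss(mu, sigma)))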
def G1DListMutatorIntegerBinary(genome, **args):
""" The mutator of G1DList, the binary mutator
   This mutator will randomly change the 0 and 1 elements of the 1D List.
"""
if args["pmut"] <= 0.0: return 0
listSize = len(genome)
mutations = args["pmut"] * (listSize)
if mutations < 1.0:
mutations = 0
for it in xrange(listSize):
if Util.randomFlipCoin(args["pmut"]):
if genome[it] == 0: genome[it] = 1
elif genome[it] == 1: genome[it] = 0
mutations += 1
else:
for it in xrange(int(round(mutations))):
which_gene = rand_randint(0, listSize-1)
if genome[which_gene] == 0: genome[which_gene] = 1
elif genome[which_gene] == 1: genome[which_gene] = 0
return int(mutations)
def G1DListMutatorAllele(genome, **args):
""" The mutator of G1DList, Allele Mutator
To use this mutator, you must specify the *allele* genome parameter with the
:class:`GAllele.GAlleles` instance.
"""
if args["pmut"] <= 0.0: return 0
listSize = len(genome) - 1
mutations = args["pmut"] * (listSize+1)
allele = genome.getParam("allele", None)
if allele is None:
Util.raiseException("to use the G1DListMutatorAllele, you must specify the 'allele' parameter", TypeError)
if mutations < 1.0:
mutations = 0
for it in xrange(listSize+1):
if Util.randomFlipCoin(args["pmut"]):
new_val = allele[it].getRandomAllele()
genome[it] = new_val
mutations+=1
else:
for it in xrange(int(round(mutations))):
which_gene = rand_randint(0, listSize)
new_val = allele[which_gene].getRandomAllele()
genome[which_gene] = new_val
return int(mutations)
####################
## 2D List ##
####################
def G2DListMutatorSwap(genome, **args):
""" The mutator of G1DList, Swap Mutator
.. note:: this mutator is :term:`Data Type Independent`
"""
if args["pmut"] <= 0.0: return 0
height, width = genome.getSize()
elements = height * width
mutations = args["pmut"] * elements
if mutations < 1.0:
mutations = 0
for i in xrange(height):
for j in xrange(width):
if Util.randomFlipCoin(args["pmut"]):
index_b = (rand_randint(0, height-1), rand_randint(0, width-1))
Util.list2DSwapElement(genome.genomeList, (i,j), index_b)
mutations+=1
else:
for it in xrange(int(round(mutations))):
index_a = (rand_randint(0, height-1), rand_randint(0, width-1))
index_b = (rand_randint(0, height-1), rand_randint(0, width-1))
Util.list2DSwapElement(genome.genomeList, index_a, index_b)
return int(mutations)
def G2DListMutatorIntegerRange(genome, **args):
""" Simple integer range mutator for G2DList
Accepts the *rangemin* and *rangemax* genome parameters, both optional.
"""
if args["pmut"] <= 0.0: return 0
height, width = genome.getSize()
elements = height * width
mutations = args["pmut"] * elements
range_min = genome.getParam("rangemin", Consts.CDefRangeMin)
range_max = genome.getParam("rangemax", Consts.CDefRangeMax)
if mutations < 1.0:
mutations = 0
for i in xrange(genome.getHeight()):
for j in xrange(genome.getWidth()):
if Util.randomFlipCoin(args["pmut"]):
random_int = rand_randint(range_min, range_max)
genome.setItem(i, j, random_int)
mutations += 1
else:
for it in xrange(int(round(mutations))):
which_x = rand_randint(0, genome.getWidth()-1)
which_y = rand_randint(0, genome.getHeight()-1)
random_int = rand_randint(range_min, range_max)
genome.setItem(which_y, which_x, random_int)
return int(mutations)
def G2DListMutatorIntegerGaussian(genome, **args):
""" A gaussian mutator for G2DList of Integers
Accepts the *rangemin* and *rangemax* genome parameters, both optional. Also
accepts the parameter *gauss_mu* and the *gauss_sigma* which respectively
represents the mean and the std. dev. of the random distribution.
"""
if args["pmut"] <= 0.0: return 0
height, width = genome.getSize()
elements = height * width
mutations = args["pmut"] * elements
mu = genome.getParam("gauss_mu")
sigma = genome.getParam("gauss_sigma")
if mu is None:
mu = Consts.CDefG2DListMutIntMU
if sigma is None:
sigma = Consts.CDefG2DListMutIntSIGMA
if mutations < 1.0:
mutations = 0
for i in xrange(genome.getHeight()):
for j in xrange(genome.getWidth()):
if Util.randomFlipCoin(args["pmut"]):
final_value = genome[i][j] + int(rand_gauss(mu, sigma))
final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
genome.setItem(i, j, final_value)
mutations += 1
else:
for it in xrange(int(round(mutations))):
which_x = rand_randint(0, genome.getWidth()-1)
which_y = rand_randint(0, genome.getHeight()-1)
final_value = genome[which_y][which_x] + int(rand_gauss(mu, sigma))
final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
genome.setItem(which_y, which_x, final_value)
return int(mutations)
def G2DListMutatorAllele(genome, **args):
""" The mutator of G2DList, Allele Mutator
To use this mutator, you must specify the *allele* genome parameter with the
:class:`GAllele.GAlleles` instance.
.. warning:: the :class:`GAllele.GAlleles` instance must have the homogeneous flag enabled
"""
if args["pmut"] <= 0.0: return 0
listSize = len(genome) - 1
mutations = args["pmut"] * (listSize+1)
allele = genome.getParam("allele", None)
if allele is None:
Util.raiseException("to use the G2DListMutatorAllele, you must specify the 'allele' parameter", TypeError)
if allele.homogeneous == False:
Util.raiseException("to use the G2DListMutatorAllele, the 'allele' must be homogeneous")
if mutations < 1.0:
mutations = 0
for i in xrange(genome.getHeight()):
         for j in xrange(genome.getWidth()):
if Util.randomFlipCoin(args["pmut"]):
new_val = allele[0].getRandomAllele()
genome.setItem(i, j, new_val)
mutations+=1
else:
for it in xrange(int(round(mutations))):
which_x = rand_randint(0, genome.getWidth()-1)
which_y = rand_randint(0, genome.getHeight()-1)
new_val = allele[0].getRandomAllele()
         genome.setItem(which_y, which_x, new_val)
return int(mutations)
def G2DListMutatorRealGaussian(genome, **args):
""" A gaussian mutator for G2DList of Real
Accepts the *rangemin* and *rangemax* genome parameters, both optional. Also
accepts the parameter *gauss_mu* and the *gauss_sigma* which respectively
represents the mean and the std. dev. of the random distribution.
"""
if args["pmut"] <= 0.0: return 0
height, width = genome.getSize()
elements = height * width
mutations = args["pmut"] * elements
mu = genome.getParam("gauss_mu")
sigma = genome.getParam("gauss_sigma")
if mu is None:
mu = Consts.CDefG2DListMutRealMU
if sigma is None:
sigma = Consts.CDefG2DListMutRealSIGMA
if mutations < 1.0:
mutations = 0
for i in xrange(genome.getHeight()):
for j in xrange(genome.getWidth()):
if Util.randomFlipCoin(args["pmut"]):
final_value = genome[i][j] + rand_gauss(mu, sigma)
final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
genome.setItem(i, j, final_value)
mutations += 1
else:
for it in xrange(int(round(mutations))):
which_x = rand_randint(0, genome.getWidth()-1)
which_y = rand_randint(0, genome.getHeight()-1)
final_value = genome[which_y][which_x] + rand_gauss(mu, sigma)
final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
genome.setItem(which_y, which_x, final_value)
return int(mutations)
#############################
## 2D Binary String ##
#############################
def G2DBinaryStringMutatorSwap(genome, **args):
""" The mutator of G2DBinaryString, Swap Mutator
.. versionadded:: 0.6
The *G2DBinaryStringMutatorSwap* function
"""
if args["pmut"] <= 0.0: return 0
height, width = genome.getSize()
elements = height * width
mutations = args["pmut"] * elements
if mutations < 1.0:
mutations = 0
for i in xrange(height):
for j in xrange(width):
if Util.randomFlipCoin(args["pmut"]):
index_b = (rand_randint(0, height-1), rand_randint(0, width-1))
Util.list2DSwapElement(genome.genomeString, (i,j), index_b)
mutations+=1
else:
for it in xrange(int(round(mutations))):
index_a = (rand_randint(0, height-1), rand_randint(0, width-1))
index_b = (rand_randint(0, height-1), rand_randint(0, width-1))
Util.list2DSwapElement(genome.genomeString, index_a, index_b)
return int(mutations)
def G2DBinaryStringMutatorFlip(genome, **args):
""" A flip mutator for G2DBinaryString
.. versionadded:: 0.6
The *G2DBinaryStringMutatorFlip* function
"""
if args["pmut"] <= 0.0: return 0
height, width = genome.getSize()
elements = height * width
mutations = args["pmut"] * elements
if mutations < 1.0:
mutations = 0
for i in xrange(genome.getHeight()):
for j in xrange(genome.getWidth()):
if Util.randomFlipCoin(args["pmut"]):
if genome[i][j] == 0: genome.setItem(i, j, 1)
else: genome.setItem(i, j, 0)
mutations += 1
else:
for it in xrange(int(round(mutations))):
which_x = rand_randint(0, genome.getWidth()-1)
which_y = rand_randint(0, genome.getHeight()-1)
         if genome[which_y][which_x] == 0: genome.setItem(which_y, which_x, 1)
         else: genome.setItem(which_y, which_x, 0)
return int(mutations)
#################
## Tree ##
#################
def GTreeMutatorSwap(genome, **args):
""" The mutator of GTree, Swap Mutator
.. versionadded:: 0.6
The *GTreeMutatorSwap* function
"""
if args["pmut"] <= 0.0: return 0
elements = len(genome)
mutations = args["pmut"] * elements
if mutations < 1.0:
mutations = 0
for i in xrange(len(genome)):
if Util.randomFlipCoin(args["pmut"]):
mutations += 1
nodeOne = genome.getRandomNode()
nodeTwo = genome.getRandomNode()
nodeOne.swapNodeData(nodeTwo)
else:
for it in xrange(int(round(mutations))):
nodeOne = genome.getRandomNode()
nodeTwo = genome.getRandomNode()
nodeOne.swapNodeData(nodeTwo)
return int(mutations)
def GTreeMutatorIntegerRange(genome, **args):
""" The mutator of GTree, Integer Range Mutator
Accepts the *rangemin* and *rangemax* genome parameters, both optional.
.. versionadded:: 0.6
The *GTreeMutatorIntegerRange* function
"""
if args["pmut"] <= 0.0: return 0
elements = len(genome)
mutations = args["pmut"] * elements
range_min = genome.getParam("rangemin", Consts.CDefRangeMin)
range_max = genome.getParam("rangemax", Consts.CDefRangeMax)
if mutations < 1.0:
mutations = 0
for i in xrange(len(genome)):
if Util.randomFlipCoin(args["pmut"]):
mutations += 1
rand_node = genome.getRandomNode()
random_int = rand_randint(range_min, range_max)
rand_node.setData(random_int)
else:
for it in xrange(int(round(mutations))):
rand_node = genome.getRandomNode()
random_int = rand_randint(range_min, range_max)
rand_node.setData(random_int)
return int(mutations)
def GTreeMutatorRealRange(genome, **args):
""" The mutator of GTree, Real Range Mutator
Accepts the *rangemin* and *rangemax* genome parameters, both optional.
.. versionadded:: 0.6
The *GTreeMutatorRealRange* function
"""
if args["pmut"] <= 0.0: return 0
elements = len(genome)
mutations = args["pmut"] * elements
range_min = genome.getParam("rangemin", Consts.CDefRangeMin)
range_max = genome.getParam("rangemax", Consts.CDefRangeMax)
if mutations < 1.0:
mutations = 0
for i in xrange(len(genome)):
if Util.randomFlipCoin(args["pmut"]):
mutations += 1
rand_node = genome.getRandomNode()
random_real = rand_uniform(range_min, range_max)
rand_node.setData(random_real)
else:
for it in xrange(int(round(mutations))):
rand_node = genome.getRandomNode()
random_real = rand_uniform(range_min, range_max)
rand_node.setData(random_real)
return int(mutations)
def GTreeMutatorIntegerGaussian(genome, **args):
""" A gaussian mutator for GTree of Integers
Accepts the *rangemin* and *rangemax* genome parameters, both optional. Also
accepts the parameter *gauss_mu* and the *gauss_sigma* which respectively
represents the mean and the std. dev. of the random distribution.
"""
if args["pmut"] <= 0.0: return 0
elements = len(genome)
mutations = args["pmut"] * elements
mu = genome.getParam("gauss_mu", Consts.CDefG1DListMutIntMU)
sigma = genome.getParam("gauss_sigma", Consts.CDefG1DListMutIntSIGMA)
if mutations < 1.0:
mutations = 0
for i in xrange(len(genome)):
if Util.randomFlipCoin(args["pmut"]):
mutations += 1
rand_node = genome.getRandomNode()
final_value = rand_node.getData() + int(rand_gauss(mu, sigma))
final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
rand_node.setData(final_value)
else:
for it in xrange(int(round(mutations))):
rand_node = genome.getRandomNode()
final_value = rand_node.getData() + int(rand_gauss(mu, sigma))
final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
rand_node.setData(final_value)
return int(mutations)
def GTreeMutatorRealGaussian(genome, **args):
""" A gaussian mutator for GTree of Real numbers
Accepts the *rangemin* and *rangemax* genome parameters, both optional. Also
accepts the parameter *gauss_mu* and the *gauss_sigma* which respectively
represents the mean and the std. dev. of the random distribution.
"""
if args["pmut"] <= 0.0: return 0
elements = len(genome)
mutations = args["pmut"] * elements
mu = genome.getParam("gauss_mu", Consts.CDefG1DListMutRealMU)
sigma = genome.getParam("gauss_sigma", Consts.CDefG1DListMutRealSIGMA)
if mutations < 1.0:
mutations = 0
for i in xrange(len(genome)):
if Util.randomFlipCoin(args["pmut"]):
mutations += 1
rand_node = genome.getRandomNode()
final_value = rand_node.getData() + rand_gauss(mu, sigma)
final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
rand_node.setData(final_value)
else:
for it in xrange(int(round(mutations))):
rand_node = genome.getRandomNode()
final_value = rand_node.getData() + rand_gauss(mu, sigma)
final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
rand_node.setData(final_value)
return int(mutations)
###################
## Tree GP ##
###################
def GTreeGPMutatorOperation(genome, **args):
""" The mutator of GTreeGP, Operation Mutator
.. versionadded:: 0.6
The *GTreeGPMutatorOperation* function
"""
if args["pmut"] <= 0.0: return 0
elements = len(genome)
mutations = args["pmut"] * elements
ga_engine = args["ga_engine"]
gp_terminals = ga_engine.getParam("gp_terminals")
assert gp_terminals is not None
gp_function_set = ga_engine.getParam("gp_function_set")
assert gp_function_set is not None
if mutations < 1.0:
mutations = 0
for i in xrange(len(genome)):
if Util.randomFlipCoin(args["pmut"]):
mutations += 1
rand_node = genome.getRandomNode()
assert rand_node is not None
if rand_node.getType() == Consts.nodeType["TERMINAL"]:
term_operator = rand_choice(gp_terminals)
else:
op_len = gp_function_set[rand_node.getData()]
fun_candidates = []
for o, l in gp_function_set.items():
if l==op_len:
fun_candidates.append(o)
if len(fun_candidates) <= 0:
continue
term_operator = rand_choice(fun_candidates)
rand_node.setData(term_operator)
else:
for it in xrange(int(round(mutations))):
rand_node = genome.getRandomNode()
assert rand_node is not None
if rand_node.getType() == Consts.nodeType["TERMINAL"]:
term_operator = rand_choice(gp_terminals)
else:
op_len = gp_function_set[rand_node.getData()]
fun_candidates = []
for o, l in gp_function_set.items():
if l==op_len:
fun_candidates.append(o)
if len(fun_candidates) <= 0:
continue
term_operator = rand_choice(fun_candidates)
rand_node.setData(term_operator)
return int(mutations)
def GTreeGPMutatorSubtree(genome, **args):
""" The mutator of GTreeGP, Subtree Mutator
This mutator will recreate a random subtree of the tree using the grow algorithm.
.. versionadded:: 0.6
The *GTreeGPMutatorSubtree* function
"""
if args["pmut"] <= 0.0: return 0
ga_engine = args["ga_engine"]
max_depth = genome.getParam("max_depth", None)
mutations = 0
if max_depth is None:
Util.raiseException("You must specify the max_depth genome parameter !", ValueError)
if max_depth < 0:
Util.raiseException("The max_depth must be >= 1, if you want to use the GTreeGPMutatorSubtree mutator !", ValueError)
branch_list = genome.nodes_branch
elements = len(branch_list)
for i in xrange(elements):
node = branch_list[i]
assert node is not None
if Util.randomFlipCoin(args["pmut"]):
depth = genome.getNodeDepth(node)
mutations += 1
root_subtree = GTree.buildGTreeGPGrow(ga_engine, 0, max_depth-depth)
node_parent = node.getParent()
if node_parent is None:
genome.setRoot(root_subtree)
genome.processNodes()
return mutations
else:
root_subtree.setParent(node_parent)
node_parent.replaceChild(node, root_subtree)
genome.processNodes()
return int(mutations)
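All of the mutators above share one control-flow pattern: the expected number of mutations is pmut * elements, and when that product falls below 1.0 the code switches to an independent coin flip per element instead of drawing a fixed number of random positions. A minimal standalone sketch of that rule (an illustrative helper, not part of the original module):

import random

def expected_mutation_count(pmut, elements):
    # Same strategy switch used by the mutators above.
    mutations = pmut * elements
    if mutations < 1.0:
        # Low rate: one Bernoulli trial per element (binomial count).
        return sum(1 for _ in range(elements) if random.random() < pmut)
    # High rate: a fixed number of randomly placed mutations.
    return int(round(mutations))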
|
tapomayukh/projects_in_python
|
sandbox_tapo/src/AI/Genetic Algorithm/Mutators.py
|
Python
|
mit
| 28,614
|
[
"Gaussian"
] |
29ebc2cb23de011a19ca17f4be15126e850e2f3d5de793f33b5c8ac7358f5fba
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Eficent (<http://www.eficent.com/>)
# <contact@eficent.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Operating Unit in Purchase Requisitions",
"version": "1.0",
"author": "Eficent",
"website": "http://www.eficent.com",
"category": "Purchase Management",
"depends": ["purchase_requisition",
"purchase_operating_unit"],
"description": """
Operating Unit in Purchase Requisitions
=======================================
This module extends the Purchase Requisition capabilities of Odoo by
introducing the operating unit to the purchase requisition.
Security rules are defined to ensure that users can only display the
purchase requisitions to which they have been granted access.
Installation
============
No additional installation instructions are required.
Configuration
=============
This module does not require any additional configuration.
Usage
=====
When a user creates a new purchase requisition, the system proposes the
user's default operating unit.
The operating unit is a required field.
When the user creates a purchase order (PO) from the purchase requisition the
operating unit is copied to the PO.
Known issues / Roadmap
======================
No issue has been identified.
Credits
=======
Contributors
------------
* Jordi Ballester <jordi.ballester@eficent.com>
Maintainer
----------
.. image:: http://odoo-community.org/logo.png
:alt: Odoo Community Association
:target: http://odoo-community.org
This module is maintained by the OCA.
OCA, or the Odoo Community Association, is a nonprofit organization whose
mission is to support the collaborative development of Odoo features and
promote its widespread use.
To contribute to this module, please visit http://odoo-community.org.
""",
"init_xml": [],
"update_xml": [
"view/purchase_requisition.xml",
"security/purchase_security.xml",
],
'demo_xml': [
],
'test': [
],
'installable': True,
'active': False,
'certificate': '',
}
|
Eficent/odoo-operating-unit
|
purchase_requisition_operating_unit/__openerp__.py
|
Python
|
agpl-3.0
| 2,941
|
[
"VisIt"
] |
607fed54966f7426ec36788c3983555d7cbc1a945a569cd4b13370bb4567534b
|
#! A test of the basis specification. A benzene molecule is defined using a ZMatrix containing dummy atoms
#! and various basis sets are assigned to different atoms. The symmetry of the molecule is automatically
#! lowered to account for the different basis sets.
import psi4
psi4.set_output_file("output.dat", False)
bz = psi4.geometry("""
X
X 1 RXX
X 2 RXX 1 90.0
C 3 RCC 2 90.0 1 0.0
C 3 RCC 2 90.0 1 60.0
C1 3 RCC 2 90.0 1 120.0
C 3 RCC 2 90.0 1 180.0
C1 3 RCC 2 90.0 1 240.0
C 3 RCC 2 90.0 1 300.0 # unnecessary comment
H1 3 RCH 2 90.0 1 0.0
H 3 RCH 2 90.0 1 60.0
H 3 RCH 2 90.0 1 120.0
H1 3 RCH 2 90.0 1 180.0
H 3 RCH 2 90.0 1 240.0
H 3 RCH 2 90.0 1 300.0
RCC = 1.3915
RCH = 2.4715
RXX = 1.00
""")
# Here we specify some of the basis sets manually. They could be written to one or more external
# files and included by adding the directory to the PSIPATH environment variable.
#
# The format of these external files follows the same format as those below, where there's a [name]
# tag before the standard G94 basis set specification:
# [DZ]
# spherical
# ****
# H 0
# S 3 1.00
# 19.2406000 0.0328280
# 2.8992000 0.2312080
# 0.6534000 0.8172380
# S 1 1.00
# 0.1776000 1.0000000
# ****
# C 0
# definition of carbon atom DZ basis...
# ****
# Any more atoms needed...
# ****
# The keywords cartesian or spherical are optional and provide default behavior if the
# puream keyword is not set. In basis strings, like below, multiple basis sets can appear, as long
# as there is a [name] tag above the definition of each basis set. Basis sets specified inline
# via basis <opt_name> {...} blocks are used first (in the order they appear in the input
# file); any remaining basis sets required are taken from the built-in library, if they
# exist, otherwise an error message is printed.
psi4.basis_helper("""
#
# We start by assigning basis sets to atoms. These commands can go anywhere in the basis block
#
# First, assign DZ to all atoms
assign DZ
# Now, assign 3-21G to all carbon atoms
assign C my3-21G
# The two atoms labelled H1 get a STO-3G basis, too
assign H1 sto-3g
# Carbons 3 and 5 get a STO-3G basis, too
assign C1 sto-3g
# With all these in place, the symmetry is lowered to C2v automatically
# The commands are applied in order, i.e., adding a line like
# assign cc-pvtz
# here would override all of the above and assign cc-pvtz to all atoms
#
# Now we define the basis sets. N.B. Indentation does not matter; it just looks prettier.
#
[my3-21G] #This is really the standard 3-21G basis, but with a different name
cartesian
****
H 0
S 2 1.00
5.4471780 0.1562850
0.8245470 0.9046910
S 1 1.00
0.1831920 1.0000000
****
C 0
S 3 1.00
172.2560000 0.0617669
25.9109000 0.3587940
5.5333500 0.7007130
SP 2 1.00
3.6649800 -0.3958970 0.2364600
0.7705450 1.2158400 0.8606190
SP 1 1.00
0.1958570 1.0000000 1.0000000
****
[DZ]
spherical
****
H 0
S 3 1.00
19.2406000 0.0328280
2.8992000 0.2312080
0.6534000 0.8172380
S 1 1.00
0.1776000 1.0000000
****
""")
psi4.set_options({
'd_convergence': 11,
'e_convergence': 11,
'scf_type': 'pk'})
scfenergy = psi4.energy('scf')
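As a hedged follow-up, psi4.compare_values() can assert the computed energy against a trusted reference; the value below is only a placeholder, not a benchmark from this sample, so the call is left commented out.

# Sanity-check sketch (placeholder reference value):
# psi4.compare_values(-230.0, scfenergy, 6, 'mixed-basis SCF energy')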
|
ashutoshvt/psi4
|
samples/python/mints2/input.py
|
Python
|
lgpl-3.0
| 3,880
|
[
"Psi4"
] |
de1cfbcaed1e9287a2ab215ae8cba54eafede98fc2ccf2bd27cd8ecd71274ed0
|
from pele.storage.database import Database, Minimum, TransitionState
import time
import numpy as np
from simtk.openmm.app import AmberPrmtopFile
""" creates a sqlite database from min.data, ts.data, extractedmin, extractedts
Input:
coords.prmtop ( for number of atoms )
min.data, ts.data, extractedmin, extractedts
Output:
storage.sqlite
"""
# determine number of atoms from prmtop
prmtop = AmberPrmtopFile( 'coords.prmtop' ) # TOSET
natoms = prmtop.topology._numAtoms
# open database
db = Database(db="storage.sqlite") # TOSET
def read_coords(filee):
coords = np.zeros(3*natoms)
for i in xrange(natoms):
x = filee.readline().split()
coords[i*3:i*3+3] = [float(y) for y in x]
return coords
# counter to keep track of added minima
mini=1
minima={}
# for timing
tt = t0 = time.time()
# open coordinate file
fcoords = open("extractedmin") # TOSET
print "Reading minima"
# loop over all lines in min
for line in open("min.data"): # TOSET
coords = read_coords(fcoords)
energy, frequency, pgorder, itx, ity, itz = line.split()
min1 = Minimum(float(energy), coords)
db.session.add(min1)
minima[mini]=min1
if(mini%10000 == 0):
print "committing the next 10000 minima, %d in total"%(mini)
db.session.commit()
mini+=1
print "%.1f seconds"%(time.time() - tt)
tt = time.time()
print "Committing changes to database"
db.session.commit()
print "%.1f seconds"%(time.time() - tt)
tt = time.time()
print "Reading transition states"
fcoords = open("extractedts") # TOSET
tsi=1
for line in open("ts.data"): # TOSET
coords = read_coords(fcoords)
energy, frequency, pgorder, min1, min2, itx, ity, itz = line.split()
ts = TransitionState(float(energy), coords, minima[int(min1)], minima[int(min2)])
db.session.add(ts)
#db.addTransitionState(float(energy), None, minima[int(min1)], minima[int(min2)], commit=False)
if(tsi%10000 == 0):
print "committing the next 10000 transition states, %d in total"%(tsi)
db.session.commit()
tsi+=1
print "%.1f seconds"%(time.time() - tt)
tt = time.time()
print "Committing changes to database"
db.session.commit()
print "%.1f seconds"%(time.time() - tt)
print "Done after %.1f seconds"%(time.time() - t0)
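A short usage sketch for the resulting file, relying only on the pele Database API imported above (kept minimal on purpose):

# Reopen the freshly written database and query it.
db_check = Database(db="storage.sqlite")
print "minima in database:", len(db_check.minima())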
|
kjs73/pele
|
playground/amber/aladipep/optim2sqlite.py
|
Python
|
gpl-3.0
| 2,319
|
[
"OpenMM"
] |
754467a25e233e7c0f9797c8af241f77a92c072ae3b85343ec1a3842e837c606
|
""" This file contains custom checkers for pylint.
"""
from typing import List
import astroid.node_classes
import astroid.nodes
from pylint.checkers import BaseChecker
from pylint.interfaces import IAstroidChecker
from pylint.lint.pylinter import PyLinter
class NoFromImportChecker(BaseChecker):
"""Checks that 'from ... import ...' isn't used for any modules in the stp
package.
"""
__implements__ = IAstroidChecker
DISPLAYED_MSG = (
"Don't use 'from ... import ...' for any modules from the stp package."
)
MSG_ID = "stp-from-import"
MESSAGE_HELP = (
"'from ... import ...' shouldn't be used for importing any "
"modules / symbols from the stp package as it breaks hot reloading."
)
name = "stp-from-import"
priority = -100
msgs = {"C8001": (DISPLAYED_MSG, MSG_ID, MESSAGE_HELP)}
def __init__(self, linter: PyLinter):
super().__init__(linter)
self.is_testing_module: List[bool] = []
def visit_module(self, node: astroid.nodes.Module) -> None:
"""Visit method for astroid.nodes.Module."""
is_testing_module = "test" in node.name
self.is_testing_module.append(is_testing_module)
def leave_module(self, node: astroid.nodes.Module) -> None:
"""Leave method for astroid.nodes.Module."""
self.is_testing_module.pop()
def visit_importfrom(self, node: astroid.nodes.ImportFrom) -> None:
"""Visit method for astroid.nodes.ImportFrom."""
if node.modname and node.modname == "stp":
if not self.__is_testing_module():
self.add_message(
NoFromImportChecker.MSG_ID,
node=node,
)
def __is_testing_module(self) -> bool:
"""Returns whether the current module is for tests.
:return: True if the current module is for tests, False otherwise.
"""
return any(self.is_testing_module)
def register(linter: PyLinter):
"""Registers the linter for pylint."""
linter.register_checker(NoFromImportChecker(linter))
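A sketch of how the checker is put to use; the module path stp.pylint_stp follows from this file's location, and the offending import line is illustrative:

# Load the plugin in pylintrc (or via --load-plugins on the command line):
#   [MASTER]
#   load-plugins=stp.pylint_stp
#
# A non-test module containing the line below would then be flagged with
# C8001 (stp-from-import):
#   from stp import rc  # illustrative import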
|
RoboJackets/robocup-software
|
rj_gameplay/stp/pylint_stp.py
|
Python
|
apache-2.0
| 2,080
|
[
"VisIt"
] |
478126b720c89ba58f76423559cca1e3a7e72d0f2011e60748a14e1293009acd
|
""" DISET request handler base class for the ProductionDB.
"""
import six
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.Utilities.DEncode import ignoreEncodeWarning
from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
prodTypes = [six.string_types, int]
transTypes = [six.string_types, int, list]
class ProductionManagerHandler(RequestHandler):
@classmethod
def initializeHandler(cls, serviceInfoDict):
"""Initialization of DB object"""
try:
result = ObjectLoader().loadObject("ProductionSystem.DB.ProductionDB", "ProductionDB")
if not result["OK"]:
return result
cls.productionDB = result["Value"]()
except RuntimeError as excp:
return S_ERROR("Can't connect to DB: %s" % excp)
return S_OK()
####################################################################
#
# These are the methods to manipulate the Productions table
#
types_addProduction = [six.string_types, six.string_types]
def export_addProduction(self, prodName, prodDescription):
credDict = self.getRemoteCredentials()
authorDN = credDict.get("DN", credDict.get("CN"))
authorGroup = credDict.get("group")
res = self.productionDB.addProduction(prodName, prodDescription, authorDN, authorGroup)
if res["OK"]:
gLogger.info("Added production %d" % res["Value"])
return res
types_deleteProduction = [prodTypes]
def export_deleteProduction(self, prodName):
credDict = self.getRemoteCredentials()
authorDN = credDict.get("DN", credDict.get("CN"))
return self.productionDB.deleteProduction(prodName, author=authorDN)
types_getProductions = []
@classmethod
def export_getProductions(
cls,
condDict=None,
older=None,
newer=None,
timeStamp="CreationDate",
orderAttribute=None,
limit=None,
offset=None,
):
if not condDict:
condDict = {}
return cls.productionDB.getProductions(
condDict=condDict,
older=older,
newer=newer,
timeStamp=timeStamp,
orderAttribute=orderAttribute,
limit=limit,
offset=offset,
)
types_getProduction = [prodTypes]
@classmethod
def export_getProduction(cls, prodName):
return cls.productionDB.getProduction(prodName)
types_getProductionParameters = [prodTypes, [six.string_types, list, tuple]]
@classmethod
def export_getProductionParameters(cls, prodName, parameters):
return cls.productionDB.getProductionParameters(prodName, parameters)
types_setProductionStatus = [prodTypes, six.string_types]
@classmethod
def export_setProductionStatus(cls, prodName, status):
return cls.productionDB.setProductionStatus(prodName, status)
types_startProduction = [prodTypes]
@classmethod
@ignoreEncodeWarning
def export_startProduction(cls, prodName):
return cls.productionDB.startProduction(prodName)
####################################################################
#
# These are the methods to manipulate the ProductionTransformations table
#
types_addTransformationsToProduction = [prodTypes, transTypes, transTypes]
@classmethod
def export_addTransformationsToProduction(cls, prodName, transIDs, parentTransIDs):
return cls.productionDB.addTransformationsToProduction(prodName, transIDs, parentTransIDs=parentTransIDs)
types_getProductionTransformations = []
@classmethod
def export_getProductionTransformations(
cls,
prodName,
condDict=None,
older=None,
newer=None,
timeStamp="CreationTime",
orderAttribute=None,
limit=None,
offset=None,
):
if not condDict:
condDict = {}
return cls.productionDB.getProductionTransformations(
prodName,
condDict=condDict,
older=older,
newer=newer,
timeStamp=timeStamp,
orderAttribute=orderAttribute,
limit=limit,
offset=offset,
)
####################################################################
#
# These are the methods to manipulate the ProductionSteps table
#
types_addProductionStep = [dict]
@classmethod
def export_addProductionStep(cls, prodStep):
stepName = prodStep["name"]
stepDescription = prodStep["description"]
stepLongDescription = prodStep["longDescription"]
stepBody = prodStep["body"]
stepType = prodStep["stepType"]
stepPlugin = prodStep["plugin"]
stepAgentType = prodStep["agentType"]
stepGroupSize = prodStep["groupsize"]
stepInputQuery = prodStep["inputquery"]
stepOutputQuery = prodStep["outputquery"]
res = cls.productionDB.addProductionStep(
stepName,
stepDescription,
stepLongDescription,
stepBody,
stepType,
stepPlugin,
stepAgentType,
stepGroupSize,
stepInputQuery,
stepOutputQuery,
)
if res["OK"]:
gLogger.info("Added production step %d" % res["Value"])
return res
types_getProductionStep = [int]
@classmethod
def export_getProductionStep(cls, stepID):
return cls.productionDB.getProductionStep(stepID)
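A client-side sketch, assuming DIRAC's generic RPC Client; the exported methods above are reachable without the export_ prefix, and the condition dictionary is illustrative:

# from DIRAC.Core.Base.Client import Client
# prodManager = Client(url="ProductionSystem/ProductionManager")
# res = prodManager.getProductions(condDict={"Status": "Active"})
# if res["OK"]:
#     for prod in res["Value"]:
#         print(prod)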
|
DIRACGrid/DIRAC
|
src/DIRAC/ProductionSystem/Service/ProductionManagerHandler.py
|
Python
|
gpl-3.0
| 5,600
|
[
"DIRAC"
] |
86535bdabe4c238b96eb9bbc20cd438486bef34818dbabe9a57232344026b18b
|
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
"""Camelot includes editors for various types of fields. Each editor at least supports
these features :
* a set_value method to set a python type as the editor's value
* a get_value method to retrieve a python type from the editor
* the ValueLoading state : an editor has as its value ValueLoading upon construction and
the editor's value can be set to ValueLoading if the value that should be displayed is
not yet available in the GUI thread, but is still on its way from the model to the GUI.
This means that once set_value( ValueLoading ) is called, get_value() will always return
ValueLoading until set_value is called with another argument.
"""
from booleditor import BoolEditor, TextBoolEditor
from charteditor import ChartEditor
from choiceseditor import ChoicesEditor
from codeeditor import CodeEditor
from coloredfloateditor import ColoredFloatEditor
from coloreditor import ColorEditor
from customeditor import CustomEditor
from dateeditor import DateEditor
from datetimeeditor import DateTimeEditor
from fileeditor import FileEditor
from floateditor import FloatEditor
from imageeditor import ImageEditor
from integereditor import IntegerEditor
from languageeditor import LanguageEditor
from localfileeditor import LocalFileEditor
from many2oneeditor import Many2OneEditor
from one2manyeditor import One2ManyEditor
from onetomanychoiceseditor import OneToManyChoicesEditor
from richtexteditor import RichTextEditor
from stareditor import StarEditor
from textlineeditor import TextLineEditor
from timeeditor import TimeEditor
from virtualaddresseditor import VirtualAddressEditor
from smileyeditor import SmileyEditor
from textediteditor import TextEditEditor
from wideeditor import WideEditor
from noteeditor import NoteEditor
from labeleditor import LabelEditor
from monthseditor import MonthsEditor
__all__ = [
BoolEditor.__name__,
ChartEditor.__name__,
ChoicesEditor.__name__,
CodeEditor.__name__,
ColoredFloatEditor.__name__,
ColorEditor.__name__,
CustomEditor.__name__,
DateEditor.__name__,
DateTimeEditor.__name__,
FileEditor.__name__,
FloatEditor.__name__,
ImageEditor.__name__,
IntegerEditor.__name__,
LabelEditor.__name__,
LanguageEditor.__name__,
LocalFileEditor.__name__,
Many2OneEditor.__name__,
MonthsEditor.__name__,
NoteEditor.__name__,
One2ManyEditor.__name__,
OneToManyChoicesEditor.__name__,
RichTextEditor.__name__,
StarEditor.__name__,
TextLineEditor.__name__,
TimeEditor.__name__,
VirtualAddressEditor.__name__,
SmileyEditor.__name__,
TextBoolEditor.__name__,
TextEditEditor.__name__,
WideEditor.__name__,
]
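A sketch of the ValueLoading contract described in the module docstring; the import path and editor constructor are assumptions for illustration only, so the lines stay commented:

# from camelot.view.proxy import ValueLoading  # assumed location
# editor = TextLineEditor(parent=None)
# editor.set_value(ValueLoading)
# assert editor.get_value() is ValueLoading   # stays ValueLoading...
# editor.set_value(u'hello')
# assert editor.get_value() == u'hello'       # ...until a real value arrives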
|
jeroendierckx/Camelot
|
camelot/view/controls/editors/__init__.py
|
Python
|
gpl-2.0
| 3,742
|
[
"VisIt"
] |
fc30baf50ce1289fbf67d532c05f50aa5e590220d4ea038fcc593963d5a96c1b
|
#!/usr/bin/env python
"""
Script that facilitates the modification of a element through the command line.
However, the usage of this script will set the element token to the command
issuer with a duration of 1 day.
"""
from datetime import datetime, timedelta
from DIRAC import gLogger, exit as DIRACExit, S_OK, version
from DIRAC.Core.Base.Script import Script
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ResourceStatusSystem.Client import ResourceStatusClient
from DIRAC.ResourceStatusSystem.PolicySystem import StateMachine
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
subLogger = None
def registerSwitches():
"""
Registers all switches that can be used while calling the script from the
command line interface.
"""
switches = (
("element=", "Element family to be Synchronized ( Site, Resource or Node )"),
("name=", "Name (or comma-separated list of names) of the element where the change applies"),
("statusType=", "StatusType (or comma-separated list of names), if none applies to all possible statusTypes"),
("status=", "Status to be changed"),
("reason=", "Reason to set the Status"),
("VO=", "VO to change a status for. When omitted, status will be changed for all VOs"),
)
for switch in switches:
Script.registerSwitch("", switch[0], switch[1])
def registerUsageMessage():
"""
Takes the script __doc__ and adds the DIRAC version to it
"""
usageMessage = " DIRAC %s\n" % version
usageMessage += __doc__
Script.setUsageMessage(usageMessage)
def parseSwitches():
"""
Parses the arguments passed by the user
"""
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()
if args:
subLogger.error("Found the following positional args '%s', but we only accept switches" % args)
subLogger.error("Please, check documentation below")
Script.showHelp(exitCode=1)
switches = dict(Script.getUnprocessedSwitches())
switches.setdefault("statusType", None)
switches.setdefault("VO", None)
for key in ("element", "name", "status", "reason"):
if key not in switches:
subLogger.error("%s Switch missing" % key)
subLogger.error("Please, check documentation below")
Script.showHelp(exitCode=1)
if not switches["element"] in ("Site", "Resource", "Node"):
subLogger.error("Found %s as element switch" % switches["element"])
subLogger.error("Please, check documentation below")
Script.showHelp(exitCode=1)
statuses = StateMachine.RSSMachine(None).getStates()
if not switches["status"] in statuses:
subLogger.error("Found %s as status switch" % switches["status"])
subLogger.error("Please, check documentation below")
Script.showHelp(exitCode=1)
subLogger.debug("The switches used are:")
for switchName, switchValue in switches.items():
    subLogger.debug("%s: %s" % (switchName, switchValue))
return switches
def checkStatusTypes(statusTypes):
"""
To check if values for 'statusType' are valid
"""
opsH = Operations().getValue("ResourceStatus/Config/StatusTypes/StorageElement")
acceptableStatusTypes = opsH.replace(",", "").split()
for statusType in statusTypes:
if statusType not in acceptableStatusTypes and statusType != "all":
acceptableStatusTypes.append("all")
subLogger.error(
"'%s' is a wrong value for switch 'statusType'.\n\tThe acceptable values are:\n\t%s"
% (statusType, str(acceptableStatusTypes))
)
if "all" in statusTypes:
return acceptableStatusTypes
return statusTypes
def unpack(switchDict):
"""
To split and process comma-separated list of values for 'name' and 'statusType'
"""
switchDictSet = []
names = []
statusTypes = []
if switchDict["name"] is not None:
names = list(filter(None, switchDict["name"].split(",")))
if switchDict["statusType"] is not None:
statusTypes = list(filter(None, switchDict["statusType"].split(",")))
statusTypes = checkStatusTypes(statusTypes)
if len(names) > 0 and len(statusTypes) > 0:
combinations = [(a, b) for a in names for b in statusTypes]
for combination in combinations:
n, s = combination
switchDictClone = switchDict.copy()
switchDictClone["name"] = n
switchDictClone["statusType"] = s
switchDictSet.append(switchDictClone)
elif len(names) > 0 and len(statusTypes) == 0:
for name in names:
switchDictClone = switchDict.copy()
switchDictClone["name"] = name
switchDictSet.append(switchDictClone)
elif len(names) == 0 and len(statusTypes) > 0:
for statusType in statusTypes:
switchDictClone = switchDict.copy()
switchDictClone["statusType"] = statusType
switchDictSet.append(switchDictClone)
elif len(names) == 0 and len(statusTypes) == 0:
switchDictClone = switchDict.copy()
switchDictClone["name"] = None
switchDictClone["statusType"] = None
switchDictSet.append(switchDictClone)
return switchDictSet
def getTokenOwner():
"""
Function that gets the userName from the proxy
"""
proxyInfo = getProxyInfo()
if not proxyInfo["OK"]:
return proxyInfo
userName = proxyInfo["Value"]["username"]
return S_OK(userName)
def setStatus(switchDict, tokenOwner):
"""
Function that gets the user token, sets the validity for it. Gets the elements
in the database for a given name and statusType(s). Then updates the status
of all them adding a reason and the token.
"""
rssClient = ResourceStatusClient.ResourceStatusClient()
elements = rssClient.selectStatusElement(
switchDict["element"],
"Status",
name=switchDict["name"],
statusType=switchDict["statusType"],
vO=switchDict["VO"],
meta={"columns": ["Status", "StatusType"]},
)
if not elements["OK"]:
return elements
elements = elements["Value"]
if not elements:
subLogger.warn(
"Nothing found for %s, %s, %s %s"
% (switchDict["element"], switchDict["name"], switchDict["VO"], switchDict["statusType"])
)
return S_OK()
tomorrow = datetime.utcnow().replace(microsecond=0) + timedelta(days=1)
for status, statusType in elements:
subLogger.debug("%s %s" % (status, statusType))
if switchDict["status"] == status:
subLogger.notice("Status for %s (%s) is already %s. Ignoring.." % (switchDict["name"], statusType, status))
continue
subLogger.debug(
"About to set status %s -> %s for %s, statusType: %s, VO: %s, reason: %s"
% (status, switchDict["status"], switchDict["name"], statusType, switchDict["VO"], switchDict["reason"])
)
result = rssClient.modifyStatusElement(
switchDict["element"],
"Status",
name=switchDict["name"],
statusType=statusType,
status=switchDict["status"],
reason=switchDict["reason"],
vO=switchDict["VO"],
tokenOwner=tokenOwner,
tokenExpiration=tomorrow,
)
if not result["OK"]:
return result
return S_OK()
def run(switchDict):
"""
Main function of the script
"""
tokenOwner = getTokenOwner()
if not tokenOwner["OK"]:
subLogger.error(tokenOwner["Message"])
DIRACExit(1)
tokenOwner = tokenOwner["Value"]
subLogger.notice("TokenOwner is %s" % tokenOwner)
result = setStatus(switchDict, tokenOwner)
if not result["OK"]:
subLogger.error(result["Message"])
DIRACExit(1)
@Script()
def main():
global subLogger
global registerUsageMessage
subLogger = gLogger.getSubLogger(__file__)
# Script initialization
registerSwitches()
registerUsageMessage()
switchDict = parseSwitches()
switchDictSets = unpack(switchDict)
# Run script
for switchDict in switchDictSets:
run(switchDict)
# Bye
DIRACExit(0)
if __name__ == "__main__":
main()
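An invocation sketch assembled from the switches registered above (the element, names, status type and reason are illustrative); comma-separated lists are expanded by unpack() into one run per combination:

#   dirac-rss-set-status --element=Resource --name=SE1,SE2 \
#       --statusType=ReadAccess --status=Banned --reason="Disk maintenance"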
|
DIRACGrid/DIRAC
|
src/DIRAC/ResourceStatusSystem/scripts/dirac_rss_set_status.py
|
Python
|
gpl-3.0
| 8,316
|
[
"DIRAC"
] |
ea2dca519b8b298bc538ac666e269078dace5b90e8949bcfceedf0904a4beaa4
|
import re
import os
import six
class Compiler(object):
RE_INTERPOLATE = re.compile(r'(\\)?([#!]){(.*?)}')
doctypes = {
'5': '<!DOCTYPE html>'
, 'xml': '<?xml version="1.0" encoding="utf-8" ?>'
, 'default': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
, 'transitional': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
, 'strict': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">'
, 'frameset': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">'
, '1.1': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">'
, 'basic': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML Basic 1.1//EN" "http://www.w3.org/TR/xhtml-basic/xhtml-basic11.dtd">'
, 'mobile': '<!DOCTYPE html PUBLIC "-//WAPFORUM//DTD XHTML Mobile 1.2//EN" "http://www.openmobilealliance.org/tech/DTD/xhtml-mobile12.dtd">'
}
inlineTags = [
'a'
, 'abbr'
, 'acronym'
, 'b'
, 'br'
, 'code'
, 'em'
, 'font'
, 'i'
, 'img'
, 'ins'
, 'kbd'
, 'map'
, 'samp'
, 'small'
, 'span'
, 'strong'
, 'sub'
, 'sup'
, 'textarea'
]
selfClosing = [
'meta'
, 'img'
, 'link'
, 'input'
, 'area'
, 'base'
, 'col'
, 'br'
, 'hr'
]
autocloseCode = 'if,for,block,filter,autoescape,with,trans,spaceless,comment,cache,macro,localize,compress,raw'.split(',')
filters = {}
def __init__(self, node, **options):
self.options = options
self.node = node
self.hasCompiledDoctype = False
self.hasCompiledTag = False
self.pp = options.get('pretty', True)
self.debug = options.get('compileDebug', False) is not False
self.filters.update(options.get('filters', {}))
self.doctypes.update(options.get('doctypes', {}))
# self.var_processor = options.get('var_processor', lambda x: x)
self.selfClosing.extend(options.get('selfClosing', []))
self.autocloseCode.extend(options.get('autocloseCode', []))
self.inlineTags.extend(options.get('inlineTags', []))
self.useRuntime = options.get('useRuntime', True)
self.extension = options.get('extension', None) or '.jade'
self.indents = 0
self.doctype = None
self.terse = False
self.xml = False
self.mixing = 0
self.variable_start_string = options.get("variable_start_string", "{{")
self.variable_end_string = options.get("variable_end_string", "}}")
if 'doctype' in self.options: self.setDoctype(options['doctype'])
self.instring = False
def var_processor(self, var):
if isinstance(var,six.string_types) and var.startswith('_ '):
var = '_("%s")'%var[2:]
return var
def compile_top(self):
return ''
def compile(self):
self.buf = [self.compile_top()]
self.lastBufferedIdx = -1
self.visit(self.node)
compiled = u''.join(self.buf)
if isinstance(compiled, six.binary_type):
compiled = six.text_type(compiled, 'utf8')
return compiled
def setDoctype(self, name):
self.doctype = self.doctypes.get(name or 'default',
'<!DOCTYPE %s>' % name)
self.terse = name in ['5','html']
self.xml = self.doctype.startswith('<?xml')
def buffer(self, str):
if self.lastBufferedIdx == len(self.buf):
self.lastBuffered += str
self.buf[self.lastBufferedIdx - 1] = self.lastBuffered
else:
self.buf.append(str)
self.lastBuffered = str;
self.lastBufferedIdx = len(self.buf)
def visit(self, node, *args, **kwargs):
# debug = self.debug
# if debug:
# self.buf.append('__jade.unshift({ lineno: %d, filename: %s });' % (node.line,('"%s"'%node.filename) if node.filename else '__jade[0].filename'));
# if node.debug==False and self.debug:
# self.buf.pop()
# self.buf.pop()
self.visitNode(node, *args, **kwargs)
# if debug: self.buf.append('__jade.shift();')
def visitNode (self, node, *args, **kwargs):
name = node.__class__.__name__
if self.instring and name != 'Tag':
self.buffer('\n')
self.instring = False
return getattr(self, 'visit%s' % name)(node, *args, **kwargs)
def visitLiteral(self, node):
self.buffer(node.str)
def visitBlock(self, block):
for node in block.nodes:
self.visit(node)
def visitCodeBlock(self, block):
self.buffer('{%% block %s %%}' % block.name)
if block.mode=='prepend':
self.buffer('%ssuper()%s' % (self.variable_start_string,
self.variable_end_string))
self.visitBlock(block)
if block.mode == 'append':
self.buffer('%ssuper()%s' % (self.variable_start_string,
self.variable_end_string))
self.buffer('{% endblock %}')
def visitDoctype(self,doctype=None):
if doctype and (doctype.val or not self.doctype):
self.setDoctype(doctype.val or 'default')
if self.doctype:
self.buffer(self.doctype)
self.hasCompiledDoctype = True
def visitMixin(self,mixin):
if mixin.block:
self.buffer('{%% macro %s(%s) %%}' % (mixin.name, mixin.args))
self.visitBlock(mixin.block)
self.buffer('{% endmacro %}')
else:
self.buffer('%s%s(%s)%s' % (self.variable_start_string, mixin.name,
mixin.args, self.variable_end_string))
def visitTag(self,tag):
self.indents += 1
name = tag.name
if not self.hasCompiledTag:
if not self.hasCompiledDoctype and 'html' == name:
self.visitDoctype()
self.hasCompiledTag = True
if self.pp and name not in self.inlineTags and not tag.inline:
self.buffer('\n' + ' ' * (self.indents - 1))
if name in self.inlineTags or tag.inline:
self.instring = False
closed = name in self.selfClosing and not self.xml
self.buffer('<%s' % name)
self.visitAttributes(tag.attrs)
self.buffer('/>' if not self.terse and closed else '>')
if not closed:
if tag.code: self.visitCode(tag.code)
if tag.text: self.buffer(self.interpolate(tag.text.nodes[0].lstrip()))
self.escape = 'pre' == tag.name
# empirically check if we only contain text
textOnly = tag.textOnly or not bool(len(tag.block.nodes))
self.instring = False
self.visit(tag.block)
if self.pp and not name in self.inlineTags and not textOnly:
self.buffer('\n' + ' ' * (self.indents-1))
self.buffer('</%s>' % name)
self.indents -= 1
def visitFilter(self,filter):
if filter.name not in self.filters:
if filter.isASTFilter:
raise Exception('unknown ast filter "%s"' % filter.name)
else:
raise Exception('unknown filter "%s"' % filter.name)
fn = self.filters.get(filter.name)
if filter.isASTFilter:
self.buf.append(fn(filter.block, self, filter.attrs))
else:
text = ''.join(filter.block.nodes)
text = self.interpolate(text)
filter.attrs = filter.attrs or {}
filter.attrs['filename'] = self.options.get('filename', None)
self.buffer(fn(text, filter.attrs))
def _interpolate(self, attr, repl):
return self.RE_INTERPOLATE.sub(lambda matchobj:repl(matchobj.group(3)),
attr)
def interpolate(self, text, escape=None):
def repl(matchobj):
if escape is None:
if matchobj.group(2) == '!':
filter_string = ''
else:
filter_string = '|escape'
elif escape is True:
filter_string = '|escape'
elif escape is False:
filter_string = ''
return self.variable_start_string + matchobj.group(3) + \
filter_string + self.variable_end_string
return self.RE_INTERPOLATE.sub(repl, text)
def visitText(self,text):
text = ''.join(text.nodes)
text = self.interpolate(text)
self.buffer(text)
if self.pp:
self.buffer('\n')
def visitString(self,text):
instring = not text.inline
text = ''.join(text.nodes)
text = self.interpolate(text)
self.buffer(text)
self.instring = instring
def visitComment(self,comment):
if not comment.buffer: return
if self.pp:
self.buffer('\n' + ' ' * (self.indents))
self.buffer('<!--%s-->' % comment.val)
def visitAssignment(self,assignment):
self.buffer('{%% set %s = %s %%}' % (assignment.name, assignment.val))
def format_path(self,path):
has_extension = os.path.basename(path).find('.') > -1
if not has_extension:
path += self.extension
return path
def visitExtends(self,node):
path = self.format_path(node.path)
self.buffer('{%% extends "%s" %%}' % (path))
def visitInclude(self,node):
path = self.format_path(node.path)
self.buffer('{%% include "%s" %%}' % (path))
def visitBlockComment(self, comment):
if not comment.buffer:
return
isConditional = comment.val.strip().startswith('if')
self.buffer('<!--[%s]>' % comment.val.strip() if isConditional else '<!--%s' % comment.val)
self.visit(comment.block)
self.buffer('<![endif]-->' if isConditional else '-->')
def visitConditional(self, conditional):
TYPE_CODE = {
'if': lambda x: 'if %s'%x,
'unless': lambda x: 'if not %s'%x,
'elif': lambda x: 'elif %s'%x,
'else': lambda x: 'else'
}
self.buf.append('{%% %s %%}' % TYPE_CODE[conditional.type](conditional.sentence))
if conditional.block:
self.visit(conditional.block)
for next in conditional.next:
self.visitConditional(next)
if conditional.type in ['if','unless']:
self.buf.append('{% endif %}')
def visitVar(self, var, escape=False):
var = self.var_processor(var)
return ('%s%s%s%s' % (self.variable_start_string, var,
'|escape' if escape else '', self.variable_end_string))
def visitCode(self,code):
if code.buffer:
val = code.val.lstrip()
self.buf.append(self.visitVar(val, code.escape))
else:
self.buf.append('{%% %s %%}' % code.val)
if code.block:
# if not code.buffer: self.buf.append('{')
self.visit(code.block)
# if not code.buffer: self.buf.append('}')
if not code.buffer:
codeTag = code.val.strip().split(' ', 1)[0]
if codeTag in self.autocloseCode:
self.buf.append('{%% end%s %%}' % codeTag)
def visitEach(self,each):
self.buf.append('{%% for %s in %s|__pyjade_iter:%d %%}' % (','.join(each.keys), each.obj, len(each.keys)))
self.visit(each.block)
self.buf.append('{% endfor %}')
def attributes(self,attrs):
return "%s__pyjade_attrs(%s)%s" % (self.variable_start_string, attrs, self.variable_end_string)
def visitDynamicAttributes(self, attrs):
buf, classes, params = [], [], {}
terse='terse=True' if self.terse else ''
for attr in attrs:
if attr['name'] == 'class':
classes.append('(%s)' % attr['val'])
else:
pair = "('%s',(%s))" % (attr['name'], attr['val'])
buf.append(pair)
if classes:
classes = " , ".join(classes)
buf.append("('class', (%s))" % classes)
buf = ', '.join(buf)
if self.terse: params['terse'] = 'True'
if buf: params['attrs'] = '[%s]' % buf
param_string = ', '.join(['%s=%s' % (n, v) for n, v in six.iteritems(params)])
if buf or terse:
self.buf.append(self.attributes(param_string))
def visitAttributes(self, attrs):
temp_attrs = []
for attr in attrs:
if (not self.useRuntime and not attr['name']=='class') or attr['static']: #
if temp_attrs:
self.visitDynamicAttributes(temp_attrs)
temp_attrs = []
n, v = attr['name'], attr['val']
if isinstance(v, six.string_types):
if self.useRuntime or attr['static']:
self.buf.append(' %s=%s' % (n, v))
else:
self.buf.append(' %s="%s"' % (n, self.visitVar(v)))
elif v is True:
if self.terse:
self.buf.append(' %s' % (n,))
else:
self.buf.append(' %s="%s"' % (n, n))
else:
temp_attrs.append(attr)
if temp_attrs: self.visitDynamicAttributes(temp_attrs)
@classmethod
def register_filter(cls, name, f):
cls.filters[name] = f
@classmethod
def register_autoclosecode(cls, name):
cls.autocloseCode.append(name)
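A small worked example of what Compiler.interpolate() produces with the default {{ }} delimiters (the inputs are illustrative): #{...} expressions get the escape filter, while !{...} passes through raw.

# interpolate('Hi #{user.name}!')  ->  'Hi {{user.name|escape}}!'
# interpolate('Raw !{html}')       ->  'Raw {{html}}'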
|
zeraien/pyjade
|
pyjade/compiler.py
|
Python
|
mit
| 13,936
|
[
"VisIt"
] |
27afe8092f2e34158baa003b33434924084102ab399e1191d4c1fc46adb4e924
|
#encoding:utf-8
from __future__ import division
from random import random, randint
from math import log, e, tan
'''
Various random generators, inspired from sources present in github.com/belangeo/pyo
'''
class MyRandoms:
def __init__(self):
self.RAND_MAX = 4294967295
self.PYO_RAND_SEED = randint(1, self.RAND_MAX)
self.x1 = None
self.x2 = None
self.funcs = {
"uniform" : self.__uniform,
"linearMin" : self.__linearMin,
"linearMax" : self.__linearMax,
"triangle" : self.__triangle,
"exponMin" : self.__exponMin,
"exponMax" : self.__exponMax,
"biExpon" : self.__biExpon,
"cauchy" : self.__cauchy,
"weibull" : self.__weibull,
"gaussian" : self.__gaussian,
"gaussian2" : self.__gaussian2,
"poisson" : self.__poisson,
"poisson2" : self.__poisson_2,
"walker" : self.__walker,
"loopseg" : self.__loopseg
}
def call(self, funcName, **kwargs):
argNum = len(kwargs.keys())
if argNum == 1:
self.x1 = kwargs['x1']
elif argNum == 2:
self.x1, self.x2 = kwargs['x1'], kwargs['x2']
return self.funcs[funcName]()
def addFunc(self,funcName, funcPtr):
if funcName in self.funcs.keys():
raise ValueError("Function name already taken.")
else:
self.funcs[funcName] = funcPtr
def __pyorand(self):
self.PYO_RAND_SEED = (self.PYO_RAND_SEED * 1664525 + 1013904223) % self.RAND_MAX
return self.PYO_RAND_SEED
def __checkZero(self, val):
if val <= 0 or val == None:
return 0.0001
return val
def __normalize(self, val):
#this doesn't sound right
if val < 0:
return 0
if val > 1:
return 1
return val
def __uniform(self):
return random()
def __linearMin(self):
a = random()
b = random()
return a if a < b else b
def __linearMax(self):
a = random()
b = random()
return b if a < b else a
def __triangle(self):
a = random()
b = random()
return ((a+b)*0.5)
def __exponMin(self):
'''
expon_min
self.x1: slope {0 = no slope -> 10 = sharp slope}
'''
self.x1 = self.__checkZero(self.x1)
val = -log(random())/self.x1
return self.__normalize(val)
def __exponMax(self):
'''
expon_max
self.x1: slope {0 = no slope -> 10 = sharp slope}
'''
self.x1 = self.__checkZero(self.x1)
val = 1.0-(-log(random())/self.x1)
return self.__normalize(val)
def __biExpon(self):
'''
biexpon
self.x1: bandwidth {0 = huge bandwidth -> 10 = narrow bandwidth}
'''
polar = 0
val = 0
self.x1 = self.__checkZero(self.x1)
s = random() * 2
if s > 1:
polar = -1
s = 2 - s
else:
polar = 1
val = 0.5*(polar*log(s)/self.x1)+0.5
return self.__normalize(val)
def __cauchy(self):
'''
cauchy
self.x1: bandwidth {0 = huge bandwidth -> 10 = narrow bandwidth}
'''
self.x1 = self.__checkZero(self.x1)
rnd = 0.5
while rnd == 0.5:
rnd = random()
if (self.__pyorand() < self.RAND_MAX/2):
d = -1
else:
d = 1
val = 0.5*(tan(rnd)*self.x1*d)+0.5
return self.__normalize(val)
def __weibull(self):
'''
weibull
self.x1: mean location {0 -> 1}
self.x2: shape {0.5 = linear min, 1.5 = expon min, 3.5 = gaussian}
'''
self.x1 = self.__checkZero(self.x1)
self.x2 = self.__checkZero(self.x2)
rnd = 1/(1-random())
val = self.x1*pow(log(rnd),1/self.x2)
return self.__normalize(val)
def __gaussian(self):
'''
gaussian
self.x1: mean location {0 -> 1}
self.x2: bandwidth {0 = narrow bandwidth -> 10 = huge bandwidth}
'''
self.x1 = self.__checkZero(self.x1)
self.x2 = self.__checkZero(self.x2)
rnd = sum([random() for _ in range(6)])
val = self.x2*(rnd-3)*0.33+self.x1
return self.__normalize(val)
def __gaussian2(self):
"""same as gaussian but without normalization"""
self.x1 = self.__checkZero(self.x1)
self.x2 = self.__checkZero(self.x2)
rnd = sum([random() for _ in range(6)])
val = self.x2*(rnd-3)*0.33+self.x1
return val
def __poisson(self):
"""
poisson
self.x1: gravity center {0 = low values -> 10 = high values}
self.x2: compress/expand range {0.1 = full compress -> 4 full expand}
"""
poissonTab = 0
lastPoisson = -99.0
poissonBuffer = [0 for _ in range(2000)]
if self.x1 < 0.1 or self.x1 == None: self.x1 = 0.1
if self.x2 < 0.1 or self.x2 == None: self.x2 = 0.1
if self.x1 != lastPoisson:
lastPoisson = self.x1
poissonTab = 0
factorial = 1
for i in range(1,13): #interval [1,12]
factorial *= i
tot = 1000*(pow(e,-self.x1)*pow(self.x1,i)/factorial)
j = 0
while j < tot:
poissonBuffer[poissonTab] = i
poissonTab += 1
j += 1
val = poissonBuffer[self.__pyorand() % poissonTab] /12*self.x2
return self.__normalize(val)
def __poisson_2(self):
poissonTab = 0
lastPoisson = -99.0
poissonBuffer = [0 for _ in range(2000)]
if self.x1 < 0.1 or self.x1 == None : self.x1 = 0.1
if self.x2 < 0.1 or self.x2 == None : self.x2 = 0.1
if self.x1 != lastPoisson:
lastPoisson = self.x1
poissonTab = 0
factorial = 1
for i in range(1,13): #interval [1,12]
factorial *= i
tot = 1000*(pow(e,-self.x1)*pow(self.x1,i)/factorial)
j = 0
while j < tot:
poissonBuffer[poissonTab] = i
poissonTab += 1
j += 1
val = poissonBuffer[self.__pyorand() % poissonTab] /12*self.x2
return val
def __walker(self):
"""
walker
self.x1: maximum value {0.1 -> 1}
self.x2: maximum step {0.1 -> 1}
"""
walkerVal = 0.5
if self.x1 < 0.002 or self.x1 == None:
self.x1 = 1.0
if self.x2 < 0.002 or self.x2 == None:
self.x2 = 0.15
modulo = int(self.x2*1000)
d = self.__pyorand() % 100
print modulo, d
if d < 50:
walkerVal += (int(self.__pyorand()%modulo))*0.001
else:
walkerVal -= (int(self.__pyorand()%modulo))*0.001
print "walkerVal ", walkerVal
if walkerVal > self.x1:
walkerVal = self.x1
elif walkerVal < 0:
walkerVal = 0
return walkerVal
def __loopseg(self):
"""
loopseg
self.x1: maximum value {0.1 -> 1}
self.x2: maximum step {0.1 -> 1}
"""
if self.x1 < 0.002 or self.x1 == None:
self.x1 = 1
if self.x2 < 0.002 or self.x2 == None:
self.x2 = 0.15
walkerVal = 0.5
loopChoice = loopCountPlay = loopTime = loopCountRec = loopStop = 0
loopLen = (self.__pyorand() % 10) + 3
loopBuffer = [0 for _ in range(15)]
if loopChoice == 0:
loopCountPlay = loopTime = 0
if self.x2 < 0.002: self.x2 = 0.002
modulo = int(self.x2*1000)
d = self.__pyorand()%100
if d < 50:
walkerVal += (self.__pyorand()%modulo)*0.001
else:
walkerVal -= (self.__pyorand()%modulo)*0.001
if walkerVal > self.x1:
walkerVal = self.x1
elif walkerVal < 0:
walkerVal = 0
loopBuffer[loopCountRec] = walkerVal
loopCountRec += 1
if loopCountRec < loopLen:
loopChoice = 0
else:
loopChoice = 1
loopStop = (self.__pyorand()%4)+1
else:
loopCountRec = 0
walkerVal = loopBuffer[loopCountPlay]
loopCountPlay += 1
if loopCountPlay < loopLen:
loopChoice = 1
else:
loopCountPlay = 0
loopTime += 1
if loopTime == loopStop:
loopChoice = 0
loopLen = (self.__pyorand()%10)+3
return walkerVal
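A usage sketch; the generator names come from the funcs table in __init__ and the parameter values are illustrative:

if __name__ == "__main__":
    r = MyRandoms()
    print "uniform :", r.call("uniform")
    print "gaussian:", r.call("gaussian", x1=0.5, x2=1.0)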
|
poifra/Creamuspython2
|
MyRandoms.py
|
Python
|
mit
| 7,071
|
[
"Gaussian"
] |
81df7aa78514b1b3227571b6d5a05ae6e63ca1ef26f6279fea1986fa154016dc
|
# -*- coding: utf-8 -*-
# More advanced SWIG module test script
#
# Import highlight.py, which is the interface for the _highlight.so module.
# See highlight.py for all available attributes and class members.
#
# Example: python2.7 testmod.py -Sperl -O odt testmod.pl testmod.pl.odt
import highlight
import sys
from optparse import OptionParser
formatList = { "html": highlight.HTML,
"xhtml": highlight.XHTML,
"latex": highlight.LATEX,
"rtf": highlight.RTF,
"tex": highlight.TEX,
"ansi": highlight.ANSI,
"xterm256": highlight.XTERM256,
"odt": highlight.ODTFLAT,
"bbcode": highlight.BBCODE,
"svg": highlight.SVG,
"pango": highlight.PANGO
}
HL_DIR="/usr/share/highlight/"
def highlightFile():
parser = OptionParser("usage: %prog [options] input-file output-file")
parser.add_option("-O", "--format", default="html",
choices=("html","xhtml","latex","tex","rtf","ansi","xterm256", "svg", "odt", "bbcode", "pango"),
help="Output format (html, xhtml, latex, tex, rtf, ansi, xterm256, odt, bbcode, svg, pango)")
parser.add_option("-d", "--doc-title", default="Source file",
help="document title")
parser.add_option("-f", "--fragment", action="store_true",
help="omit file header and footer")
parser.add_option("-F", "--reformat",
choices=('allman','gnu','java','kr','linux', 'banner','stroustrup','whitesmith', 'google', 'pico', 'lisp', 'vtk'),
help="reformats and indents output in given style")
parser.add_option("-l", "--linenumbers", action="store_true",
help="print line numbers in output file")
parser.add_option("-S", "--syntax",
help="specify type of source code")
parser.add_option("-s", "--theme", default="seashell",
help="defines colour theme")
parser.add_option("-u", "--encoding", default="ISO-8859-1",
help="define output encoding which matches input file encoding")
(options, args) = parser.parse_args(sys.argv[1:])
if len(args)!=2:
parser.print_help()
return
formatName=options.format.lower()
outFormat = formatName in formatList and formatList[formatName] or highlight.HTML
(infile, outfile) = args
gen=highlight.CodeGenerator.getInstance(outFormat)
datadir=highlight.DataDir()
datadir.initSearchDirectories(HL_DIR)
#initialize the generator with a colour theme and the language definition
gen.initTheme(datadir.getThemePath("%s.theme" % options.theme))
if options.reformat:
gen.initIndentationScheme(options.reformat)
if (options.syntax):
syntax = options.syntax
else:
syntax = infile[infile.rindex(".")+1:]
print datadir.getLangPath("%s.lang" % syntax)
gen.loadLanguage(datadir.getLangPath("%s.lang" % syntax))
gen.setIncludeStyle(1)
gen.setTitle(options.doc_title)
gen.setFragmentCode(options.fragment)
gen.setPrintLineNumbers(options.linenumbers)
gen.setEncoding(options.encoding)
gen.generateFile(infile, outfile)
# clear the instance
highlight.CodeGenerator.deleteInstance(gen)
###############################################################################
if __name__ == "__main__":
highlightFile()
|
ahmadaghazadeh/FastBook
|
src/FastBookCreator/highlight/examples/swig/testmod.py
|
Python
|
mit
| 3,086
|
[
"VTK"
] |
24fa1cd7addcd705d0977e2fde97e67fae3c2ecf5be5ebcd30605bf5063db76a
|
#
# Copyright (C) 2000 Stephen Davies
# Copyright (C) 2000 Stefan Seefeld
# All rights reserved.
# Licensed to the public under the terms of the GNU LGPL (>= 2),
# see the file COPYING for details.
#
from Synopsis.Processor import Parameter
from Tree import Tree
from Synopsis.Formatters.HTML.Tags import *
import os
class FileTree(Tree):
"""Create a javascript-enabled file tree."""
link_to_views = Parameter(False, 'some docs...')
def filename(self):
if self.main:
return self.directory_layout.index()
else:
return self.directory_layout.special('FileTree')
def title(self):
return 'File Tree'
def root(self):
return self.filename(), self.title()
def process(self):
# Start the file
self.start_file()
self.write_navigation_bar()
self.begin_tree()
# recursively visit all nodes
self.process_node(self.processor.file_tree)
self.end_tree()
self.end_file()
def process_node(self, node):
def node_cmp(a, b):
a_leaf = hasattr(a, 'declarations')
b_leaf = hasattr(b, 'declarations')
if a_leaf != b_leaf:
return cmp(b_leaf, a_leaf)
return cmp(a.path[-1].upper(), b.path[-1].upper())
dirname, filename = os.path.split(node.path)
if hasattr(node, 'declarations'):
# Leaf node
text = href(self.directory_layout.file_index(node.path),
filename, target='detail')
self.write_leaf(text)
return
# Non-leaf node
children = node.children[:]
children.sort(node_cmp)
if node.path:
self.write_node_start(filename + os.sep)
if len(children):
for child in children:
# self.write('<div class="files">')
self.process_node(child)
# self.write('</div>')
if node.path:
self.write_node_end()
|
stefanseefeld/synopsis
|
Synopsis/Formatters/HTML/Views/FileTree.py
|
Python
|
lgpl-2.1
| 2,018
|
[
"VisIt"
] |
07fd3490c438bb820236c6ecad5507075fa1db90eeac936c4e877700d746dc4e
|
# -*- coding: utf-8 -*-
"""
==========================================================
Structure data I/O (:mod:`sknano.io`)
==========================================================
.. currentmodule:: sknano.io
Contents
========
Base I/O classes to inherit from when creating new I/O classes
----------------------------------------------------------------
.. autosummary::
:toctree: generated/
StructureIO
StructureReader
StructureWriter
StructureFormatSpec
StructureIOError
StructureConverter
I/O classes for the `LAMMPS data` structure data format
--------------------------------------------------------
.. autosummary::
:toctree: generated/
DATAReader
DATAWriter
DATAData
DATAFormatSpec
DATAIOError
DATA2XYZConverter
I/O classes for the `xyz` structure data format
-------------------------------------------------
.. autosummary::
:toctree: generated/
XYZReader
XYZWriter
XYZData
XYZFormatSpec
XYZIOError
XYZ2DATAConverter
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
__docformat__ = 'restructuredtext en'
from ._base import *
from ._lammps_data_format import *
from ._lammps_dump_format import *
from ._xyz_format import *
__all__ = [s for s in dir() if not s.startswith('_')]
|
androomerrill/scikit-nano
|
sknano/io/__init__.py
|
Python
|
bsd-2-clause
| 1,324
|
[
"LAMMPS"
] |
0362a36a095ae633383c80c159aec7e439e8e94a9df5dc48a1a051251cecafd4
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import gtk
import mock
from stoqlib.domain.payment.category import PaymentCategory
from stoqlib.gui.editors.paymentcategoryeditor import PaymentCategoryEditor
from stoqlib.gui.test.uitestutils import GUITest
class TestPaymentCategoryEditor(GUITest):
def test_create(self):
editor = PaymentCategoryEditor(self.store)
self.check_editor(editor, 'editor-paymentcategory-create')
def test_show(self):
payment_category = self.create_payment_category()
editor = PaymentCategoryEditor(self.store, model=payment_category)
self.check_editor(editor, 'editor-paymentcategory-show')
def test_confirm(self):
payment = self.create_payment()
payment_category = self.create_payment_category()
payment.category = payment_category
editor = PaymentCategoryEditor(self.store, model=payment_category)
# Change the category type so validate_confirm will ask the
# user to remove this category from payments
editor.category_type.select(PaymentCategory.TYPE_RECEIVABLE)
with mock.patch('stoqlib.gui.editors.paymentcategoryeditor.yesno') as yesno:
yesno.return_value = False
self.click(editor.main_dialog.ok_button)
yesno.assert_called_once_with(
"Changing the payment type will remove "
"this category from 1 payments. Are you sure?",
gtk.RESPONSE_NO, "Change", "Don't change")
self.assertEqual(payment.category, payment_category)
yesno.reset_mock()
yesno.return_value = True
self.click(editor.main_dialog.ok_button)
yesno.assert_called_once_with(
"Changing the payment type will remove "
"this category from 1 payments. Are you sure?",
gtk.RESPONSE_NO, "Change", "Don't change")
self.assertEqual(payment.category, None)
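# Patching note (general mock pattern): mock.patch targets the name where it
# is looked up, hence 'stoqlib.gui.editors.paymentcategoryeditor.yesno'
# rather than the module where yesno is defined.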
|
tiagocardosos/stoq
|
stoqlib/gui/test/test_paymentcategoryeditor.py
|
Python
|
gpl-2.0
| 2,817
|
[
"VisIt"
] |
7615dac8966702c41eb0083adbb679025a8436ca99966956de8009d56dc155bd
|
#!/usr/bin/env python
#PBS -m ae
#PBS -q verylong
#PBS -l nodes=2:ppn=8
from ase import Atom, Atoms
from ase.optimize.bfgslinesearch import BFGSLineSearch
from ase.io import read
from gpaw import GPAW
#Relax the 55-atom Au cluster with both LCAO and PAW modes
cluster = read('Au55.xyz')
cell = [(17.79365715,0,0),
(0,19.60846479,0),
(0,0, 19.84025464)]
cluster.set_cell(cell,scale_atoms=False)
cluster.center()
kwargs_lcao = dict(mode='lcao',
#basis='dzp',
convergence={'density':0.1, 'energy':0.1}
)#poissonsolver=poissonsolver)
calc = GPAW(h=0.18,txt=None,**kwargs_lcao)
cluster.set_calculator(calc)
dyn1 = BFGSLineSearch(cluster, trajectory='Au_cluster_lcao.traj')
dyn1.run(fmax=0.02)
e_cluster_lcao = cluster.get_potential_energy()
cluster.set_calculator(GPAW(h=0.18, txt=None))  # fresh PAW-mode calculator for the second relaxation
dyn2 = BFGSLineSearch(cluster, trajectory='Au_cluster_paw.traj')
dyn2.run(fmax=0.02)
e_cluster_paw = cluster.get_potential_energy()
#Relax CO molecule with both lcao and paw modes
CO = Atoms([Atom('C',(1.0,1.0,1.0)),
Atom('O',(1.0,1.0,2.3))],
cell=(12,12.5,14.5))
CO.set_calculator(calc)
CO.center()
dyn3 = BFGSLineSearch(CO)
dyn3.run(fmax=0.02)
e_CO_lcao = CO.get_potential_energy()
CO.set_calculator(GPAW(h=0.18, txt=None))
dyn4 = BFGSLineSearch(CO)
dyn4.run(fmax=0.02)
e_CO_paw = CO.get_potential_energy()
CO_bond = CO.get_positions()[1][2] - CO.get_positions()[0][2]
#Attach CO molecule onto the Au cluster
pos=[]
pos.append(cluster[34].get_position())
pos.append(cluster[34].get_position())
pos[0][2] += 1.8
pos[1][2] += 1.8 + CO_bond
CO = Atoms([Atom('C', pos[0]),
Atom('O', pos[1])])
cluster.extend(CO)
#Relax the CO-adsorbed Au cluster with both LCAO and PAW modes
cluster.set_calculator(calc)
dyn5 = BFGSLineSearch(cluster, trajectory='CO_cluster_lcao.traj')
dyn5.run(fmax=0.02)
e_cocluster_lcao = cluster.get_potential_energy()
cluster.set_calculator(GPAW(h=0.18, txt=None))
dyn6 = BFGSLineSearch(cluster, trajectory='CO_cluster_paw.traj')
dyn6.run(fmax=0.02)
e_cocluster_paw = cluster.get_potential_energy()
#Print results
print 'Adsorption energy of CO on Au cluster (lcao):', e_cocluster_lcao - e_CO_lcao - e_cluster_lcao
print 'Adsorption energy of CO on Au cluster (paw):', e_cocluster_paw - e_CO_paw - e_cluster_paw
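# The two printed values are adsorption energies in eV,
#     E_ads = E(CO@Au55) - E(CO) - E(Au55),
# evaluated once in the LCAO description and once in the PAW description;
# negative values indicate binding.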
|
qsnake/gpaw
|
gpaw/test/big/CO_Au55_cluster/Au55_cluster.py
|
Python
|
gpl-3.0
| 2,345
|
[
"ASE",
"GPAW"
] |
083e5dcafc52eefef5f3e224ae2fcc1d4eadd09a66e2f77abe93d4a92f70a7ab
|
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Okinawa Institute of Science and Technology, Japan.
#
# This script runs on STEPS 2.x http://steps.sourceforge.net
#
# H Anwar, I Hepburn, H Nedelescu, W Chen and E De Schutter
# Stochastic calcium mechanisms cause dendritic calcium spike variability
# J Neuroscience 2013
#
# HybridCaburst_detchannels.py : The calcium burst model with spatial
# stochastic calcium and discrete deterministic channels.
#
# Script authors: Iain Hepburn and Haroon Anwar
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# USAGE
#
# $ python HybridCaburst_detchannels.py *mesh* *root* *iter_n*
#
# *mesh* is the tetrahedral mesh (10um to 160um cylinder)
# *root* is the path to the location for data storage
# *iter_n* (intended to be an integer) is an identifier number for each
# simulation iteration. iter_n is also used to initialize the random
# number generator.
#
# E.g: python HybridCaburst_detchannels.py Cylinder2_dia2um_L10um_outer0_3um_0.3shell_0.3size_19156tets_adaptive.inp ~/stochcasims/ 1
#
#
# OUTPUT
#
# In (root)/data/HybridCaburst_detchannels/(mesh)/(iter_n+time) directory
# 3 data files will be recorded. Each file contains one row for every
# time-point at which data is recorded, organised into the following columns:
#
# currents.dat
# Time (ms), P-type current, T-type current, BK current, SK current
# (current units are Amps/m^2)
#
# voltage.dat
# Time (ms), voltage at mesh centre (mV)
#
# calcium.dat
# Time (ms), calcium concentration in submembrane (micromolar),
# number of calcium ions in submembrane.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import math
import time
from random import *
import steps.interface
from steps.model import *
from steps.geom import *
from steps.rng import *
from steps.sim import *
from extra.constants import *
import extra.curr_funcs as cf
from extra.discrete import *
import sys
import os
import numpy as np
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
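# Minimal argument check (a sketch): the unpacking below expects exactly three
# command-line arguments, so fail early with the usage string otherwise.
if len(sys.argv) != 4:
    sys.exit('USAGE: python HybridCaburst_detchannels.py *mesh* *root* *iter_n*')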
_, meshfile_ab, root, iter_n = sys.argv
if meshfile_ab == 'Cylinder2_dia2um_L160um_outer0_0.3shell_0.3size_279152tets_adaptive.inp':
cyl160=True
else:
cyl160=False
########################### BIOCHEMICAL MODEL ###############################
r = ReactionManager()
mdl_stoch = Model()
mdl_det = Model()
with mdl_stoch:
Ca_stoch = Species.Create(valence=2)
iCBsf, iCBsCa, iCBCaf, iCBCaCa, CBsf, CBsCa, CBCaf, CBCaCa, PV, PVMg, PVCa, Mg = Species.Create()
vsys_stoch = VolumeSystem.Create()
ssys_stoch = SurfaceSystem.Create()
with vsys_stoch:
diff_Ca = Diffusion.Create(Ca_stoch, DCST)
diff_CBsf = Diffusion.Create(CBsf, DCB)
diff_CBsCa = Diffusion.Create(CBsCa, DCB)
diff_CBCaf = Diffusion.Create(CBCaf, DCB)
diff_CBCaCa = Diffusion.Create(CBCaCa, DCB)
diff_PV = Diffusion.Create(PV, DPV)
diff_PVCa = Diffusion.Create(PVCa, DPV)
diff_PVMg = Diffusion.Create(PVMg, DPV)
(Ca_stoch + iCBsf <r['iCBsf1_f']> iCBsCa) + Ca_stoch <r['iCBsCa_f']> iCBCaCa
(Ca_stoch + iCBsf <r['iCBsf2_f']> iCBCaf) + Ca_stoch <r['iCBCaf_f']> iCBCaCa
r['iCBsf1_f'].K = iCBsf1_f_kcst, iCBsf1_b_kcst
r['iCBsCa_f'].K = iCBsCa_f_kcst, iCBsCa_b_kcst
r['iCBsf2_f'].K = iCBsf2_f_kcst, iCBsf2_b_kcst
r['iCBCaf_f'].K = iCBCaf_f_kcst, iCBCaf_b_kcst
(CBsf + Ca_stoch <r['CBsf1_f']> CBsCa) + Ca_stoch <r['CBsCa_f']> CBCaCa
(CBsf + Ca_stoch <r['CBsf2_f']> CBCaf) + Ca_stoch <r['CBCaf_f']> CBCaCa
r['CBsf1_f'].K = CBsf1_f_kcst, CBsf1_b_kcst
r['CBsCa_f'].K = CBsCa_f_kcst, CBsCa_b_kcst
r['CBsf2_f'].K = CBsf2_f_kcst, CBsf2_b_kcst
r['CBCaf_f'].K = CBCaf_f_kcst, CBCaf_b_kcst
Ca_stoch + PV <r['PVca_f']> PVCa
Mg + PV <r['PVmg_f']> PVMg
r['PVca_f'].K = PVca_f_kcst, PVca_b_kcst
r['PVmg_f'].K = PVmg_f_kcst, PVmg_b_kcst
with mdl_det:
Ca_det = Species.Create(valence=2)
Pump, CaPump, CaP_m0, CaP_m1, CaP_m2, CaP_m3, CaT_m0h0, CaT_m0h1, CaT_m1h0, CaT_m1h1, CaT_m2h0, \
CaT_m2h1, BK_C0, BK_C1, BK_C2, BK_C3, BK_C4, BK_O0, BK_O1, BK_O2, BK_O3, BK_O4, SK_C1, \
SK_C2, SK_C3, SK_C4, SK_O1, SK_O2 = Species.Create()
# Vol/surface systems
vsys_det = VolumeSystem.Create()
ssys_det = SurfaceSystem.Create()
with ssys_det:
#Pump
Ca_det.i + Pump.s <r['PumpD_f']> CaPump.s >r['PumpD_k']> Pump.s
r['PumpD_f'].K = P_f_kcst, P_b_kcst
r['PumpD_k'].K = P_k_kcst
CaP_m0.s <r['CaPm0m1']> CaP_m1.s <r['CaPm1m2']> CaP_m2.s <r['CaPm2m3']> CaP_m3.s
r['CaPm0m1'].K = 0.0, 0.0
r['CaPm1m2'].K = 0.0, 0.0
r['CaPm2m3'].K = 0.0, 0.0
CaT_m0h0.s <r['CaTm0h0_m1h0']> CaT_m1h0.s <r['CaTm1h0_m2h0']> CaT_m2h0.s <r['CaTm2h0_m2h1']> CaT_m2h1.s
r['CaTm0h0_m1h0'].K = 0.0, 0.0
r['CaTm1h0_m2h0'].K = 0.0, 0.0
r['CaTm2h0_m2h1'].K = 0.0, 0.0
CaT_m1h0.s <r['CaTm1h0_m1h1']> CaT_m1h1.s
r['CaTm1h0_m1h1'].K = 0.0, 0.0
CaT_m0h0.s <r['CaTm0h0_m0h1']> CaT_m0h1.s <r['CaTm0h1_m1h1']> CaT_m1h1.s <r['CaTm1h1_m2h1']> CaT_m2h1.s
r['CaTm0h0_m0h1'].K = 0.0, 0.0
r['CaTm1h1_m2h1'].K = 0.0, 0.0
r['CaTm0h1_m1h1'].K = 0.0, 0.0
(((BK_C0.s + Ca_det.i <r['BKCAC0']> BK_C1.s)\
+ Ca_det.i <r['BKCAC1']> BK_C2.s)\
+ Ca_det.i <r['BKCAC2']> BK_C3.s)\
+ Ca_det.i <r['BKCAC3']> BK_C4.s
r['BKCAC0'].K = c_01, c_10
r['BKCAC1'].K = c_12, c_21
r['BKCAC2'].K = c_23, c_32
r['BKCAC3'].K = c_34, c_43
(((BK_O0.s + Ca_det.i <r['BKCAO0']> BK_O1.s)\
+ Ca_det.i <r['BKCAO1']> BK_O2.s)\
+ Ca_det.i <r['BKCAO2']> BK_O3.s)\
+ Ca_det.i <r['BKCAO3']> BK_O4.s
r['BKCAO0'].K = o_01, o_10
r['BKCAO1'].K = o_12, o_21
r['BKCAO2'].K = o_23, o_32
r['BKCAO3'].K = o_34, o_43
BK_C0.s <r['BKC0O0']> BK_O0.s
BK_C1.s <r['BKC1O1']> BK_O1.s
BK_C2.s <r['BKC2O2']> BK_O2.s
BK_C3.s <r['BKC3O3']> BK_O3.s
BK_C4.s <r['BKC4O4']> BK_O4.s
r['BKC0O0'].K = 0.0, 0.0
r['BKC1O1'].K = 0.0, 0.0
r['BKC2O2'].K = 0.0, 0.0
r['BKC3O3'].K = 0.0, 0.0
r['BKC4O4'].K = 0.0, 0.0
((SK_C1.s + Ca_det.i <r['SKCAC1']> SK_C2.s)\
+ Ca_det.i <r['SKCAC2']> SK_C3.s)\
+ Ca_det.i <r['SKCAC3']> SK_C4.s
r['SKCAC1'].K = dirc2_t, invc1_t
r['SKCAC2'].K = dirc3_t, invc2_t
r['SKCAC3'].K = dirc4_t, invc3_t
SK_C3.s <r['SKC3O1']> SK_O1.s
SK_C4.s <r['SKC4O2']> SK_O2.s
r['SKC3O1'].K = diro1_t, invo1_t
r['SKC4O2'].K = diro2_t, invo2_t
##################################
########### MESH & COMPARTMENTALIZATION #################
##########Import Mesh
mesh_stoch = TetMesh.Load('./meshes/'+meshfile_ab)
mesh_det = TetMesh.Load('./meshes/'+meshfile_ab)
with mesh_stoch as mesh:
# Use mesh_stoch for geometrical operations
rad, zmin, zmax = 1e-6, -200e-6, 200e-6
inner_tets, outer_tets = TetList(), TetList()
for t in mesh.tets:
c = t.center
if zmin <= c.z <= zmax and c.x**2 + c.y**2 <= rad**2:
inner_tets.append(t)
else:
outer_tets.append(t)
print(len(outer_tets), " tets in outer compartment")
print(len(inner_tets), " tets in inner compartment")
# Record voltage from the central tetrahedron
cent_tet = mesh.tets[0.0, 0.0, 0.0]
if cyl160:
# Ensure that we use points a small distance inside the boundary:
minz, maxz = mesh.bbox.min.z, mesh.bbox.max.z
memb_tris = TriList(tri for tri in mesh_stoch.surface if minz < tri.center.z < maxz)
else:
print('Finding connecting triangles...')
memb_tris = inner_tets.surface & outer_tets.surface
submemb_tets = TetList()
for tri in memb_tris:
submemb_tets += tri.tetNeighbs
submemb_tets = submemb_tets & inner_tets
print(len(submemb_tets))
vol = sum(tet.Vol for tet in submemb_tets)
print('Volume of submembrane region is', vol)
submemb_tris = TriList()
for tet in submemb_tets:
for tri in tet.faces:
if tri in memb_tris:
submemb_tris.append(tri)
break
assert(len(submemb_tris) == len(submemb_tets))
########## Create an intracellular compartment i.e. cytosolic compartment
cyto_stoch = Compartment.Create(inner_tets.indices, vsys_stoch)
########## Create a membrane as a surface mesh
memb_stoch = Patch.Create(memb_tris, cyto_stoch, None, 'ssys_stoch')
# For EField calculation
print("Creating membrane..")
membrane = Membrane.Create([memb_stoch], opt_file_name='./meshes/'+meshfile_ab+"_optimalidx")
print("Membrane created.")
print("Area: ", memb_stoch.Area)
with mesh_det:
cyto_det = Compartment.Create(inner_tets.indices, vsys_det)
memb_det = Patch.Create(memb_tris.indices, cyto_det, None, 'ssys_det')
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # SIMULATION # # # # # # # # # # # # # # # # # # # # # #
rng1 = RNG('mt19937', 512, 7)
rng2 = RNG('mt19937', 512, 7)
#Creating two solvers
sim_stoch = Simulation('Tetexact', mdl_stoch, mesh_stoch, rng1, calcMembPot=True)
sim_det = Simulation('TetODE', mdl_det, mesh_det, rng2)
sim_det.setTolerances(1.0e-7, 1.0e-7)
print("Resetting simulation object..")
sim_stoch.newRun()
sim_det.newRun()
print("Injecting molecules..")
sim_stoch.Temp = TEMPERATURE+273.15
sim_det.cyto_det.Ca_det.Conc = Ca_iconc
sim_stoch.cyto_stoch.Ca_stoch.Conc = Ca_iconc
print("Calcium concentration in stochastic simulation is: ", sim_stoch.cyto_stoch.Ca_stoch.Conc)
print("No. of Ca molecules in stochastic simulation is: ", sim_stoch.cyto_stoch.Ca_stoch.Count)
print("Calcium concentration in deterministic simulation is: ", sim_det.cyto_det.Ca_det.Conc)
print("No. of Ca molecules in deterministic simulation is: ", sim_det.cyto_det.Ca_det.Count)
sim_stoch.cyto_stoch.Mg.Conc = Mg_conc
surfarea = sim_stoch.memb_stoch.Area
#Total pump is 1e-15 mol/cm2 ---> 1e-11 mol/m2
#pumpnbs per unit area (in m2) is Total pump times AVOGADRO's NUMBER (1e-11 mol/m2 * 6.022e23 /mol )
pumpnbs = 6.022141e12*surfarea
print("Number of pump molecules: ", pumpnbs)
sim_stoch.cyto_stoch.iCBsf.Conc = iCBsf_conc
sim_stoch.cyto_stoch.iCBCaf.Conc = iCBCaf_conc
sim_stoch.cyto_stoch.iCBsCa.Conc = iCBsCa_conc
sim_stoch.cyto_stoch.iCBCaCa.Conc = iCBCaCa_conc
sim_stoch.cyto_stoch.CBsf.Conc = CBsf_conc
sim_stoch.cyto_stoch.CBCaf.Conc = CBCaf_conc
sim_stoch.cyto_stoch.CBsCa.Conc = CBsCa_conc
sim_stoch.cyto_stoch.CBCaCa.Conc = CBCaCa_conc
sim_stoch.cyto_stoch.PV.Conc = PV_conc
sim_stoch.cyto_stoch.PVCa.Conc = PVCa_conc
sim_stoch.cyto_stoch.PVMg.Conc = PVMg_conc
with open('./meshes/'+meshfile_ab+"_distribution", 'r') as dist_file:
for line in dist_file:
line = list(map(float, line.split()))
t = int(line[0])
sim_det.TRI(t).Pump.Count = line[1]
sim_det.TRI(t).CaP_m0.Count = line[2]
sim_det.TRI(t).CaP_m1.Count = line[3]
sim_det.TRI(t).CaP_m2.Count = line[4]
sim_det.TRI(t).CaP_m3.Count = line[5]
sim_det.TRI(t).CaT_m0h0.Count = line[6]
sim_det.TRI(t).CaT_m1h0.Count = line[7]
sim_det.TRI(t).CaT_m2h0.Count = line[8]
sim_det.TRI(t).CaT_m0h1.Count = line[9]
sim_det.TRI(t).CaT_m1h1.Count = line[10]
sim_det.TRI(t).CaT_m2h1.Count = line[11]
sim_det.TRI(t).BK_C0.Count = line[12]
sim_det.TRI(t).BK_C1.Count = line[13]
sim_det.TRI(t).BK_C2.Count = line[14]
sim_det.TRI(t).BK_C3.Count = line[15]
sim_det.TRI(t).BK_C4.Count = line[16]
sim_det.TRI(t).BK_O0.Count = line[17]
sim_det.TRI(t).BK_O1.Count = line[18]
sim_det.TRI(t).BK_O2.Count = line[19]
sim_det.TRI(t).BK_O3.Count = line[20]
sim_det.TRI(t).BK_O4.Count = line[21]
sim_det.TRI(t).SK_C1.Count = line[22]
sim_det.TRI(t).SK_C2.Count = line[23]
sim_det.TRI(t).SK_C3.Count = line[24]
sim_det.TRI(t).SK_C4.Count = line[25]
sim_det.TRI(t).SK_O1.Count = line[26]
sim_det.TRI(t).SK_O2.Count = line[27]
sim_stoch.EfieldDT = EF_DT
sim_stoch.membrane.Potential = init_pot
sim_stoch.membrane.VolRes = Ra
#cm = 1.5uF/cm2 -> 1.5e-6F/1e-4m2 ->1.5e-2 F/m2
sim_stoch.membrane.Capac = memb_capac
#### Recording #####
dc = time.strftime('%b%d_%H_%M_%S_%Y')
runPath = os.path.join(root, 'data/HybridCaburst_detchannels/', meshfile_ab, f'{iter_n}__{dc}')
os.makedirs(runPath, exist_ok=True)
rng1.initialize(10*int(iter_n))
datfile = open(os.path.join(runPath, 'currents.dat'), 'w')
datfile2 = open(os.path.join(runPath, 'voltage.dat'), 'w')
datfile3 = open(os.path.join(runPath, 'calcium.dat'), 'w')
stets = submemb_tets.indices
stris = submemb_tris.indices
for l in range(NTIMEPOINTS):
print("Tpnt: ", l)
#1) READ STOCHASTIC CA and 2) SET DETERMINISTIC CA
sim_det.TETS(stets).Ca_det.Conc = sim_stoch.TETS(stets).Ca_stoch.Conc
#Assuming this sim V is not constant everywhere
allPots = sim_stoch.TRIS(stris).V
#3) Set the rate constants and RUN THE DETERMINISTIC SIMULATION
sim_det.TRIS(stris).CaPm0m1['fwd'].K = [1.0e3 *3.* alpha_cap(V*1.0e3)*Qt for V in allPots]
sim_det.TRIS(stris).CaPm1m2['fwd'].K = [1.0e3 *2.* alpha_cap(V*1.0e3)*Qt for V in allPots]
sim_det.TRIS(stris).CaPm2m3['fwd'].K = [1.0e3 *1.* alpha_cap(V*1.0e3)*Qt for V in allPots]
sim_det.TRIS(stris).CaPm2m3['bkw'].K = [1.0e3 *3.* beta_cap(V*1.0e3)*Qt for V in allPots]
sim_det.TRIS(stris).CaPm1m2['bkw'].K = [1.0e3 *2.* beta_cap(V*1.0e3)*Qt for V in allPots]
sim_det.TRIS(stris).CaPm0m1['bkw'].K = [1.0e3 *1.* beta_cap(V*1.0e3)*Qt for V in allPots]
sim_det.TRIS(stris).CaTm0h0_m1h0['fwd'].K = [1.0e3 *2.* alpham_cat(V*1.0e3) for V in allPots]
sim_det.TRIS(stris).CaTm1h0_m2h0['fwd'].K = [1.0e3 *1.* alpham_cat(V*1.0e3) for V in allPots]
sim_det.TRIS(stris).CaTm1h0_m2h0['bkw'].K = [1.0e3 *2.* betam_cat(V*1.0e3) for V in allPots]
sim_det.TRIS(stris).CaTm0h0_m1h0['bkw'].K = [1.0e3 *1.* betam_cat(V*1.0e3) for V in allPots]
sim_det.TRIS(stris).CaTm0h0_m0h1['fwd'].K = [1.0e3 *1.* alphah_cat(V*1.0e3) for V in allPots]
sim_det.TRIS(stris).CaTm1h0_m1h1['fwd'].K = [1.0e3 *1.* alphah_cat(V*1.0e3) for V in allPots]
sim_det.TRIS(stris).CaTm2h0_m2h1['fwd'].K = [1.0e3 *1.* alphah_cat(V*1.0e3) for V in allPots]
sim_det.TRIS(stris).CaTm2h0_m2h1['bkw'].K = [1.0e3 *1.* betah_cat(V*1.0e3) for V in allPots]
sim_det.TRIS(stris).CaTm1h0_m1h1['bkw'].K = [1.0e3 *1.* betah_cat(V*1.0e3) for V in allPots]
sim_det.TRIS(stris).CaTm0h0_m0h1['bkw'].K = [1.0e3 *1.* betah_cat(V*1.0e3) for V in allPots]
sim_det.TRIS(stris).CaTm0h1_m1h1['fwd'].K = [1.0e3 *2.* alpham_cat(V*1.0e3) for V in allPots]
sim_det.TRIS(stris).CaTm1h1_m2h1['fwd'].K = [1.0e3 *1.* alpham_cat(V*1.0e3) for V in allPots]
sim_det.TRIS(stris).CaTm1h1_m2h1['bkw'].K = [1.0e3 *2.* betam_cat(V*1.0e3) for V in allPots]
sim_det.TRIS(stris).CaTm0h1_m1h1['bkw'].K = [1.0e3 *1.* betam_cat(V*1.0e3) for V in allPots]
sim_det.TRIS(stris).BKC0O0['fwd'].K = [f_0(V) for V in allPots]
sim_det.TRIS(stris).BKC1O1['fwd'].K = [f_1(V) for V in allPots]
sim_det.TRIS(stris).BKC2O2['fwd'].K = [f_2(V) for V in allPots]
sim_det.TRIS(stris).BKC3O3['fwd'].K = [f_3(V) for V in allPots]
sim_det.TRIS(stris).BKC4O4['fwd'].K = [f_4(V) for V in allPots]
sim_det.TRIS(stris).BKC0O0['bkw'].K = [b_0(V) for V in allPots]
sim_det.TRIS(stris).BKC1O1['bkw'].K = [b_1(V) for V in allPots]
sim_det.TRIS(stris).BKC2O2['bkw'].K = [b_2(V) for V in allPots]
sim_det.TRIS(stris).BKC3O3['bkw'].K = [b_3(V) for V in allPots]
sim_det.TRIS(stris).BKC4O4['bkw'].K = [b_4(V) for V in allPots]
sim_det.run(TIMECONVERTER*l)
#4)READ DETERMINISTIC CHANNELS & THEN COMPUTE CURRENT USING DETERMINISTIC GHK (could be stochastic)
So = Ca_oconc
# i) For each tet in submembrane, find the corresponding triID
# ii) For each tri, compute GHK current for each channel
# iii) Count the channel states / Spec in open states for each of the triID and compute the total current of that channel
allCa = sim_det.TETS(stets).Ca_det.Conc
currs_CaP = np.array([
nb * cf.getGHKI(CaP_P, V, 2, TEMPERATURE+273.15, Si*1.0e3, So*1.0e3)
for V, Si, nb in zip(allPots, allCa, sim_det.TRIS(stris).CaP_m3.Count)
])
currs_CaT = np.array([
nb * cf.getGHKI(CaT_P, V, 2, TEMPERATURE+273.15, Si*1.0e3, So*1.0e3)
for V, Si, nb in zip(allPots, allCa, sim_det.TRIS(stris).CaT_m2h1.Count)
])
allBK = np.array(sim_det.TRIS(stris).LIST(BK_O0, BK_O1, BK_O2, BK_O3, BK_O4).Count).reshape(len(stris), 5)
currs_BK = np.array([
nb * cf.getOhmI(V, BK_rev, BK_G)
for V, nb in zip(allPots, np.sum(allBK, axis=1))
])
allSK = np.array(sim_det.TRIS(stris).LIST(SK_O1, SK_O2).Count).reshape(len(stris), 2)
currs_SK = np.array([
nb * cf.getOhmI(V, SK_rev, SK_G)
for V, nb in zip(allPots, np.sum(allSK, axis=1))
])
membArea = sim_det.memb_det.Area
currs_L = np.array([
cf.getOhmI(V, L_rev, L_G) * round(L_ro * membArea) * (area / membArea)
for V, area in zip(allPots, sim_stoch.TRIS(stris).Area)
])
tcur_CaP = sum(currs_CaP)
tcur_CaT = sum(currs_CaT)
tcur_BK = sum(currs_BK)
tcur_SK = sum(currs_SK)
tca_count = sum(sim_stoch.TETS(stets).Ca_stoch.Count)
# Update sim stoch
sim_stoch.TETS(stets).Ca_stoch.Count = sim_det.TETS(stets).Ca_det.Count - (currs_CaP + currs_CaT) * TIMECONVERTER / (2 * E_CHARGE)
sim_stoch.TRIS(stris).IClamp = currs_CaP + currs_CaT + currs_BK + currs_SK + currs_L
sim_stoch.run(TIMECONVERTER*l)
datfile.write('%.6g' %(1.0e3*TIMECONVERTER*l) + ' ')
datfile.write('%.6g' %((tcur_CaP*1.0e-1)/surfarea) + ' ')
datfile.write('%.6g' %((tcur_CaT*1.0e-1)/surfarea) + ' ')
datfile.write('%.6g' %((tcur_BK*1.0e-1)/surfarea) + ' ')
datfile.write('%.6g' %((tcur_SK*1.0e-1)/surfarea) + ' ')
datfile.write('\n')
datfile2.write('%.6g' %(1.0e3*TIMECONVERTER*l) + ' ')
datfile2.write('%.6g' %(sim_stoch.TET(cent_tet).V*1.0e3) + ' ')
datfile2.write('\n')
datfile3.write('%.6g' %(1.0e3*TIMECONVERTER*l) + ' ')
datfile3.write('%.6g' %(((tca_count/AVOGADRO)/(vol*1.0e3))*1.0e6) + ' ')
datfile3.write('%.6g' %tca_count + ' ')
datfile3.write('\n')
datfile.close()
datfile2.close()
datfile3.close()
## END
|
CNS-OIST/STEPS_Example
|
publication_models/API_2/Anwar_J_Neurosci_2013/HybridCaburst_detchannels.py
|
Python
|
gpl-2.0
| 18,841
|
[
"Avogadro"
] |
fc6daf96d1372d85813efb5e9cd2c7ec1a48c14956ffc03a444272b858088ebf
|
"""Generate the storage netcdf file for Iowa Flood Center Precip"""
import datetime
import sys
import os
import numpy as np
from pyiem.util import ncopen, logger
LOG = logger()
def init_year(ts):
"""
Create a new NetCDF file for a year of our specification!
"""
fp = "/mesonet/data/iemre/%s_ifc_daily.nc" % (ts.year,)
if os.path.isfile(fp):
LOG.info("Cowardly refusing to overwrite file %s.", fp)
sys.exit()
nc = ncopen(fp, "w")
nc.title = "IFC Daily Precipitation %s" % (ts.year,)
nc.platform = "Grided Estimates"
nc.description = "Iowa Flood Center ~0.004 degree grid"
nc.institution = "Iowa State University, Ames, IA, USA"
nc.source = "Iowa Environmental Mesonet"
nc.project_id = "IEM"
nc.realization = 1
nc.Conventions = "CF-1.0"
nc.contact = "Daryl Herzmann, akrherz@iastate.edu, 515-294-5978"
nc.history = "%s Generated" % (
datetime.datetime.now().strftime("%d %B %Y"),
)
nc.comment = "No Comment at this time"
# Setup Dimensions
nc.createDimension("lat", 1057)
nc.createDimension("lon", 1741)
days = ((ts.replace(year=ts.year + 1)) - ts).days
nc.createDimension("time", int(days))
nc.createDimension("nv", 2)
# Setup Coordinate Variables
lat = nc.createVariable("lat", float, ("lat",))
lat.units = "degrees_north"
lat.long_name = "Latitude"
lat.standard_name = "latitude"
lat.bounds = "lat_bnds"
lat.axis = "Y"
# Grid centers
lat[:] = 40.133331 + np.arange(1057) * 0.004167
lon = nc.createVariable("lon", float, ("lon",))
lon.units = "degrees_east"
lon.long_name = "Longitude"
lon.standard_name = "longitude"
lon.bounds = "lon_bnds"
lon.axis = "X"
lon[:] = -97.154167 + np.arange(1741) * 0.004167
tm = nc.createVariable("time", float, ("time",))
tm.units = "Days since %s-01-01 00:00:0.0" % (ts.year,)
tm.long_name = "Time"
tm.standard_name = "time"
tm.axis = "T"
tm.calendar = "gregorian"
tm[:] = np.arange(0, int(days))
p01d = nc.createVariable(
"p01d", float, ("time", "lat", "lon"), fill_value=1.0e20
)
p01d.units = "mm"
p01d.long_name = "Precipitation"
p01d.standard_name = "Precipitation"
p01d.coordinates = "lon lat"
p01d.description = "Precipitation accumulation for the day"
nc.close()
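def summarize(fp):
    """Verification sketch (assumes ncopen defaults to read mode): reopen
    the generated file and log its dimension sizes."""
    nc = ncopen(fp)
    LOG.info(
        "lat=%s lon=%s time=%s",
        len(nc.dimensions["lat"]),
        len(nc.dimensions["lon"]),
        len(nc.dimensions["time"]),
    )
    nc.close()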
if __name__ == "__main__":
init_year(datetime.datetime(int(sys.argv[1]), 1, 1))
|
akrherz/iem
|
scripts/iemre/init_daily_ifc.py
|
Python
|
mit
| 2,453
|
[
"NetCDF"
] |
6b9bae98e4a34abc8bb12661da6db883dd8a5eff1a34927d23782d4e08795585
|
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
Gaussian Processes regression examples
"""
try:
from matplotlib import pyplot as pb
except ImportError:
pass
import numpy as np
import GPy
def olympic_marathon_men(optimize=True, plot=True):
"""Run a standard Gaussian process regression on the Olympic marathon data."""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.olympic_marathon_men()
# create simple GP Model
m = GPy.models.GPRegression(data['X'], data['Y'])
# set the lengthscale to be something sensible (defaults to 1)
m.kern.lengthscale = 10.
if optimize:
m.optimize('bfgs', max_iters=200)
if plot:
m.plot(plot_limits=(1850, 2050))
return m
def coregionalization_toy(optimize=True, plot=True):
"""
A simple demonstration of coregionalization on two sinusoidal functions.
"""
#build a design matrix with a column of integers indicating the output
X1 = np.random.rand(50, 1) * 8
X2 = np.random.rand(30, 1) * 5
#build a suitable set of observed variables
Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
Y2 = np.sin(X2) + np.random.randn(*X2.shape) * 0.05 + 2.
m = GPy.models.GPCoregionalizedRegression(X_list=[X1,X2], Y_list=[Y1,Y2])
if optimize:
m.optimize('bfgs', max_iters=100)
if plot:
slices = GPy.util.multioutput.get_slices([X1,X2])
m.plot(fixed_inputs=[(1,0)],which_data_rows=slices[0],Y_metadata={'output_index':0})
m.plot(fixed_inputs=[(1,1)],which_data_rows=slices[1],Y_metadata={'output_index':1},ax=pb.gca())
return m
def coregionalization_sparse(optimize=True, plot=True):
"""
A simple demonstration of coregionalization on two sinusoidal functions using sparse approximations.
"""
#build a design matrix with a column of integers indicating the output
X1 = np.random.rand(50, 1) * 8
X2 = np.random.rand(30, 1) * 5
#build a suitable set of observed variables
Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
Y2 = np.sin(X2) + np.random.randn(*X2.shape) * 0.05 + 2.
m = GPy.models.SparseGPCoregionalizedRegression(X_list=[X1,X2], Y_list=[Y1,Y2])
if optimize:
m.optimize('bfgs', max_iters=100)
if plot:
slices = GPy.util.multioutput.get_slices([X1,X2])
m.plot(fixed_inputs=[(1,0)],which_data_rows=slices[0],Y_metadata={'output_index':0})
m.plot(fixed_inputs=[(1,1)],which_data_rows=slices[1],Y_metadata={'output_index':1},ax=pb.gca())
pb.ylim(-3,)
return m
def epomeo_gpx(max_iters=200, optimize=True, plot=True):
"""
Perform Gaussian process regression on the latitude and longitude data
from the Mount Epomeo runs. Requires gpxpy to be installed on your system
to load in the data.
"""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.epomeo_gpx()
num_data_list = []
for Xpart in data['X']:
num_data_list.append(Xpart.shape[0])
num_data_array = np.array(num_data_list)
num_data = num_data_array.sum()
Y = np.zeros((num_data, 2))
t = np.zeros((num_data, 2))
start = 0
for Xpart, index in zip(data['X'], range(len(data['X']))):
end = start+Xpart.shape[0]
t[start:end, :] = np.hstack((Xpart[:, 0:1],
index*np.ones((Xpart.shape[0], 1))))
Y[start:end, :] = Xpart[:, 1:3]
num_inducing = 200
Z = np.hstack((np.linspace(t[:,0].min(), t[:, 0].max(), num_inducing)[:, None],
np.random.randint(0, 4, num_inducing)[:, None]))
k1 = GPy.kern.RBF(1)
k2 = GPy.kern.Coregionalize(output_dim=5, rank=5)
k = k1**k2
m = GPy.models.SparseGPRegression(t, Y, kernel=k, Z=Z, normalize_Y=True)
m.constrain_fixed('.*variance', 1.)
m.inducing_inputs.constrain_fixed()
m.Gaussian_noise.variance.constrain_bounded(1e-3, 1e-1)
m.optimize(max_iters=max_iters,messages=True)
return m
def multiple_optima(gene_number=937, resolution=80, model_restarts=10, seed=10000, max_iters=300, optimize=True, plot=True):
"""
Show an example of a multimodal error surface for Gaussian process
regression. Gene 939 has bimodal behaviour where the noisy mode is
higher.
"""
# Contour over a range of length scales and signal/noise ratios.
length_scales = np.linspace(0.1, 60., resolution)
log_SNRs = np.linspace(-3., 4., resolution)
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.della_gatta_TRP63_gene_expression(data_set='della_gatta',gene_number=gene_number)
# data['Y'] = data['Y'][0::2, :]
# data['X'] = data['X'][0::2, :]
data['Y'] = data['Y'] - np.mean(data['Y'])
lls = GPy.examples.regression._contour_data(data, length_scales, log_SNRs, GPy.kern.RBF)
if plot:
pb.contour(length_scales, log_SNRs, np.exp(lls), 20, cmap=pb.cm.jet)
ax = pb.gca()
pb.xlabel('length scale')
pb.ylabel('log_10 SNR')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# Now run a few optimizations
models = []
optim_point_x = np.empty(2)
optim_point_y = np.empty(2)
np.random.seed(seed=seed)
for i in range(0, model_restarts):
# kern = GPy.kern.RBF(1, variance=np.random.exponential(1.), lengthscale=np.random.exponential(50.))
kern = GPy.kern.RBF(1, variance=np.random.uniform(1e-3, 1), lengthscale=np.random.uniform(5, 50))
m = GPy.models.GPRegression(data['X'], data['Y'], kernel=kern)
m.likelihood.variance = np.random.uniform(1e-3, 1)
optim_point_x[0] = m.rbf.lengthscale
optim_point_y[0] = np.log10(m.rbf.variance) - np.log10(m.likelihood.variance);
# optimize
if optimize:
m.optimize('scg', xtol=1e-6, ftol=1e-6, max_iters=max_iters)
optim_point_x[1] = m.rbf.lengthscale
optim_point_y[1] = np.log10(m.rbf.variance) - np.log10(m.likelihood.variance);
if plot:
pb.arrow(optim_point_x[0], optim_point_y[0], optim_point_x[1] - optim_point_x[0], optim_point_y[1] - optim_point_y[0], label=str(i), head_length=1, head_width=0.5, fc='k', ec='k')
models.append(m)
if plot:
ax.set_xlim(xlim)
ax.set_ylim(ylim)
return m # (models, lls)
def _contour_data(data, length_scales, log_SNRs, kernel_call=GPy.kern.RBF):
"""
Evaluate the GP objective function for a given data set for a range of
signal to noise ratios and a range of lengthscales.
:data_set: A data set from the utils.datasets directory.
:length_scales: a list of length scales to explore for the contour plot.
:log_SNRs: a list of base 10 logarithm signal to noise ratios to explore for the contour plot.
:kernel_call: a kernel constructor to use for the 'signal' portion of the data.
"""
lls = []
total_var = np.var(data['Y'])
kernel = kernel_call(1, variance=1., lengthscale=1.)
model = GPy.models.GPRegression(data['X'], data['Y'], kernel=kernel)
for log_SNR in log_SNRs:
SNR = 10.**log_SNR
noise_var = total_var / (1. + SNR)
signal_var = total_var - noise_var
model.kern['.*variance'] = signal_var
model.likelihood.variance = noise_var
length_scale_lls = []
for length_scale in length_scales:
model['.*lengthscale'] = length_scale
length_scale_lls.append(model.log_likelihood())
lls.append(length_scale_lls)
return np.array(lls)
def olympic_100m_men(optimize=True, plot=True):
"""Run a standard Gaussian process regression on the Rogers and Girolami olympics data."""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.olympic_100m_men()
# create simple GP Model
m = GPy.models.GPRegression(data['X'], data['Y'])
# set the lengthscale to be something sensible (defaults to 1)
m.rbf.lengthscale = 10
if optimize:
m.optimize('bfgs', max_iters=200)
if plot:
m.plot(plot_limits=(1850, 2050))
return m
def toy_rbf_1d(optimize=True, plot=True):
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.toy_rbf_1d()
# create simple GP Model
m = GPy.models.GPRegression(data['X'], data['Y'])
if optimize:
m.optimize('bfgs')
if plot:
m.plot()
return m
def toy_rbf_1d_50(optimize=True, plot=True):
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.toy_rbf_1d_50()
# create simple GP Model
m = GPy.models.GPRegression(data['X'], data['Y'])
if optimize:
m.optimize('bfgs')
if plot:
m.plot()
return m
def toy_poisson_rbf_1d_laplace(optimize=True, plot=True):
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
optimizer='scg'
x_len = 100
X = np.linspace(0, 10, x_len)[:, None]
f_true = np.random.multivariate_normal(np.zeros(x_len), GPy.kern.RBF(1).K(X))
Y = np.array([np.random.poisson(np.exp(f)) for f in f_true])[:,None]
kern = GPy.kern.RBF(1)
poisson_lik = GPy.likelihoods.Poisson()
laplace_inf = GPy.inference.latent_function_inference.Laplace()
# create simple GP Model
m = GPy.core.GP(X, Y, kernel=kern, likelihood=poisson_lik, inference_method=laplace_inf)
if optimize:
m.optimize(optimizer)
if plot:
m.plot()
# plot the real underlying rate function
pb.plot(X, np.exp(f_true), '--k', linewidth=2)
return m
def toy_ARD(max_iters=1000, kernel_type='linear', num_samples=300, D=4, optimize=True, plot=True):
# Create an artificial dataset where the values in the targets (Y)
# only depend in dimensions 1 and 3 of the inputs (X). Run ARD to
# see if this dependency can be recovered
X1 = np.sin(np.sort(np.random.rand(num_samples, 1) * 10, 0))
X2 = np.cos(np.sort(np.random.rand(num_samples, 1) * 10, 0))
X3 = np.exp(np.sort(np.random.rand(num_samples, 1), 0))
X4 = np.log(np.sort(np.random.rand(num_samples, 1), 0))
X = np.hstack((X1, X2, X3, X4))
Y1 = np.asarray(2 * X[:, 0] + 3).reshape(-1, 1)
Y2 = np.asarray(4 * (X[:, 2] - 1.5 * X[:, 0])).reshape(-1, 1)
Y = np.hstack((Y1, Y2))
Y = np.dot(Y, np.random.rand(2, D));
Y = Y + 0.2 * np.random.randn(Y.shape[0], Y.shape[1])
Y -= Y.mean()
Y /= Y.std()
if kernel_type == 'linear':
kernel = GPy.kern.Linear(X.shape[1], ARD=1)
elif kernel_type == 'rbf_inv':
kernel = GPy.kern.RBF_inv(X.shape[1], ARD=1)
else:
kernel = GPy.kern.RBF(X.shape[1], ARD=1)
kernel += GPy.kern.White(X.shape[1]) + GPy.kern.Bias(X.shape[1])
m = GPy.models.GPRegression(X, Y, kernel)
# len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
# m.set_prior('.*lengthscale',len_prior)
if optimize:
m.optimize(optimizer='scg', max_iters=max_iters)
if plot:
m.kern.plot_ARD()
return m
def toy_ARD_sparse(max_iters=1000, kernel_type='linear', num_samples=300, D=4, optimize=True, plot=True):
# Create an artificial dataset where the values in the targets (Y)
# only depend in dimensions 1 and 3 of the inputs (X). Run ARD to
# see if this dependency can be recovered
X1 = np.sin(np.sort(np.random.rand(num_samples, 1) * 10, 0))
X2 = np.cos(np.sort(np.random.rand(num_samples, 1) * 10, 0))
X3 = np.exp(np.sort(np.random.rand(num_samples, 1), 0))
X4 = np.log(np.sort(np.random.rand(num_samples, 1), 0))
X = np.hstack((X1, X2, X3, X4))
Y1 = np.asarray(2 * X[:, 0] + 3)[:, None]
Y2 = np.asarray(4 * (X[:, 2] - 1.5 * X[:, 0]))[:, None]
Y = np.hstack((Y1, Y2))
Y = np.dot(Y, np.random.rand(2, D));
Y = Y + 0.2 * np.random.randn(Y.shape[0], Y.shape[1])
Y -= Y.mean()
Y /= Y.std()
if kernel_type == 'linear':
kernel = GPy.kern.Linear(X.shape[1], ARD=1)
elif kernel_type == 'rbf_inv':
kernel = GPy.kern.RBF_inv(X.shape[1], ARD=1)
else:
kernel = GPy.kern.RBF(X.shape[1], ARD=1)
#kernel += GPy.kern.Bias(X.shape[1])
X_variance = np.ones(X.shape) * 0.5
m = GPy.models.SparseGPRegression(X, Y, kernel, X_variance=X_variance)
# len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
# m.set_prior('.*lengthscale',len_prior)
if optimize:
m.optimize(optimizer='scg', max_iters=max_iters)
if plot:
m.kern.plot_ARD()
return m
def robot_wireless(max_iters=100, kernel=None, optimize=True, plot=True):
"""Predict the location of a robot given wirelss signal strength readings."""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.robot_wireless()
# create simple GP Model
m = GPy.models.GPRegression(data['Y'], data['X'], kernel=kernel)
# optimize
if optimize:
m.optimize(max_iters=max_iters)
Xpredict = m.predict(data['Ytest'])[0]
if plot:
pb.plot(data['Xtest'][:, 0], data['Xtest'][:, 1], 'r-')
pb.plot(Xpredict[:, 0], Xpredict[:, 1], 'b-')
pb.axis('equal')
pb.title('WiFi Localization with Gaussian Processes')
pb.legend(('True Location', 'Predicted Location'))
sse = ((data['Xtest'] - Xpredict)**2).sum()
print(('Sum of squares error on test data: ' + str(sse)))
return m
def silhouette(max_iters=100, optimize=True, plot=True):
"""Predict the pose of a figure given a silhouette. This is a task from Agarwal and Triggs 2004 ICML paper."""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.silhouette()
# create simple GP Model
m = GPy.models.GPRegression(data['X'], data['Y'])
# optimize
if optimize:
m.optimize(messages=True, max_iters=max_iters)
print(m)
return m
def sparse_GP_regression_1D(num_samples=400, num_inducing=5, max_iters=100, optimize=True, plot=True, checkgrad=False):
"""Run a 1D example of a sparse GP regression."""
# sample inputs and outputs
X = np.random.uniform(-3., 3., (num_samples, 1))
Y = np.sin(X) + np.random.randn(num_samples, 1) * 0.05
# construct kernel
rbf = GPy.kern.RBF(1)
# create simple GP Model
m = GPy.models.SparseGPRegression(X, Y, kernel=rbf, num_inducing=num_inducing)
if checkgrad:
m.checkgrad()
if optimize:
m.optimize('tnc', max_iters=max_iters)
if plot:
m.plot()
return m
def sparse_GP_regression_2D(num_samples=400, num_inducing=50, max_iters=100, optimize=True, plot=True, nan=False):
"""Run a 2D example of a sparse GP regression."""
np.random.seed(1234)
X = np.random.uniform(-3., 3., (num_samples, 2))
Y = np.sin(X[:, 0:1]) * np.sin(X[:, 1:2]) + np.random.randn(num_samples, 1) * 0.05
if nan:
inan = np.random.binomial(1,.2,size=Y.shape)
Y[inan] = np.nan
# construct kernel
rbf = GPy.kern.RBF(2)
# create simple GP Model
m = GPy.models.SparseGPRegression(X, Y, kernel=rbf, num_inducing=num_inducing)
# constrain all parameters to be positive (but not inducing inputs)
m['.*len'] = 2.
m.checkgrad()
# optimize
if optimize:
m.optimize('tnc', messages=1, max_iters=max_iters)
# plot
if plot:
m.plot()
print(m)
return m
def uncertain_inputs_sparse_regression(max_iters=200, optimize=True, plot=True):
"""Run a 1D example of a sparse GP regression with uncertain inputs."""
fig, axes = pb.subplots(1, 2, figsize=(12, 5), sharex=True, sharey=True)
# sample inputs and outputs
S = np.ones((20, 1))
X = np.random.uniform(-3., 3., (20, 1))
Y = np.sin(X) + np.random.randn(20, 1) * 0.05
# likelihood = GPy.likelihoods.Gaussian(Y)
Z = np.random.uniform(-3., 3., (7, 1))
k = GPy.kern.RBF(1)
# create simple GP Model - no input uncertainty on this one
m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z)
if optimize:
m.optimize('scg', messages=1, max_iters=max_iters)
if plot:
m.plot(ax=axes[0])
axes[0].set_title('no input uncertainty')
print(m)
# the same Model with uncertainty
m = GPy.models.SparseGPRegression(X, Y, kernel=GPy.kern.RBF(1), Z=Z, X_variance=S)
if optimize:
m.optimize('scg', messages=1, max_iters=max_iters)
if plot:
m.plot(ax=axes[1])
axes[1].set_title('with input uncertainty')
fig.canvas.draw()
print(m)
return m
def simple_mean_function(max_iters=100, optimize=True, plot=True):
"""
The simplest possible mean function. No parameters, just a simple sinusoid.
"""
#create simple mean function
mf = GPy.core.Mapping(1,1)
mf.f = np.sin
mf.update_gradients = lambda a,b: None
X = np.linspace(0,10,50).reshape(-1,1)
Y = np.sin(X) + 0.5*np.cos(3*X) + 0.1*np.random.randn(*X.shape)
k =GPy.kern.RBF(1)
lik = GPy.likelihoods.Gaussian()
m = GPy.core.GP(X, Y, kernel=k, likelihood=lik, mean_function=mf)
if optimize:
m.optimize(max_iters=max_iters)
if plot:
m.plot(plot_limits=(-10,15))
return m
def parametric_mean_function(max_iters=100, optimize=True, plot=True):
"""
A linear mean function with parameters that we'll learn alongside the kernel
"""
#create simple mean function
mf = GPy.core.Mapping(1,1)
mf.f = np.sin
X = np.linspace(0,10,50).reshape(-1,1)
Y = np.sin(X) + 0.5*np.cos(3*X) + 0.1*np.random.randn(*X.shape) + 3*X
mf = GPy.mappings.Linear(1,1)
k =GPy.kern.RBF(1)
lik = GPy.likelihoods.Gaussian()
m = GPy.core.GP(X, Y, kernel=k, likelihood=lik, mean_function=mf)
if optimize:
m.optimize(max_iters=max_iters)
if plot:
m.plot()
return m
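def toy_rbf_1d_synthetic(optimize=True, plot=True):
    """
    A minimal self-contained sketch in the style of the demos above: fit a GP
    with an RBF kernel to noisy sine data, with no external dataset required.
    """
    X = np.random.uniform(-3., 3., (40, 1))
    Y = np.sin(X) + np.random.randn(40, 1) * 0.05
    m = GPy.models.GPRegression(X, Y, GPy.kern.RBF(1))
    if optimize:
        m.optimize('bfgs')
    if plot:
        m.plot()
    return m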
|
avehtari/GPy
|
GPy/examples/regression.py
|
Python
|
bsd-3-clause
| 18,747
|
[
"Gaussian"
] |
ed7ccef629281dc97a6a298c321da9a144a735584d84e42d0045ec736599018d
|
#!/usr/bin/env python3
"""diffout
Usage:
diffout [options] [--] <commandline> <infile>...
diffout -s
diffout -h | --help
diffout --version
Runs a command on a list of input files, then compares the resulting outputs with copies stored from a previous run.
Examples:
diffout <commandline> <infile>...
Options:
-s, --save Clear expected results, then save all test generated output as new expected results.
-p, --pipe In addition to normal output files, diff terminal output as well
-h, --help Show help.
-q, --quiet Print less text.
-v, --verbose Print more text.
--version Show version.
"""
from docopt import docopt
import logging
import difflib
import glob
import re
import os
import shutil
import shlex
import subprocess
import time
#import colorama
__appname__ = "diffout"
__author__ = "David Maranhao"
__license__ = "MIT"
__version__ = "0.1.0" # MAJOR.MINOR.PATCH | http://semver.org
HTML_PATH = os.path.join("diffout","diffs")
EXPECTED_PATH = os.path.join("diffout","expected")
OUTPUT_PATH = os.path.join("diffout","output")
TERMINAL_OUT_PATH = os.path.join("diffout","output")
def fatal(errorMsg):
logging.critical(errorMsg)
exit(1)
return
def loadFile(fn):
inBuf = []
encoding = ""
if not os.path.isfile(fn):
fatal("File not found: {}".format(fn))
if encoding == "":
try:
wbuf = open(fn, "r", encoding='ascii').read()
encoding = "ASCII"
inBuf = wbuf.split("\n")
except Exception as e:
pass
if encoding == "":
try:
wbuf = open(fn, "rU", encoding='UTF-8').read()
encoding = "utf_8"
inBuf = wbuf.split("\n")
# remove BOM on first line if present
t = ":".join("{0:x}".format(ord(c)) for c in inBuf[0])
if t[0:4] == 'feff':
inBuf[0] = inBuf[0][1:]
except:
pass
if encoding == "":
try:
wbuf = open(fn, "r", encoding='latin_1').read()
encoding = "latin_1"
inBuf = wbuf.split("\n")
except Exception as e:
pass
if encoding == "":
fatal("Cannot determine input file decoding")
return inBuf
def expandPath(path):
path = os.path.abspath(path)
path = os.path.expanduser(path)
path = os.path.expandvars(path)
path = os.path.normpath(path)
return path
def getFilesModifiedAfterFile(path):
path = expandPath(path)
startTime = os.path.getmtime(path)
lp, rp = os.path.split(path)
searchPath = os.path.join(lp, "*")
modifiedFiles = []
for f in glob.glob(searchPath):
mtime = os.path.getmtime(f)
if mtime > startTime:
modifiedFiles.append(f)
return modifiedFiles
def getDirectoryFileList(path):
path = expandPath(path)
if not os.path.isdir(path):
logging.error("{} is not a directory".format(path))
return []
searchPath = os.path.join(path,"*")
return glob.glob(searchPath)
def saveFiles(fileList, destDir):
destDir = expandPath(destDir)
for f in fileList:
f = expandPath(f)
lp, rp = os.path.split(f)
if not os.path.exists(destDir):
os.makedirs(destDir)
dstFile = os.path.join(destDir,rp)
srcFile = f
shutil.copy(srcFile,dstFile)
def diffDir(newDir, oldDir):
logging.info("--- Comparing new outputs with expected outputs:")
newDir = expandPath(newDir)
oldDir = expandPath(oldDir)
newFiles = getDirectoryFileList(newDir)
oldFiles = getDirectoryFileList(oldDir)
if not os.path.exists(HTML_PATH):
os.makedirs(HTML_PATH)
# Index HTML Header
indexHtml = []
indexHtml.extend(htmlHeader)
indexHtml.append("<table class='results'>")
#TODO add timestamp + command line headings
#TODO add table header
#for f in sorted(extraFiles):
#logging.info("Unexpected output file was generated: {}".format(f))
fileChangeCount = 0
d = difflib.HtmlDiff(8,80)
for f in sorted(newFiles):
# HTML Header
outBuf = []
outBuf.extend(htmlHeader)
# Diffs
fn = os.path.basename(f)
expectedFilePath = os.path.join(oldDir,fn)
actual = loadFile(f)
diffResult = ""
if os.path.exists(expectedFilePath):
expected = loadFile(expectedFilePath)
#matchText = colorama.Style.BRIGHT + colorama.Back.LIGHTRED_EX + "[ DIFF ]" + colorama.Back.RESET + colorama.Style.RESET_ALL
matchText = "[ DIFF ]"
if actual==expected:
diffResult = "NODIFF"
#matchText = colorama.Back.LIGHTGREEN_EX + "[ NODIFF ]" + colorama.Style.RESET_ALL
matchText = "[ NODIFF ]"
else:
diffResult = "DIFF"
fileChangeCount += 1
print("{} {}".format(matchText,f))
s = d.make_table(expected, actual, expectedFilePath, f, True)
outBuf.append(s)
outBuf.append('<br />')
else:
diffResult = "EXTRA"
#matchText = colorama.Style.BRIGHT + colorama.Back.BLUE + "[ EXTRA ]" + colorama.Back.RESET + colorama.Style.RESET_ALL
#print("{} Unexpected output file was generated: {}".format(matchText,os.path.basename(f)))
# HTML Footer
outBuf.extend(htmlFooter)
# Write out results
p = os.path.join(HTML_PATH,"{}.html".format(fn))
htmlout = open(p, mode='w', encoding="utf-8")
htmlout.writelines(["{}\n".format(item) for item in outBuf])
htmlout.close()
# Add to index
if diffResult == "DIFF":
resultCell = "<td style='text-align:center;background:#d55;color:white;font: bold 1em sans-serif, serif;'>DIFF</td>"
elif diffResult == "NODIFF":
resultCell = "<td style='text-align:center;background:#5b5;color:white;font: bold 1em sans-serif, serif;'>NODIFF</td>"
elif diffResult == "EXTRA":
resultCell = "<td style='text-align:center;background:#55b;color:white;font: bold 1em sans-serif, serif;'>EXTRA</td>"
indexHtml.append("<tr>{4}<td>{0}</td><td><a href='{1}/{0}.html'>diff</a></td><td><a href='{2}/{0}'>output</a></td><td><a href='{3}/{0}'>expected</a></td></tr>".format(fn,os.path.basename(HTML_PATH),os.path.basename(OUTPUT_PATH),os.path.basename(EXPECTED_PATH),resultCell))
# Check for missing/unexpected files
nfSet = set([os.path.basename(f) for f in newFiles])
ofSet = set([os.path.basename(f) for f in oldFiles])
extraFiles = nfSet - ofSet
missingFiles = ofSet - nfSet
for f in sorted(missingFiles):
#matchText = colorama.Style.BRIGHT + colorama.Back.CYAN + "[ MISSING]" + colorama.Back.RESET + colorama.Style.RESET_ALL
#print("{} Expected output file not generated: {}".format(matchText,f))
indexHtml.append("<tr><td style='text-align:center;background:#aaa;color:white;font: bold 1em sans-serif, serif;'>MISSING</td><td>{0}</td><td><a href='{1}/{0}.html'>diff</a></td><td><a href='{2}/{0}'>output</a></td><td><a href='{3}/{0}'>expected</a></td></tr>".format(os.path.basename(f),os.path.basename(HTML_PATH),os.path.basename(OUTPUT_PATH),os.path.basename(EXPECTED_PATH)))
# Index HTML Footer
indexHtml.append("</table>")
indexHtml.append("</body>")
indexHtml.append("</html>")
# Write out index.html
parentDir = os.path.dirname(HTML_PATH)
p = os.path.join(parentDir,"results.html")
htmlout = open(p, mode='w', encoding="utf-8")
htmlout.writelines(["{}\n".format(item) for item in indexHtml])
htmlout.close()
# Finished, summarize results
print()
if extraFiles:
print("{} unexpected output files were generated.".format(len(extraFiles)))
if missingFiles:
print("{} expected output files were not generated.".format(len(missingFiles)))
if fileChangeCount > 0:
parentDir = expandPath(os.path.dirname(HTML_PATH))
p = os.path.join(parentDir,"results.html")
print("{} output file(s) differ with expected output, view file://{} for diff results".format(fileChangeCount,p))
else:
print("No differences with expected output found.")
print()
return
def main():
args = docopt(__doc__, version="diffout v{}".format(__version__))
#colorama.init()
# Configure logging
logLevel = logging.INFO #default
if args['--verbose']:
logLevel = logging.DEBUG
elif args['--quiet']:
logLevel = logging.ERROR
logging.basicConfig(format='%(levelname)s: %(message)s', level=logLevel)
logging.debug(args)
# Make directories if missing
if not os.path.exists(EXPECTED_PATH):
os.makedirs(EXPECTED_PATH)
if not os.path.exists(OUTPUT_PATH):
os.makedirs(OUTPUT_PATH)
if not os.path.exists(HTML_PATH):
os.makedirs(HTML_PATH)
if not os.path.exists(TERMINAL_OUT_PATH):
os.makedirs(TERMINAL_OUT_PATH)
# Save command
if args['--save']:
p = expandPath(EXPECTED_PATH)
if os.path.exists(p):
logging.info("--- Clearing current expected results")
print(p)
shutil.rmtree(p)
os.makedirs(p)
logging.info("--- Saving latest results to expected")
saveFiles(getDirectoryFileList(OUTPUT_PATH),EXPECTED_PATH)
return
# Delete output files from previous run
p = expandPath(OUTPUT_PATH)
if os.path.exists(p):
logging.info("--- Clearing results from last run")
print(p)
shutil.rmtree(p)
os.makedirs(p)
# Delete diff from previous run
p = expandPath(HTML_PATH)
if os.path.exists(p):
logging.info("--- Clearing diffs from last run")
print(p)
shutil.rmtree(p)
os.makedirs(p)
# Delete terminal output files from previous run
p = expandPath(TERMINAL_OUT_PATH)
if os.path.exists(p):
logging.info("--- Clearing terminal output logs from last run")
print(p)
shutil.rmtree(p)
os.makedirs(p)
# Write marker file for time index
f = open("STARTTIME",'w')
f.write('.')
f.close()
time.sleep(1)
# Run command on each input file
commandCount = 0
commandErrorCount = 0
for infile in args['<infile>']:
for f in glob.glob(infile):
commandline = args['<commandline>']
commandline = commandline.replace('%f',f)
terminalOutFile = open(os.path.join(TERMINAL_OUT_PATH,os.path.basename(f))+".out",'w')
#s = colorama.Fore.LIGHTYELLOW_EX + "\n----- Running command:\n{}\n".format(commandline) + colorama.Fore.RESET
s = "\n----- Running command:\n{}".format(commandline)
print(s)
commandCount += 1
with terminalOutFile as outFile:
cl = shlex.split(commandline)
logging.debug("args: {}".format(str(cl)))
if args['--pipe']:
proc=subprocess.Popen(cl, stdout=outFile, stderr=outFile)
else:
proc=subprocess.Popen(cl)
proc.wait()
if(proc.returncode != 0):
logging.error("Command failed: {}".format(commandline))
commandErrorCount += 1
# Copy recently modified files into output/
outFiles = getFilesModifiedAfterFile("STARTTIME")
saveFiles(outFiles,OUTPUT_PATH)
print("\nFinished executing {} command(s) ({} error(s) occured).".format(commandCount,commandErrorCount))
print("{} output file(s) were generated ({} expected).".format(len(getDirectoryFileList(OUTPUT_PATH)),len(getDirectoryFileList(EXPECTED_PATH))))
print()
diffDir(OUTPUT_PATH,EXPECTED_PATH)
return
htmlHeader = ('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">',
'<html>',
'',
'<head>',
' <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />',
' <title>diffout results</title>',
' <style type="text/css">',
' table.diff {font-family:Courier; border:medium;}',
' .diff_header {background-color:#e0e0e0}',
' td.diff_header {text-align:right}',
' .diff_next {background-color:#c0c0c0}',
' .diff_add {background-color:#aaffaa}',
' .diff_chg {background-color:#ffff77}',
' .diff_sub {background-color:#ffaaaa}',
' table.results {margin:3em auto; width:auto;background:#ffe;border-spacing:0.3em;border:thin solid #ccc}',
' .results td {text-align: left; padding: 0.3em 0.6em; border: none;}',
' </style>',
'</head>',
'',
'<body>')
htmlFooter = (' <table class="diff" summary="Legends">',
' <tr> <th colspan="2"> Legends </th> </tr>',
' <tr> <td> <table border="" summary="Colors">',
' <tr><th> Colors </th> </tr>',
' <tr><td class="diff_add"> Added </td></tr>',
' <tr><td class="diff_chg">Changed</td> </tr>',
' <tr><td class="diff_sub">Deleted</td> </tr>',
' </table></td>',
' <td> <table border="" summary="Links">',
' <tr><th colspan="2"> Links </th> </tr>',
' <tr><td>(f)irst change</td> </tr>',
' <tr><td>(n)ext change</td> </tr>',
' <tr><td>(t)op</td> </tr>',
' </table>',
' </td> </tr>',
' </table>',
'</body>',
'',
'</html>')
if __name__ == "__main__":
main()
|
davem2/diffout
|
diffout/diffout.py
|
Python
|
mit
| 12,520
|
[
"FEFF"
] |
a5a3df3308f772054ed4ee1616a1d1c72c298e7be7e10e4c6d0f5c08a7f0d357
|
"""URL opener.
Copyright 2004-2006 John J Lee <jjl@pobox.com>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
from __future__ import absolute_import
import bisect
import os
import tempfile
import threading
from . import _response
from . import _rfc3986
from . import _sockettimeout
from . import _urllib2_fork
from ._request import Request
from ._util import isstringlike
from .polyglot import HTTPError, URLError, iteritems, is_class
open_file = open
class ContentTooShortError(URLError):
def __init__(self, reason, result):
URLError.__init__(self, reason)
self.result = result
def set_request_attr(req, name, value, default):
try:
getattr(req, name)
except AttributeError:
setattr(req, name, default)
if value is not default:
setattr(req, name, value)
class OpenerDirector(_urllib2_fork.OpenerDirector):
def __init__(self):
_urllib2_fork.OpenerDirector.__init__(self)
# really none of these are (sanely) public -- the lack of initial
# underscore on some is just due to following urllib2
self.process_response = {}
self.process_request = {}
self._any_request = {}
self._any_response = {}
self._handler_index_valid = True
self._tempfiles = []
def add_handler(self, handler):
if not hasattr(handler, "add_parent"):
raise TypeError("expected BaseHandler instance, got %r" %
type(handler))
if handler in self.handlers:
return
# XXX why does self.handlers need to be sorted?
bisect.insort(self.handlers, handler)
handler.add_parent(self)
self._handler_index_valid = False
def _maybe_reindex_handlers(self):
if self._handler_index_valid:
return
handle_error = {}
handle_open = {}
process_request = {}
process_response = {}
any_request = set()
any_response = set()
unwanted = []
for handler in self.handlers:
added = False
for meth in dir(handler):
if meth in ["redirect_request", "do_open", "proxy_open"]:
# oops, coincidental match
continue
if meth == "any_request":
any_request.add(handler)
added = True
continue
elif meth == "any_response":
any_response.add(handler)
added = True
continue
ii = meth.find("_")
scheme = meth[:ii]
condition = meth[ii + 1:]
if condition.startswith("error"):
jj = meth[ii + 1:].find("_") + ii + 1
kind = meth[jj + 1:]
try:
kind = int(kind)
except ValueError:
pass
lookup = handle_error.setdefault(scheme, {})
elif condition == "open":
kind = scheme
lookup = handle_open
elif condition == "request":
kind = scheme
lookup = process_request
elif condition == "response":
kind = scheme
lookup = process_response
else:
continue
lookup.setdefault(kind, set()).add(handler)
added = True
if not added:
unwanted.append(handler)
for handler in unwanted:
self.handlers.remove(handler)
# sort indexed methods
# XXX could be cleaned up
for lookup in [process_request, process_response]:
for scheme, handlers in iteritems(lookup):
lookup[scheme] = handlers
for scheme, lookup in iteritems(handle_error):
for code, handlers in iteritems(lookup):
handlers = list(handlers)
handlers.sort()
lookup[code] = handlers
for scheme, handlers in iteritems(handle_open):
handlers = list(handlers)
handlers.sort()
handle_open[scheme] = handlers
# cache the indexes
self.handle_error = handle_error
self.handle_open = handle_open
self.process_request = process_request
self.process_response = process_response
self._any_request = any_request
self._any_response = any_response
def _request(self, url_or_req, data, visit,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
if isstringlike(url_or_req):
req = Request(url_or_req, data, visit=visit, timeout=timeout)
else:
# already a mechanize.Request instance
req = url_or_req
if data is not None:
req.add_data(data)
# XXX yuck
set_request_attr(req, "visit", visit, None)
set_request_attr(req, "timeout", timeout,
_sockettimeout._GLOBAL_DEFAULT_TIMEOUT)
return req
def open(self, fullurl, data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
req = self._request(fullurl, data, None, timeout)
req_scheme = req.get_type()
self._maybe_reindex_handlers()
# pre-process request
# XXX should we allow a Processor to change the URL scheme
# of the request?
request_processors = set(self.process_request.get(req_scheme, []))
request_processors.update(self._any_request)
request_processors = list(request_processors)
request_processors.sort()
for processor in request_processors:
for meth_name in ["any_request", req_scheme + "_request"]:
meth = getattr(processor, meth_name, None)
if meth:
req = meth(req)
# In Python >= 2.4, .open() supports processors already, so we must
# call ._open() instead.
urlopen = _urllib2_fork.OpenerDirector._open
response = urlopen(self, req, data)
# post-process response
response_processors = set(self.process_response.get(req_scheme, []))
response_processors.update(self._any_response)
response_processors = list(response_processors)
response_processors.sort()
for processor in response_processors:
for meth_name in ["any_response", req_scheme + "_response"]:
meth = getattr(processor, meth_name, None)
if meth:
response = meth(req, response)
return response
def error(self, proto, *args):
if proto in ['http', 'https']:
            # XXX http[s] protocols are special-cased
            # https is handled no differently from http here
dict = self.handle_error['http']
proto = args[2] # YUCK!
meth_name = 'http_error_%s' % proto
http_err = 1
orig_args = args
else:
dict = self.handle_error
meth_name = proto + '_error'
http_err = 0
args = (dict, proto, meth_name) + args
result = self._call_chain(*args)
if result:
return result
if http_err:
args = (dict, 'default', 'http_error_default') + orig_args
return self._call_chain(*args)
BLOCK_SIZE = 1024 * 8
def retrieve(self, fullurl, filename=None, reporthook=None, data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT,
open=open_file):
"""Returns (filename, headers).
For remote objects, the default filename will refer to a temporary
file. Temporary files are removed when the OpenerDirector.close()
method is called.
For file: URLs, at present the returned filename is None. This may
change in future.
If the actual number of bytes read is less than indicated by the
Content-Length header, raises ContentTooShortError (a URLError
subclass). The exception's .result attribute contains the (filename,
headers) that would have been returned.
"""
req = self._request(fullurl, data, False, timeout)
scheme = req.get_type()
fp = self.open(req)
try:
headers = fp.info()
if filename is None and scheme == 'file':
# XXX req.get_selector() seems broken here, return None,
# pending sanity :-/
return None, headers
# return urllib.url2pathname(req.get_selector()), headers
if filename:
tfp = open(filename, 'wb')
else:
path = _rfc3986.urlsplit(req.get_full_url())[2]
suffix = os.path.splitext(path)[1]
fd, filename = tempfile.mkstemp(suffix)
self._tempfiles.append(filename)
tfp = os.fdopen(fd, 'wb')
try:
result = filename, headers
bs = self.BLOCK_SIZE
size = -1
read = 0
blocknum = 0
if reporthook:
if "content-length" in headers:
size = int(headers["content-length"])
reporthook(blocknum, bs, size)
while 1:
block = fp.read(bs)
if not block:
break
read += len(block)
tfp.write(block)
blocknum += 1
if reporthook:
reporthook(blocknum, bs, size)
finally:
tfp.close()
finally:
fp.close()
# raise exception if actual size does not match content-length header
if size >= 0 and read < size:
raise ContentTooShortError(
"retrieval incomplete: "
"got only %i out of %i bytes" % (read, size),
result
)
return result
def close(self):
_urllib2_fork.OpenerDirector.close(self)
# make it very obvious this object is no longer supposed to be used
self.open = self.error = self.retrieve = self.add_handler = None
if self._tempfiles:
for filename in self._tempfiles:
try:
os.unlink(filename)
except OSError:
pass
del self._tempfiles[:]
def wrapped_open(urlopen, process_response_object, fullurl, data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
success = True
try:
response = urlopen(fullurl, data, timeout)
except HTTPError as error:
success = False
if error.fp is None: # not a response
raise
response = error
if response is not None:
response = process_response_object(response)
if not success:
raise response
return response
class ResponseProcessingOpener(OpenerDirector):
def open(self, fullurl, data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
def bound_open(fullurl, data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
return OpenerDirector.open(self, fullurl, data, timeout)
return wrapped_open(
bound_open, self.process_response_object, fullurl, data, timeout)
def process_response_object(self, response):
return response
class SeekableResponseOpener(ResponseProcessingOpener):
def process_response_object(self, response):
return _response.seek_wrapped_response(response)
class OpenerFactory:
"""This class's interface is quite likely to change."""
default_classes = [
# handlers
_urllib2_fork.ProxyHandler,
_urllib2_fork.UnknownHandler,
_urllib2_fork.HTTPHandler,
_urllib2_fork.HTTPDefaultErrorHandler,
_urllib2_fork.HTTPRedirectHandler,
_urllib2_fork.FTPHandler,
_urllib2_fork.FileHandler,
# processors
_urllib2_fork.HTTPCookieProcessor,
_urllib2_fork.HTTPErrorProcessor,
]
default_classes.append(_urllib2_fork.HTTPSHandler)
handlers = []
replacement_handlers = []
def __init__(self, klass=OpenerDirector):
self.klass = klass
def build_opener(self, *handlers):
"""Create an opener object from a list of handlers and processors.
The opener will use several default handlers and processors, including
support for HTTP and FTP.
If any of the handlers passed as arguments are subclasses of the
default handlers, the default handlers will not be used.
"""
opener = self.klass()
default_classes = list(self.default_classes)
skip = set()
for klass in default_classes:
for check in handlers:
if is_class(check):
if issubclass(check, klass):
skip.add(klass)
elif isinstance(check, klass):
skip.add(klass)
for klass in skip:
default_classes.remove(klass)
for klass in default_classes:
opener.add_handler(klass())
for h in handlers:
if is_class(h):
h = h()
opener.add_handler(h)
return opener
build_opener = OpenerFactory().build_opener
thread_local = threading.local()
thread_local.opener = None
def get_thread_local_opener():
try:
ans = thread_local.opener
except AttributeError:
# threading module is broken, use a single global instance
ans = getattr(get_thread_local_opener, 'ans', None)
if ans is None:
ans = get_thread_local_opener.ans = build_opener()
if ans is None:
ans = thread_local.opener = build_opener()
return ans
def urlopen(url, data=None, timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
return get_thread_local_opener().open(url, data, timeout)
def urlretrieve(url, filename=None, reporthook=None, data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
return get_thread_local_opener().retrieve(
url, filename, reporthook, data, timeout)
def install_opener(opener):
get_thread_local_opener.ans = opener
try:
thread_local.opener = opener
except AttributeError:
pass
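# Hedged usage sketch (not part of the original module): the module-level
# helpers defined above can be exercised along these lines, assuming a
# reachable HTTP URL:
#
#     opener = build_opener()            # default handler/processor stack
#     install_opener(opener)             # make it the thread-local default
#     response = urlopen("http://example.com")
#     filename, headers = opener.retrieve("http://example.com")
#     opener.close()                     # also removes temporary files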
|
Masood-M/yalih
|
mechanize/_opener.py
|
Python
|
apache-2.0
| 14,735
|
[
"VisIt"
] |
db25a3ec600f7f7206b8485f75e2cb0db4debf59d5d29eac2b2817d83cd144d8
|
"""
@name: PyHouse/src/Modules/Drivers/_test/test_Null.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2015-2018 by D. Brian Kimmel
@license: MIT License
@note: Created on Jul 30, 2015
@Summary:
"""
__updated__ = '2018-02-12'
# Import system type stuff
from twisted.trial import unittest, reporter, runner
# Import PyMh files and modules.
from Modules.Drivers.Null import test as I_test
class Z_Suite(unittest.TestCase):
def setUp(self):
self.m_test = runner.TestLoader()
def test_Null(self):
l_package = runner.TestLoader().loadPackage(I_test)
l_ret = reporter.Reporter()
l_package.run(l_ret)
l_ret.done()
#
print('\n====================\n*** test_Null ***\n{}\n'.format(l_ret))
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/Core/Drivers/_test/test_Null.py
|
Python
|
mit
| 813
|
[
"Brian"
] |
f12b7c86c10083717426bfadf1fd29ffca3eb7fb322b79384c01e6cf98e8cc3f
|
import unittest
import numpy as np
from spglib import get_symmetry
class TestGetSymmetry(unittest.TestCase):
def setUp(self):
lattice = [[4, 0, 0], [0, 4, 0], [0, 0, 4]]
positions = [[0, 0, 0], [0.5, 0.5, 0.5]]
numbers = [1, 1]
magmoms = [0, 0]
self._cell = (lattice, positions, numbers, magmoms)
def tearDown(self):
pass
def test_get_symmetry_ferro(self):
self._cell[3][0] = 1
self._cell[3][1] = 1
sym = get_symmetry(self._cell)
self.assertEqual(96, len(sym['rotations']))
self.assertTrue((sym['equivalent_atoms'] == [0, 0]).all())
def test_get_symmetry_anti_ferro(self):
self._cell[3][0] = 1
self._cell[3][1] = -1
sym = get_symmetry(self._cell)
self.assertEqual(96, len(sym['rotations']))
self.assertTrue((sym['equivalent_atoms'] == [0, 0]).all())
def test_get_symmetry_broken_magmoms(self):
self._cell[3][0] = 1
self._cell[3][1] = 2
sym = get_symmetry(self._cell)
self.assertEqual(48, len(sym['rotations']))
self.assertTrue((sym['equivalent_atoms'] == [0, 1]).all())
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestGetSymmetry)
unittest.TextTestRunner(verbosity=2).run(suite)
# unittest.main()
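# Hedged sketch (not part of the original tests): the cell tuple passed to
# get_symmetry() is (lattice, positions, numbers, magmoms); the
# antiferromagnetic case above amounts to
#     cell = (lattice, positions, [1, 1], [1, -1])
#     sym = get_symmetry(cell)   # 96 rotations expected for this bcc-like cell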
|
sauliusg/cod-tools
|
src/externals/spglib/python/test/test_collinear_spin.py
|
Python
|
gpl-2.0
| 1,389
|
[
"VASP"
] |
f9bbaf6ce88d79571053c2e56ea58c671e5a94bcee77789dc7943fe3e53e7968
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.560482
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/web/external.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class external(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(external, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_90355923 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2webifexternals>
''')
for plugin in VFFSL(SL,"plugins",True): # generated from line 4, col 2
write(u'''\t<e2webifexternal>
\t\t<e2path>''')
_v = VFFSL(SL,"plugin",True)[0] # u'$plugin[0]' on line 6, col 11
if _v is not None: write(_filter(_v, rawExpr=u'$plugin[0]')) # from line 6, col 11.
write(u'''</e2path>
\t\t<e2name>''')
_v = VFFSL(SL,"plugin",True)[2] # u'$plugin[2]' on line 7, col 11
if _v is not None: write(_filter(_v, rawExpr=u'$plugin[2]')) # from line 7, col 11.
write(u'''</e2name>
\t\t<e2externalversion>''')
_v = VFFSL(SL,"plugin",True)[3] # u'$plugin[3]' on line 8, col 22
if _v is not None: write(_filter(_v, rawExpr=u'$plugin[3]')) # from line 8, col 22.
write(u'''</e2externalversion>
\t</e2webifexternal>
''')
write(u'''</e2webifexternals>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_90355923
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_external= 'respond'
## END CLASS DEFINITION
if not hasattr(external, '_initCheetahAttributes'):
templateAPIClass = getattr(external, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(external)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=external()).run()
|
MOA-2011/enigma2-plugin-extensions-openwebif
|
plugin/controllers/views/web/external.py
|
Python
|
gpl-2.0
| 5,656
|
[
"VisIt"
] |
876ea510e11f0182672346047c5c02c455816e175eec4fcd3f25c95fac682b49
|
"""Module documentation goes here."""
# Enthought library imports.
from traits.api import Instance
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
# Local imports
from mayavi.core.module import Module
from mayavi.components.actor import Actor
######################################################################
# `MyModule` class.
######################################################################
class MyModule(Module):
# The version of this class. Used for persistence.
__version__ = 0
# The actor component that represents the visualization.
actor = Instance(Actor)
########################################
# View related code.
######################################################################
# `Module` interface
######################################################################
def setup_pipeline(self):
"""Override this method so that it *creates* the tvtk
pipeline.
This method is invoked when the object is initialized via
`__init__`. Note that at the time this method is called, the
tvtk data pipeline will *not* yet be setup. So upstream data
will not be available. The idea is that you simply create the
basic objects and setup those parts of the pipeline not
dependent on upstream sources and filters. You should also
set the `actors` attribute up at this point.
"""
# Create the components and set them up.
# Setup the actor suitably for this module.
# Setup the components, actors and widgets. (sample code)
#self.components.extend([your_components, ...])
#self.actors.append(your_actor)
# Note that self.actor.actor need not be added.
#self.widgets.append(your_widget)
def update_pipeline(self):
"""Override this method so that it *updates* the tvtk pipeline
when data upstream is known to have changed.
This method is invoked (automatically) when any of the inputs
sends a `pipeline_changed` event.
"""
# Data is available, so set the input for the grid plane.
# Do your stuff here!
# Now flush the pipeline
self.pipeline_changed = True
def update_data(self):
"""Override this method so that it flushes the vtk pipeline if
that is necessary.
This method is invoked (automatically) when any of the inputs
sends a `data_changed` event.
"""
# Just set data_changed, the components should do the rest if
# they are connected.
self.data_changed = True
######################################################################
# Non-public methods.
######################################################################
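# Hedged illustration (not part of the skeleton): a concrete module would
# typically fill setup_pipeline() along the lines suggested by the comments
# above, e.g.
#
#     def setup_pipeline(self):
#         self.actor = Actor()                  # create the actor component
#         self.components.extend([self.actor])  # register it with the module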
|
dmsurti/mayavi
|
mayavi/modules/skeleton_module.py
|
Python
|
bsd-3-clause
| 2,822
|
[
"Mayavi",
"VTK"
] |
34c1765733810c8aabc7c6d756cef675b92339d5d94a2ef94182802dcb46e928
|
# -*- coding: utf-8 -*-
from .body import GeoBody
from .point import Point
from .solver import solve
from .vector import Vector
class Plane(GeoBody):
"""A Plane (not the flying one)"""
def __init__(self, *args):
"""Plane(Point, Point, Point):
Initialise a plane going through the three given points.
Plane(Point, Vector, Vector):
Initialise a plane given by a point and two vectors lying on
the plane.
Plane(Point, Vector):
Initialise a plane given by a point and a normal vector (point
normal form)
Plane(a, b, c, d):
Initialise a plane given by the equation
ax1 + bx2 + cx3 = d (general form).
"""
if len(args) == 3:
a, b, c = args
            if (isinstance(a, Point) and
                    isinstance(b, Point) and
                    isinstance(c, Point)):
# for three points we just calculate the vectors AB
# and AC and continue like we were given two vectors
# instead
vab = b.pv() - a.pv()
vac = c.pv() - a.pv()
elif (isinstance(a, Point) and
isinstance(b, Vector) and
isinstance(c, Vector)):
vab, vac = b, c
            # We need a vector orthogonal to the two given ones (the
            # length doesn't matter), so we just use the cross product
vec = vab.cross(vac)
self._init_pn(a, vec)
elif len(args) == 2:
self._init_pn(*args)
elif len(args) == 4:
self._init_gf(*args)
def _init_pn(self, p, normale):
"""Initialise a plane given in the point normal form."""
self.p = p
self.n = normale
def _init_gf(self, a, b, c, d):
"""Initialise a plane given in the general form."""
# We need
# 1) a normal vector -> given by (a, b, c)
        # 2) a point on the plane -> solve the equation and choose a
        #    "random" point
solution = solve([[a, b, c, d]])
self.n = Vector(a, b, c)
self.p = Point(*solution(1, 1))
def __eq__(self, other):
"""Checks if two planes are equal. Two planes can be equal even
if the representation is different!
"""
return self.p in other and self.parallel(other)
def __contains__(self, other):
"""Checks if a Point lies on the Plane or a Line is a subset of
the plane.
"""
from .line import Line
if isinstance(other, Point):
return other.pv() * self.n == self.p.pv() * self.n
elif isinstance(other, Line):
return Point(other.sv) in self and self.parallel(other)
def __repr__(self):
return "Plane({}, {})".format(self.p, self.n)
def point_normal(self):
"""Returns (p, n) so that you can build the equation
_ _
E: (x - p) n = 0
to describe the plane.
"""
# That's the form we use to store the plane internally,
# we don't have to calculate anything
return (self.p.pv(), self.n)
def general_form(self):
"""Returns (a, b, c, d) so that you can build the equation
E: ax1 + bx2 + cx3 = d
to describe the plane.
"""
        # Since this form is just the point-normal-form when you do the
        # multiplication, we don't have to calculate much here
return (
self.n[0],
self.n[1],
self.n[2],
self.n * self.p.pv(),
)
def parametric(self):
"""Returns (u, v, w) so that you can build the equation
_ _ _ _
E: x = u + rv + sw ; (r, s) e R
to describe the plane (a point and two vectors).
"""
s = solve([list(self.n) + [0]])
# Pick a first vector orthogonal to the normal vector
# there are infinitely many solutions, varying in direction
# and length, so just choose some values
v = Vector(*s(1, 1))
assert v.orthogonal(self.n)
# Pick a second vector orthogonal to the normal vector and
# orthogonal to the first vector (v)
# again, there are infinitely many solutions, varying in length
s = solve([
list(self.n) + [0],
list(v) + [0],
])
w = Vector(*s(1))
return (self.p.pv(), v, w)
def draw(self, renderer, box, color=(1, 1, 0), draw_normal=True):
"""Draw the plane on the given renderer (vtk).
color defaults to yellow.
draw_normal defaults to True.
"""
from .line import Line
from .calc import distance
min_, max_ = box
# Define the 12 edges of the cuboid that is visible. We define
# it as 12 (infinitely long) lines and later discard any points
# outside of the cuboid.
boundaries = [
Line(Point(max_[0], max_[1], 0), Vector(0, 0, 1)),
Line(Point(max_[0], 0, min_[2]), Vector(0, 1, 0)),
Line(Point(max_[0], 0, max_[2]), Vector(0, 1, 0)),
Line(Point(max_[0], min_[1], 0), Vector(0, 0, 1)),
Line(Point(0, min_[1], min_[2]), Vector(1, 0, 0)),
Line(Point(0, min_[1], max_[2]), Vector(1, 0, 0)),
            Line(Point(min_[0], min_[1], 0), Vector(0, 0, 1)),
Line(Point(min_[0], 0, max_[2]), Vector(0, 1, 0)),
Line(Point(min_[0], 0, min_[2]), Vector(0, 1, 0)),
Line(Point(min_[0], max_[1], 0), Vector(0, 0, 1)),
Line(Point(0, max_[1], max_[2]), Vector(1, 0, 0)),
Line(Point(0, max_[1], min_[2]), Vector(1, 0, 0)),
]
intersections = filter(None, map(self.intersection, boundaries))
# If a line is a subset of a plane, we will get back a Line as
# intersection. We need to filter those out, otherwise they
# will break everything
intersections = filter(lambda x: not isinstance(x, Line),
intersections)
# Remove duplicates
intersections = list(set(intersections))
# Filter out any out of bounds intersections
def in_bounds(point):
# intersect is actually (num, point)
return (
# <3 Python's comparison operator
min_[0] <= point.x <= max_[0] and
min_[1] <= point.y <= max_[1] and
min_[2] <= point.z <= max_[2]
)
intersections = list(filter(in_bounds, intersections))
polygon = [intersections.pop()]
while intersections:
last = polygon[-1]
distances = [distance(last, x) for x in intersections]
# We're only interested in the index of the next point,
# this min function returns the minimum (index, distance)
# tuple...
successor = min(enumerate(distances), key=lambda x: x[1])
# ...but we only need the index :)
successor = successor[0]
polygon.append(intersections.pop(successor))
# Please don't ask me what all this stuff is for
import vtk
points = vtk.vtkPoints()
for point in polygon:
# The axes are labelled differently in maths and 3d
# graphic programming
points.InsertNextPoint(point.y, point.z, point.x)
poly = vtk.vtkPolygon()
poly.GetPointIds().SetNumberOfIds(len(polygon))
for i in range(len(polygon)):
poly.GetPointIds().SetId(i, i)
polys = vtk.vtkCellArray()
polys.InsertNextCell(poly)
data = vtk.vtkPolyData()
data.SetPoints(points)
data.SetPolys(polys)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(data)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Yellow planes
actor.GetProperty().SetColor(*color)
renderer.AddActor(actor)
# Draw the normal
if draw_normal:
self.n.draw(renderer, None, origin=self.p)
__all__ = ("Plane",)
|
Kingdread/sgl
|
sgl/plane.py
|
Python
|
gpl-3.0
| 8,095
|
[
"VTK"
] |
0ff931a25faca78d5b8a29b96f709929eaf8680968aa61f3904ba595c3ea7a2e
|
#!/usr/bin/python
""" Functions to calculate the mean-square displacement from a LAMMPS trajectory
Usage:
#Must be in pythonpath or working directory
from msd import msd
msd_df = msd(atom_type,first_frame,last_frame)
Requirement:
python2
numpy
dump_dataframe.py
pandas
TODO:
Parallelisation
Add a function for a trajectory in a single file
"""
from dump_dataframe import read_dump
import numpy as np
import pandas as pd
from glob import glob
def msd(atom_type=3, first_frame=-1000, last_frame=-1):
""" Function to calculate the mean-square displacement(in each direction and the total msd)
of a trajectory. Reads all the dump to create an array with the time evolution of
the positions for each particles of an atom_type
Args:
----
atom_type(int): The atom type of the desired atoms to calculate the msd_df
first_frame(int): The first frame to start the msd
last_frame(int): The last frame for the msd
Returns:
----
    msd(dataframe): A dataframe with the time as index and msd x, msd y, msd z, total as columns
"""
# List of all the dump in the trajectory
complete_trajectory = glob("*dump*")
# sort the list according to the number in the filename
complete_trajectory.sort(key=lambda f: int(filter(str.isdigit, f)))
# consider only the desired frames
desired_trajectory = complete_trajectory[first_frame:last_frame]
# Initialize the lists for the positions and timestep
x = []
y = []
z = []
timesteps = []
for step in desired_trajectory:
# read the dump for each steps
dump = read_dump(step, wrap=False)
timestep = dump["step"]
atom_df = dump["atom_df"]
        # select only the useful columns
msd_col_list = ["type", "xu", "yu", "zu"]
msd_df = atom_df[msd_col_list]
# choose only the wanted atom_type
msd_df = msd_df[msd_df["type"] == atom_type]
# drop the now useless type column
msd_df = msd_df.drop(["type"], axis=1)
# append each values to the list
timesteps.append(timestep)
x.append(msd_df.xu.values.tolist())
y.append(msd_df.yu.values.tolist())
z.append(msd_df.zu.values.tolist())
    # Convert the lists to arrays and transpose them, so the rows are the
    # particles and the columns the steps
timesteps = np.array(timesteps).T
x = np.array(x).T
y = np.array(y).T
z = np.array(z).T
msd = []
n = 1
while n < len(desired_trajectory):
# calculate the delta_t
delta_t = timesteps[n] - timesteps[0]
# calculate (x(t+n)-x(t))**2 and the mean over all the particles and
# the same delta_t
x_diff = x[:, n:] - x[:, :-n]
msd_x = np.mean(x_diff**2)
y_diff = y[:, n:] - y[:, :-n]
msd_y = np.mean(y_diff**2)
z_diff = z[:, n:] - z[:, :-n]
msd_z = np.mean(z_diff**2)
msd.append([delta_t, msd_x, msd_y, msd_z, msd_x + msd_y + msd_z])
n += 1
msd = np.array(msd)
msd_df = pd.DataFrame(msd[:, 1:], index=msd[:, 0],
columns=["x", "y", "z", "total"])
msd_df.index.name = "temps"
return msd_df
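# Hedged usage sketch (not part of the original module): run from a directory
# containing the LAMMPS dump files, e.g.
#
#     from msd import msd
#     msd_df = msd(atom_type=3, first_frame=-1000, last_frame=-1)
#     print(msd_df.head())   # columns: x, y, z, total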
|
EtiCui/Msc-UdeS
|
dataAnalysis/msd.py
|
Python
|
mit
| 3,196
|
[
"LAMMPS"
] |
a971285f73a2fb626b1fa38ded466f5e60ebd48e4a72cdc33187b6a74ed290f3
|
#!/usr/bin/env python
# qm.py -- A Quine McCluskey Python implementation
#
# Copyright (c) 2006-2013 Thomas Pircher <tehpeh@gmx.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""An implementation of the Quine McCluskey algorithm.
This implementation of the Quine McCluskey algorithm has no inherent limits
(other than the calculation time) on the size of the inputs.
Also, in the limited tests of the author of this module, this implementation is
considerably faster than other public Python implementations for non-trivial
inputs.
Another unique feature of this implementation is the possibility to use the XOR
and XNOR operators, in addition to the normal AND operator, to minimise the
terms. This slows down the algorithm, but in some cases it can be a big win in
terms of complexity of the output.
"""
from __future__ import print_function
import math
class QuineMcCluskey:
"""The Quine McCluskey class.
The QuineMcCluskey class minimises boolean functions using the Quine
McCluskey algorithm.
    If the class was instantiated with use_xor set to True, then the
resulting boolean function may contain XOR and XNOR operators.
"""
__version__ = "0.1"
def __init__(self, use_xor = False):
"""The class constructor.
Kwargs:
use_xor (bool): if True, try to use XOR and XNOR operations to give
a more compact return.
"""
self.use_xor = use_xor # Whether or not to use XOR and XNOR operations.
self.n_bits = 0 # number of bits (i.e. self.n_bits == len(ones[i]) for every i).
def __num2str(self, i):
"""
Convert an integer to its bit-representation in a string.
Args:
i (int): the number to convert.
Returns:
The binary string representation of the parameter i.
"""
x = ['1' if i & (1 << k) else '0' for k in range(self.n_bits - 1, -1, -1)]
return "".join(x)
def simplify(self, ones, dc = []):
"""Simplify a list of terms.
Args:
ones (list of int): list of integers that describe when the output
function is '1', e.g. [1, 2, 6, 8, 15].
Kwargs:
dc (list of int): list of numbers for which we don't care if they
have one or zero in the output.
Returns:
see: simplify_los.
Example:
ones = [2, 6, 10, 14]
dc = []
            This will produce the output: ['--10']
This means x = b1 & ~b0, (bit1 AND NOT bit0)
Example:
ones = [1, 2, 5, 6, 9, 10, 13, 14]
dc = []
            This will produce the output: ['--^^'].
In other words, x = b1 ^ b0, (bit1 XOR bit0).
"""
terms = ones + dc
if len(terms) == 0:
return None
# Calculate the number of bits to use
# Needed internally by __num2str()
self.n_bits = int(math.ceil(math.log(max(terms) + 1, 2)))
# Generate the sets of ones and dontcares
ones = set(self.__num2str(i) for i in ones)
dc = set(self.__num2str(i) for i in dc)
return self.simplify_los(ones, dc)
def simplify_los(self, ones, dc = []):
"""The simplification algorithm for a list of string-encoded inputs.
Args:
ones (list of str): list of strings that describe when the output
function is '1', e.g. ['0001', '0010', '0110', '1000', '1111'].
Kwargs:
            dc (list of str): set of strings that define the don't care
combinations.
Returns:
Returns a set of strings which represent the reduced minterms. The
length of the strings is equal to the number of bits in the input.
Character 0 of the output string stands for the most significant
bit, Character n - 1 (n is the number of bits) stands for the least
significant bit.
The following characters are allowed in the return string:
'-' don't care: this bit can be either zero or one.
'1' the bit must be one.
'0' the bit must be zero.
'^' all bits with the caret are XOR-ed together.
'~' all bits with the tilde are XNOR-ed together.
Example:
ones = ['0010', '0110', '1010', '1110']
dc = []
            This will produce the output: ['--10'].
In other words, x = b1 & ~b0, (bit1 AND NOT bit0).
Example:
ones = ['0001', '0010', '0101', '0110', '1001', '1010' '1101', '1110']
dc = []
            This will produce the output: ['--^^'].
In other words, x = b1 ^ b0, (bit1 XOR bit0).
"""
        self.profile_cmp = 0 # number of term comparisons (for profiling)
        self.profile_xor = 0 # number of XOR reductions tried (for profiling)
        self.profile_xnor = 0 # number of XNOR reductions tried (for profiling)
        terms = set(ones) | set(dc)
if len(terms) == 0:
return None
# Calculate the number of bits to use
self.n_bits = max(len(i) for i in terms)
if self.n_bits != min(len(i) for i in terms):
return None
# First step of Quine-McCluskey method.
prime_implicants = self.__get_prime_implicants(terms)
# Remove essential terms.
essential_implicants = self.__get_essential_implicants(prime_implicants)
# Insert here the Quine McCluskey step 2: prime implicant chart.
# Insert here Petrick's Method.
return essential_implicants
def __reduce_simple_xor_terms(self, t1, t2):
"""Try to reduce two terms t1 and t2, by combining them as XOR terms.
Args:
t1 (str): a term.
t2 (str): a term.
Returns:
The reduced term or None if the terms cannot be reduced.
"""
difft10 = 0
difft20 = 0
ret = []
for (t1c, t2c) in zip(t1, t2):
if t1c == '^' or t2c == '^' or t1c == '~' or t2c == '~':
return None
elif t1c != t2c:
ret.append('^')
if t2c == '0':
difft10 += 1
else:
difft20 += 1
else:
ret.append(t1c)
if difft10 == 1 and difft20 == 1:
return "".join(ret)
return None
def __reduce_simple_xnor_terms(self, t1, t2):
"""Try to reduce two terms t1 and t2, by combining them as XNOR terms.
Args:
t1 (str): a term.
t2 (str): a term.
Returns:
The reduced term or None if the terms cannot be reduced.
"""
difft10 = 0
difft20 = 0
ret = []
for (t1c, t2c) in zip(t1, t2):
if t1c == '^' or t2c == '^' or t1c == '~' or t2c == '~':
return None
elif t1c != t2c:
ret.append('~')
if t1c == '0':
difft10 += 1
else:
difft20 += 1
else:
ret.append(t1c)
if (difft10 == 2 and difft20 == 0) or (difft10 == 0 and difft20 == 2):
return "".join(ret)
return None
def __get_prime_implicants(self, terms):
"""Simplify the set 'terms'.
Args:
terms (set of str): set of strings representing the minterms of
ones and dontcares.
Returns:
A list of prime implicants. These are the minterms that cannot be
reduced with step 1 of the Quine McCluskey method.
This is the very first step in the Quine McCluskey algorithm. This
generates all prime implicants, whether they are redundant or not.
"""
# Sort and remove duplicates.
n_groups = self.n_bits + 1
marked = set()
# Group terms into the list groups.
# groups is a list of length n_groups.
# Each element of groups is a set of terms with the same number
# of ones. In other words, each term contained in the set
# groups[i] contains exactly i ones.
groups = [set() for i in range(n_groups)]
for t in terms:
n_bits = t.count('1')
groups[n_bits].add(t)
if self.use_xor:
# Add 'simple' XOR and XNOR terms to the set of terms.
# Simple means the terms can be obtained by combining just two
# bits.
for gi, group in enumerate(groups):
for t1 in group:
for t2 in group:
t12 = self.__reduce_simple_xor_terms(t1, t2)
if t12 != None:
terms.add(t12)
if gi < n_groups - 2:
for t2 in groups[gi + 2]:
t12 = self.__reduce_simple_xnor_terms(t1, t2)
if t12 != None:
terms.add(t12)
done = False
while not done:
# Group terms into groups.
# groups is a list of length n_groups.
# Each element of groups is a set of terms with the same
# number of ones. In other words, each term contained in the
# set groups[i] contains exactly i ones.
groups = dict()
for t in terms:
n_ones = t.count('1')
n_xor = t.count('^')
n_xnor = t.count('~')
                # The algorithm cannot cope with mixed XORs and XNORs in
# one expression.
assert n_xor == 0 or n_xnor == 0
key = (n_ones, n_xor, n_xnor)
if key not in groups:
groups[key] = set()
groups[key].add(t)
terms = set() # The set of new created terms
used = set() # The set of used terms
# Find prime implicants
for key in groups:
key_next = (key[0]+1, key[1], key[2])
if key_next in groups:
group_next = groups[key_next]
for t1 in groups[key]:
# Optimisation:
# The Quine-McCluskey algorithm compares t1 with
# each element of the next group. (Normal approach)
# But in reality it is faster to construct all
# possible permutations of t1 by adding a '1' in
# opportune positions and check if this new term is
# contained in the set groups[key_next].
for i, c1 in enumerate(t1):
if c1 == '0':
self.profile_cmp += 1
t2 = t1[:i] + '1' + t1[i+1:]
if t2 in group_next:
t12 = t1[:i] + '-' + t1[i+1:]
used.add(t1)
used.add(t2)
terms.add(t12)
# Find XOR combinations
for key in [k for k in groups if k[1] > 0]:
key_complement = (key[0] + 1, key[2], key[1])
if key_complement in groups:
for t1 in groups[key]:
t1_complement = t1.replace('^', '~')
for i, c1 in enumerate(t1):
if c1 == '0':
self.profile_xor += 1
t2 = t1_complement[:i] + '1' + t1_complement[i+1:]
if t2 in groups[key_complement]:
t12 = t1[:i] + '^' + t1[i+1:]
used.add(t1)
terms.add(t12)
# Find XNOR combinations
for key in [k for k in groups if k[2] > 0]:
key_complement = (key[0] + 1, key[2], key[1])
if key_complement in groups:
for t1 in groups[key]:
t1_complement = t1.replace('~', '^')
for i, c1 in enumerate(t1):
if c1 == '0':
self.profile_xnor += 1
t2 = t1_complement[:i] + '1' + t1_complement[i+1:]
if t2 in groups[key_complement]:
t12 = t1[:i] + '~' + t1[i+1:]
used.add(t1)
terms.add(t12)
# Add the unused terms to the list of marked terms
for g in list(groups.values()):
                marked |= g - used
if len(used) == 0:
done = True
# Prepare the list of prime implicants
pi = marked
for g in list(groups.values()):
pi |= g
return pi
def __get_essential_implicants(self, terms):
"""Simplify the set 'terms'.
Args:
terms (set of str): set of strings representing the minterms of
ones and dontcares.
Returns:
A list of prime implicants. These are the minterms that cannot be
reduced with step 1 of the Quine McCluskey method.
This function is usually called after __get_prime_implicants and its
objective is to remove non-essential minterms.
In reality this function omits all terms that can be covered by at
least one other term in the list.
"""
# Create all permutations for each term in terms.
perms = {}
for t in terms:
perms[t] = set(p for p in self.permutations(t))
# Now group the remaining terms and see if any term can be covered
# by a combination of terms.
ei_range = set()
ei = set()
groups = dict()
for t in terms:
n = self.__get_term_rank(t, len(perms[t]))
if n not in groups:
groups[n] = set()
groups[n].add(t)
for t in sorted(list(groups.keys()), reverse=True):
for g in groups[t]:
if not perms[g] <= ei_range:
ei.add(g)
ei_range |= perms[g]
return ei
def __get_term_rank(self, term, term_range):
"""Calculate the "rank" of a term.
Args:
term (str): one single term in string format.
term_range (int): the rank of the class of term.
Returns:
The "rank" of the term.
The rank of a term is a positive number or zero. If a term has all
bits fixed '0's then its "rank" is 0. The more 'dontcares' and xor or
xnor it contains, the higher its rank.
        A dontcare weighs more than a xor, a xor weighs more than a xnor, a
        xnor weighs more than a 1, and a 1 weighs more than a 0.
        This means: the higher the rank of a term, the more desirable it is
        to include this term in the final result.
"""
n = 0
for t in term:
if t == "-":
n += 8
elif t == "^":
n += 4
elif t == "~":
n += 2
elif t == "1":
n += 1
return 4*term_range + n
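    # Hedged worked example (not part of the original module): for a
    # hypothetical term '-1^0' with term_range 4, n = 8 + 1 + 4 + 0 = 13,
    # so the rank is 4*4 + 13 = 29 -- the dontcare dominates, as the
    # docstring above describes.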
def permutations(self, value = ''):
"""Iterator to generate all possible values out of a string.
Args:
value (str): A string containing any of the above characters.
Returns:
The output strings contain only '0' and '1'.
Example:
from qm import QuineMcCluskey
qm = QuineMcCluskey()
for i in qm.permutations('1--^^'):
print(i)
The operation performed by this generator function can be seen as the
            inverse of binary minimisation methods such as Karnaugh maps, Quine
McCluskey or Espresso. It takes as input a minterm and generates all
possible maxterms from it. Inputs and outputs are strings.
Possible input characters:
'0': the bit at this position will always be zero.
'1': the bit at this position will always be one.
'-': don't care: this bit can be zero or one.
'^': all bits with the caret are XOR-ed together.
'~': all bits with the tilde are XNOR-ed together.
Algorithm description:
            This lovely piece of spaghetti code generates all possible
permutations of a given string describing logic operations.
This could be achieved by recursively running through all
possibilities, but a more linear approach has been preferred.
The basic idea of this algorithm is to consider all bit
positions from 0 upwards (direction = +1) until the last bit
position. When the last bit position has been reached, then the
generated string is yielded. At this point the algorithm works
its way backward (direction = -1) until it finds an operator
like '-', '^' or '~'. The bit at this position is then flipped
(generally from '0' to '1') and the direction flag again
inverted. This way the bit position pointer (i) runs forth and
back several times until all possible permutations have been
generated.
When the position pointer reaches position -1, all possible
combinations have been visited.
"""
n_bits = len(value)
n_xor = value.count('^') + value.count('~')
xor_value = 0
seen_xors = 0
res = ['0' for i in range(n_bits)]
i = 0
direction = +1
while i >= 0:
# binary constant
if value[i] == '0' or value[i] == '1':
res[i] = value[i]
# dontcare operator
elif value[i] == '-':
if direction == +1:
res[i] = '0'
elif res[i] == '0':
res[i] = '1'
direction = +1
# XOR operator
elif value[i] == '^':
seen_xors = seen_xors + direction
if direction == +1:
if seen_xors == n_xor and xor_value == 0:
res[i] = '1'
else:
res[i] = '0'
else:
if res[i] == '0' and seen_xors < n_xor - 1:
res[i] = '1'
direction = +1
seen_xors = seen_xors + 1
if res[i] == '1':
xor_value = xor_value ^ 1
# XNOR operator
elif value[i] == '~':
seen_xors = seen_xors + direction
if direction == +1:
if seen_xors == n_xor and xor_value == 1:
res[i] = '1'
else:
res[i] = '0'
else:
if res[i] == '0' and seen_xors < n_xor - 1:
res[i] = '1'
direction = +1
seen_xors = seen_xors + 1
if res[i] == '1':
xor_value = xor_value ^ 1
# unknown input
else:
res[i] = '#'
i = i + direction
if i == n_bits:
direction = -1
i = n_bits - 1
yield "".join(res)
|
alexgorbatchev/node-crc
|
test/pycrc/qm.py
|
Python
|
mit
| 20,761
|
[
"ESPResSo"
] |
2981c11d9a6bb9ff883d44da7a554799c5f1485aa83e75a021c32e0ca37fccce
|
import numpy as np;
import calconscious_lib as ccl;
"""
Since I don't have any real data, this test simply uses numpy's random
function to generate Gaussian-like signals to check the correctness of the
implementation.
Xt here is an 8*64 array which consists of 8 samples (X^t)
Xt_tau is an 8*64 array which consists of 8 samples (X^(t-tau))
I simply divide them into two parts, so
Mt here is a 4*64*2 array where each part has 4 samples (M^t)
Mt_tau is a 4*64*2 array where each part has 4 samples (M^(t-tau))
beta is a constant
I didn't perform gradient descent in this test, but I tested all the
functions, and they all work correctly.
This implementation is based on my own understanding,
so please point out any problems you find.
"""
mu, sigma = 0, 0.1;
Xt = np.array([np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64)]);
Xt_tau = np.array([np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64),]);
Mt=np.zeros((4,64,2));
Mt[:,:,0]=np.array([np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64)]);
Mt[:,:,1]=np.array([np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64)]);
Mt_tau=np.zeros((4,64,2));
Mt_tau[:,:,0]=np.array([np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64)]);
Mt_tau[:,:,1]=np.array([np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64),
np.random.normal(mu, sigma, 64)]);
beta=0.2;
print ccl.calIntegratedInformation(Xt, Xt_tau, Mt, Mt_tau, beta);
print ccl.calDIstarDbeta(Xt, Xt_tau, Mt, Mt_tau, beta);
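# Editorial sketch (not part of the original script): each 8x64 block above
# could equivalently be drawn in a single call with a shape tuple, e.g.
#     Xt = np.random.normal(mu, sigma, (8, 64))
# which samples the same distribution with the same shape.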
|
duguyue100/pycalconscious
|
calconscious_main.py
|
Python
|
gpl-3.0
| 2,704
|
[
"Gaussian"
] |
e13a4553db8be05564911687140af5826cc3c6e7f0fa1f9d5a4a5c1674f55e5e
|
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from functools import reduce
from docker.errors import APIError
from .config import ConfigurationError
from .config import get_service_name_from_net
from .const import DEFAULT_TIMEOUT
from .const import LABEL_ONE_OFF
from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
from .container import Container
from .legacy import check_for_legacy_containers
from .service import ContainerNet
from .service import ConvergenceStrategy
from .service import Net
from .service import parse_volume_from_spec
from .service import Service
from .service import ServiceNet
from .service import VolumeFromSpec
from .utils import parallel_execute
log = logging.getLogger(__name__)
def sort_service_dicts(services):
# Topological sort (Cormen/Tarjan algorithm).
unmarked = services[:]
temporary_marked = set()
sorted_services = []
def get_service_names(links):
return [link.split(':')[0] for link in links]
def get_service_names_from_volumes_from(volumes_from):
return [
parse_volume_from_spec(volume_from).source
for volume_from in volumes_from
]
def get_service_dependents(service_dict, services):
name = service_dict['name']
return [
service for service in services
if (name in get_service_names(service.get('links', [])) or
name in get_service_names_from_volumes_from(service.get('volumes_from', [])) or
name == get_service_name_from_net(service.get('net')))
]
def visit(n):
if n['name'] in temporary_marked:
if n['name'] in get_service_names(n.get('links', [])):
raise DependencyError('A service can not link to itself: %s' % n['name'])
if n['name'] in n.get('volumes_from', []):
raise DependencyError('A service can not mount itself as volume: %s' % n['name'])
else:
raise DependencyError('Circular import between %s' % ' and '.join(temporary_marked))
if n in unmarked:
temporary_marked.add(n['name'])
for m in get_service_dependents(n, services):
visit(m)
temporary_marked.remove(n['name'])
unmarked.remove(n)
sorted_services.insert(0, n)
while unmarked:
visit(unmarked[-1])
return sorted_services
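# Hedged illustration (hypothetical service dicts, not in the original
# source): a service sorts after anything it links to, so
#     sort_service_dicts([{'name': 'web', 'links': ['db:db']}, {'name': 'db'}])
# returns [{'name': 'db'}, {'name': 'web', 'links': ['db:db']}].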
class Project(object):
"""
A collection of services.
"""
def __init__(self, name, services, client):
self.name = name
self.services = services
self.client = client
def labels(self, one_off=False):
return [
'{0}={1}'.format(LABEL_PROJECT, self.name),
'{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False"),
]
@classmethod
def from_dicts(cls, name, service_dicts, client):
"""
Construct a ServiceCollection from a list of dicts representing services.
"""
project = cls(name, [], client)
for service_dict in sort_service_dicts(service_dicts):
links = project.get_links(service_dict)
volumes_from = project.get_volumes_from(service_dict)
net = project.get_net(service_dict)
project.services.append(
Service(
client=client,
project=name,
links=links,
net=net,
volumes_from=volumes_from,
**service_dict))
return project
@property
def service_names(self):
return [service.name for service in self.services]
def get_service(self, name):
"""
Retrieve a service by name. Raises NoSuchService
if the named service does not exist.
"""
for service in self.services:
if service.name == name:
return service
raise NoSuchService(name)
def validate_service_names(self, service_names):
"""
Validate that the given list of service names only contains valid
services. Raises NoSuchService if one of the names is invalid.
"""
valid_names = self.service_names
for name in service_names:
if name not in valid_names:
raise NoSuchService(name)
def get_services(self, service_names=None, include_deps=False):
"""
Returns a list of this project's services filtered
by the provided list of names, or all services if service_names is None
or [].
If include_deps is specified, returns a list including the dependencies for
service_names, in order of dependency.
Preserves the original order of self.services where possible,
reordering as needed to resolve dependencies.
Raises NoSuchService if any of the named services do not exist.
"""
if service_names is None or len(service_names) == 0:
return self.get_services(
service_names=self.service_names,
include_deps=include_deps
)
else:
unsorted = [self.get_service(name) for name in service_names]
services = [s for s in self.services if s in unsorted]
if include_deps:
services = reduce(self._inject_deps, services, [])
            uniques = []
            for s in services:
                if s not in uniques:
                    uniques.append(s)
            return uniques
def get_links(self, service_dict):
links = []
if 'links' in service_dict:
for link in service_dict.get('links', []):
if ':' in link:
service_name, link_name = link.split(':', 1)
else:
service_name, link_name = link, None
try:
links.append((self.get_service(service_name), link_name))
except NoSuchService:
raise ConfigurationError(
'Service "%s" has a link to service "%s" which does not '
'exist.' % (service_dict['name'], service_name))
del service_dict['links']
return links
def get_volumes_from(self, service_dict):
volumes_from = []
if 'volumes_from' in service_dict:
for volume_from_config in service_dict.get('volumes_from', []):
volume_from_spec = parse_volume_from_spec(volume_from_config)
# Get service
try:
service_name = self.get_service(volume_from_spec.source)
volume_from_spec = VolumeFromSpec(service_name, volume_from_spec.mode)
except NoSuchService:
try:
container_name = Container.from_id(self.client, volume_from_spec.source)
volume_from_spec = VolumeFromSpec(container_name, volume_from_spec.mode)
except APIError:
raise ConfigurationError(
'Service "%s" mounts volumes from "%s", which is '
'not the name of a service or container.' % (
service_dict['name'],
volume_from_spec.source))
volumes_from.append(volume_from_spec)
del service_dict['volumes_from']
return volumes_from
def get_net(self, service_dict):
net = service_dict.pop('net', None)
if not net:
return Net(None)
net_name = get_service_name_from_net(net)
if not net_name:
return Net(net)
try:
return ServiceNet(self.get_service(net_name))
except NoSuchService:
pass
try:
return ContainerNet(Container.from_id(self.client, net_name))
except APIError:
raise ConfigurationError(
'Service "%s" is trying to use the network of "%s", '
'which is not the name of a service or container.' % (
service_dict['name'],
net_name))
def start(self, service_names=None, **options):
for service in self.get_services(service_names):
service.start(**options)
def stop(self, service_names=None, **options):
parallel_execute(
objects=self.containers(service_names),
obj_callable=lambda c: c.stop(**options),
msg_index=lambda c: c.name,
msg="Stopping"
)
def pause(self, service_names=None, **options):
for service in reversed(self.get_services(service_names)):
service.pause(**options)
def unpause(self, service_names=None, **options):
for service in self.get_services(service_names):
service.unpause(**options)
def kill(self, service_names=None, **options):
parallel_execute(
objects=self.containers(service_names),
obj_callable=lambda c: c.kill(**options),
msg_index=lambda c: c.name,
msg="Killing"
)
def remove_stopped(self, service_names=None, **options):
all_containers = self.containers(service_names, stopped=True)
stopped_containers = [c for c in all_containers if not c.is_running]
parallel_execute(
objects=stopped_containers,
obj_callable=lambda c: c.remove(**options),
msg_index=lambda c: c.name,
msg="Removing"
)
def restart(self, service_names=None, **options):
for service in self.get_services(service_names):
service.restart(**options)
def build(self, service_names=None, no_cache=False, pull=False):
for service in self.get_services(service_names):
if service.can_be_built():
service.build(no_cache, pull)
else:
log.info('%s uses an image, skipping' % service.name)
def up(self,
service_names=None,
start_deps=True,
strategy=ConvergenceStrategy.changed,
do_build=True,
timeout=DEFAULT_TIMEOUT):
services = self.get_services(service_names, include_deps=start_deps)
for service in services:
service.remove_duplicate_containers()
plans = self._get_convergence_plans(services, strategy)
return [
container
for service in services
for container in service.execute_convergence_plan(
plans[service.name],
do_build=do_build,
timeout=timeout
)
]
def _get_convergence_plans(self, services, strategy):
plans = {}
for service in services:
updated_dependencies = [
name
for name in service.get_dependency_names()
if name in plans
and plans[name].action == 'recreate'
]
if updated_dependencies and strategy.allows_recreate:
log.debug('%s has upstream changes (%s)',
service.name,
", ".join(updated_dependencies))
plan = service.convergence_plan(ConvergenceStrategy.always)
else:
plan = service.convergence_plan(strategy)
plans[service.name] = plan
return plans
def pull(self, service_names=None, ignore_pull_failures=False):
for service in self.get_services(service_names, include_deps=True):
service.pull(ignore_pull_failures)
def containers(self, service_names=None, stopped=False, one_off=False):
if service_names:
self.validate_service_names(service_names)
else:
service_names = self.service_names
containers = list(filter(None, [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
filters={'label': self.labels(one_off=one_off)})]))
def matches_service_names(container):
return container.labels.get(LABEL_SERVICE) in service_names
if not containers:
check_for_legacy_containers(
self.client,
self.name,
self.service_names,
)
return [c for c in containers if matches_service_names(c)]
def _inject_deps(self, acc, service):
dep_names = service.get_dependency_names()
if len(dep_names) > 0:
dep_services = self.get_services(
service_names=list(set(dep_names)),
include_deps=True
)
else:
dep_services = []
dep_services.append(service)
return acc + dep_services
class NoSuchService(Exception):
def __init__(self, name):
self.name = name
self.msg = "No such service: %s" % self.name
def __str__(self):
return self.msg
class DependencyError(ConfigurationError):
pass
|
charleswhchan/compose
|
compose/project.py
|
Python
|
apache-2.0
| 13,153
|
[
"VisIt"
] |
8a3bcb79af4e1a3c919e9305c6823ca15ebc2dc6ec097ffa29bae72f890a197d
|
# -*- coding: utf-8 -*-
# Akvo RSR is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from ..fields import ValidXMLCharField
from akvo.codelists.models import DocumentCategory, FileFormat, Language
from akvo.codelists.store.default_codelists import DOCUMENT_CATEGORY, FILE_FORMAT, LANGUAGE
from akvo.utils import codelist_choices, codelist_value
def document_path(self, filename):
return 'db/project/%s/document/%s' % (str(self.project.pk), filename)
class ProjectDocument(models.Model):
project = models.ForeignKey('Project', on_delete=models.CASCADE, related_name='documents', verbose_name=_('project'))
url = models.URLField(
_('document url'), blank=True,
help_text=_('Enter the online location of your document. The URL should start with '
'\'http://\' or \'https://\'.')
)
document = models.FileField(
_('document'), blank=True, upload_to=document_path,
help_text=_('You can upload a document to your project. To upload multiple documents, '
'press the \'Add another document\' link.<br>'
'These documents will be stored on the RSR server and will be '
'publicly available for users to download and view to gain further insight in '
'the project activities.')
)
format = ValidXMLCharField(
_('document format'), max_length=85, blank=True, choices=codelist_choices(FILE_FORMAT),
help_text=_('This provides the code for the Internet Media Type ("MIME type") of the '
'document, and includes pdf, msword, rtf, xml, csv, etc. For a list of '
'commonly used MIME types, visit this link: '
'<a href="http://www.sitepoint.com/web-foundations/mime-types-summary-list/" '
'target="_blank">http://www.sitepoint.com/web-foundations/'
'mime-types-summary-list/</a>.')
)
title = ValidXMLCharField(
_('document title'), max_length=100, blank=True, default=_('Untitled document'),
help_text=_('Enter the title of your document.')
)
title_language = ValidXMLCharField(
_('title language'), max_length=2, blank=True, choices=codelist_choices(LANGUAGE),
help_text=_('Select the language of the document title.')
)
language = ValidXMLCharField(
_('document language'), max_length=2, blank=True, choices=codelist_choices(LANGUAGE),
help_text=_('Select the language that the document is written in.')
)
document_date = models.DateField(
_('document date'), null=True, blank=True,
help_text=_('Enter the date (DD/MM/YYYY) to be used for the production or publishing date '
'of the relevant document to identify the specific document version.')
)
def __str__(self):
return self.show_link()
def clean(self):
        # Check that the user has at least uploaded a document or provided a URL.
if not (self.url or self.document or self.title):
raise ValidationError(
_('It is required to have at least a title, an uploaded document or indicate an '
'URL.')
)
        # Strip non-ASCII characters from the uploaded file name
if self.document:
self.document.name = self.document.name.encode('ascii', 'ignore')
def document_show_link(self):
if self.document:
return '<a href="{0}">{1}</a>'.format(self.document.url, self.document.url)
return ''
def show_link(self):
title = self.title if self.title else '%s' % _('Untitled document')
if self.url:
return '<a href="{0}">{1}</a>'.format(self.url, title)
elif self.document:
return '<a href="{0}">{1}</a>'.format(self.document.url, title)
else:
return title
def iati_format(self):
return codelist_value(FileFormat, self, 'format')
def iati_format_unicode(self):
return str(self.iati_format())
def iati_language(self):
return codelist_value(Language, self, 'language')
def iati_language_unicode(self):
return str(self.iati_language())
def iati_title_language(self):
return codelist_value(Language, self, 'title_language')
def iati_title_language_unicode(self):
return str(self.iati_title_language())
class Meta:
app_label = 'rsr'
verbose_name = _('project document')
verbose_name_plural = _('project documents')
ordering = ['-id', ]
class ProjectDocumentCategory(models.Model):
document = models.ForeignKey(ProjectDocument, on_delete=models.CASCADE, related_name='categories',
verbose_name=_('document'))
category = ValidXMLCharField(_('document category'), max_length=3, blank=True,
choices=codelist_choices(DOCUMENT_CATEGORY),
help_text=_('The description of the type of content contained '
'within the document.'))
class Meta:
app_label = 'rsr'
verbose_name = _('project document category')
verbose_name_plural = _('project document categories')
ordering = ['-id', ]
def __str__(self):
if self.category:
try:
return self.iati_category().name
except AttributeError:
return self.iati_category()
else:
return '%s' % _('No category specified')
def iati_category(self):
return codelist_value(DocumentCategory, self, 'category')
def iati_category_unicode(self):
return str(self.iati_category())
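# Usage sketch (illustrative only, not part of the model): show_link() prefers
# an external URL over an uploaded file, and falls back to the bare title.
#
# >>> doc = ProjectDocument(title='Annual report', url='http://example.org/report.pdf')
# >>> doc.show_link()
# '<a href="http://example.org/report.pdf">Annual report</a>'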
|
akvo/akvo-rsr
|
akvo/rsr/models/project_document.py
|
Python
|
agpl-3.0
| 6,058
|
[
"VisIt"
] |
f1369704702dc1d824b6f56ebd98ff28671627905f9daea380884fd34e70583e
|
import extractor as ex
from sklearn.svm import LinearSVC
from sklearn import linear_model
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.qda import QDA
from sklearn.lda import LDA
from sklearn.svm import SVC
"""
L1-based Feature Selection
"""
def extract_linear_features_indexes(features, labels):
    """
    Perform linear feature selection via an L1-penalised LinearSVC.
    """
    clf = LinearSVC(C=0.01, penalty="l1", dual=False)
    clf.fit(features, labels)
    return [i for i, e in enumerate(clf.coef_[0]) if abs(e) > 1e-6]
def extract_lasso_features_indexes(features, labels):
"""
Perform Lasso feature selection.
"""
    clf = linear_model.Lasso(alpha=0.022, fit_intercept=False,
                             max_iter=2000, normalize=False, positive=False,
                             tol=0.001, warm_start=True)
    clf.fit(features, labels)
    return [i for i, e in enumerate(clf.coef_) if abs(e) > 1e-6]
def extract_features(included_index, features, labels):
    """
    Return only the features that should be included in the classification
    process.
    """
    return features[:, included_index], labels
def scaled_features(features, labels):
    max_features = features.max(axis=0)
    # Guard against division by zero: all-zero columns are divided by one.
    max_features = (max_features + (max_features == 0))
    scaled_features = features / max_features
    return scaled_features, labels
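def _scaling_example():
    """Illustrative sketch (never called by main): column-wise max scaling.

    The ``(max_features == 0)`` guard above divides all-zero columns by one,
    so they stay zero instead of producing NaNs.
    """
    import numpy as np
    X = np.array([[2.0, 0.0], [4.0, 0.0]])
    return scaled_features(X, None)  # -> (array([[0.5, 0.], [1., 0.]]), None)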
def main():
input_filename = 'data/input00.txt'
output_filename = 'data/output00.txt'
    (train_features, train_labels, test_features, test_labels) = ex.extract(input_filename, output_filename)
classifiers = {
"NB Multinomial" : MultinomialNB(),
"NB Gaussian": GaussianNB(),
"Logistic Regression" : LogisticRegression(C=1e5, tol=0.001, fit_intercept=True),
"Decision Tree" : DecisionTreeClassifier(min_samples_split=1, random_state=0),
"KNN" : KNeighborsClassifier(n_neighbors=3),
"SVM" : SVC(gamma=2, C=1),
"LDA" : LDA(),
"QDA" : QDA(reg_param=0.5),
"Random Forest" : RandomForestClassifier(n_estimators=200),
"AdaBoost" : AdaBoostClassifier(n_estimators=200),
}
print "-"*80, "\n", "Raw Dataset", "\n", "-"*80
for name, classifier in classifiers.iteritems():
clf = classifier.fit(train_features,train_labels)
print name, clf.score(test_features,test_labels)
print "-"*80, "\n", "Scaled Feature Dataset", "\n", "-"*80
for name, classifier in classifiers.iteritems():
(new_features,new_lables) = scaled_features(train_features, train_labels)
clf = classifier.fit(new_features,new_lables)
(new_test_features,new_test_lables) = scaled_features(train_features, train_labels)
print name, clf.score(new_test_features,new_test_lables)
print "-"*80, "\n", "Lasso Feature Selection", "\n", "-"*80
for name, classifier in classifiers.iteritems():
(new_features,new_lables) = extract_features(extract_lasso_features_indexes(train_features, train_labels),train_features, train_labels)
clf = classifier.fit(new_features,new_lables)
(new_test_features,new_test_lables) = extract_features(extract_lasso_features_indexes(train_features, train_labels),test_features,test_labels)
print name, clf.score(new_test_features,new_test_lables)
print "-"*80, "\n", "Linear Feature Selection", "\n", "-"*80
for name, classifier in classifiers.iteritems():
(new_features,new_lables) = extract_features(extract_linear_features_indexes(train_features, train_labels),train_features, train_labels)
clf = classifier.fit(new_features,new_lables)
(new_test_features,new_test_lables) = extract_features(extract_linear_features_indexes(train_features, train_labels),test_features,test_labels)
print name, clf.score(new_test_features,new_test_lables)
if __name__ == '__main__':
main()
|
krishnasumanthm/Quora_Answer_Classifier
|
execute.py
|
Python
|
mit
| 4,202
|
[
"Gaussian"
] |
58df2eb8bf375c99dfdb4046df3e7fb531100e90e501a4aefe38f98ab90d75a1
|
import numpy
from tkp.utility import nice_format
from scipy.stats import norm
from sqlalchemy.sql.expression import desc
from tkp.db.model import Image
from tkp.db.quality import reject_reasons
def rms_invalid(rms, noise, low_bound=1, high_bound=50):
"""
Is the RMS value of an image outside the plausible range?
:param rms: RMS value of an image, can be computed with
tkp.quality.statistics.rms
:param noise: Theoretical noise level of instrument, can be calculated with
tkp.lofar.noise.noise_level
:param low_bound: multiplied with noise to define lower threshold
:param high_bound: multiplied with noise to define upper threshold
    :returns: False if the rms is plausible, otherwise a string describing the problem
"""
if (rms < noise * low_bound) or (rms > noise * high_bound):
ratio = rms / noise
return "rms value (%s) is %s times theoretical noise (%s)" % \
(nice_format(rms), nice_format(ratio), nice_format(noise))
else:
return False
def rms(data):
    """Returns the RMS of the data about the median.
    Note: ``data`` is modified in place.
    Args:
        data: a numpy array
    """
    data -= numpy.median(data)
    return numpy.sqrt(numpy.power(data, 2).sum()/len(data))
def clip(data, sigma=3):
"""Remove all values above a threshold from the array.
Uses iterative clipping at sigma value until nothing more is getting clipped.
Args:
data: a numpy array
"""
raveled = data.ravel()
median = numpy.median(raveled)
std = numpy.std(raveled)
newdata = raveled[numpy.abs(raveled-median) <= sigma*std]
if len(newdata) and len(newdata) != len(raveled):
return clip(newdata, sigma)
else:
return newdata
def subregion(data, f=4):
    """Returns the inner region of an image, according to f.
    Resulting area is 4/(f*f) of the original.
    Args:
        data: a numpy array
    """
    x, y = data.shape
    return data[(x // 2 - x // f):(x // 2 + x // f), (y // 2 - y // f):(y // 2 + y // f)]
def rms_with_clipped_subregion(data, rms_est_sigma=3, rms_est_fraction=4):
"""
RMS for quality-control.
Root mean square value calculated from central region of an image.
We sigma-clip the input-data in an attempt to exclude source-pixels
and keep only background-pixels.
Args:
data: A numpy array
rms_est_sigma: sigma value used for clipping
rms_est_fraction: determines size of subsection, result will be
1/fth of the image size where f=rms_est_fraction
    Returns the rms value of an iteratively sigma-clipped subsection of an image
"""
return rms(clip(subregion(data, rms_est_fraction), rms_est_sigma))
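def _rms_example():
    """Hedged example (not part of tkp): background rms on synthetic noise.

    For pure Gaussian noise the clipped-subregion estimate should be close to
    the true standard deviation used to generate the data.
    """
    noise = numpy.random.normal(0.0, 2.0, size=(128, 128))
    return rms_with_clipped_subregion(noise)  # approximately 2.0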
def reject_basic_rms(image_id, session, est_sigma=4, rms_max=100., rms_min=0.0):
"""Check if the RMS value of an image lies within a range predetermined
at the start of a pipeline run.
args:
image_id (int): database ID of the image we want to check
session (sqlalchemy.orm.session.Session): the database session
est_sigma (float): sigma multiplication factor
rms_max (float): global maximum rms for image quality check
rms_min (float): global minimum rms for image quality check
returns:
        None if not rejected, otherwise a (rejectreason, comment) tuple
"""
image = session.query(Image).filter(Image.id == image_id).one()
if not rms_min < image.rms_qc < rms_max:
return reject_reasons['rms'],\
"RMS value not within {} and {}".format(rms_min, rms_max)
def reject_historical_rms(image_id, session, history=100, est_sigma=4, rms_max=100., rms_min=0.0, rej_sigma=3.0):
"""
Check if the RMS value of an image lies within a range defined
by a gaussian fit on the histogram calculated from the last x RMS
values in this subband. Upper and lower bound are then controlled
by est_sigma multiplied with the sigma of the gaussian.
args:
image_id (int): database ID of the image we want to check
session (sqlalchemy.orm.session.Session): the database session
history (int): the number of timestamps we want to use for histogram
est_sigma (float): sigma multiplication factor
rms_max (float): global maximum rms for image quality check
rms_min (float): global minimum rms for image quality check
returns:
        None if not rejected, otherwise a (rejectreason, comment) tuple
"""
image = session.query(Image).filter(Image.id == image_id).one()
rmss = session.query(Image.rms_qc).filter(
(Image.band == image.band)).order_by(desc(Image.taustart_ts)).limit(
history).all()
if len(rmss) < history:
return False
mu, sigma = norm.fit(rmss)
t_low = mu - sigma * rej_sigma
t_high = mu + sigma * rej_sigma
if not rms_min < image.rms_qc < rms_max:
return reject_reasons['rms'],\
"RMS value not within {} and {}".format(rms_min, rms_max)
    if not t_low < image.rms_qc < t_high:
return reject_reasons['rms'],\
"RMS value not within {} and {}".format(t_low, t_high)
|
transientskp/tkp
|
tkp/quality/rms.py
|
Python
|
bsd-2-clause
| 5,118
|
[
"Gaussian"
] |
59e5c882cfbb8cf6b607bb76520c3de76c0ce30e6f5c1b0621db998e0cfed413
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Brian Coca <brian.coca+dev@gmail.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: getent
short_description: a wrapper to the unix getent utility
description:
    - Runs getent against one of its various databases and returns information into
the host's facts, in a getent_<database> prefixed variable
version_added: "1.8"
options:
database:
required: True
description:
- the name of a getent database supported by the target system (passwd, group,
hosts, etc).
key:
required: False
default: ''
description:
- key from which to return values from the specified database, otherwise the
full contents are returned.
split:
required: False
default: None
description:
- "character used to split the database values into lists/arrays such as ':' or '\t', otherwise it will try to pick one depending on the database"
fail_key:
required: False
default: True
description:
        - If a supplied key is missing, this will make the task fail when True.
notes:
- "Not all databases support enumeration, check system documentation for details"
requirements: [ ]
author: "Brian Coca (@bcoca)"
'''
EXAMPLES = '''
# get root user info
- getent:
database: passwd
key: root
- debug:
var: getent_passwd
# get all groups
- getent:
database: group
split: ':'
- debug:
var: getent_group
# get all hosts, split by tab
- getent:
database: hosts
- debug:
var: getent_hosts
# get http service info, no error if missing
- getent:
database: services
key: http
fail_key: False
- debug:
var: getent_services
# get user password hash (requires sudo/root)
- getent:
database: shadow
key: www-data
split: ':'
- debug:
var: getent_shadow
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def main():
module = AnsibleModule(
argument_spec = dict(
database = dict(required=True),
key = dict(required=False, default=None),
split = dict(required=False, default=None),
fail_key = dict(required=False, type='bool', default=True),
),
supports_check_mode = True,
)
colon = [ 'passwd', 'shadow', 'group', 'gshadow' ]
database = module.params['database']
key = module.params.get('key')
split = module.params.get('split')
fail_key = module.params.get('fail_key')
getent_bin = module.get_bin_path('getent', True)
if key is not None:
cmd = [ getent_bin, database, key ]
else:
cmd = [ getent_bin, database ]
if split is None and database in colon:
split = ':'
try:
rc, out, err = module.run_command(cmd)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
msg = "Unexpected failure!"
dbtree = 'getent_%s' % database
results = { dbtree: {} }
if rc == 0:
for line in out.splitlines():
record = line.split(split)
results[dbtree][record[0]] = record[1:]
module.exit_json(ansible_facts=results)
elif rc == 1:
msg = "Missing arguments, or database unknown."
elif rc == 2:
msg = "One or more supplied key could not be found in the database."
if not fail_key:
results[dbtree][key] = None
module.exit_json(ansible_facts=results, msg=msg)
elif rc == 3:
msg = "Enumeration not supported on this database."
module.fail_json(msg=msg)
if __name__ == '__main__':
main()
|
tszym/ansible
|
lib/ansible/modules/system/getent.py
|
Python
|
gpl-3.0
| 4,071
|
[
"Brian"
] |
1a723a9830b213d8a76ec6f8f036574ccb36de858c96ef491035c928a5fecdb9
|
import argparse
import json
import sys
import numpy as np
import vtk
#### import the simple module from the paraview
import paraview.simple as pv
description = "ParaView python script to generate 3D event displays from 3D spacepoint data created with pixy_roimux."
parser = argparse.ArgumentParser(description=description)
parser.add_argument("runParamsFile")
parser.add_argument("hitsFile", help="CSV file containing the 3D hits.")
parser.add_argument("-p", "--pcaFile", help="CSV file containing the PCA data. If not specified, no PCA line is drawn.")
parser.add_argument("-o", "--plotFile", help="Save plot to file. If not specified, the interactive display is started.")
parser.add_argument("-c", "--colourColumn", help="Specify the colour column. Q for charge and A for ambiguities (default: %(default)s).", default="Q")
parser.add_argument("-l", "--logo", action="store_true", help="Show logo.")
parser.add_argument("-d", "--detail", action="store_true", help="Enable detailed colouring.")
args = parser.parse_args()
if args.plotFile:
if args.plotFile[-6:] == ".webgl":
createPlot = "view"
elif args.plotFile[-4:] == ".png":
createPlot = "screenshot"
else:
print("ERROR: Unrecognised output file type: " + args.plotFile + "!")
sys.exit(1)
else:
createPlot = None
with open(args.runParamsFile, "r") as runParamsFile:
runParams = json.load(runParamsFile)
tpcLength = runParams["driftLength"]
tpcRadius = runParams["tpcRadius"]
pixelPitch = runParams["pixelPitch"]
data = np.loadtxt(args.hitsFile, delimiter=',', skiprows=1)
chargeData = np.transpose(data)[3]
chargeScale = 1. / (np.mean(chargeData) + np.std(chargeData))
scaleFactor = 1. * pixelPitch * chargeScale
ambData = np.transpose(data)[4]
maxAmb = np.max(ambData)
#### disable automatic camera reset on 'Show'
pv._DisableFirstRenderCameraReset()
# create a new 'CSV Reader'
hitsCsv = pv.CSVReader(FileName=[args.hitsFile])
# create a new 'Table To Points'
tableToPoints1 = pv.TableToPoints(Input=hitsCsv)
tableToPoints1.XColumn = 'X'
tableToPoints1.YColumn = 'Y'
tableToPoints1.ZColumn = 'Z'
# find view
renderView1 = pv.FindViewOrCreate('RenderView1', viewtype='RenderView')
# set active view
pv.SetActiveView(renderView1)
# create a new 'Glyph'
# sphere
glyph1 = pv.Glyph(Input=tableToPoints1, GlyphType='Sphere')
glyph1.Scalars = ['POINTS', 'Q']
glyph1.ScaleMode = 'scalar'
glyph1.ScaleFactor = scaleFactor
glyph1.GlyphMode = 'All Points'
#glyph1.GlyphType.ThetaResolution = 8
#glyph1.GlyphType.PhiResolution = 8
# get color transfer function/color map for args.colourColumn
cLUT = pv.GetColorTransferFunction(args.colourColumn)
nc = vtk.vtkNamedColors()
rgb = [0. for i in range(3)]
if args.colourColumn == 'Q':
cLUT.ApplyPreset("Plasma (matplotlib)")
else:
nc.GetColorRGB("Magenta", rgb)
rgbPoints = [-1.] + rgb # Rejected Hit
nc.GetColorRGB("Lime", rgb)
rgbPoints += [0.] + rgb # Unambiguous Hit
if args.detail:
nc.GetColorRGB("Green", rgb)
else:
nc.GetColorRGB("Lime", rgb)
rgbPoints += [1.] + rgb # Accepted Ambiguity
nc.GetColorRGB("Maroon", rgb)
rgbPoints += [2.] + rgb # Rejected Ambiguity
if args.detail and maxAmb > 2:
nc.GetColorRGB("Black", rgb)
rgbPoints += [float(maxAmb)] + rgb # Rejected Ambiguity
cLUT.RGBPoints = rgbPoints
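# Note (illustrative): RGBPoints is a flat list of repeating
# [value, red, green, blue] groups, so the list built above maps e.g. the
# ambiguity value -1 (rejected hits) to magenta and 0 (unambiguous hits) to
# lime in the colour transfer function.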
# show data in view
glyph1Display = pv.Show(glyph1, renderView1)
glyph1Display.ColorArrayName = ['POINTS', args.colourColumn]
glyph1Display.LookupTable = cLUT
if args.pcaFile:
glyph1Display.Opacity = .1
# show color bar/color legend
glyph1Display.SetScalarBarVisibility(renderView1, True)
# set active source
pv.SetActiveSource(None)
cylinder1 = pv.Cylinder()
# Properties modified on cylinder1
cylinder1.Resolution = 60
cylinder1.Height = tpcLength
cylinder1.Radius = tpcRadius
# show data in view
cylinder1Display = pv.Show(cylinder1, renderView1)
cylinder1Display.Orientation = [90.0, 0.0, 0.0]
cylinder1Display.Opacity = 0.05
nc.GetColorRGB("White", rgb)
cylinder1Display.DiffuseColor = rgb
if args.pcaFile:
pca = np.loadtxt(args.pcaFile, delimiter=",")
avePos = pca[0]
direction = pca[1]
direction /= np.linalg.norm(direction)
relOffset = avePos[2] / direction[2]
cylPos = avePos - relOffset * direction
cylinder2 = pv.Cylinder()
cylinder2.Resolution = 60
cylinder2.Height = tpcLength
cylinder2.Radius = pixelPitch / 5.
cylinder2Display = pv.Show(cylinder2, renderView1)
if args.colourColumn == 'Q':
nc.GetColorRGB("Lime", rgb)
else:
nc.GetColorRGB("Blue", rgb)
cylinder2Display.DiffuseColor = rgb
cylinder2Display.Orientation = [np.rad2deg(np.arcsin(direction[2])),
0.,
(np.rad2deg(np.arctan2(direction[1], direction[0])) - 90.)]
cylinder2Display.Position = cylPos
if args.logo:
a3DText1 = pv.a3DText()
a3DText1.Text = "(C) 2018 AEC, LHEP, University of Bern, Switzerland"
a3DText1Display = pv.Show(a3DText1, renderView1)
nc.GetColorRGB("Red", rgb)
a3DText1Display.DiffuseColor = rgb
a3DText1Display.Orientation = [0., 90., 180.]
a3DText1Display.Position = [0., -10., 15.]
nc.GetColorRGB("Grey", rgb)
renderView1.Background = rgb
renderView1.Update()
viewAngle = 10.
renderView1.CameraViewAngle = viewAngle
renderView1.CameraPosition = [(-1.1 * tpcLength / (2. * np.tan(np.deg2rad(viewAngle / 2.)))), 0., 0.]
renderView1.CameraFocalPoint = [0., 0., 0.]
renderView1.CameraViewUp = [0.0, 0.0, -1.0]
renderView1.ViewSize = [800, 1000]
if createPlot == "view":
pv.ExportView(args.plotFile, view=renderView1)
elif createPlot == "screenshot":
pv.SaveScreenshot(args.plotFile, magnification=2, quality=100, view=renderView1)
else:
pv.Interact(view=renderView1)
|
70rc/pixy_roimux
|
display.py
|
Python
|
gpl-3.0
| 5,824
|
[
"ParaView",
"VTK"
] |
53c5dc6eb493c5754266f2250382b272f92b55a5d0b2e44e28b8a55ddb6b3099
|
# author: brian dillmann
# for rscs
from context import DeviceManager
import unittest
import RPi.GPIO as GPIO
class device_manager_test(unittest.TestCase):
def setUp(self):
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
def test_not_found_errors(self):
m = DeviceManager()
with self.assertRaises(KeyError):
m.read('blahdevice')
with self.assertRaises(KeyError):
m.turnOn('randomdevice')
with self.assertRaises(KeyError):
m.turnOff('randomDevice')
def test_input_exists(self):
m = DeviceManager()
m.addSimpleInput('device name', 18)
with self.assertRaises(KeyError):
m.addSimpleInput('device name', 18)
with self.assertRaises(KeyError):
m.addTimer('device name')
with self.assertRaises(KeyError):
m.addAnalogInput('device name', 18)
def test_output_exists(self):
m = DeviceManager()
m.addOutput('device name', 2)
with self.assertRaises(KeyError):
m.addOutput('device name', 2)
def test_simple_input(self):
m = DeviceManager()
m.addSimpleInput('device', 18)
x = m.read('device')
m.addSimpleInput('device 2', 18, True)
y = m.read('device 2')
self.assertNotEqual(x, y)
def test_analog_input(self):
m = DeviceManager()
m.addAnalogInput('analog input', 18)
x = m.read('analog input')
self.assertNotEqual(x, 0)
def test_timer(self):
m = DeviceManager()
m.addTimer('timer', 's')
m.addTimer('timer 2', 'ms')
import time
time.sleep(1)
x = m.read('timer')
y = m.read('timer 2')
self.assertGreater(y, x)
if __name__ == '__main__':
unittest.main()
|
dillmann/rscs
|
test/devicetests/device_manager_test.py
|
Python
|
mit
| 1,558
|
[
"Brian"
] |
7167e325be1d798758a013d94e79f5f13c7c913fabbdc30a5272e79e1da6a0fe
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Live value resolution.
Live values are extracted from the known execution context.
Requires activity and reaching definitions analyses.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
import six
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis.annos import NodeAnno
# TODO(aqj): Do we need this? Do other builtins fail in similar ways?
# See b/114389775 for a related bug in pyct
# These symbols are legal in Python, but don't appear in the namespace.
_SPECIAL_SYMBOLS = {'range': range, 'print': print}
if six.PY2:
_SPECIAL_SYMBOLS['xrange'] = xrange
class LiveValueResolver(transformer.Base):
"""Annotates nodes with live values."""
def __init__(self, context, literals):
super(LiveValueResolver, self).__init__(context)
self.literals = literals
def visit_ClassDef(self, node):
self.generic_visit(node)
anno.setanno(node, 'live_val', self.entity_info.namespace[node.name])
return node
def visit_Name(self, node):
self.generic_visit(node)
if isinstance(node.ctx, gast.Load):
defs = anno.getanno(node, anno.Static.DEFINITIONS, ())
is_defined = bool(defs)
has_single_def = len(defs) == 1
if not is_defined:
if node.id in self.literals:
anno.setanno(node, 'live_val', self.literals[node.id])
elif node.id in self.entity_info.namespace:
obj = self.entity_info.namespace[node.id]
anno.setanno(node, 'live_val', obj)
if hasattr(obj, '__name__'):
anno.setanno(node, 'fqn', (obj.__name__,))
elif hasattr(obj, '__class__'):
obj_class = obj.__class__
anno.setanno(node, 'fqn',
(obj_class.__module__, obj_class.__name__))
else:
# If the symbol value is for example a primitive, then it will not
# have a name.
pass
elif node.id in _SPECIAL_SYMBOLS:
# Note: if the user redefined any of these symbols, then they would
# be visible in the namespace and we would never reach this branch.
anno.setanno(node, 'live_val', _SPECIAL_SYMBOLS[node.id])
else:
pass
# TODO(mdan): Should we raise an error here?
# Can encounter this when:
# * a symbol truly lacks reference
# * a symbol is new, like the new name of a function we just renamed.
else:
pass
# TODO(mdan): Attempt to trace its value through the local chain.
# TODO(mdan): Use type annotations as fallback.
if has_single_def:
def_, = defs
# Note: param_of is a weakref.
if def_.param_of and def_.param_of() is self.enclosing_entities[0]:
if node.id in self.entity_info.arg_values:
obj = self.entity_info.arg_values[node.id]
anno.setanno(node, 'live_val', obj)
anno.setanno(node, 'fqn', (obj.__class__.__name__,))
return node
def visit_Attribute(self, node):
self.generic_visit(node)
if anno.hasanno(node.value, 'live_val'):
assert anno.hasanno(node.value, 'fqn')
parent_object = anno.getanno(node.value, 'live_val')
anno.setanno(node, 'parent_type', type(parent_object))
anno.setanno(node, 'fqn', anno.getanno(node.value, 'fqn') + (node.attr,))
if hasattr(parent_object, node.attr):
# This can happen when the attribute's creation and use depend on the
# same static condition, for example:
#
# if cond:
# foo.bar = baz
# if cond:
# x = foo.bar
#
anno.setanno(node, 'live_val', getattr(parent_object, node.attr))
# TODO(mdan): Investigate the role built-in annotations can play here.
elif anno.hasanno(node.value, 'type'):
parent_type = anno.getanno(node.value, 'type')
if hasattr(parent_type, node.attr):
# This should hold for static members like methods.
# This would not hold for dynamic members like function attributes.
# For the dynamic case, we simply leave the node without an annotation,
# and let downstream consumers figure out what to do.
anno.setanno(node, 'parent_type', parent_type)
anno.setanno(node, 'live_val', getattr(parent_type, node.attr))
anno.setanno(node, 'fqn',
anno.getanno(node.value, 'type_fqn') + (node.attr,))
elif isinstance(node.value, gast.Name):
stem_name = node.value
# All nonlocal symbols should be fully resolved.
assert anno.hasanno(stem_name, NodeAnno.IS_LOCAL), stem_name
# TODO(mdan): Figure out what to do when calling attribute on local object
# Maybe just leave as-is?
return node
def resolve(node, context, literals):
return LiveValueResolver(context, literals).visit(node)
|
snnn/tensorflow
|
tensorflow/python/autograph/pyct/static_analysis/live_values.py
|
Python
|
apache-2.0
| 5,662
|
[
"VisIt"
] |
4c9d510eeba132c5ea33d5486e172b543e87c966c2839ed2b65affaaea57423a
|
# plugin: com.py
# Copyright (c) 2010 Oliver Beckstein <orbeckst@gmail.com>
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.
"""
Centre of mass
==============
Calculate the centre of mass of index groups.
Plugin class
------------
.. autoclass:: COM
:members: worker_class
:undoc-members:
Worker class
------------
The worker class performs the analysis.
.. autoclass:: _COM
:members:
"""
from __future__ import with_statement
__docformat__ = "restructuredtext en"
import os.path
import warnings
import numpy
import gromacs
from gromacs.utilities import AttributeDict, asiterable
from gromacs.analysis.core import Worker, Plugin
import logging
logger = logging.getLogger('gromacs.analysis.plugins.com')
# Worker classes that are registered via Plugins (see below)
# ----------------------------------------------------------
# These must be defined before the plugins.
class _COM(Worker):
"""COM worker class."""
def __init__(self,**kwargs):
"""Set up COM analysis.
:Keywords:
*group_names*
list of index group names
*ndx*
index file if groups are not in the default index
*offset*
add the *offset* to the residue numbers [0]
*name*
plugin name [COM]
*simulation*
The :class:`gromacs.analysis.Simulation` instance that
owns the plugin [None]
"""
group_names = asiterable(kwargs.pop('group_names', []))
ndx = kwargs.pop('ndx', None)
offset = kwargs.pop('offset', 0)
super(_COM, self).__init__(**kwargs)
self.parameters.group_names = group_names
self.parameters.offset = offset
self.ndx = ndx
        if self.simulation is not None:
self._register_hook()
def _register_hook(self, **kwargs):
"""Run when registering; requires simulation."""
super(_COM, self)._register_hook(**kwargs)
        assert self.simulation is not None
if self.ndx is None:
self.ndx = self.simulation.ndx
self.parameters.filenames = { # result xvg files
'com': self.plugindir('com.xvg'),
}
# default filename for the plots -- not used
self.parameters.fignames = {
'com': self.figdir('com'),
}
def run(self, force=None, **gmxargs):
"""Analyze trajectory and write COM file.
All three components of the COM coordinate are written.
:Arguments:
- *force*: ``True`` does analysis and overwrites existing files
        - *gmxargs*: additional keyword arguments for :func:`gromacs.g_traj`
"""
gmxargs['com'] = True
gmxargs['mol'] = False
gmxargs['ng'] = len(self.parameters.group_names)
gmxargs['x'] = True
gmxargs['y'] = True
gmxargs['z'] = True
if gmxargs['ng'] == 0:
errmsg = "No index group name(s) provided. Use group_name with the constructor."
logger.error(errmsg)
raise ValueError(errmsg)
if self.check_file_exists(self.parameters.filenames['com'], resolve='warning', force=force):
return
logger.info("Analyzing COM ...")
f = self.parameters.filenames
gromacs.g_traj(s=self.simulation.tpr, f=self.simulation.xtc, n=self.ndx,
ox=f['com'], input=self.parameters.group_names, **gmxargs)
def analyze(self,**kwargs):
"""Collect output xvg files as :class:`gromacs.formats.XVG` objects.
- Make COM as a function of time available as XVG files and
objects.
- Compute RMSD of the COM of each group (from average
position, "rmsd").
        - Compute the distance which encompasses 50% of the observations ("median")
- Compute drift of COM, i.e. length of the vector between
initial and final position. Initial and final position are
computed as averages over *nframesavg* frames ("drift").
RMSD, median, and drift are columns in an xvg file. The rows correspond
to the groups in :attr:`gromacs.analysis.plugins.com.Worker.results.group_names`.
:Keywords:
*nframesavg*
number of initial and final frames that are averaged in
order to compute the drift of the COM of each group
[5000]
*refgroup*
group name whose com is taken as the reference and subtracted from
all other coms for the distance calculations. If supplied,
additional result 'com_relative_*refgroup*' is created.
:Returns: a dictionary of the results and also sets
:attr:`gromacs.analysis.plugins.com.Worker.results`.
"""
from gromacs.formats import XVG
logger.info("Preparing COM graphs as XVG objects.")
self.results = AttributeDict( (k, XVG(fn)) for k,fn in self.parameters.filenames.items() )
# compute RMSD of COM and shift of COM (drift) between avg pos
# over first/last 5,000 frames
nframesavg = kwargs.pop('nframesavg', 5000)
ngroups = len(self.parameters.group_names)
xcom = self.results['com'].array
refgroup = kwargs.pop('refgroup', None)
        if refgroup is not None:
            if refgroup not in self.parameters.group_names:
errmsg = "refgroup=%s must be one of %r" % (refgroup, self.parameters.group_names)
logger.error(errmsg)
raise ValueError(errmsg)
nreference = 1 + 3 * self.parameters.group_names.index(refgroup) # 1-based !!
reference_com = xcom[nreference:nreference+3]
xcom[1:] -= numpy.vstack(ngroups * [reference_com]) # can't use broadcast
logger.debug("distances computed with refgroup %r", refgroup)
self.store_xvg('com_relative_%s' % refgroup, xcom,
names=['time']+self.parameters.group_names)
def vlength(v):
return numpy.sqrt(numpy.sum(v**2, axis=0)) # distances over time step
logger.debug("drift calculated between %d-frame averages at beginning and end",nframesavg)
records = []
for i in xrange(1, 3*ngroups+1, 3):
x = xcom[i:i+3]
r = vlength(x - x.mean(axis=1)[:,numpy.newaxis]) # distances over time step
#r0 = vlength(r - r[:,0][:,numpy.newaxis]) # distances over time step from r(t=0)
#h,edges = numpy.histogram(r, bins=kwargs.get('bins', 100), normed=True)
#m = 0.5*(edges[1:]+edges[:-1])
#c = h.cumsum(dtype=float) # integral
#c /= c[-1] # normalized (0 to 1)
#median = m[c < 0.5][-1]
#g = h/(4*numpy.pi*m**2)
#import scipy.integrate
#radint = lambda y: 4*numpy.pi*scipy.integrate.simps(m**2*y, x=m)
#g /= radint(g) # properly normalized radial distribution function
rmsd = numpy.sqrt(numpy.mean(r**2)) # radial spread sqrt(radint(m**2 * g))
median = numpy.median(r) # radius that contains 50% of the observations
dx = x[:,:nframesavg].mean(axis=1) - x[:,-nframesavg:].mean(axis=1)
drift = vlength(dx)
records.append((rmsd, median, drift))
self.store_xvg('distance', numpy.transpose(records), names="rmsd,median,drift")
return self.results
def plot(self, **kwargs):
"""Plot all results in one graph, labelled by the result keys.
:Keywords:
observables
select one or more of the stored results. Can be a list
or a string (a key into the results dict). ``None``
plots everything [``None``]
figure
- ``True``: save figures in the given formats
- "name.ext": save figure under this filename (``ext`` -> format)
- ``False``: only show on screen [``False``]
formats : sequence
sequence of all formats that should be saved [('png', 'pdf')]
plotargs
keyword arguments for pylab.plot()
"""
import pylab
figure = kwargs.pop('figure', False)
observables = asiterable(kwargs.pop('observables', self.results.keys()))
extensions = kwargs.pop('formats', ('pdf','png'))
for name in observables:
result = self.results[name]
try:
result.plot(**kwargs) # This requires result classes with a plot() method!!
except AttributeError:
warnings.warn("Sorry, plotting of result %(name)r is not implemented" % vars(),
category=UserWarning)
# quick labels -- relies on the proper ordering
labels = [str(n)+" "+dim for n in self.parameters.group_names
for dim in 'xyz']
        if kwargs.get('columns', None) is not None:
# select labels according to columns; only makes sense
# if plotting against the time (col 0)
if kwargs['columns'][0] == 0:
labels = numpy.array([None]+labels)[kwargs['columns'][1:]]
else:
labels = ()
pylab.legend(labels, loc='best')
if figure is True:
for ext in extensions:
self.savefig(ext=ext)
elif figure:
self.savefig(filename=figure)
# Public classes that register the worker classes
#------------------------------------------------
class COM(Plugin):
"""*COM* plugin.
Calculate the centre of mass (COM) of various index groups.
.. class:: COM(group_names, [ndx[, offset [, name[, simulation]]]])
"""
worker_class = _COM
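# Usage sketch (hypothetical; the exact Simulation signature may differ):
#
#   from gromacs.analysis import Simulation
#   S = Simulation(tpr='md.tpr', xtc='md.xtc',
#                  plugins=[COM(group_names=['Protein', 'SOL'])])
#   S.run('COM')
#   S.analyze('COM', refgroup='Protein')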
|
pslacerda/GromacsWrapper
|
gromacs/analysis/plugins/com.py
|
Python
|
gpl-3.0
| 9,964
|
[
"Gromacs"
] |
6713fbfa2c00f3ec43d1a4d3d12ecb930c044c82bc334f4c7542f779682f1d06
|
#!/usr/bin/env python
########################################################################
# File : dirac-wms-get-wn
# Author : Philippe Charpentier
########################################################################
"""
Get WNs for a selection of jobs
"""
import datetime
from functools import cmp_to_key
import DIRAC
from DIRAC.Core.Base.Script import Script
@Script()
def main():
site = "BOINC.World.org"
status = ["Running"]
minorStatus = None
workerNodes = None
since = None
date = "today"
full = False
until = None
batchIDs = None
Script.registerSwitch("", "Site=", " Select site (default: %s)" % site)
Script.registerSwitch("", "Status=", " Select status (default: %s)" % status)
Script.registerSwitch("", "MinorStatus=", " Select minor status")
Script.registerSwitch("", "WorkerNode=", " Select WN")
Script.registerSwitch("", "BatchID=", " Select batch jobID")
Script.registerSwitch("", "Since=", " Date since when to select jobs, or number of days (default: today)")
Script.registerSwitch("", "Date=", " Specify the date (check for a full day)")
Script.registerSwitch("", "Full", " Printout full list of job (default: False except if --WorkerNode)")
Script.parseCommandLine()
from DIRAC import gLogger
from DIRAC.Interfaces.API.Dirac import Dirac
from DIRAC.WorkloadManagementSystem.Client.JobMonitoringClient import JobMonitoringClient
switches = Script.getUnprocessedSwitches()
for switch in switches:
if switch[0] == "Site":
site = switch[1]
elif switch[0] == "MinorStatus":
minorStatus = switch[1]
elif switch[0] == "Status":
if switch[1].lower() == "all":
status = [None]
else:
status = switch[1].split(",")
elif switch[0] == "WorkerNode":
workerNodes = switch[1].split(",")
elif switch[0] == "BatchID":
try:
batchIDs = [int(id) for id in switch[1].split(",")]
except Exception:
gLogger.error("Invalid jobID", switch[1])
DIRAC.exit(1)
elif switch[0] == "Full":
full = True
elif switch[0] == "Date":
since = switch[1].split()[0]
until = str(datetime.datetime.strptime(since, "%Y-%m-%d") + datetime.timedelta(days=1)).split()[0]
elif switch[0] == "Since":
date = switch[1].lower()
if date == "today":
since = None
elif date == "yesterday":
since = 1
elif date == "ever":
since = 2 * 365
elif date.isdigit():
since = int(date)
date += " days"
else:
since = date
if isinstance(since, int):
since = str(datetime.datetime.now() - datetime.timedelta(days=since)).split()[0]
if workerNodes or batchIDs:
# status = [None]
full = True
monitoring = JobMonitoringClient()
dirac = Dirac()
# Get jobs according to selection
jobs = set()
for stat in status:
res = dirac.selectJobs(site=site, date=since, status=stat, minorStatus=minorStatus)
if not res["OK"]:
gLogger.error("Error selecting jobs", res["Message"])
DIRAC.exit(1)
allJobs = set(int(job) for job in res["Value"])
if until:
res = dirac.selectJobs(site=site, date=until, status=stat)
if not res["OK"]:
gLogger.error("Error selecting jobs", res["Message"])
DIRAC.exit(1)
allJobs -= set(int(job) for job in res["Value"])
jobs.update(allJobs)
if not jobs:
gLogger.always("No jobs found...")
DIRAC.exit(0)
# res = monitoring.getJobsSummary( jobs )
# print eval( res['Value'] )[jobs[0]]
allJobs = set()
result = {}
wnJobs = {}
gLogger.always("%d jobs found" % len(jobs))
# Get host name
for job in jobs:
res = monitoring.getJobParameter(job, "HostName")
node = res.get("Value", {}).get("HostName", "Unknown")
res = monitoring.getJobParameter(job, "LocalJobID")
batchID = res.get("Value", {}).get("LocalJobID", "Unknown")
if workerNodes:
if not [wn for wn in workerNodes if node.startswith(wn)]:
continue
allJobs.add(job)
if batchIDs:
if batchID not in batchIDs:
continue
allJobs.add(job)
if full or status == [None]:
allJobs.add(job)
result.setdefault(job, {})["Status"] = status
result[job]["Node"] = node
result[job]["LocalJobID"] = batchID
wnJobs[node] = wnJobs.setdefault(node, 0) + 1
# If necessary get jobs' status
statusCounters = {}
if allJobs:
allJobs = sorted(allJobs, reverse=True)
res = monitoring.getJobsStates(allJobs)
if not res["OK"]:
gLogger.error("Error getting job parameter", res["Message"])
else:
jobStates = res["Value"]
for job in allJobs:
stat = (
jobStates.get(job, {}).get("Status", "Unknown")
+ "; "
+ jobStates.get(job, {}).get("MinorStatus", "Unknown")
+ "; "
+ jobStates.get(job, {}).get("ApplicationStatus", "Unknown")
)
result[job]["Status"] = stat
statusCounters[stat] = statusCounters.setdefault(stat, 0) + 1
elif not workerNodes and not batchIDs:
allJobs = sorted(jobs, reverse=True)
# Print out result
if workerNodes or batchIDs:
gLogger.always("Found %d jobs at %s, WN %s (since %s):" % (len(allJobs), site, workerNodes, date))
if allJobs:
gLogger.always("List of jobs:", ",".join([str(job) for job in allJobs]))
else:
if status == [None]:
gLogger.always("Found %d jobs at %s (since %s):" % (len(allJobs), site, date))
for stat in sorted(statusCounters):
gLogger.always("%d jobs %s" % (statusCounters[stat], stat))
else:
gLogger.always("Found %d jobs %s at %s (since %s):" % (len(allJobs), status, site, date))
gLogger.always(
"List of WNs:",
",".join(
[
"%s (%d)" % (node, wnJobs[node])
for node in sorted(wnJobs, key=cmp_to_key(lambda n1, n2: (wnJobs[n2] - wnJobs[n1])))
]
),
)
if full:
if workerNodes or batchIDs:
nodeJobs = {}
for job in allJobs:
status = result[job]["Status"]
node = result[job]["Node"].split(".")[0]
jobID = result[job].get("LocalJobID")
nodeJobs.setdefault(node, []).append((jobID, job, status))
if not workerNodes:
workerNodes = sorted(nodeJobs)
for node in workerNodes:
for job in nodeJobs.get(node.split(".")[0], []):
gLogger.always("%s " % node + "(%s): %s - %s" % job)
else:
for job in allJobs:
status = result[job]["Status"]
node = result[job]["Node"]
jobID = result[job].get("LocalJobID")
gLogger.always("%s (%s): %s - %s" % (node, jobID, job, status))
if __name__ == "__main__":
main()
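# Invocation sketch (illustrative, using only the switches registered above):
# select yesterday's running jobs on a site and list their worker nodes:
#
#   dirac-wms-get-wn --Site=LCG.CERN.ch --Status=Running --Since=yesterday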
|
DIRACGrid/DIRAC
|
src/DIRAC/WorkloadManagementSystem/scripts/dirac_wms_get_wn.py
|
Python
|
gpl-3.0
| 7,558
|
[
"DIRAC"
] |
6c5e05e7b9e08181e8770775f994607f7db73eb61a5333c8e94fc7a82376f2fb
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import espressomd
import numpy as np
class DomainDecomposition(ut.TestCase):
system = espressomd.System(box_l=[50.0, 50.0, 50.0])
def setUp(self):
self.system.part.clear()
self.system.cell_system.set_domain_decomposition(
use_verlet_lists=False)
def test_resort(self):
n_part = 2351
# Add the particles on node 0, so that they have to be resorted
for i in range(n_part):
self.system.part.add(id=i, pos=[0, 0, 0], type=1)
# And now change their positions
for i in range(n_part):
self.system.part[i].pos = self.system.box_l * np.random.random(3)
# Distribute the particles on the nodes
part_dist = self.system.cell_system.resort()
# Check that we did not lose particles
self.assertEqual(sum(part_dist), n_part)
# Check that we can still access all the particles
# This basically checks if part_node and local_particles
# is still in a valid state after the particle exchange
self.assertEqual(sum(self.system.part[:].type), n_part)
def test_position_rounding(self):
"""This places a particle on the box boundary,
with parameters that could cause problems with
rounding."""
self.system.box_l = [50.0, 50.0, 50.0]
self.system.cell_system.skin = 0.4
self.system.min_global_cut = 12.0 / 4.25
self.system.part.add(pos=[25, 25, 0])
self.assertEqual(1, len(self.system.part))
if __name__ == "__main__":
ut.main()
|
KaiSzuttor/espresso
|
testsuite/python/domain_decomposition.py
|
Python
|
gpl-3.0
| 2,297
|
[
"ESPResSo"
] |
0d80f5ce7a5a65c1cac28c973b66f194c651ddb473184f783944819f912abc43
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Control flow graph (CFG) structure for Python AST representation.
The CFG is a digraph with edges representing valid control flow. Each
node is associated with exactly one AST node, but not all AST nodes may have
a corresponding CFG counterpart.
Once built, the CFG itself is immutable, but the values it holds need not be;
they are usually annotated with information extracted by walking the graph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from enum import Enum
# pylint:disable=g-bad-import-order
import gast
# pylint:enable=g-bad-import-order
from tensorflow.contrib.autograph.pyct import compiler
class Node(object):
"""A node in the CFG.
Although new instances of this class are mutable, the objects that a user
finds in the CFG are typically not.
  Each node maintains pointers to its neighbors, which allow efficient walking
  in both forward and reverse order. The following property holds for all
  nodes: "child in node.next" iff "node in child.prev".
Attributes:
next: FrozenSet[Node, ...], the nodes that follow this node, in control
flow order
prev: FrozenSet[Node, ...], the nodes that precede this node, in reverse
control flow order
ast_node: ast.AST, the AST node corresponding to this CFG node
"""
def __init__(self, next_, prev, ast_node):
self.next = next_
self.prev = prev
self.ast_node = ast_node
def freeze(self):
self.next = frozenset(self.next)
self.prev = frozenset(self.prev)
def __repr__(self):
return compiler.ast_to_source(self.ast_node).strip()
class Graph(
collections.namedtuple('Graph', ['entry', 'exit', 'error', 'index'])):
"""A Control Flow Graph.
The CFG maintains an index to allow looking up a CFG node by the AST node to
which it is associated. The index can also be enumerated in top-down, depth
first order.
Walking the graph in forward or reverse order is supported by double
parent-child links.
Note: the error nodes are not wired to their corresponding finally guards,
because these are shared, and wiring them would create a reverse path from
normal control flow into the error nodes, which we want to avoid.
Attributes:
entry: Node, the entry node
exit: FrozenSet[Node, ...], the exit nodes
error: FrozenSet[Node, ...], nodes that exit due to an explicitly raised
error (errors propagated from function calls are not accounted)
index: Dict[ast.Node, Node], mapping AST nodes to the respective CFG
node
"""
def __repr__(self):
result = 'digraph CFG {\n'
for node in self.index.values():
result += ' %s [label="%s"];\n' % (id(node), node)
for node in self.index.values():
if node.next:
result += ' %s -> {%s};\n' % (id(node), ', '.join(
repr(id(n)) for n in node.next))
result += '}'
return result
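# Example (illustrative, with made-up object ids) of the dot output produced
# by Graph.__repr__ for a two-statement CFG:
#
#   digraph CFG {
#     4401 [label="x = 1"];
#     4402 [label="return x"];
#     4401 -> {4402};
#   }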
class _WalkMode(Enum):
FORWARD = 1
REVERSE = 2
class GraphVisitor(object):
"""Base class for a CFG visitors.
This implementation is not thread safe.
The visitor has some facilities to simplify dataflow analyses. In particular,
it allows revisiting the nodes at the decision of the subclass. This can be
used to visit the graph until the state reaches a fixed point.
For more details on dataflow analysis, see
https://www.seas.harvard.edu/courses/cs252/2011sp/slides/Lec02-Dataflow.pdf
Note: the literature generally suggests visiting successor nodes only when the
state of the current node changed, regardless of whether that successor has
ever been visited. This implementation visits every successor at least once.
Attributes:
graph: Graph
in_: Dict[Node, Any], stores node-keyed state during a visit
out: Dict[Node, Any], stores node-keyed state during a visit
"""
def reset(self):
self.in_ = {
node: self.init_state(node) for node in self.graph.index.values()
}
self.out = {
node: self.init_state(node) for node in self.graph.index.values()
}
def init_state(self, node):
"""State initialization function. Optional to overload.
An in/out state slot will be created for each node in the graph. Subclasses
may overload this to control what that is initialized to.
Args:
node: Node
"""
del node
return None
def visit_node(self, node):
"""Visitor function.
Args:
node: Node
Returns:
bool, whether the node should be revisited; subclasses can visit every
reachable node exactly once by always returning False
"""
raise NotImplementedError('Subclasses must implement this.')
def _visit_internal(self, mode):
"""Visits the CFG, depth-first."""
assert mode in (_WalkMode.FORWARD, _WalkMode.REVERSE)
if mode == _WalkMode.FORWARD:
open_ = [self.graph.entry]
elif mode == _WalkMode.REVERSE:
open_ = list(self.graph.exit)
closed = set()
self.reset()
while open_:
node = open_.pop(0)
closed.add(node)
should_revisit = self.visit_node(node)
if mode == _WalkMode.FORWARD:
children = node.next
elif mode == _WalkMode.REVERSE:
children = node.prev
for next_ in children:
if should_revisit or next_ not in closed:
open_.append(next_)
def visit_forward(self, graph):
self.graph = graph
self._visit_internal(_WalkMode.FORWARD)
def visit_reverse(self, graph):
self.graph = graph
self._visit_internal(_WalkMode.REVERSE)
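# A minimal sketch (assumption, not part of this module): a GraphVisitor
# subclass that counts how often each node is reached. Returning False from
# visit_node means no revisits, i.e. a plain reachability walk; a dataflow
# analysis would return True until its per-node state stops changing.
class _CountingVisitor(GraphVisitor):

  def init_state(self, node):
    return 0

  def visit_node(self, node):
    self.out[node] += 1
    return False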
class GraphBuilder(object):
"""Builder that constructs a CFG from a given AST.
  This GraphBuilder facilitates constructing the DAG that forms the CFG when
  nodes are supplied in lexical order (i.e., top-down, depth first). Under
  these conditions, it supports building patterns found in typical structured
  programs.
This builder ignores the flow generated by exceptions, which are assumed to
always be catastrophic and present purely for diagnostic purposes (e.g. to
print debug information). Statements like raise and try/catch sections are
  allowed and will generate control flow edges, but ordinary statements are
assumed not to raise exceptions.
Finally sections are also correctly interleaved between break/continue/return
nodes and their subsequent statements.
Important concepts:
    * nodes - nodes refer to CFG nodes; AST nodes are qualified explicitly
* leaf set - since the graph is constructed gradually, a leaf set maintains
the CFG nodes that will precede the node that the builder expects to
receive next; when an ordinary node is added, it is connected to the
existing leaves and it in turn becomes the new leaf
* jump nodes - nodes that should generate edges other than what
ordinary nodes would; these correspond to break, continue and return
statements
* sections - logical delimiters for subgraphs that require special
edges; there are various types of nodes, each admitting various
types of jump nodes; sections are identified by their corresponding AST
node
"""
# TODO(mdan): Perhaps detail this in a markdown doc.
# TODO(mdan): Add exception support.
def __init__(self, parent_ast_node):
self.reset()
self.parent = parent_ast_node
def reset(self):
"""Resets the state of this factory."""
self.head = None
self.errors = set()
self.node_index = collections.OrderedDict()
# TODO(mdan): Too many primitives. Use classes.
self.leaves = set()
self.finally_sections = {}
self.finally_section_subgraphs = {} # Values are [begin_node, exit_nodes]
# Whether the guard section can be reached from the statement that precedes
# it.
self.finally_section_has_direct_flow = {}
# Finally sections that await their first node.
self.pending_finally_sections = set()
# Exit jumps keyed by the section they affect.
self.exits = {}
# The entry of loop sections, keyed by the section.
self.section_entry = {}
# Continue jumps keyed by the section they affect.
self.continues = {}
# The entry of conditional sections, keyed by the section.
self.cond_entry = {}
# Lists of leaf nodes corresponding to each branch in the section.
self.cond_leaves = {}
def _connect_nodes(self, first, second):
"""Connects nodes to signify that control flows from first to second.
Args:
first: Union[Set[Node, ...], Node]
second: Node
"""
if isinstance(first, Node):
first.next.add(second)
second.prev.add(first)
else:
for node in first:
self._connect_nodes(node, second)
def _add_new_node(self, ast_node):
"""Grows the graph by adding a CFG node following the current leaves."""
    if ast_node in self.node_index:
raise ValueError('%s added twice' % ast_node)
node = Node(next_=set(), prev=set(), ast_node=ast_node)
self.node_index[ast_node] = node
if self.head is None:
self.head = node
for leaf in self.leaves:
self._connect_nodes(leaf, node)
# If any finally section awaits its first node, populate it.
for section_id in self.pending_finally_sections:
self.finally_section_subgraphs[section_id][0] = node
self.pending_finally_sections = set()
return node
def add_ordinary_node(self, ast_node):
"""Grows the graph by adding an ordinary CFG node.
Ordinary nodes are followed by the next node, in lexical order, that is,
they become the new leaf set.
Args:
ast_node: ast.AST
Returns:
Node
"""
node = self._add_new_node(ast_node)
self.leaves = set((node,))
return node
def _add_jump_node(self, ast_node, guards):
"""Grows the graph by adding a jump node.
Jump nodes are added to the current leaf set, and the leaf set becomes
empty. If the jump node is the last in a cond section, then it may be added
back to the leaf set by a separate mechanism.
Args:
ast_node: ast.AST
guards: Tuple[ast.AST, ...], the finally sections active for this node
Returns:
Node
"""
node = self._add_new_node(ast_node)
self.leaves = set()
# The guards themselves may not yet be complete, and will be wired later.
self.finally_sections[node] = guards
return node
def _connect_jump_to_finally_sections(self, node):
"""Connects a jump node to the finally sections protecting it."""
cursor = set((node,))
for guard_section_id in self.finally_sections[node]:
guard_begin, guard_ends = self.finally_section_subgraphs[guard_section_id]
self._connect_nodes(cursor, guard_begin)
cursor = guard_ends
del self.finally_sections[node]
# TODO(mdan): Should garbage-collect finally_section_subgraphs.
return cursor
def add_exit_node(self, ast_node, section_id, guards):
"""Grows the graph by adding an exit node.
This node becomes an exit for the current section.
Args:
ast_node: ast.AST
section_id: Hashable, the node for which ast_node should be considered
to be an exit node
guards: Tuple[ast.AST, ...], the finally sections that guard ast_node
"""
node = self._add_jump_node(ast_node, guards)
self.exits[section_id].add(node)
def add_continue_node(self, ast_node, section_id, guards):
"""Grows the graph by adding a reentry node.
This node causes control flow to go back to the loop section's entry.
Args:
ast_node: ast.AST
section_id: Hashable, the node for which ast_node should be considered
to be an exit node
guards: Tuple[ast.AST, ...], the finally sections that guard ast_node
"""
node = self._add_jump_node(ast_node, guards)
self.continues[section_id].add(node)
def add_error_node(self, ast_node, guards):
"""Grows the graph by adding an error node.
This node becomes an exit for the entire graph.
Args:
ast_node: ast.AST
guards: Tuple[ast.AST, ...], the finally sections that guard ast_node
"""
node = self._add_jump_node(ast_node, guards)
self.errors.add(node)
self.leaves = set()
def enter_section(self, section_id):
"""Enters a regular section.
Regular sections admit exit jumps, which end the section.
Args:
section_id: Hashable, the same node that will be used in calls to the
ast_node arg passed to add_exit_node
"""
assert section_id not in self.exits
self.exits[section_id] = set()
def exit_section(self, section_id):
"""Exits a regular section."""
# Exits are jump nodes, which may be protected.
for exit_ in self.exits[section_id]:
self.leaves |= self._connect_jump_to_finally_sections(exit_)
del self.exits[section_id]
def enter_loop_section(self, section_id, entry_node):
"""Enters a loop section.
Loop sections define an entry node. The end of the section always flows back
to the entry node. These admit continue jump nodes which also flow to the
entry node.
Args:
section_id: Hashable, the same node that will be used in calls to the
ast_node arg passed to add_continue_node
entry_node: ast.AST, the entry node into the loop (e.g. the test node
for while loops)
"""
assert section_id not in self.section_entry
assert section_id not in self.continues
self.continues[section_id] = set()
node = self.add_ordinary_node(entry_node)
self.section_entry[section_id] = node
def exit_loop_section(self, section_id):
"""Exits a loop section."""
self._connect_nodes(self.leaves, self.section_entry[section_id])
# continues are jump nodes, which may be protected.
for reentry in self.continues[section_id]:
guard_ends = self._connect_jump_to_finally_sections(reentry)
self._connect_nodes(guard_ends, self.section_entry[section_id])
# Loop nodes always loop back.
self.leaves = set((self.section_entry[section_id],))
del self.continues[section_id]
del self.section_entry[section_id]
def enter_cond_section(self, section_id):
"""Enters a conditional section.
Conditional sections define an entry node, and one or more branches.
Args:
section_id: Hashable, the same node that will be used in calls to the
section_id arg passed to new_cond_branch
"""
assert section_id not in self.cond_entry
assert section_id not in self.cond_leaves
self.cond_leaves[section_id] = []
def new_cond_branch(self, section_id):
"""Begins a new branch in a cond section."""
assert section_id in self.cond_leaves
if section_id in self.cond_entry:
# Subsequent splits move back to the split point, and memorize the
# current leaves.
self.cond_leaves[section_id].append(self.leaves)
self.leaves = self.cond_entry[section_id]
else:
# If this is the first time we split a section, just remember the split
# point.
self.cond_entry[section_id] = self.leaves
def exit_cond_section(self, section_id):
"""Exits a conditional section."""
for split in self.cond_leaves[section_id]:
self.leaves |= split
del self.cond_entry[section_id]
del self.cond_leaves[section_id]
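  # Illustrative call sequence (assumption, mirroring how an AST visitor would
  # drive the builder) for ``if c: a; else: b``:
  #   enter_cond_section(stmt); add_ordinary_node(c)
  #   new_cond_branch(stmt); add_ordinary_node(a)
  #   new_cond_branch(stmt); add_ordinary_node(b)
  #   exit_cond_section(stmt)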
def enter_finally_section(self, section_id):
"""Enters a finally section."""
# TODO(mdan): This, not the caller, should track the active sections.
self.finally_section_subgraphs[section_id] = [None, None]
if self.leaves:
self.finally_section_has_direct_flow[section_id] = True
else:
self.finally_section_has_direct_flow[section_id] = False
self.pending_finally_sections.add(section_id)
def exit_finally_section(self, section_id):
"""Exits a finally section."""
assert section_id not in self.pending_finally_sections, 'Empty finally?'
self.finally_section_subgraphs[section_id][1] = self.leaves
# If the guard can only be reached by a jump, then it will not flow
# into the statement that follows it.
if not self.finally_section_has_direct_flow[section_id]:
self.leaves = set()
del self.finally_section_has_direct_flow[section_id]
def build(self):
"""Returns the CFG accumulated so far and resets the builder.
Returns:
Graph
"""
# Freeze the nodes.
for node in self.node_index.values():
node.freeze()
result = Graph(
entry=self.head,
exit=self.leaves,
error=self.errors,
index=self.node_index)
# Reset the state.
self.reset()
return result
class AstToCfg(gast.NodeVisitor):
"""Converts an AST to CFGs.
A separate CFG will be constructed for each function.
"""
# TODO(mdan): Figure out how to deal with closures.
def __init__(self):
super(AstToCfg, self).__init__()
self.builder_stack = []
self.builder = None
self.cfgs = {}
self.lexical_scopes = []
def _enter_lexical_scope(self, node):
self.lexical_scopes.append(node)
def _exit_lexical_scope(self, node):
leaving_node = self.lexical_scopes.pop()
assert node == leaving_node
def _get_enclosing_scopes(self, include, stop_at):
included = []
for node in reversed(self.lexical_scopes):
if isinstance(node, include):
included.append(node)
if isinstance(node, stop_at):
return node, included
return None, included
def _process_basic_statement(self, node):
self.generic_visit(node)
self.builder.add_ordinary_node(node)
def _process_exit_statement(self, node, *exits_nodes_of_type):
# Note: this is safe because we process functions separately.
try_node, guards = self._get_enclosing_scopes(
include=(gast.Try,),
stop_at=tuple(exits_nodes_of_type),
)
if try_node is None:
raise ValueError(
'%s that is not enclosed by any of %s' % (node, exits_nodes_of_type))
self.builder.add_exit_node(node, try_node, guards)
def _process_continue_statement(self, node, *loops_to_nodes_of_type):
# Note: this is safe because we process functions separately.
try_node, guards = self._get_enclosing_scopes(
include=(gast.Try,),
stop_at=tuple(loops_to_nodes_of_type),
)
if try_node is None:
raise ValueError('%s that is not enclosed by any of %s' %
(node, loops_to_nodes_of_type))
self.builder.add_continue_node(node, try_node, guards)
def visit_FunctionDef(self, node):
self.builder_stack.append(self.builder)
self.builder = GraphBuilder(node)
self._enter_lexical_scope(node)
self.builder.enter_section(node)
self._process_basic_statement(node.args)
for stmt in node.body:
self.visit(stmt)
self.builder.exit_section(node)
self._exit_lexical_scope(node)
self.cfgs[node] = self.builder.build()
self.builder = self.builder_stack.pop()
def visit_Lambda(self, node):
# TODO(mdan): Treat like FunctionDef? That would be a separate CFG.
raise NotImplementedError()
def visit_Return(self, node):
self._process_exit_statement(node, gast.FunctionDef)
def visit_Expr(self, node):
self._process_basic_statement(node)
def visit_Assign(self, node):
self._process_basic_statement(node)
def visit_AnnAssign(self, node):
self._process_basic_statement(node)
def visit_AugAssign(self, node):
self._process_basic_statement(node)
def visit_Print(self, node):
self._process_basic_statement(node)
def visit_Raise(self, node):
try_node, guards = self._get_enclosing_scopes(
include=(gast.Try,),
stop_at=(gast.FunctionDef,),
)
if try_node is None:
raise ValueError('%s that is not enclosed by any FunctionDef' % node)
self.builder.add_error_node(node, try_node, guards)
def visit_Assert(self, node):
# Ignoring the effect of exceptions.
self._process_basic_statement(node)
def visit_Delete(self, node):
self._process_basic_statement(node)
def visit_If(self, node):
# No need to track ifs as lexical scopes, for now.
# Lexical scopes are generally tracked in order to be able to resolve the
# targets of jump statements like break/continue/etc. Since there is no
# statement that can interrupt a conditional, we don't need to track their
# lexical scope. That may change in the future.
self.builder.enter_cond_section(node)
self._process_basic_statement(node.test)
self.builder.new_cond_branch(node)
for stmt in node.body:
self.visit(stmt)
self.builder.new_cond_branch(node)
for stmt in node.orelse:
self.visit(stmt)
self.builder.exit_cond_section(node)
def visit_While(self, node):
self._enter_lexical_scope(node)
self.builder.enter_section(node)
self.builder.enter_loop_section(node, node.test)
for stmt in node.body:
self.visit(stmt)
self.builder.exit_loop_section(node)
    # Note: although the orelse is technically part of the loop node,
    # the statements inside it don't count as loop body. For example, a
    # break in the loop's orelse will affect the parent loop, not the
    # current one.
self._exit_lexical_scope(node)
for stmt in node.orelse:
self.visit(stmt)
self.builder.exit_section(node)
def visit_For(self, node):
self._enter_lexical_scope(node)
self.builder.enter_section(node)
# TODO(mdan): Strictly speaking, this should be node.target + node.iter.
# A blind dataflow analysis would have to process both node.target and
# node.iter to properly process read and write access.
self.builder.enter_loop_section(node, node.iter)
for stmt in node.body:
self.visit(stmt)
self.builder.exit_loop_section(node)
# Note: although the orelse is technically part of the loop node,
# they don't count as loop bodies. For example, a break in the loop's
# orelse will affect the parent loop, not the current one.
self._exit_lexical_scope(node)
for stmt in node.orelse:
self.visit(stmt)
self.builder.exit_section(node)
def visit_Break(self, node):
self._process_exit_statement(node, gast.While, gast.For)
def visit_Continue(self, node):
self._process_continue_statement(node, gast.While, gast.For)
def visit_Try(self, node):
self._enter_lexical_scope(node)
for stmt in node.body:
self.visit(stmt)
# Unlike loops, the orelse is a simple continuation of the body.
for stmt in node.orelse:
self.visit(stmt)
if node.handlers:
# TODO(mdan): Should we still support bare try/except? Might be confusing.
raise NotImplementedError('exceptions are not yet supported')
self._exit_lexical_scope(node)
self.builder.enter_finally_section(node)
for stmt in node.finalbody:
self.visit(stmt)
self.builder.exit_finally_section(node)
def visit_With(self, node):
# TODO(mdan): Mark the context manager's exit call as exit guard.
self._process_basic_statement(node.items)
for stmt in node.body:
self.visit(stmt)
def build(node):
builder = AstToCfg()
builder.visit(node)
return builder.cfgs
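# --- Added usage sketch (not part of the original module) ---
# Assumes a `gast` version providing gast.parse(). `build` above returns one
# Graph per function, keyed by its gast.FunctionDef node; this demo runs only
# when the file is executed directly.
if __name__ == '__main__':
  import gast
  _src = ('def f(x):\n'
          '  while x > 0:\n'
          '    x -= 1\n'
          '  return x\n')
  _module = gast.parse(_src)
  _cfgs = build(_module)
  for _fn, _graph in _cfgs.items():
    print('%s: %s' % (_fn.name, _graph))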
|
drpngx/tensorflow
|
tensorflow/contrib/autograph/pyct/cfg.py
|
Python
|
apache-2.0
| 23,771
|
[
"VisIt"
] |
11a8aa482e3d33aa05d819ab4e2047e771b76a2b30150644bc7a5339f78e7e5f
|
#!/usr/bin/env python
# ==============================================================================
# MODULE DOCSTRING
# ==============================================================================
"""
environment.py
Lightweight module for validating SMIRKS using Open Force Field ToolkitWrappers.
AUTHORS
Caitlin Bannan <bannanc@uci.edu> (Original author), Mobley Lab, University of California Irvine,
Jeff Wagner <jeffrey.wagner@openforcefield.org> (refactored to use ToolkitWrappers),
with contributions from John Chodera, Memorial Sloan Kettering Cancer Center
and David Mobley, UC Irvine.
"""
__all__ = [
"SMIRKSMismatchError",
"SMIRKSParsingError",
"ChemicalEnvironment",
"AtomChemicalEnvironment",
"BondChemicalEnvironment",
"AngleChemicalEnvironment",
"TorsionChemicalEnvironment",
"ImproperChemicalEnvironment",
]
# ==============================================================================
# GLOBAL IMPORTS
# ==============================================================================
from openff.toolkit.utils.toolkits import (
GLOBAL_TOOLKIT_REGISTRY,
MessageException,
ToolkitWrapper,
)
class SMIRKSMismatchError(MessageException):
"""
Exception for cases where smirks are inappropriate
for the environment type they are being parsed into
"""
pass
class SMIRKSParsingError(MessageException):
"""
Exception for when SMIRKS are not parseable for any environment
"""
pass
class ChemicalEnvironment:
"""Chemical environment abstract base class used for validating SMIRKS"""
_expected_type = None
def __init__(
self,
smirks=None,
label=None,
validate_parsable=True,
validate_valence_type=True,
toolkit_registry=None,
):
"""Initialize a chemical environment abstract base class.
smirks = string, optional
if smirks is not None, a chemical environment is built
from the provided SMIRKS string
label = anything, optional
intended to be used to label this chemical environment
could be a string, int, or float, or anything
validate_parsable: bool, optional, default=True
If specified, ensure the provided smirks is parsable
validate_valence_type : bool, optional, default=True
If specified, ensure the tagged atoms are appropriate to the specified valence type
toolkit_registry = string or ToolkitWrapper or ToolkitRegistry. Default = None
Either a ToolkitRegistry, ToolkitWrapper, or the strings 'openeye' or 'rdkit',
indicating the backend to use for validating the correct
connectivity of the SMIRKS during initialization. If None,
this function will use the GLOBAL_TOOLKIT_REGISTRY
Raises
------
SMIRKSParsingError
if smirks was unparsable
SMIRKSMismatchError
if smirks did not have expected connectivity between tagged atoms
and validate_valence_type=True
"""
# Support string input for toolkit names for legacy reasons
if toolkit_registry == "openeye":
from openff.toolkit.utils.toolkits import OpenEyeToolkitWrapper
toolkit_registry = OpenEyeToolkitWrapper()
elif toolkit_registry == "rdkit":
from openff.toolkit.utils.toolkits import RDKitToolkitWrapper
toolkit_registry = RDKitToolkitWrapper()
self.smirks = smirks
self.label = label
if validate_parsable or validate_valence_type:
self.validate(
validate_valence_type=validate_valence_type,
toolkit_registry=toolkit_registry,
)
def validate(self, validate_valence_type=True, toolkit_registry=None):
"""
        Validate the underlying SMIRKS, raising an exception on failure. If
        the expected type is None, the valence-type check always passes.
validate_valence_type : bool, optional, default=True
If specified, ensure the tagged atoms are appropriate to the specified valence type
toolkit_registry = ToolkitWrapper or ToolkitRegistry. Default = None
Either a ToolkitRegistry or ToolkitWrapper,
indicating the backend to use for validating the correct
connectivity of the SMIRKS during initialization. If None,
this function will use the GLOBAL_TOOLKIT_REGISTRY
Raises
------
SMIRKSParsingError
if smirks was unparsable
SMIRKSMismatchError
if smirks did not have expected connectivity between tagged atoms
and validate_valence_type=True
"""
perceived_type = self.get_type(toolkit_registry=toolkit_registry)
if (
(perceived_type != self._expected_type)
and validate_valence_type
and not (self._expected_type is None)
):
raise SMIRKSMismatchError(
f"{self.__class__} expected '{self._expected_type}' chemical environment, but "
f"smirks was set to '{self.smirks}', which is type '{perceived_type}'"
)
@classmethod
def validate_smirks(
cls,
smirks,
validate_parsable=True,
validate_valence_type=True,
toolkit_registry=None,
):
"""
        Check that the provided SMIRKS string is parsable and, if requested,
        that its tagged atoms are appropriate to the specified valence type.
Parameters
----------
smirks : str
The SMIRKS expression to validate
validate_parsable: bool, optional, default=True
If specified, ensure the provided smirks is parsable
validate_valence_type : bool, optional, default=True
If specified, ensure the tagged atoms are appropriate to the specified valence type
toolkit_registry = string or ToolkitWrapper or ToolkitRegistry. Default = None
Either a ToolkitRegistry, ToolkitWrapper, or the strings 'openeye' or 'rdkit',
indicating the backend to use for validating the correct
connectivity of the SMIRKS during initialization. If None,
this function will use the GLOBAL_TOOLKIT_REGISTRY
Raises
------
SMIRKSParsingError
if smirks was unparsable
SMIRKSMismatchError
if smirks did not have expected connectivity between tagged atoms
and validate_valence_type=True
"""
cls(
smirks,
validate_parsable=validate_parsable,
validate_valence_type=validate_valence_type,
toolkit_registry=toolkit_registry,
)
def get_type(self, toolkit_registry=None):
"""
Return the valence type implied by the connectivity of the bound atoms in this ChemicalEnvironment.
Parameters
-----------
toolkit_registry : openff.toolkit.utils.ToolkitRegistry or openff.toolkit.utils.ToolkitWrapper
The cheminformatics toolkit to use for parsing the smirks
Returns
-------
valence_type : str
One of "Atom", "Bond", "Angle", "ProperTorsion", "ImproperTorsion", or None.
If tagged atoms are not connected in a known pattern this method will return None.
Raises
------
SMIRKSParsingError
if smirks was unparsable
"""
# Query a toolkit wrapper for substructure type
if toolkit_registry is None:
toolkit_registry = GLOBAL_TOOLKIT_REGISTRY
if isinstance(toolkit_registry, ToolkitWrapper):
unique_tags, connectivity = toolkit_registry.get_tagged_smarts_connectivity(
self.smirks
)
else:
unique_tags, connectivity = toolkit_registry.call(
"get_tagged_smarts_connectivity", self.smirks
)
if unique_tags == (1,) and len(connectivity) == 0:
return "Atom"
if unique_tags == (1, 2) and (1, 2) in connectivity:
return "Bond"
elif (
unique_tags == (1, 2, 3)
and (1, 2) in connectivity
and (2, 3) in connectivity
):
return "Angle"
elif (
unique_tags == (1, 2, 3, 4)
and (1, 2) in connectivity
and (2, 3) in connectivity
and (3, 4) in connectivity
):
return "ProperTorsion"
elif (
unique_tags == (1, 2, 3, 4)
and (1, 2) in connectivity
and (2, 3) in connectivity
and (2, 4) in connectivity
):
return "ImproperTorsion"
else:
return None
class AtomChemicalEnvironment(ChemicalEnvironment):
"""Chemical environment matching one labeled atom."""
_expected_type = "Atom"
class BondChemicalEnvironment(ChemicalEnvironment):
"""Chemical environment matching two labeled atoms (or a bond)."""
_expected_type = "Bond"
class AngleChemicalEnvironment(ChemicalEnvironment):
"""Chemical environment matching three marked atoms (angle)."""
_expected_type = "Angle"
class TorsionChemicalEnvironment(ChemicalEnvironment):
"""Chemical environment matching four marked atoms (torsion)."""
_expected_type = "ProperTorsion"
class ImproperChemicalEnvironment(ChemicalEnvironment):
"""Chemical environment matching four marked atoms (improper)."""
_expected_type = "ImproperTorsion"
|
open-forcefield-group/openforcefield
|
openff/toolkit/typing/chemistry/environment.py
|
Python
|
mit
| 9,601
|
[
"RDKit"
] |
e41b2b358ecbe45439fdc6bdea8fac60e43d9cc0c6335df1de89267008ce2289
|
#! /usr/bin/env python
###############################################################################
# Copyright 2016 Adam Jackson
###############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
from __future__ import print_function
import numpy as np
from argparse import ArgumentParser
import ase.io
from kgrid import calc_kpt_tuple
def get_increments(lattice_lengths):
"""
Calculate the vector l0 of increments between significant length cutoffs
for each reciprocal lattice vector.
:param lattice_lengths: Lengths of reciprocal lattice vectors
:type 3-tuple
:returns: Vector l0 of significant increments
:rtype: 3-tuple
"""
return tuple([1. / (2 * a) for a in lattice_lengths])
def cutoff_series(atoms, l_min, l_max, decimals=4):
"""Find multiples of l0 members within a range
:param atoms: Crystal structure
:type ase.atoms.Atoms
:param l_min: Minimum real-space cutoff
:type float
:param l_max: Maximum real-space cutoff
:type float
:param decimals: Number of decimal places used when rounding to remove
duplicates
:type int
:returns: Sorted list of cutoffs
:rtype: list
"""
recip_cell = atoms.cell.reciprocal()
lattice_lengths = np.sqrt(np.sum(np.square(recip_cell), 1))
l0 = get_increments(lattice_lengths)
members = set()
for li in l0:
n_min = np.ceil(l_min / li)
members.update(
set(np.around(
np.arange(n_min * li, l_max, li), decimals=decimals)))
return sorted(members)
def kspacing_series(atoms, l_min, l_max, decimals=4):
"""Find series of KSPACING values with different results
NB: It is strongly recommended to ADD a small delta to these values
to account for truncation/rounding errors
:param atoms: Crystal structure
:type ase.atoms.Atoms
:param l_min: Minimum real-space cutoff
:type float
:param l_max: Maximum real-space cutoff
:type float
:returns: Sorted list of KSPACING values
:rtype: list
"""
return [np.pi / c for c in
cutoff_series(atoms, l_min, l_max, decimals=decimals)]
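# Added illustrative sketch (not part of the original module): for bulk
# silicon, list the distinct VASP KSPACING values between 10 and 30 angstrom
# cutoffs, adding the small delta recommended in the docstring above.
# Assumes ASE is installed.
#     from ase.build import bulk
#     si = bulk('Si', 'diamond', a=5.43)
#     for ks in kspacing_series(si, 10, 30):
#         print(ks + 1e-6)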
def get_parser():
    parser = ArgumentParser(
        description="Calculate a systematic series of k-point samples")
parser.add_argument(
'filename',
nargs='?',
type=str,
default="geometry.in",
help="Path to input file [default: ./geometry.in]")
parser.add_argument(
'-t',
'--type',
type=str,
default=None,
help='Format of crystal structure file')
parser.add_argument(
'--min',
type=float,
default=10,
help='Minimum real-space cutoff / angstroms')
parser.add_argument(
'--max',
type=float,
default=30,
help='Maximum real-space cutoff / angstroms')
parser.add_argument('--comma_sep', action='store_true',
help='Output as comma-separated list on one line')
parser.add_argument('--castep', action='store_true',
help=('Provide CASTEP-like MP spacing instead of '
'vasp-like KSPACING'))
return parser
def main(params=None):
args = get_parser().parse_args(params)
if args.type:
atoms = ase.io.read(args.filename, format=args.type)
else:
atoms = ase.io.read(args.filename)
cutoffs = cutoff_series(atoms, args.min, args.max)
if args.castep:
kspacing = [0.5 / c for c in cutoffs]
else:
kspacing = [np.pi / c for c in cutoffs]
samples = [calc_kpt_tuple(
atoms, cutoff_length=(cutoff - 1e-4)) for cutoff in cutoffs]
if args.comma_sep:
def print_sample(sample):
return ' '.join((str(x) for x in sample))
print(','.join((print_sample(sample) for sample in samples)))
else:
if args.castep:
print("Length cutoff MP SPACING Samples")
print("------------- ---------- ------------")
fstring = "{0:12.3f} {1:9.6f} {2:3d} {3:3d} {4:3d}"
else:
print("Length cutoff KSPACING Samples")
print("------------- -------- ------------")
fstring = "{0:12.3f} {1:7.4f} {2:3d} {3:3d} {4:3d}"
for cutoff, s, sample in zip(cutoffs, kspacing, samples):
print(fstring.format(cutoff, s, *sample))
if __name__ == '__main__':
main()
|
WMD-Bath/kgrid
|
kgrid/series.py
|
Python
|
gpl-3.0
| 5,183
|
[
"ASE",
"CASTEP",
"CRYSTAL",
"VASP"
] |
ef8907945443d5ab555f4cbf0bc80dc52f65be4e0d330cb0588cad7c4db1365c
|
"""
Read data from ECMWF MACC Reanalysis.
"""
import threading
import pandas as pd
try:
import netCDF4
except ImportError:
class netCDF4:
@staticmethod
def Dataset(*a, **kw):
raise ImportError(
'Reading ECMWF data requires netCDF4 to be installed.')
try:
from ecmwfapi import ECMWFDataServer
except ImportError:
def ECMWFDataServer(*a, **kw):
raise ImportError(
'To download data from ECMWF requires the API client.\nSee https:/'
'/confluence.ecmwf.int/display/WEBAPI/Access+ECMWF+Public+Datasets'
)
#: map of ECMWF MACC parameter keynames and codes used in API
PARAMS = {
"tcwv": "137.128",
"aod550": "207.210",
'aod469': '213.210',
'aod670': '214.210',
'aod865': '215.210',
"aod1240": "216.210",
}
def _ecmwf(server, startdate, stopdate, params, targetname):
# see http://apps.ecmwf.int/datasets/data/macc-reanalysis/levtype=sfc/
server.retrieve({
"class": "mc",
"dataset": "macc",
"date": "%s/to/%s" % (startdate, stopdate),
"expver": "rean",
"grid": "0.75/0.75",
"levtype": "sfc",
"param": params,
"step": "3/6/9/12/15/18/21/24",
"stream": "oper",
"format": "netcdf",
"time": "00:00:00",
"type": "fc",
"target": targetname,
})
def get_ecmwf_macc(filename, params, startdate, stopdate, lookup_params=True,
server=None, target=_ecmwf):
"""
Download data from ECMWF MACC Reanalysis API.
Parameters
----------
filename : str
full path of file where to save data, ``.nc`` appended if not given
params : str or sequence of str
keynames of parameter[s] to download
startdate : datetime.datetime or datetime.date
UTC date
stopdate : datetime.datetime or datetime.date
UTC date
lookup_params : bool, default True
optional flag, if ``False``, then codes are already formatted
server : ecmwfapi.api.ECMWFDataServer
optionally provide a server object, default is ``None``
target : callable
optional function that calls ``server.retrieve`` to pass to thread
Returns
-------
t : thread
a thread object, use it to check status by calling `t.is_alive()`
Notes
-----
To download data from ECMWF requires the API client and a registration
key. Please read the documentation in `Access ECMWF Public Datasets
<https://confluence.ecmwf.int/display/WEBAPI/Access+ECMWF+Public+Datasets>`_.
Follow the instructions in step 4 and save the ECMWF registration key
    as `$HOME/.ecmwfapirc` or set `ECMWF_API_KEY` as the path to the key.
This function returns a daemon thread that runs in the background. Exiting
Python will kill this thread, however this thread will not block the main
thread or other threads. This thread will terminate when the file is
downloaded or if the thread raises an unhandled exception. You may submit
multiple requests simultaneously to break up large downloads. You can also
check the status and retrieve downloads online at
http://apps.ecmwf.int/webmars/joblist/. This is useful if you kill the
thread. Downloads expire after 24 hours.
.. warning:: Your request may be queued online for an hour or more before
it begins to download
Precipitable water :math:`P_{wat}` is equivalent to the total column of
water vapor (TCWV), but the units given by ECMWF MACC Reanalysis are kg/m^2
at STP (1-atm, 25-C). Divide by ten to convert to centimeters of
precipitable water:
.. math::
P_{wat} \\left( \\text{cm} \\right) \
= TCWV \\left( \\frac{\\text{kg}}{\\text{m}^2} \\right) \
\\frac{100 \\frac{\\text{cm}}{\\text{m}}} \
{1000 \\frac{\\text{kg}}{\\text{m}^3}}
The keynames available for the ``params`` argument are given by
:const:`pvlib.iotools.ecmwf_macc.PARAMS` which maps the keys to codes used
in the API. The following keynames are available:
======= =========================================
keyname description
======= =========================================
tcwv total column water vapor in kg/m^2 at STP
aod550 aerosol optical depth measured at 550-nm
aod469 aerosol optical depth measured at 469-nm
aod670 aerosol optical depth measured at 670-nm
aod865 aerosol optical depth measured at 865-nm
aod1240 aerosol optical depth measured at 1240-nm
======= =========================================
If ``lookup_params`` is ``False`` then ``params`` must contain the codes
preformatted according to the ECMWF MACC Reanalysis API. This is useful if
you want to retrieve codes that are not mapped in
:const:`pvlib.iotools.ecmwf_macc.PARAMS`.
Specify a custom ``target`` function to modify how the ECMWF API function
``server.retrieve`` is called. The ``target`` function must have the
following signature in which the parameter definitions are similar to
:func:`pvlib.iotools.get_ecmwf_macc`. ::
target(server, startdate, stopdate, params, filename) -> None
Examples
--------
Retrieve the AOD measured at 550-nm and the total column of water vapor for
November 1, 2012.
>>> from datetime import date
>>> from pvlib.iotools import get_ecmwf_macc
>>> filename = 'aod_tcwv_20121101.nc' # .nc extension added if missing
>>> params = ('aod550', 'tcwv')
>>> start = end = date(2012, 11, 1)
>>> t = get_ecmwf_macc(filename, params, start, end)
>>> t.is_alive()
True
"""
if not filename.endswith('nc'):
filename += '.nc'
if lookup_params:
try:
params = '/'.join(PARAMS.get(p) for p in params)
except TypeError:
params = PARAMS.get(params)
startdate = startdate.strftime('%Y-%m-%d')
stopdate = stopdate.strftime('%Y-%m-%d')
if not server:
server = ECMWFDataServer()
t = threading.Thread(target=target, daemon=True,
args=(server, startdate, stopdate, params, filename))
t.start()
return t
class ECMWF_MACC(object):
"""container for ECMWF MACC reanalysis data"""
TCWV = 'tcwv' # total column water vapor in kg/m^2 at (1-atm,25-degC)
def __init__(self, filename):
self.data = netCDF4.Dataset(filename)
# data variables and dimensions
variables = set(self.data.variables.keys())
dimensions = set(self.data.dimensions.keys())
self.keys = tuple(variables - dimensions)
# size of lat/lon dimensions
self.lat_size = self.data.dimensions['latitude'].size
self.lon_size = self.data.dimensions['longitude'].size
# spatial resolution in degrees
self.delta_lat = -180.0 / (self.lat_size - 1) # from north to south
self.delta_lon = 360.0 / self.lon_size # from west to east
# time resolution in hours
self.time_size = self.data.dimensions['time'].size
self.start_time = self.data['time'][0]
self.stop_time = self.data['time'][-1]
self.time_range = self.stop_time - self.start_time
self.delta_time = self.time_range / (self.time_size - 1)
def get_nearest_indices(self, latitude, longitude):
"""
Get nearest indices to (latitude, longitude).
        Parameters
----------
latitude : float
Latitude in degrees
longitude : float
Longitude in degrees
Returns
-------
idx_lat : int
index of nearest latitude
idx_lon : int
index of nearest longitude
"""
# index of nearest latitude
idx_lat = int(round((latitude - 90.0) / self.delta_lat))
        # avoid out of bounds latitudes (valid indices are 0..lat_size - 1)
        if idx_lat < 0:
            idx_lat = 0  # if latitude == 90, north pole
        elif idx_lat > self.lat_size - 1:
            idx_lat = self.lat_size - 1  # if latitude == -90, south pole
# adjust longitude from -180/180 to 0/360
longitude = longitude % 360.0
# index of nearest longitude
idx_lon = int(round(longitude / self.delta_lon)) % self.lon_size
return idx_lat, idx_lon
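    # Added worked example of the index math above: on the 0.75-degree MACC
    # grid (lat_size=241, lon_size=480, delta_lat=-0.75, delta_lon=0.75),
    # latitude 38.2 maps to int(round((38.2 - 90.0) / -0.75)) = 69 and
    # longitude -122.1 maps to int(round((-122.1 % 360.0) / 0.75)) % 480 = 317.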
def interp_data(self, latitude, longitude, utc_time, param):
"""
Interpolate ``param`` values to ``utc_time`` using indices nearest to
(``latitude, longitude``).
        Parameters
----------
latitude : float
Latitude in degrees
longitude : float
Longitude in degrees
utc_time : datetime.datetime or datetime.date
Naive or UTC date or datetime to interpolate
param : str
Name of the parameter to interpolate from the data
Returns
-------
Interpolated ``param`` value at (``utc_time, latitude, longitude``)
Examples
--------
Use this to get a single value of a parameter in the data at a specific
time and set of (latitude, longitude) coordinates.
>>> from datetime import datetime
>>> from pvlib.iotools import ecmwf_macc
>>> data = ecmwf_macc.ECMWF_MACC('aod_tcwv_20121101.nc')
>>> dt = datetime(2012, 11, 1, 11, 33, 1)
>>> data.interp_data(38.2, -122.1, dt, 'aod550')
"""
nctime = self.data['time'] # time
ilat, ilon = self.get_nearest_indices(latitude, longitude)
# time index before
before = netCDF4.date2index(utc_time, nctime, select='before')
fbefore = self.data[param][before, ilat, ilon]
fafter = self.data[param][before + 1, ilat, ilon]
dt_num = netCDF4.date2num(utc_time, nctime.units)
time_ratio = (dt_num - nctime[before]) / self.delta_time
return fbefore + (fafter - fbefore) * time_ratio
def read_ecmwf_macc(filename, latitude, longitude, utc_time_range=None):
"""
Read data from ECMWF MACC reanalysis netCDF4 file.
Parameters
----------
filename : string
full path to netCDF4 data file.
latitude : float
latitude in degrees
longitude : float
longitude in degrees
utc_time_range : sequence of datetime.datetime
pair of start and stop naive or UTC date-times
Returns
-------
data : pandas.DataFrame
dataframe for specified range of UTC date-times
"""
ecmwf_macc = ECMWF_MACC(filename)
try:
ilat, ilon = ecmwf_macc.get_nearest_indices(latitude, longitude)
nctime = ecmwf_macc.data['time']
if utc_time_range:
start_idx = netCDF4.date2index(
utc_time_range[0], nctime, select='before')
stop_idx = netCDF4.date2index(
utc_time_range[-1], nctime, select='after')
time_slice = slice(start_idx, stop_idx + 1)
else:
time_slice = slice(0, ecmwf_macc.time_size)
times = netCDF4.num2date(nctime[time_slice], nctime.units)
df = {k: ecmwf_macc.data[k][time_slice, ilat, ilon]
for k in ecmwf_macc.keys}
if ECMWF_MACC.TCWV in df:
# convert total column water vapor in kg/m^2 at (1-atm, 25-degC) to
# precipitable water in cm
df['precipitable_water'] = df[ECMWF_MACC.TCWV] / 10.0
finally:
ecmwf_macc.data.close()
return pd.DataFrame(df, index=times.astype('datetime64[s]'))
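# Added usage sketch (not part of the original module): read one day of data
# at a single site from a previously downloaded file (see get_ecmwf_macc
# above for the download step).
#     from datetime import datetime
#     times = (datetime(2012, 11, 1, 0), datetime(2012, 11, 1, 21))
#     df = read_ecmwf_macc('aod_tcwv_20121101.nc', 38.2, -122.1, times)
#     print(df[['aod550', 'precipitable_water']])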
|
alorenzo175/pvlib-python
|
pvlib/iotools/ecmwf_macc.py
|
Python
|
bsd-3-clause
| 11,452
|
[
"NetCDF"
] |
4141874f9239557a18a62c2c1d7f42dd1fef291125b135c05f635516991c6915
|
import os
from uuid import uuid4
from pulsar import __version__ as pulsar_version
from .util import filter_destination_params
REMOTE_SYSTEM_PROPERTY_PREFIX = "remote_property_"
def build(client, destination_args):
""" Build a SetupHandler object for client from destination parameters.
"""
# Have defined a remote job directory, lets do the setup locally.
if client.job_directory:
handler = LocalSetupHandler(client, destination_args)
else:
handler = RemoteSetupHandler(client)
return handler
class LocalSetupHandler(object):
""" Parse destination params to infer job setup parameters (input/output
    directories, etc...). The default behavior, by contrast, is to fetch this
    configuration data from the remote Pulsar server.
    The downside of this approach is that it requires more Galaxy-side
    configuration; the upside is that it is asynchronous and thus makes
    message-queue-driven configurations possible.
Remote system properties (such as galaxy_home) can be specified in
destination args by prefixing property with remote_property_ (e.g.
remote_property_galaxy_home).
"""
def __init__(self, client, destination_args):
self.client = client
system_properties = self.__build_system_properties(destination_args)
system_properties["separator"] = client.job_directory.separator
self.system_properties = system_properties
self.jobs_directory = destination_args["jobs_directory"]
self.assign_ids = destination_args.get("assign_ids", "galaxy")
def setup(self, job_id, tool_id=None, tool_version=None, preserve_galaxy_python_environment=None):
if self.assign_ids == "uuid":
job_id = uuid4().hex
# Following is a gross hack but same gross hack in pulsar.client.staging.up
if self.client.job_id != job_id:
self.client.assign_job_id(job_id)
return build_job_config(
job_id=job_id,
job_directory=self.client.job_directory,
system_properties=self.system_properties,
tool_id=tool_id,
tool_version=tool_version,
preserve_galaxy_python_environment=preserve_galaxy_python_environment,
)
@property
def local(self):
"""
"""
return True
def __build_system_properties(self, destination_params):
return filter_destination_params(destination_params, REMOTE_SYSTEM_PROPERTY_PREFIX)
class RemoteSetupHandler(object):
""" Default behavior. Fetch setup information from remote Pulsar server.
"""
def __init__(self, client):
self.client = client
def setup(self, **setup_args):
setup_args["use_metadata"] = "true"
return self.client.remote_setup(**setup_args)
@property
def local(self):
"""
"""
return False
def build_job_config(job_id, job_directory, system_properties={}, tool_id=None, tool_version=None, preserve_galaxy_python_environment=None):
"""
"""
inputs_directory = job_directory.inputs_directory()
working_directory = job_directory.working_directory()
metadata_directory = job_directory.metadata_directory()
outputs_directory = job_directory.outputs_directory()
configs_directory = job_directory.configs_directory()
tools_directory = job_directory.tool_files_directory()
unstructured_files_directory = job_directory.unstructured_files_directory()
sep = system_properties.get("sep", os.sep)
job_config = {
"job_directory": job_directory.path,
"working_directory": working_directory,
"metadata_directory": metadata_directory,
"outputs_directory": outputs_directory,
"configs_directory": configs_directory,
"tools_directory": tools_directory,
"inputs_directory": inputs_directory,
"unstructured_files_directory": unstructured_files_directory,
# Poorly named legacy attribute. Drop at some point.
"path_separator": sep,
"job_id": job_id,
"system_properties": system_properties,
"pulsar_version": pulsar_version,
"preserve_galaxy_python_environment": preserve_galaxy_python_environment,
}
if tool_id:
job_config["tool_id"] = tool_id
if tool_version:
job_config["tool_version"] = tool_version
return job_config
__all__ = ['build_job_config', 'build']
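# Added illustrative sketch (not part of the original module): destination
# parameters like these select the local setup path (the client has a job
# directory) and attach one remote system property; the paths and tool id
# below are hypothetical.
#     destination_args = {
#         "jobs_directory": "/pulsar/staging",
#         "assign_ids": "uuid",
#         "remote_property_galaxy_home": "/srv/galaxy",
#     }
#     handler = build(client, destination_args)
#     job_config = handler.setup(job_id, tool_id="cat1", tool_version="1.0")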
|
natefoo/pulsar
|
pulsar/client/setup_handler.py
|
Python
|
apache-2.0
| 4,404
|
[
"Galaxy"
] |
9300ba7e008a8700fb23f55760acc7de73a3ad224f8b7961b49b3e0a998edce1
|
#!/usr/bin/env python
# Author: Andrew Jewett (jewett.aij at g mail)
# http://www.chem.ucsb.edu/~sheagroup
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2011, Regents of the University of California
# All rights reserved.
"""
bonds_by_type.py reads a LAMMPS data file (or an excerpt of a LAMMPS
data file) containing bonded many-body interactions by atom type
(and bond type), and generates a list of additional interactions
in LAMMPS format consistent with those types (to the standard out).
Typical Usage:
bonds_by_type.py -atoms atoms.data \\
-bonds bonds.data \\
-bondsbytype bonds_by_type.data \\
> new_bonds.data
"""
# -bonds-ids-atom-pairs bonds_ids_atom_pairs.data \\
import sys
import re  # needed for the /regex/ type patterns in LookupBondTypes()
#from extract_lammps_data import *
#from nbody_by_type_lib import GenInteractions_str
import ttree_lex
#from ttree_lex import *
from lttree_styles import AtomStyle2ColNames, ColNames2AidAtypeMolid
def LookupBondTypes(bond_types,
bond_ids,
bond_pairs,
lines_atoms,
lines_bonds,
lines_bondsbytype,
atom_style,
section_name,
prefix='',
suffix='',
bond_ids_offset=0):
#report_progress = False):
"""
LookupBondTypes() looks up bond types.
Output:
...It looks up the corresponding type of each bond and store it in the
"bond_types" list. (If the bond_ids were not specified by the user,
generate them and store them in the bond_ids list.)
Input (continued):
This function requires:
...a list of bonded pairs of atoms
stored in the lines_bonds variable (from the "Data Bond List"
or "Data Bonds AtomId AtomId" sections)
...and a list of atom types
stored in the lines_atoms variable (from the "Data Atoms" section)
...and a list of bond-types-as-a-function-of-atom-types
stored in the lines_bondsbytype (from the "Data Bonds By Type" section)
Generated bond_ids (if applicable) are of the form
prefix + str(number) + suffix
(where "number" begins at bond_ids_offset+1)
"""
column_names = AtomStyle2ColNames(atom_style)
i_atomid, i_atomtype, i_molid = ColNames2AidAtypeMolid(column_names)
atomids = []
atomtypes = []
atomids2types = {}
for iv in range(0, len(lines_atoms)):
line = lines_atoms[iv].strip()
if '#' in line:
icomment = line.find('#')
line = (line[:icomment]).strip()
if len(line) > 0:
tokens = ttree_lex.SplitQuotedString(line)
if ((len(tokens) <= i_atomid) or (len(tokens) <= i_atomtype)):
sys.stderr.write("\""+line+"\"\n")
raise(ttree_lex.InputError('Error not enough columns on line '+str(iv+1)+' of \"Atoms\" section.'))
atomid = ttree_lex.EscCharStrToChar(tokens[i_atomid])
atomids.append(atomid)
atomtype = ttree_lex.EscCharStrToChar(tokens[i_atomtype])
atomtypes.append(atomtype)
atomids2types[atomid] = atomtype
assert(isinstance(bond_ids, list))
assert(isinstance(bond_types, list))
assert(isinstance(bond_pairs, list))
del bond_ids[:]
del bond_types[:]
del bond_pairs[:]
for ie in range(0, len(lines_bonds)):
line = lines_bonds[ie].strip()
if '#' in line:
icomment = line.find('#')
line = (line[:icomment]).strip()
if len(line) == 0:
continue
tokens = ttree_lex.SplitQuotedString(line)
if section_name == "Data Bonds AtomId AtomId":
if len(tokens) == 2:
bondid_n = bond_ids_offset + len(bond_ids) + 1
bond_ids.append(prefix+str(bondid_n)+suffix)
bond_pairs.append( (ttree_lex.EscCharStrToChar(tokens[0]),
ttree_lex.EscCharStrToChar(tokens[1])) )
else:
raise(ttree_lex.InputError('Incorrect number of columns on line '+str(ie+1)+' of \"'+section_name+'\" section.'))
elif section_name == "Data Bond List":
if len(tokens) == 3:
bond_ids.append(ttree_lex.EscCharStrToChar(tokens[0]))
bond_pairs.append( (ttree_lex.EscCharStrToChar(tokens[1]),
ttree_lex.EscCharStrToChar(tokens[2])) )
else:
raise(ttree_lex.InputError('Incorrect number of columns on line '+str(ie+1)+' of \"'+section_name+'\" section.'))
else:
raise(ttree_lex.InputError('Internal Error ('+g_program_name+'): Unknown section name: \"'+section_name+'\"'))
assert(len(bond_types) == 0)
typepattern_to_coefftypes = []
for i in range(0, len(lines_bondsbytype)):
line = lines_bondsbytype[i].strip()
if '#' in line:
icomment = line.find('#')
line = (line[:icomment]).strip()
if len(line) > 0:
tokens = ttree_lex.SplitQuotedString(line)
if (len(tokens) != 3):
raise(ttree_lex.InputError('Error: Wrong number of columns in the \"Bonds By Type\" section of data file.\n'
'Offending line:\n'+
'\"'+line+'\"\n'
'Expected 3 columns\n'))
coefftype = ttree_lex.EscCharStrToChar(tokens[0])
typepattern = []
for typestr in tokens[1:]:
if ((len(typestr) >= 2) and
(typestr[0] == '/') and (typestr[-1] == '/')):
regex_str = typestr[1:-1]
typepattern.append( re.compile(regex_str) )
else:
typepattern.append(ttree_lex.EscCharStrToChar(typestr))
typepattern_to_coefftypes.append([typepattern, coefftype])
assert(len(bond_ids) == len(bond_pairs))
for ie in range(0,len(bond_ids)):
bond_types.append(None)
for ie in range(0, len(bond_ids)):
bondid = bond_ids[ie]
(atomid1, atomid2) = bond_pairs[ie]
if atomid1 not in atomids2types:
raise ttree_lex.InputError('Error: atom \"'+atomid1+'\" not defined in \"Data Atoms\".\n'
' This usually happens when the user mistypes one of the names of the\n'
' $atoms in either a \"Data Atoms\" or \"Data Bond List\" section.\n'
                                       ' To find out where the mistake occurred, search the \n'
' \"ttree_assignments.txt\" file for:\n'
' \"'+atomid1+'\"\n')
if atomid2 not in atomids2types:
raise ttree_lex.InputError('Error: atom \"'+atomid2+'\" not defined in \"Data Atoms\".\n'
' This usually happens when the user mistypes one of the names of the\n'
' $atoms in either a \"Data Atoms\" or \"Data Bond List\" section.\n'
                                       ' To find out where the mistake occurred, search the \n'
' \"ttree_assignments.txt\" file for:\n'
' \"'+atomid2+'\"\n')
atomtype1 = atomids2types[atomid1]
atomtype2 = atomids2types[atomid2]
for typepattern, coefftype in typepattern_to_coefftypes:
# use string comparisons to check if atom types match the pattern
if (ttree_lex.MatchesAll((atomtype1, atomtype2), typepattern) or
ttree_lex.MatchesAll((atomtype2, atomtype1), typepattern)):
# ("MatchesAll()" defined in "ttree_lex.py")
bond_types[ie] = coefftype
for ie in range(0, len(bond_ids)):
if not bond_types[ie]:
(atomid1, atomid2) = bond_pairs[ie]
atomtype1 = atomids2types[atomid1]
atomtype2 = atomids2types[atomid2]
raise ttree_lex.InputError('Error: No bond types defined for the bond between\n'
' atoms '+atomid1+' (type '+atomtype1+')\n'
' and '+atomid2+' (type '+atomtype2+')\n')
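# Added illustrative note (not from the original file): each line of the
# "Data Bonds By Type" section has three columns -- a bond type followed by
# two atom-type patterns, where /slash-delimited/ entries are treated as
# regular expressions. With hypothetical type names:
#     @bond:CC @atom:C @atom:C
#     @bond:CH @atom:C /H.*/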
if __name__ == "__main__":
    g_program_name = __file__.split('/')[-1]  # = 'bonds_by_type.py'
g_date_str = '2015-11-09'
g_version_str = '0.11'
####### Main Code Below: #######
sys.stderr.write(g_program_name+' v'+g_version_str+' '+g_date_str+' ')
if sys.version < '3':
sys.stderr.write(' (python version < 3)\n')
else:
sys.stderr.write('\n')
try:
fname_atoms = None
fname_bond_list = None
fname_bondsbytype = None
section_name = 'Data Bond List' # (This will be replaced later.)
atom_style = 'full'
prefix=''
suffix=''
bond_lack_types = False
argv = [arg for arg in sys.argv]
# Loop over the remaining arguments not processed yet.
# These arguments are specific to the lttree.py program
# and are not understood by ttree.py:
i = 1
while i < len(argv):
#sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
            if ((argv[i].lower() == '-?') or
                (argv[i].lower() == '--?') or
                (argv[i].lower() == '-help') or
                (argv[i].lower() == '--help')):
if i+1 >= len(argv):
                    sys.stdout.write(__doc__+'\n')  # module docstring as usage text
sys.exit(0)
elif argv[i].lower() == '-atoms':
if i+1 >= len(argv):
raise ttree_lex.InputError('Error: '+argv[i]+' flag should be followed by a file name containing lines of\n'
' text which might appear in the "Atoms" section of a LAMMPS data file.\n')
fname_atoms = argv[i+1]
del(argv[i:i+2])
elif argv[i].lower() == '-bonds':
if i+1 >= len(argv):
raise ttree_lex.InputError('Error: '+argv[i]+' flag should be followed by a file name containing lines of\n'
' text which might appear in the "Bonds" section of a LAMMPS data file.\n')
fname_bond_list = argv[i+1]
del(argv[i:i+2])
elif argv[i].lower() == '-bond-list':
if i+1 >= len(argv):
raise ttree_lex.InputError('Error: '+argv[i]+' flag should be followed by a file name\n')
#raise ttree_lex.InputError('Error: '+argv[i]+' flag should be followed by a file name containing lines of\n'
# ' text which might appear in the "Bonds No Types" section of a LAMMPS data file.\n')
fname_bond_list = argv[i+1]
section_name = "Data Bond List"
del(argv[i:i+2])
elif argv[i].lower() == '-bondsbytype':
if i+1 >= len(argv):
raise ttree_lex.InputError('Error: '+argv[i]+' flag should be followed by a file name\n')
#raise ttree_lex.InputError('Error: '+argv[i]+' flag should be followed by a file name containing\n'
# ' text which might appear in the "'+section_name+' By Type" section\n'
# ' of a LAMMPS data file.\n')
fname_bondsbytype = argv[i+1]
del(argv[i:i+2])
elif ((argv[i].lower() == '-atom-style') or
(argv[i].lower() == '-atom_style')):
if i+1 >= len(argv):
raise ttree_lex.InputError('Error: '+argv[i]+' flag should be followed by a an atom_style name.\n'
' (Or single quoted string which includes a space-separated\n'
' list of column names.)\n')
atom_style = argv[i+1]
del(argv[i:i+2])
elif argv[i].lower() == '-prefix':
if i+1 >= len(argv):
raise ttree_lex.InputError('Error: '+argv[i]+' flag should be followed by a prefix string\n'
' (a string you want to appear to the left of the integer\n'
' which counts the bonded interactions you have generated.)\n')
prefix = argv[i+1]
del(argv[i:i+2])
elif argv[i].lower() == '-suffix':
if i+1 >= len(argv):
raise ttree_lex.InputError('Error: '+argv[i]+' flag should be followed by a suffix string\n'
' (a string you want to appear to the right of the integer\n'
' which counts the bonded interactions you have generated.)\n')
                suffix = argv[i+1]
del(argv[i:i+2])
elif argv[i][0] == '-':
raise ttree_lex.InputError('Error('+g_program_name+'):\n'
'Unrecognized command line argument \"'+argv[i]+'\"\n')
else:
i += 1
if len(argv) != 1:
# if there are more than 2 remaining arguments,
problem_args = ['\"'+arg+'\"' for arg in argv[1:]]
raise ttree_lex.InputError('Syntax Error('+g_program_name+'):\n\n'
' Problem with argument list.\n'
' The remaining arguments are:\n\n'
' '+(' '.join(problem_args))+'\n\n'
' (The actual problem may be earlier in the argument list.)\n')
bond_types = []
bond_ids = []
bond_pairs = []
fatoms = open(fname_atoms, 'r')
fbonds = open(fname_bond_list, 'r')
fbondsbytype = open(fname_bondsbytype, 'r')
lines_atoms = fatoms.readlines()
lines_bonds = fbonds.readlines()
lines_bondsbytype = fbondsbytype.readlines()
fatoms.close()
fbonds.close()
fbondsbytype.close()
LookupBondTypes(bond_types,
bond_ids,
bond_pairs,
lines_atoms,
lines_bonds,
lines_bondsbytype,
atom_style,
section_name,
                        prefix=prefix,
                        suffix=suffix)
assert(len(bond_types) == len(bond_ids) == len(bond_pairs))
ie=0
N = len(bond_types)
for ie in range(0, N):
sys.stdout.write(bond_ids[ie] + ' ' +
bond_types[ie] + ' ' +
bond_pairs[ie][0] + ' ' +
bond_pairs[ie][1] + '\n')
except (ValueError, ttree_lex.InputError) as err:
sys.stderr.write('\n'+str(err)+'\n')
sys.exit(-1)
|
jag1g13/lammps
|
tools/moltemplate/src/bonds_by_type.py
|
Python
|
gpl-2.0
| 15,426
|
[
"LAMMPS"
] |
7d3f285494ff2519ccda6ae9aa613a642ed386bb0956bcb676c663cfbc8c81a4
|
# -*- coding: utf-8 -*-
## Ranking of records using different parameters and methods on the fly.
##
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
import string
import time
import math
import re
import ConfigParser
import copy, os
from invenio.config import \
CFG_SITE_LANG, \
CFG_ETCDIR
from invenio.dbquery import run_sql, deserialize_via_marshal
from invenio.errorlib import register_exception
from invenio.webpage import adderrorbox
from invenio.bibindex_engine_stemmer import stem
from invenio.bibindex_engine_stopwords import is_stopword
from invenio.bibrank_citation_searcher import get_cited_by, get_cited_by_weight
from invenio.intbitset import intbitset
from invenio.bibrank_drank_sorter import rnkDict
def rescale(recdict, rank_limit_relevance):
"""Rescale list so that values are between 0-100."""
reclist = []
divideby = max(recdict.values())
for (j, w) in recdict.iteritems():
w = round((w * 99 / divideby),2)
if w >= rank_limit_relevance:
reclist.append((j, w))
return reclist
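# Added worked example (not from the original file): rescale({1: 2.0, 2: 4.0}, 10)
# divides each value by the maximum (4.0) and multiplies by 99, yielding the
# pairs (1, 49.5) and (2, 99.0); both pass the rank_limit_relevance of 10.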
def rescale_list(reclist, rank_limit_relevance):
    """Rescale list so that values are between 0-100."""
    divideby = max(value for (recid, value) in reclist)
    reclist_new = []
    # Filter (recid, value) pairs together so record IDs stay aligned with
    # their rescaled values.
    for (recid, value) in reclist:
        value = int(value * 100 / divideby)
        if value >= rank_limit_relevance:
            reclist_new.append((recid, value))
    return reclist_new
def compare_on_val(first, second):
"""Compare on val."""
return cmp(second[1], first[1])
def check_term(term, col_size, term_rec, max_occ, min_occ, termlength):
"""Check if the tem is valid for use
term - the term to check
col_size - the number of records in database
term_rec - the number of records which contains this term
max_occ - max frequency of the term allowed
min_occ - min frequence of the term allowed
termlength - the minimum length of the terms allowed"""
try:
if is_stopword(term, 1) or (len(term) <= termlength) or ((float(term_rec) / float(col_size)) >= max_occ) or ((float(term_rec) / float(col_size)) <= min_occ):
return ""
if int(term):
return ""
except StandardError, e:
pass
return "true"
def create_rnkmethod_cache():
"""Create cache with vital information for each rank method."""
global methods
bibrank_meths = run_sql("SELECT name from rnkMETHOD")
methods = {}
global voutput
voutput = ""
for (rank_method_code,) in bibrank_meths:
try:
file = CFG_ETCDIR + "/bibrank/" + rank_method_code + ".cfg"
config = ConfigParser.ConfigParser()
config.readfp(open(file))
except StandardError, e:
pass
if os.path.exists(file) and config.has_section("rank_method"):
cfg_function = config.get("rank_method", "function")
if config.has_section(cfg_function):
methods[rank_method_code] = {}
methods[rank_method_code]["function"] = cfg_function
methods[rank_method_code]["prefix"] = config.get(cfg_function, "relevance_number_output_prologue")
methods[rank_method_code]["postfix"] = config.get(cfg_function, "relevance_number_output_epilogue")
methods[rank_method_code]["chars_alphanumericseparators"] = r"[1234567890\!\"\#\$\%\&\'\(\)\*\+\,\-\.\/\:\;\<\=\>\?\@\[\\\]\^\_\`\{\|\}\~]"
else:
raise Exception("Error in configuration file: %s" % (CFG_ETCDIR + "/bibrank/" + rank_method_code + ".cfg"))
i8n_names = run_sql("""SELECT ln,value from rnkMETHODNAME,rnkMETHOD where id_rnkMETHOD=rnkMETHOD.id and rnkMETHOD.name=%s""", (rank_method_code,))
for (ln, value) in i8n_names:
methods[rank_method_code][ln] = value
if config.has_option(cfg_function, "table"):
methods[rank_method_code]["rnkWORD_table"] = config.get(cfg_function, "table")
methods[rank_method_code]["col_size"] = run_sql("SELECT count(*) FROM %sR" % methods[rank_method_code]["rnkWORD_table"][:-1])[0][0]
if config.has_option("cfg_function", "stemming") and config.get(cfg_function, "stemming"):
try:
methods[rank_method_code]["stemmer"] = config.get(cfg_function, "stemming")
except Exception,e:
pass
if config.has_option(cfg_function, "stopword"):
methods[rank_method_code]["stopwords"] = config.get(cfg_function, "stopword")
if config.has_section("find_similar"):
methods[rank_method_code]["max_word_occurence"] = float(config.get("find_similar", "max_word_occurence"))
methods[rank_method_code]["min_word_occurence"] = float(config.get("find_similar", "min_word_occurence"))
methods[rank_method_code]["min_word_length"] = int(config.get("find_similar", "min_word_length"))
methods[rank_method_code]["min_nr_words_docs"] = int(config.get("find_similar", "min_nr_words_docs"))
methods[rank_method_code]["max_nr_words_upper"] = int(config.get("find_similar", "max_nr_words_upper"))
methods[rank_method_code]["max_nr_words_lower"] = int(config.get("find_similar", "max_nr_words_lower"))
methods[rank_method_code]["default_min_relevance"] = int(config.get("find_similar", "default_min_relevance"))
if config.has_section("combine_method"):
i = 1
methods[rank_method_code]["combine_method"] = []
while config.has_option("combine_method", "method%s" % i):
methods[rank_method_code]["combine_method"].append(string.split(config.get("combine_method", "method%s" % i), ","))
i += 1
#Add drank-sorter specific option
try:
if config.has_option(cfg_function,"ranked_by"):
methods[rank_method_code]["ranked_by"] = config.get(cfg_function, "ranked_by")
if config.has_option(cfg_function,"description"):
methods[rank_method_code]["description"] = config.get(cfg_function, "description")
if config.has_option("drank_parameters","weight_of_relevance"):
methods[rank_method_code]["weight_of_relevance"] = config.getfloat("drank_parameters", "weight_of_relevance")
if config.has_option("drank_parameters","weight_of_exposed"):
methods[rank_method_code]["weight_of_exposed"] = config.getfloat("drank_parameters", "weight_of_exposed")
if config.has_option("drank_parameters","drank_lut_table"):
methods[rank_method_code]["drank_lut_table"] = config.get("drank_parameters", "drank_lut_table")
except Exception,e:
pass
def is_method_valid(colID, rank_method_code):
"""
Check if RANK_METHOD_CODE method is valid for the collection given.
If colID is None, then check for existence regardless of collection.
"""
if colID is None:
return run_sql("SELECT COUNT(*) FROM rnkMETHOD WHERE name=%s", (rank_method_code,))[0][0]
    enabled_colls = dict(run_sql("SELECT id_collection, score from collection_rnkMETHOD,rnkMETHOD WHERE id_rnkMETHOD=rnkMETHOD.id AND name=%s", (rank_method_code,)))
try:
colID = int(colID)
except TypeError:
return 0
if enabled_colls.has_key(colID):
return 1
else:
while colID:
colID = run_sql("SELECT id_dad FROM collection_collection WHERE id_son=%s" % colID)
if colID and enabled_colls.has_key(colID[0][0]):
return 1
elif colID:
colID = colID[0][0]
return 0
def get_bibrank_methods(colID, ln=CFG_SITE_LANG):
"""
Return a list of rank methods enabled for collection colID and the
name of them in the language defined by the ln parameter.
"""
if not globals().has_key('methods'):
create_rnkmethod_cache()
avail_methods = []
for (rank_method_code, options) in methods.iteritems():
if options.has_key("function") and is_method_valid(colID, rank_method_code):
if options.has_key(ln):
avail_methods.append((rank_method_code, options[ln]))
elif options.has_key(CFG_SITE_LANG):
avail_methods.append((rank_method_code, options[CFG_SITE_LANG]))
else:
avail_methods.append((rank_method_code, rank_method_code))
return avail_methods
def ranked(_rnkdict, hitset):
""""""
rnkdict = rnkDict()
rnkdict.loaddict(_rnkdict)
#rnkdict.clean()
rnkdict.filter(list(hitset))
result = (rnkdict.rank(),"(", ")", "")
return result # rnkdict.rank()
def rank_records(rank_method_code, rank_limit_relevance, hitset_global, pattern=[], verbose=0, _rescale=1):
"""rank_method_code, e.g. `jif' or `sbr' (word frequency vector model)
rank_limit_relevance, e.g. `23' for `nbc' (number of citations) or `0.10' for `vec'
hitset, search engine hits;
pattern, search engine query or record ID (you check the type)
verbose, verbose level
output:
list of records
list of rank values
prefix
postfix
verbose_output"""
global voutput
voutput = ""
configcreated = ""
starttime = time.time()
afterfind = starttime - time.time()
aftermap = starttime - time.time()
try:
hitset = copy.deepcopy(hitset_global) #we are receiving a global hitset
if not globals().has_key('methods'):
create_rnkmethod_cache()
function = methods[rank_method_code]["function"]
#we get 'citation' method correctly here
func_object = globals().get(function)
if func_object and pattern and pattern[0][0:6] == "recid:" and function == "word_similarity":
result = find_similar(rank_method_code, pattern[0][6:], hitset, rank_limit_relevance, verbose,_rescale)
elif func_object and function == "distributed_ranking":
            result = distributed_ranking(rank_method_code, pattern, hitset, rank_limit_relevance, 9, 0)
# elif rank_method_code == "qr":
# result = ranked(rank_method_code, hitset)
elif rank_method_code == "citation":
#we get rank_method_code correctly here. pattern[0] is the search word - not used by find_cit
p = ""
if pattern and pattern[0]:
p = pattern[0][6:]
result = find_citations(rank_method_code, p, hitset, verbose)
elif func_object and function == "word_similarity":
result=func_object(rank_method_code, pattern, hitset, rank_limit_relevance, verbose,_rescale)
elif func_object:
result = func_object(rank_method_code, pattern, hitset, rank_limit_relevance, verbose)
else:
result = rank_by_method(rank_method_code, pattern, hitset, rank_limit_relevance, verbose)
except Exception, e:
register_exception()
result = (None, "", adderrorbox("An error occured when trying to rank the search result "+rank_method_code, ["Unexpected error: %s<br />" % (e,)]), voutput)
afterfind = time.time() - starttime
if result[0] and result[1]: #split into two lists for search_engine
results_similar_recIDs = map(lambda x: x[0], result[0])
results_similar_relevances = map(lambda x: x[1], result[0])
result = (results_similar_recIDs, results_similar_relevances, result[1], result[2], "%s" % configcreated + result[3])
        aftermap = time.time() - starttime
else:
result = (None, None, result[1], result[2], result[3])
if verbose > 0:
voutput = voutput+"\nElapsed time after finding: "+str(afterfind)+"\nElapsed after mapping: "+str(aftermap)
#add stuff from here into voutput from result
tmp = result[4]+voutput
result = (result[0],result[1],result[2],result[3],tmp)
#dbg = string.join(map(str,methods[rank_method_code].items()))
#result = (None, "", adderrorbox("Debug ",rank_method_code+" "+dbg),"",voutput);
return result
def combine_method(rank_method_code, pattern, hitset, rank_limit_relevance,verbose):
"""combining several methods into one based on methods/percentage in config file"""
global voutput
result = {}
try:
for (method, percent) in methods[rank_method_code]["combine_method"]:
function = methods[method]["function"]
func_object = globals().get(function)
percent = int(percent)
if func_object:
this_result = func_object(method, pattern, hitset, rank_limit_relevance, verbose)[0]
else:
this_result = rank_by_method(method, pattern, hitset, rank_limit_relevance, verbose)[0]
for i in range(0, len(this_result)):
(recID, value) = this_result[i]
if value > 0:
result[recID] = result.get(recID, 0) + int((float(i) / len(this_result)) * float(percent))
result = result.items()
result.sort(lambda x, y: cmp(x[1], y[1]))
return (result, "(", ")", voutput)
except Exception, e:
return (None, "Warning: %s method cannot be used for ranking your query." % rank_method_code, "", voutput)
def rank_by_method(rank_method_code, lwords, hitset, rank_limit_relevance,verbose):
"""Ranking of records based on predetermined values.
input:
rank_method_code - the code of the method, from the name field in rnkMETHOD, used to get predetermined values from
rnkMETHODDATA
lwords - a list of words from the query
hitset - a list of hits for the query found by search_engine
rank_limit_relevance - show only records with a rank value above this
verbose - verbose value
output:
reclist - a list of sorted records, with unsorted added to the end: [[23,34], [344,24], [1,01]]
prefix - what to show before the rank value
postfix - what to show after the rank value
voutput - contains extra information, content dependent on verbose value"""
global voutput
    rnkdict = run_sql("SELECT relevance_data FROM rnkMETHODDATA, rnkMETHOD WHERE rnkMETHOD.id=id_rnkMETHOD AND rnkMETHOD.name=%s", (rank_method_code,))
if not rnkdict:
return (None, "Warning: Could not load ranking data for method %s." % rank_method_code, "", voutput)
max_recid = 0
res = run_sql("SELECT max(id) FROM bibrec")
if res and res[0][0]:
max_recid = int(res[0][0])
lwords_hitset = None
for j in range(0, len(lwords)): #find which docs to search based on ranges..should be done in search_engine...
if lwords[j] and lwords[j][:6] == "recid:":
if not lwords_hitset:
lwords_hitset = intbitset()
lword = lwords[j][6:]
if string.find(lword, "->") > -1:
lword = string.split(lword, "->")
if int(lword[0]) >= max_recid or int(lword[1]) >= max_recid + 1:
return (None, "Warning: Given record IDs are out of range.", "", voutput)
for i in range(int(lword[0]), int(lword[1])):
lwords_hitset.add(int(i))
            elif int(lword) < max_recid + 1:
lwords_hitset.add(int(lword))
else:
return (None, "Warning: Given record IDs are out of range.", "", voutput)
rnkdict = deserialize_via_marshal(rnkdict[0][0])
if verbose > 0:
voutput += "<br />Running rank method: %s, using rank_by_method function in bibrank_record_sorter<br />" % rank_method_code
voutput += "Ranking data loaded, size of structure: %s<br />" % len(rnkdict)
lrecIDs = list(hitset)
if verbose > 0:
voutput += "Number of records to rank: %s<br />" % len(lrecIDs)
reclist = []
reclist_addend = []
    if not lwords_hitset: #rank all docs; can this be sped up using something other than a for loop?
for recID in lrecIDs:
if rnkdict.has_key(recID):
reclist.append((recID, rnkdict[recID]))
del rnkdict[recID]
else:
reclist_addend.append((recID, 0))
    else: #rank docs in hitset; can this be sped up using something other than a for loop?
for recID in lwords_hitset:
if rnkdict.has_key(recID) and recID in hitset:
reclist.append((recID, rnkdict[recID]))
del rnkdict[recID]
elif recID in hitset:
reclist_addend.append((recID, 0))
if verbose > 0:
voutput += "Number of records ranked: %s<br />" % len(reclist)
voutput += "Number of records not ranked: %s<br />" % len(reclist_addend)
reclist.sort(lambda x, y: cmp(x[1], y[1]))
return (reclist_addend + reclist, methods[rank_method_code]["prefix"], methods[rank_method_code]["postfix"], voutput)
def find_citations(rank_method_code, recID, hitset, verbose):
"""Rank by the amount of citations."""
#calculate the cited-by values for all the members of the hitset
#returns: ((recordid,weight),prefix,postfix,message)
global voutput
voutput = ""
#If the recID is numeric, return only stuff that cites it. Otherwise return
#stuff that cites hitset
#try to convert to int
recisint = True
recidint = 0
try:
recidint = int(recID)
except:
recisint = False
ret = []
if recisint:
myrecords = get_cited_by(recidint) #this is a simple list
ret = get_cited_by_weight(myrecords)
else:
ret = get_cited_by_weight(hitset)
    ret.sort(lambda x, y: cmp(x[1], y[1])) #ascending by the second member of the tuples
if verbose > 0:
voutput = voutput+"\nrecID "+str(recID)+" is int: "+str(recisint)+" hitset "+str(hitset)+"\n"+"find_citations retlist "+str(ret)
#voutput = voutput + str(ret)
if ret:
return (ret,"(", ")", "")
else:
return ((),"", "", "")
def find_similar(rank_method_code, recID, hitset, rank_limit_relevance,verbose,_rescale=1):
"""Finding terms to use for calculating similarity. Terms are taken from the recid given, returns a list of recids's and relevance,
input:
rank_method_code - the code of the method, from the name field in rnkMETHOD
recID - records to use for find similar
hitset - a list of hits for the query found by search_engine
rank_limit_relevance - show only records with a rank value above this
verbose - verbose value
output:
reclist - a list of sorted records: [[23,34], [344,24], [1,01]]
prefix - what to show before the rank value
postfix - what to show after the rank value
voutput - contains extra information, content dependent on verbose value"""
startCreate = time.time()
global voutput
if verbose > 0:
voutput += "<br />Running rank method: %s, using find_similar/word_frequency in bibrank_record_sorter<br />" % rank_method_code
rank_limit_relevance = methods[rank_method_code]["default_min_relevance"]
try:
recID = int(recID)
    except Exception, e:
return (None, "Warning: Error in record ID, please check that a number is given.", "", voutput)
rec_terms = run_sql("""SELECT termlist FROM %sR WHERE id_bibrec=%%s""" % methods[rank_method_code]["rnkWORD_table"][:-1], (recID,))
if not rec_terms:
return (None, "Warning: Requested record does not seem to exist.", "", voutput)
rec_terms = deserialize_via_marshal(rec_terms[0][0])
#Get all documents using terms from the selected documents
if len(rec_terms) == 0:
return (None, "Warning: Record specified has no content indexed for use with this method.", "", voutput)
else:
terms = "%s" % rec_terms.keys()
terms_recs = dict(run_sql("""SELECT term, hitlist FROM %s WHERE term IN (%s)""" % (methods[rank_method_code]["rnkWORD_table"], terms[1:len(terms) - 1])))
tf_values = {}
#Calculate all term frequencies
for (term, tf) in rec_terms.iteritems():
        if len(term) >= methods[rank_method_code]["min_word_length"] and terms_recs.has_key(term) and tf[1] != 0:
            tf_values[term] = int((1 + math.log(tf[0])) * tf[1]) #calculate term weight
    tf_values = tf_values.items()
    tf_values.sort(lambda x, y: cmp(y[1], x[1])) #sort based on weight
lwords = []
stime = time.time()
(recdict, rec_termcount) = ({}, {})
for (t, tf) in tf_values: #t=term, tf=term frequency
term_recs = deserialize_via_marshal(terms_recs[t])
        if (len(tf_values) <= methods[rank_method_code]["max_nr_words_lower"]
                or (len(term_recs) >= methods[rank_method_code]["min_nr_words_docs"]
                    and ((float(len(term_recs)) / float(methods[rank_method_code]["col_size"])) <= methods[rank_method_code]["max_word_occurence"])
                    and ((float(len(term_recs)) / float(methods[rank_method_code]["col_size"])) >= methods[rank_method_code]["min_word_occurence"]))): #too complicated...something must be done
lwords.append((t, methods[rank_method_code]["rnkWORD_table"])) #list of terms used
(recdict, rec_termcount) = calculate_record_relevance_findsimilar((t, round(tf, 4)) , term_recs, hitset, recdict, rec_termcount, verbose, "true") #true tells the function to not calculate all unimportant terms
if len(tf_values) > methods[rank_method_code]["max_nr_words_lower"] and (len(lwords) == methods[rank_method_code]["max_nr_words_upper"] or tf < 0):
break
if len(recdict) == 0 or len(lwords) == 0:
return (None, "Could not find any similar documents, possibly because of error in ranking data.", "", voutput)
else: #sort if we got something to sort
(reclist, hitset) = sort_record_relevance_findsimilar(recdict,rec_termcount, hitset, rank_limit_relevance, verbose,_rescale)
if verbose > 0:
voutput += "<br />Number of terms: %s<br />" % run_sql("SELECT count(id) FROM %s" % methods[rank_method_code]["rnkWORD_table"])[0][0]
voutput += "Number of terms to use for query: %s<br />" % len(lwords)
voutput += "Terms: %s<br />" % lwords
voutput += "Current number of recIDs: %s<br />" % (methods[rank_method_code]["col_size"])
voutput += "Prepare time: %s<br />" % (str(time.time() - startCreate))
voutput += "Total time used: %s<br />" % (str(time.time() - startCreate))
rank_method_stat(rank_method_code, reclist, lwords)
    return (reclist, methods[rank_method_code]["prefix"], methods[rank_method_code]["postfix"], voutput)
def distributed_ranking(rank_method_code, lwords, hitset, rank_limit_relevance,verbose=0,_rescale=0):
"""Ranking records containing specified words(word similarity) and merging scores with quality scores.
And returns a sorted list.
input:
rank_method_code - the code of the method, from the name field in rnkMETHOD
lwords - a list of words from the query
hitset - a list of hits for the query found by search_engine
rank_limit_relevance - show only records with a rank value above this
verbose - verbose value
_rescale- rescale scores to stay within 0 and 100
output:
reclist - a list of sorted records: [[23, 34], [344, 24], [1, 01]]
prefix - what to show before the rank value
postfix - what to show after the rank value
voutput - contains extra information, content dependent on verbose value"""
global voutput
startCreate = time.time()
if verbose > 0:
voutput += "<br />Running rank method: %s, using distributed ranking function in bibrank_record_sorter<br />" % rank_method_code
lwords_old = lwords
lwords = []
#Check terms, remove non alphanumeric characters. Use both unstemmed and stemmed version of all terms.
for i in range(0, len(lwords_old)):
term = string.lower(lwords_old[i])
if not methods[rank_method_code]["stopwords"] == "True" or methods[rank_method_code]["stopwords"] and not is_stopword(term, 1):
lwords.append((term, methods[rank_method_code]["rnkWORD_table"]))
terms = string.split(string.lower(re.sub(methods[rank_method_code]["chars_alphanumericseparators"], ' ', term)))
for term in terms:
if methods[rank_method_code].has_key("stemmer"): # stem word
term = stem(string.replace(term, ' ', ''), methods[rank_method_code]["stemmer"])
if lwords_old[i] != term: #add if stemmed word is different than original word
lwords.append((term, methods[rank_method_code]["rnkWORD_table"]))
(recdict, rec_termcount, lrecIDs_remove) = ({}, {}, {})
#For each term, if accepted, get a list of the records using the term
#calculate then relevance for each term before sorting the list of records
for (term, table) in lwords:
term_recs = run_sql("""SELECT term, hitlist FROM %s WHERE term=%%s""" % methods[rank_method_code]["rnkWORD_table"], (term, ))
if term_recs: #if term exists in database, use for ranking
term_recs = deserialize_via_marshal(term_recs[0][1])
(recdict, rec_termcount) = calculate_record_relevance((term, int(term_recs["Gi"][1])) , term_recs, hitset, recdict, rec_termcount, verbose, quick=None)
del term_recs
if methods[rank_method_code]["ranked_by"]:
        ranked_by = methods[rank_method_code]["ranked_by"]
if verbose > 0:
voutput += "<br />DRANK method: %s <br />" %methods[rank_method_code]["description"]
if recdict:
wrd_sim = rnkDict()
wrd_sim.put(recdict)
            wrd_sim_lookup = rnkDict()
            wrd_sim_lookup.loadlut(ranked_by + "_wrd")
            relevance = rnkDict()
            relevance.lookup_in_lut(wrd_sim_lookup.getdict(), wrd_sim.getdict())
quality = rnkDict()
quality.loaddict(ranked_by)
quality.filter(list(hitset))
            fresh = rnkDict()
            fresh.loaddict(ranked_by + "_freshness")
            fresh.filter(list(hitset))
            fresh_quality_list = []
            fresh_quality_list.append(fresh.getdict())
            fresh_quality_list.append(quality.getdict())
            fresh_quality = rnkDict()
            fresh_quality.octopus(fresh_quality_list, [0.5, 1 - methods[rank_method_code]["weight_of_exposed"], methods[rank_method_code]["weight_of_exposed"]])
            quality_relevance = rnkDict()
            quality_relevance_list = []
            quality_relevance_list.append(relevance.getdict())
            quality_relevance_list.append(fresh_quality.getdict())
            quality_relevance.octopus(quality_relevance_list, [0.5, 1 - methods[rank_method_code]["weight_of_relevance"], methods[rank_method_code]["weight_of_relevance"]])
recdict = quality_relevance.getdict()
if len(recdict) == 0 or (len(lwords) == 1 and lwords[0] == ""):
return (None, "Records not ranked. The query is not detailed enough, or not enough records found, for ranking to be possible.", "", voutput)
else: #sort if we got something to sort
            (reclist, hitset) = sort_record_relevance(recdict, hitset, rank_limit_relevance, verbose, _rescale=1)
#Add any documents not ranked to the end of the list
if hitset:
lrecIDs = list(hitset) #using 2-3mb
reclist = zip(lrecIDs, [0] * len(lrecIDs)) + reclist #using 6mb
if verbose > 0:
voutput += "<br />Current number of recIDs: %s<br />" % (methods[rank_method_code]["col_size"])
voutput += "Number of terms: %s<br />" % run_sql("SELECT count(id) FROM %s" % methods[rank_method_code]["rnkWORD_table"])[0][0]
voutput += "Terms: %s<br />" % lwords
voutput += "Prepare and pre calculate time: %s<br />" % (str(time.time() - startCreate))
voutput += "Total time used: %s<br />" % (str(time.time() - startCreate))
rank_method_stat(ranked_by,reclist, lwords)
return (reclist, methods[rank_method_code]["prefix"], methods[rank_method_code]["postfix"], voutput)
else:
return (None, "Records not ranked. No DRANK Method defined.", "", voutput)
def word_similarity(rank_method_code, lwords, hitset, rank_limit_relevance,verbose=0, _rescale=1):
"""Ranking a records containing specified words and returns a sorted list.
input:
rank_method_code - the code of the method, from the name field in rnkMETHOD
lwords - a list of words from the query
hitset - a list of hits for the query found by search_engine
rank_limit_relevance - show only records with a rank value above this
verbose - verbose value
output:
reclist - a list of sorted records: [[23,34], [344,24], [1,01]]
prefix - what to show before the rank value
postfix - what to show after the rank value
voutput - contains extra information, content dependent on verbose value"""
global voutput
startCreate = time.time()
if verbose > 0:
voutput += "<br />Running rank method: %s, using word_frequency function in bibrank_record_sorter<br />" % rank_method_code
lwords_old = lwords
lwords = []
#Check terms, remove non alphanumeric characters. Use both unstemmed and stemmed version of all terms.
for i in range(0, len(lwords_old)):
term = string.lower(lwords_old[i])
if not methods[rank_method_code]["stopwords"] == "True" or methods[rank_method_code]["stopwords"] and not is_stopword(term, 1):
lwords.append((term, methods[rank_method_code]["rnkWORD_table"]))
terms = string.split(string.lower(re.sub(methods[rank_method_code]["chars_alphanumericseparators"], ' ', term)))
for term in terms:
if methods[rank_method_code].has_key("stemmer"): # stem word
term = stem(string.replace(term, ' ', ''), methods[rank_method_code]["stemmer"])
if lwords_old[i] != term: #add if stemmed word is different than original word
lwords.append((term, methods[rank_method_code]["rnkWORD_table"]))
(recdict, rec_termcount, lrecIDs_remove) = ({}, {}, {})
#For each term, if accepted, get a list of the records using the term
#calculate then relevance for each term before sorting the list of records
for (term, table) in lwords:
term_recs = run_sql("""SELECT term, hitlist FROM %s WHERE term=%%s""" % methods[rank_method_code]["rnkWORD_table"], (term,))
if term_recs: #if term exists in database, use for ranking
term_recs = deserialize_via_marshal(term_recs[0][1])
(recdict, rec_termcount) = calculate_record_relevance((term, int(term_recs["Gi"][1])) , term_recs, hitset, recdict, rec_termcount, verbose, quick=None)
del term_recs
if len(recdict) == 0 or (len(lwords) == 1 and lwords[0] == ""):
return (None, "Records not ranked. The query is not detailed enough, or not enough records found, for ranking to be possible.", "", voutput)
else: #sort if we got something to sort
(reclist, hitset) = sort_record_relevance(recdict, hitset, rank_limit_relevance, verbose, _rescale)
#Add any documents not ranked to the end of the list
if hitset:
lrecIDs = list(hitset) #using 2-3mb
reclist = zip(lrecIDs, [0] * len(lrecIDs)) + reclist #using 6mb
if verbose > 0:
voutput += "<br />Current number of recIDs: %s<br />" % (methods[rank_method_code]["col_size"])
voutput += "Number of terms: %s<br />" % run_sql("SELECT count(id) FROM %s" % methods[rank_method_code]["rnkWORD_table"])[0][0]
voutput += "Terms: %s<br />" % lwords
voutput += "Prepare and pre calculate time: %s<br />" % (str(time.time() - startCreate))
voutput += "Total time used: %s<br />" % (str(time.time() - startCreate))
rank_method_stat(rank_method_code, reclist, lwords)
return (reclist, methods[rank_method_code]["prefix"], methods[rank_method_code]["postfix"], voutput)
def calculate_record_relevance(term, invidx, hitset, recdict, rec_termcount, verbose, quick=None):
"""Calculating the relevance of the documents based on the input, calculates only one word
term - (term, query term factor) the term and its importance in the overall search
invidx - {recid: tf, Gi: norm value} The Gi value is used as a idf value
hitset - a hitset with records that are allowed to be ranked
recdict - contains currently ranked records, is returned with new values
rec_termcount - {recid: count} the number of terms in this record that matches the query
verbose - verbose value
quick - if quick=yes only terms with a positive qtf is used, to limit the number of records to sort"""
(t, qtf) = term
if invidx.has_key("Gi"):#Gi = weigth for this term, created by bibrank_word_indexer
Gi = invidx["Gi"][1]
del invidx["Gi"]
else: #if not existing, bibrank should be run with -R
return (recdict, rec_termcount)
if not quick or (qtf >= 0 or (qtf < 0 and len(recdict) == 0)):
#Only accept records existing in the hitset received from the search engine
for (j, tf) in invidx.iteritems():
if j in hitset:#only include docs found by search_engine based on query
try: #calculates rank value
recdict[j] = recdict.get(j, 0) + int(math.log(tf[0] * Gi * tf[1] * qtf))
except:
return (recdict, rec_termcount)
rec_termcount[j] = rec_termcount.get(j, 0) + 1 #number of terms from query in document
elif quick: #much used term, do not include all records, only use already existing ones
for (j, tf) in recdict.iteritems(): #i.e: if doc contains important term, also count unimportant
if invidx.has_key(j):
tf = invidx[j]
recdict[j] = recdict.get(j, 0) + int(math.log(tf[0] * Gi * tf[1] * qtf))
rec_termcount[j] = rec_termcount.get(j, 0) + 1 #number of terms from query in document
return (recdict, rec_termcount)
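# --- Illustrative sketch (not part of the original module) ---
# calculate_record_relevance accumulates, per record j, the value
# int(math.log(tf[0] * Gi * tf[1] * qtf)): the term's frequency in the record,
# its global Gi weight from the word indexer, the record's normalisation
# factor, and the query term factor. A worked example with made-up numbers:
def _relevance_term_sketch():
    import math
    tf_in_record = 3    # hypothetical term count in record j (tf[0])
    Gi = 1.7            # hypothetical global term weight
    record_norm = 0.25  # hypothetical per-record normalisation (tf[1])
    qtf = 2             # hypothetical query term factor
    # 3 * 1.7 * 0.25 * 2 == 2.55, and int(math.log(2.55)) == 0
    return int(math.log(tf_in_record * Gi * record_norm * qtf))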
def calculate_record_relevance_findsimilar(term, invidx, hitset, recdict, rec_termcount, verbose, quick=None):
"""Calculating the relevance of the documents based on the input, calculates only one word
term - (term, query term factor) the term and its importance in the overall search
invidx - {recid: tf, Gi: norm value} The Gi value is used as a idf value
hitset - a hitset with records that are allowed to be ranked
recdict - contains currently ranked records, is returned with new values
rec_termcount - {recid: count} the number of terms in this record that matches the query
verbose - verbose value
quick - if quick=yes only terms with a positive qtf is used, to limit the number of records to sort"""
(t, qtf) = term
if invidx.has_key("Gi"): #Gi = weigth for this term, created by bibrank_word_indexer
Gi = invidx["Gi"][1]
del invidx["Gi"]
else: #if not existing, bibrank should be run with -R
return (recdict, rec_termcount)
if not quick or (qtf >= 0 or (qtf < 0 and len(recdict) == 0)):
#Only accept records existing in the hitset received from the search engine
for (j, tf) in invidx.iteritems():
if j in hitset: #only include docs found by search_engine based on query
#calculate rank value
recdict[j] = recdict.get(j, 0) + int((1 + math.log(tf[0])) * Gi * tf[1] * qtf)
rec_termcount[j] = rec_termcount.get(j, 0) + 1 #number of terms from query in document
elif quick: #much used term, do not include all records, only use already existing ones
for (j, tf) in recdict.iteritems(): #i.e: if doc contains important term, also count unimportant
if invidx.has_key(j):
tf = invidx[j]
recdict[j] = recdict[j] + int((1 + math.log(tf[0])) * Gi * tf[1] * qtf)
rec_termcount[j] = rec_termcount.get(j, 0) + 1 #number of terms from query in document
return (recdict, rec_termcount)
def sort_record_relevance(recdict, hitset, rank_limit_relevance,verbose=0, _rescale=0):
"""Sorts the dictionary and returns records with a relevance higher than the given value.
recdict - {recid: value} unsorted
rank_limit_relevance - a value > 0 usually
verbose - verbose value"""
startCreate = time.time()
global voutput
reclist = []
#remove all ranked documents so that unranked can be added to the end
hitset -= recdict.keys()
    #gives each record a score between 0-100 only if rescale is not zero; this is useful for drank integration
for (j, w) in recdict.iteritems():
reclist.append((j, w))
if _rescale:
reclist = rescale(recdict, rank_limit_relevance)
#sort scores
reclist.sort(lambda x, y: cmp(x[1], y[1]))
if verbose > 0:
voutput += "Number of records sorted: %s<br />" % len(reclist)
voutput += "Sort time: %s<br />" % (str(time.time() - startCreate))
return (reclist, hitset)
def sort_record_relevance_findsimilar(recdict, rec_termcount, hitset, rank_limit_relevance, verbose,_rescale=0):
"""Sorts the dictionary and returns records with a relevance higher than the given value.
recdict - {recid: value} unsorted
rank_limit_relevance - a value > 0 usually
verbose - verbose value"""
startCreate = time.time()
global voutput
reclist = []
#Multiply with the number of terms of the total number of terms in the query existing in the records
    for j in recdict.keys():
        if recdict[j] > 0 and rec_termcount[j] > 1:
            recdict[j] = math.log((recdict[j] * rec_termcount[j]))
        else:
            recdict[j] = 0
    hitset -= recdict.keys()
    #gives each record a score between 0-100 only if rescale is not zero
    if _rescale:
        reclist = rescale(recdict, rank_limit_relevance)
    else: #without rescaling, use the raw scores directly
        reclist = recdict.items()
#sort scores
reclist.sort(lambda x, y: cmp(x[1], y[1]))
if verbose > 0:
voutput += "Number of records sorted: %s<br />" % len(reclist)
voutput += "Sort time: %s<br />" % (str(time.time() - startCreate))
return (reclist, hitset)
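# --- Illustrative sketch (not part of the original module) ---
# Before sorting, sort_record_relevance_findsimilar boosts records that match
# several query terms: score -> log(score * termcount) when both are positive,
# otherwise 0. The same transformation on made-up inputs:
def _termcount_boost_sketch():
    import math
    recdict = {101: 12.0, 102: 5.0, 103: 9.0} # hypothetical raw scores
    rec_termcount = {101: 3, 102: 1, 103: 2}  # hypothetical matched-term counts
    for j in recdict.keys():
        if recdict[j] > 0 and rec_termcount[j] > 1:
            recdict[j] = math.log(recdict[j] * rec_termcount[j])
        else:
            recdict[j] = 0
    return recdict # {101: log(36.0), 102: 0, 103: log(18.0)}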
def rank_method_stat(rank_method_code, reclist, lwords):
"""Shows some statistics about the searchresult.
rank_method_code - name field from rnkMETHOD
reclist - a list of sorted and ranked records
lwords - the words in the query"""
global voutput
if len(reclist) > 20:
j = 20
else:
j = len(reclist)
voutput += "<br />Rank statistics:<br />"
for i in range(1, j + 1):
voutput += "%s,Recid:%s,Score:%s<br />" % (i,reclist[len(reclist) - i][0],reclist[len(reclist) - i][1])
for (term, table) in lwords:
term_recs = run_sql("""SELECT hitlist FROM %s WHERE term=%%s""" % table, (term,))
if term_recs:
term_recs = deserialize_via_marshal(term_recs[0][0])
if term_recs.has_key(reclist[len(reclist) - i][0]):
voutput += "%s-%s / " % (term, term_recs[reclist[len(reclist) - i][0]])
voutput += "<br />"
voutput += "<br />Score variation:<br />"
count = {}
for i in range(0, len(reclist)):
count[reclist[i][1]] = count.get(reclist[i][1], 0) + 1
i = 100
while i >= 0:
if count.has_key(i):
voutput += "%s-%s<br />" % (i, count[i])
i -= 1
try:
import psyco
psyco.bind(find_similar)
psyco.bind(rank_by_method)
psyco.bind(calculate_record_relevance)
psyco.bind(distributed_ranking)
psyco.bind(word_similarity)
psyco.bind(sort_record_relevance)
except StandardError, e:
pass
|
pamoakoy/invenio
|
modules/bibrank/lib/bibrank_record_sorter.py
|
Python
|
gpl-2.0
| 41,048
|
[
"Octopus"
] |
25aa9cfdc2f15da292b959cfaca4bf726f702e7bb77504418896f72189de4a80
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import atexit
import logging
from pylib import android_commands
from pylib.device import device_utils
class PerfControl(object):
"""Provides methods for setting the performance mode of a device."""
_CPU_PATH = '/sys/devices/system/cpu'
_KERNEL_MAX = '/sys/devices/system/cpu/kernel_max'
def __init__(self, device):
# TODO(jbudorick) Remove once telemetry gets switched over.
if isinstance(device, android_commands.AndroidCommands):
device = device_utils.DeviceUtils(device)
self._device = device
# this will raise an AdbCommandFailedError if no CPU files are found
self._cpu_files = self._device.RunShellCommand(
'ls -d cpu[0-9]*', cwd=self._CPU_PATH, check_return=True, as_root=True)
assert self._cpu_files, 'Failed to detect CPUs.'
self._cpu_file_list = ' '.join(self._cpu_files)
logging.info('CPUs found: %s', self._cpu_file_list)
self._have_mpdecision = self._device.FileExists('/system/bin/mpdecision')
def SetHighPerfMode(self):
"""Sets the highest stable performance mode for the device."""
if not self._device.old_interface.IsRootEnabled():
message = 'Need root for performance mode. Results may be NOISY!!'
logging.warning(message)
# Add an additional warning at exit, such that it's clear that any results
# may be different/noisy (due to the lack of intended performance mode).
atexit.register(logging.warning, message)
return
product_model = self._device.product_model
# TODO(epenner): Enable on all devices (http://crbug.com/383566)
if 'Nexus 4' == product_model:
self._ForceAllCpusOnline(True)
if not self._AllCpusAreOnline():
logging.warning('Failed to force CPUs online. Results may be NOISY!')
self._SetScalingGovernorInternal('performance')
elif 'Nexus 5' == product_model:
self._ForceAllCpusOnline(True)
if not self._AllCpusAreOnline():
logging.warning('Failed to force CPUs online. Results may be NOISY!')
self._SetScalingGovernorInternal('performance')
self._SetScalingMaxFreq(1190400)
self._SetMaxGpuClock(200000000)
else:
self._SetScalingGovernorInternal('performance')
def SetPerfProfilingMode(self):
"""Enables all cores for reliable perf profiling."""
self._ForceAllCpusOnline(True)
self._SetScalingGovernorInternal('performance')
if not self._AllCpusAreOnline():
if not self._device.old_interface.IsRootEnabled():
raise RuntimeError('Need root to force CPUs online.')
raise RuntimeError('Failed to force CPUs online.')
def SetDefaultPerfMode(self):
"""Sets the performance mode for the device to its default mode."""
if not self._device.old_interface.IsRootEnabled():
return
product_model = self._device.product_model
if 'Nexus 5' == product_model:
if self._AllCpusAreOnline():
self._SetScalingMaxFreq(2265600)
self._SetMaxGpuClock(450000000)
governor_mode = {
'GT-I9300': 'pegasusq',
'Galaxy Nexus': 'interactive',
'Nexus 4': 'ondemand',
'Nexus 5': 'ondemand',
'Nexus 7': 'interactive',
'Nexus 10': 'interactive'
}.get(product_model, 'ondemand')
self._SetScalingGovernorInternal(governor_mode)
self._ForceAllCpusOnline(False)
  def GetCpuInfo(self):
    """Returns a list of (cpu name, online status, scaling governor) tuples."""
online = (output.rstrip() == '1' and status == 0
for (_, output, status) in self._ForEachCpu('cat "$CPU/online"'))
governor = (output.rstrip() if status == 0 else None
for (_, output, status)
in self._ForEachCpu('cat "$CPU/cpufreq/scaling_governor"'))
return zip(self._cpu_files, online, governor)
def _ForEachCpu(self, cmd):
script = '; '.join([
'for CPU in %s' % self._cpu_file_list,
'do %s' % cmd,
'echo -n "%~%$?%~%"',
'done'
])
output = self._device.RunShellCommand(
script, cwd=self._CPU_PATH, check_return=True, as_root=True)
output = '\n'.join(output).split('%~%')
return zip(self._cpu_files, output[0::2], (int(c) for c in output[1::2]))
def _WriteEachCpuFile(self, path, value):
results = self._ForEachCpu(
'test -e "$CPU/{path}" && echo {value} > "$CPU/{path}"'.format(
path=path, value=value))
cpus = ' '.join(cpu for (cpu, _, status) in results if status == 0)
if cpus:
logging.info('Successfully set %s to %r on: %s', path, value, cpus)
else:
      logging.warning('Failed to set %s to %r on any cpus.', path, value)
def _SetScalingGovernorInternal(self, value):
self._WriteEachCpuFile('cpufreq/scaling_governor', value)
def _SetScalingMaxFreq(self, value):
self._WriteEachCpuFile('cpufreq/scaling_max_freq', '%d' % value)
def _SetMaxGpuClock(self, value):
self._device.WriteFile('/sys/class/kgsl/kgsl-3d0/max_gpuclk',
str(value),
as_root=True)
def _AllCpusAreOnline(self):
results = self._ForEachCpu('cat "$CPU/online"')
# TODO(epenner): Investigate why file may be missing
# (http://crbug.com/397118)
return all(output.rstrip() == '1' and status == 0
for (cpu, output, status) in results
if cpu != 'cpu0')
def _ForceAllCpusOnline(self, force_online):
"""Enable all CPUs on a device.
Some vendors (or only Qualcomm?) hot-plug their CPUs, which can add noise
to measurements:
- In perf, samples are only taken for the CPUs that are online when the
measurement is started.
- The scaling governor can't be set for an offline CPU and frequency scaling
on newly enabled CPUs adds noise to both perf and tracing measurements.
It appears Qualcomm is the only vendor that hot-plugs CPUs, and on Qualcomm
this is done by "mpdecision".
"""
if self._have_mpdecision:
script = 'stop mpdecision' if force_online else 'start mpdecision'
self._device.RunShellCommand(script, check_return=True, as_root=True)
if not self._have_mpdecision and not self._AllCpusAreOnline():
logging.warning('Unexpected cpu hot plugging detected.')
if force_online:
self._ForEachCpu('echo 1 > "$CPU/online"')
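# --- Illustrative sketch (not part of this file) ---
# _ForEachCpu above makes the shell emit, per CPU, the command output followed
# by '%~%<exit status>%~%'. The standalone parser below reproduces the slicing
# logic on canned data; the sample lines are made up.
def _parse_for_each_cpu_output(cpu_files, raw_lines):
  output = '\n'.join(raw_lines).split('%~%')
  return zip(cpu_files, output[0::2], (int(c) for c in output[1::2]))

# For two CPUs whose 'cat "$CPU/online"' printed '1' with exit status 0:
# _parse_for_each_cpu_output(['cpu0', 'cpu1'], ['1', '%~%0%~%1', '%~%0%~%'])
# -> [('cpu0', '1\n', 0), ('cpu1', '1\n', 0)]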
|
mohamed--abdel-maksoud/chromium.src
|
build/android/pylib/perf/perf_control.py
|
Python
|
bsd-3-clause
| 6,344
|
[
"Galaxy"
] |
cd34d9b86e90010fee4880b54174176e8cc321b23918e106e7756279bf52d4bc
|
#!/usr/bin/env python3
import torch
from ..distributions import MultivariateNormal
from ..lazy import InterpolatedLazyTensor
from ..utils.broadcasting import _mul_broadcast_shape
from ..utils.interpolation import Interpolation, left_interp
from ..utils.memoize import cached
from ._variational_strategy import _VariationalStrategy
class GridInterpolationVariationalStrategy(_VariationalStrategy):
r"""
This strategy constrains the inducing points to a grid and applies a deterministic
relationship between :math:`\mathbf f` and :math:`\mathbf u`.
It was introduced by `Wilson et al. (2016)`_.
Here, the inducing points are not learned. Instead, the strategy
automatically creates inducing points based on a set of grid sizes and grid
bounds.
.. _Wilson et al. (2016):
https://arxiv.org/abs/1611.00336
:param ~gpytorch.models.ApproximateGP model: Model this strategy is applied to.
Typically passed in when the VariationalStrategy is created in the
__init__ method of the user defined model.
:param int grid_size: Size of the grid
:param list grid_bounds: Bounds of each dimension of the grid (should be a list of (float, float) tuples)
:param ~gpytorch.variational.VariationalDistribution variational_distribution: A
VariationalDistribution object that represents the form of the variational distribution :math:`q(\mathbf u)`
"""
def __init__(self, model, grid_size, grid_bounds, variational_distribution):
grid = torch.zeros(grid_size, len(grid_bounds))
for i in range(len(grid_bounds)):
grid_diff = float(grid_bounds[i][1] - grid_bounds[i][0]) / (grid_size - 2)
grid[:, i] = torch.linspace(grid_bounds[i][0] - grid_diff, grid_bounds[i][1] + grid_diff, grid_size)
inducing_points = torch.zeros(int(pow(grid_size, len(grid_bounds))), len(grid_bounds))
prev_points = None
for i in range(len(grid_bounds)):
for j in range(grid_size):
inducing_points[j * grid_size ** i : (j + 1) * grid_size ** i, i].fill_(grid[j, i])
if prev_points is not None:
inducing_points[j * grid_size ** i : (j + 1) * grid_size ** i, :i].copy_(prev_points)
prev_points = inducing_points[: grid_size ** (i + 1), : (i + 1)]
super(GridInterpolationVariationalStrategy, self).__init__(
model, inducing_points, variational_distribution, learn_inducing_locations=False
)
object.__setattr__(self, "model", model)
self.register_buffer("grid", grid)
def _compute_grid(self, inputs):
n_data, n_dimensions = inputs.size(-2), inputs.size(-1)
batch_shape = inputs.shape[:-2]
inputs = inputs.reshape(-1, n_dimensions)
interp_indices, interp_values = Interpolation().interpolate(self.grid, inputs)
interp_indices = interp_indices.view(*batch_shape, n_data, -1)
interp_values = interp_values.view(*batch_shape, n_data, -1)
if (interp_indices.dim() - 2) != len(self._variational_distribution.batch_shape):
batch_shape = _mul_broadcast_shape(interp_indices.shape[:-2], self._variational_distribution.batch_shape)
interp_indices = interp_indices.expand(*batch_shape, *interp_indices.shape[-2:])
interp_values = interp_values.expand(*batch_shape, *interp_values.shape[-2:])
return interp_indices, interp_values
@property
@cached(name="prior_distribution_memo")
def prior_distribution(self):
out = self.model.forward(self.inducing_points)
res = MultivariateNormal(out.mean, out.lazy_covariance_matrix.add_jitter())
return res
def forward(self, x, inducing_points, inducing_values, variational_inducing_covar=None):
if variational_inducing_covar is None:
raise RuntimeError(
"GridInterpolationVariationalStrategy is only compatible with Gaussian variational "
f"distributions. Got ({self.variational_distribution.__class__.__name__}."
)
variational_distribution = self.variational_distribution
# Get interpolations
interp_indices, interp_values = self._compute_grid(x)
# Compute test mean
# Left multiply samples by interpolation matrix
predictive_mean = left_interp(interp_indices, interp_values, inducing_values.unsqueeze(-1))
predictive_mean = predictive_mean.squeeze(-1)
# Compute test covar
predictive_covar = InterpolatedLazyTensor(
variational_distribution.lazy_covariance_matrix,
interp_indices,
interp_values,
interp_indices,
interp_values,
)
output = MultivariateNormal(predictive_mean, predictive_covar)
return output
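# --- Illustrative usage sketch (not part of this module) ---
# A minimal, hypothetical ApproximateGP wired up with this strategy. The mean
# and kernel choices and the public gpytorch import paths are assumptions, not
# something this file prescribes; the sketch is kept in a comment to avoid a
# circular import of the gpytorch package at module load.
#
#   import gpytorch
#
#   class SketchGPModel(gpytorch.models.ApproximateGP):
#       def __init__(self, grid_size=64, grid_bounds=[(-1.0, 1.0)]):
#           variational_distribution = gpytorch.variational.CholeskyVariationalDistribution(
#               int(pow(grid_size, len(grid_bounds)))
#           )
#           variational_strategy = GridInterpolationVariationalStrategy(
#               self, grid_size, grid_bounds, variational_distribution
#           )
#           super().__init__(variational_strategy)
#           self.mean_module = gpytorch.means.ConstantMean()
#           self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
#
#       def forward(self, x):
#           return MultivariateNormal(self.mean_module(x), self.covar_module(x))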
|
jrg365/gpytorch
|
gpytorch/variational/grid_interpolation_variational_strategy.py
|
Python
|
mit
| 4,836
|
[
"Gaussian"
] |
753bf71274402e3f22b1c66c8fd780288bc28a0c590009cacd44fa6b0a5bd22f
|
# Copyright (C) 2011 by Brandon Invergo (b.invergo@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
import re
def parse_ng86(lines, results):
""" Parse the Nei & Gojobori (1986) section of the results.
Nei_Gojobori results are organized in a lower
triangular matrix, with the sequence names labeling
the rows and statistics in the format:
w (dN dS) per column
Example row (2 columns):
0.0000 (0.0000 0.0207) 0.0000 (0.0000 0.0421)"""
sequences = []
for line in lines:
# Find all floating point numbers in this line
line_floats_res = re.findall("-*\d+\.\d+", line)
line_floats = [float(val) for val in line_floats_res]
matrix_row_res = re.match("(.+)\s{5,15}", line)
if matrix_row_res is not None:
seq_name = matrix_row_res.group(1).strip()
sequences.append(seq_name)
results[seq_name] = {}
for i in range(0, len(line_floats), 3):
NG86 = {}
NG86["omega"] = line_floats[i]
NG86["dN"] = line_floats[i + 1]
NG86["dS"] = line_floats[i + 2]
results[seq_name][sequences[i // 3]] = {"NG86": NG86}
results[sequences[i // 3]][seq_name] = {"NG86": NG86}
return (results, sequences)
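# --- Illustrative sketch (not part of this module) ---
# A self-contained demonstration of the float extraction used by parse_ng86,
# applied to a row shaped like the docstring example; the sequence name and
# numbers are made up.
def _ng86_row_sketch():
    line = "Pan_troglo      0.0000 (0.0000 0.0207) 0.0000 (0.0000 0.0421)"
    line_floats = [float(val) for val in re.findall("-*\d+\.\d+", line)]
    # Every three floats form one (omega, dN, dS) column:
    return [{"omega": line_floats[i],
             "dN": line_floats[i + 1],
             "dS": line_floats[i + 2]}
            for i in range(0, len(line_floats), 3)]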
def parse_yn00(lines, results, sequences):
""" Parse the Yang & Nielsen (2000) part of the results.
Yang & Nielsen results are organized in a table with
each row comprising one pairwise species comparison.
Rows are labeled by sequence number rather than by
sequence name."""
# Example (header row and first table row):
# seq. seq. S N t kappa omega dN +- SE dS +- SE
# 2 1 67.3 154.7 0.0136 3.6564 0.0000 -0.0000 +- 0.0000 0.0150
# +- 0.0151
for line in lines:
# Find all floating point numbers in this line
line_floats_res = re.findall("-*\d+\.\d+", line)
line_floats = [float(val) for val in line_floats_res]
row_res = re.match("\s+(\d+)\s+(\d+)", line)
if row_res is not None:
seq1 = int(row_res.group(1))
seq2 = int(row_res.group(2))
seq_name1 = sequences[seq1 - 1]
seq_name2 = sequences[seq2 - 1]
YN00 = {}
YN00["S"] = line_floats[0]
YN00["N"] = line_floats[1]
YN00["t"] = line_floats[2]
YN00["kappa"] = line_floats[3]
YN00["omega"] = line_floats[4]
YN00["dN"] = line_floats[5]
YN00["dN SE"] = line_floats[6]
YN00["dS"] = line_floats[7]
YN00["dS SE"] = line_floats[8]
results[seq_name1][seq_name2]["YN00"] = YN00
results[seq_name2][seq_name1]["YN00"] = YN00
seq_name1 = None
seq_name2 = None
return results
def parse_others(lines, results, sequences):
"""Parse the results from the other methods.
The remaining methods are grouped together. Statistics
for all three are listed for each of the pairwise
species comparisons, with each method's results on its
own line.
The stats in this section must be handled differently
due to the possible presence of NaN values, which won't
get caught by my typical "line_floats" method used above.
"""
# Example:
# 2 (Pan_troglo) vs. 1 (Homo_sapie)
# L(i): 143.0 51.0 28.0 sum= 222.0
# Ns(i): 0.0000 1.0000 0.0000 sum= 1.0000
# Nv(i): 0.0000 0.0000 0.0000 sum= 0.0000
# A(i): 0.0000 0.0200 0.0000
# B(i): -0.0000 -0.0000 -0.0000
# LWL85: dS = 0.0227 dN = 0.0000 w = 0.0000 S = 45.0 N = 177.0
# LWL85m: dS = -nan dN = -nan w = -nan S = -nan N = -nan (rho = -nan)
# LPB93: dS = 0.0129 dN = 0.0000 w = 0.0000
seq_name1 = None
seq_name2 = None
for line in lines:
comp_res = re.match("\d+ \((.+)\) vs. \d+ \((.+)\)", line)
if comp_res is not None:
seq_name1 = comp_res.group(1)
seq_name2 = comp_res.group(2)
elif seq_name1 is not None and seq_name2 is not None:
if "dS =" in line:
stats = {}
line_stats = line.split(":")[1].strip()
# Find all of the xx = ###### values in a row
# ie dS = 0.0227
# For dN and dS, the values have 8 characters from the equals
# sign, while the rest have 7 characters. On Windows,
# NaNs take on weird values like -1.#IND, which might fill the
# entire fixed column width.
res_matches = re.findall("[dSNwrho]{1,3} =.{7,8}?",
line_stats)
for stat_pair in res_matches:
stat = stat_pair.split('=')[0].strip()
value = stat_pair.split('=')[1].strip()
try:
stats[stat] = float(value)
                    except ValueError: #NaN-like tokens (e.g. Windows "-1.#IND")
stats[stat] = None
if "LWL85:" in line:
results[seq_name1][seq_name2]["LWL85"] = stats
results[seq_name2][seq_name1]["LWL85"] = stats
elif "LWL85m" in line:
results[seq_name1][seq_name2]["LWL85m"] = stats
results[seq_name2][seq_name1]["LWL85m"] = stats
elif "LPB93" in line:
results[seq_name1][seq_name2]["LPB93"] = stats
results[seq_name2][seq_name1]["LPB93"] = stats
return results
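# --- Illustrative sketch (not part of this module) ---
# The "xx = value" extraction in parse_others depends on each value field being
# exactly seven (or eight) characters wide; tokens that float() rejects fall
# back to None. A standalone run on a made-up LWL85-style fragment where every
# value field is exactly seven characters:
def _stat_pair_sketch():
    line_stats = "dS = 0.0227 dN = 0.0000 w = 0.0000"
    stats = {}
    for stat_pair in re.findall("[dSNwrho]{1,3} =.{7,8}?", line_stats):
        stat = stat_pair.split('=')[0].strip()
        value = stat_pair.split('=')[1].strip()
        try:
            stats[stat] = float(value)
        except ValueError:
            stats[stat] = None
    return stats # {'dS': 0.0227, 'dN': 0.0, 'w': 0.0}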
|
zjuchenyuan/BioWeb
|
Lib/Bio/Phylo/PAML/_parse_yn00.py
|
Python
|
mit
| 5,743
|
[
"Biopython"
] |
3da1a25e0a045daea31e51e7471509350d4d4e1a8227a0c4f5c3248dbf5aa74f
|
#!/usr/bin/env python
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
import cairo
import sys
import re
import gtk
class DataRange:
def __init__(self, start = 0, end = 0, value = ''):
self.start = start
self.end = end
self.value = value
class EventString:
def __init__(self, at = 0, value = ''):
self.at = at
self.value = value
class EventFloat:
def __init__(self, at = 0, value = 0.0):
self.at = at
self.value = value
class EventInt:
def __init__(self, at = 0, value = 0.0):
self.at = at
self.value = value
def ranges_cmp(a, b):
diff = a.start - b.start
if diff < 0:
return -1
elif diff > 0:
return +1
else:
return 0
def events_cmp(a, b):
diff = a.at - b.at
if diff < 0:
return -1
elif diff > 0:
return +1
else:
return 0
class TimelineDataRange:
def __init__(self, name = ''):
self.name = name
self.ranges = []
return
def __search(self, key):
l = 0
u = len(self.ranges)-1
while l <= u:
i = int((l + u) / 2)
if key >= self.ranges[i].start and key <= self.ranges[i].end:
return i
elif key < self.ranges[i].start:
u = i - 1
else:
# key > self.ranges[i].end
l = i + 1
        return -1
def add_range(self, range):
self.ranges.append(range)
def get_all(self):
return self.ranges
def get_ranges(self, start, end):
s = self.__search(start)
e = self.__search(end)
if s == -1 and e == -1:
return []
elif s == -1:
return self.ranges[0:e + 1]
elif e == -1:
return self.ranges[s:len(self.ranges)]
else:
return self.ranges[s:e + 1]
def get_ranges_bounds(self, start, end):
s = self.__search(start)
e = self.__search(end)
if s == -1 and e == -1:
return(0, 0)
elif s == -1:
return(0, e + 1)
elif e == -1:
return(s, len(self.ranges))
else:
return(s, e + 1)
def sort(self):
self.ranges.sort(ranges_cmp)
def get_bounds(self):
if len(self.ranges) > 0:
lo = self.ranges[0].start
hi = self.ranges[len(self.ranges)-1].end
return(lo, hi)
else:
return(0, 0)
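# --- Illustrative sketch (not part of this tool) ---
# TimelineDataRange.__search above is a binary search over the sorted,
# non-overlapping ranges; get_ranges slices between the indices found for the
# two endpoints. A standalone check of that behaviour with made-up ranges:
def _range_search_sketch():
    tdr = TimelineDataRange('sketch')
    for (s, e, v) in [(0, 4, 'a'), (5, 9, 'b'), (10, 14, 'c')]:
        tdr.add_range(DataRange(s, e, v))
    tdr.sort()
    return [r.value for r in tdr.get_ranges(6, 12)] # -> ['b', 'c']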
class TimelineEvent:
def __init__(self, name = ''):
self.name = name
self.events = []
def __search(self, key):
l = 0
u = len(self.events)-1
while l <= u:
i = int((l + u) / 2)
if key == self.events[i].at:
return i
elif key < self.events[i].at:
u = i - 1
else:
# key > self.events[i].at
l = i + 1
return l
def add_event(self, event):
self.events.append(event)
def get_events(self, start, end):
s = self.__search(start)
e = self.__search(end)
return self.events[s:e + 1]
def get_events_bounds(self, start, end):
s = self.__search(start)
e = self.__search(end)
return(s, e + 1)
def sort(self):
self.events.sort(events_cmp)
def get_bounds(self):
if len(self.events) > 0:
lo = self.events[0].at
hi = self.events[-1].at
return(lo, hi)
else:
return(0, 0)
class Timeline:
def __init__(self, name = ''):
self.ranges = []
self.event_str = []
self.event_int = []
self.name = name
def get_range(self, name):
for range in self.ranges:
if range.name == name:
return range
timeline = TimelineDataRange(name)
self.ranges.append(timeline)
return timeline
def get_event_str(self, name):
for event_str in self.event_str:
if event_str.name == name:
return event_str
timeline = TimelineEvent(name)
self.event_str.append(timeline)
return timeline
def get_event_int(self, name):
for event_int in self.event_int:
if event_int.name == name:
return event_int
timeline = TimelineEvent(name)
self.event_int.append(timeline)
return timeline
def get_ranges(self):
return self.ranges
def get_events_str(self):
return self.event_str
def get_events_int(self):
return self.event_int
def sort(self):
for range in self.ranges:
range.sort()
for event in self.event_int:
event.sort()
for event in self.event_str:
event.sort()
def get_bounds(self):
lo = 0
hi = 0
for range in self.ranges:
(range_lo, range_hi) = range.get_bounds()
if range_lo < lo:
lo = range_lo
if range_hi > hi:
hi = range_hi
for event_str in self.event_str:
(ev_lo, ev_hi) = event_str.get_bounds()
if ev_lo < lo:
lo = ev_lo
if ev_hi > hi:
hi = ev_hi
for event_int in self.event_int:
(ev_lo, ev_hi) = event_int.get_bounds()
if ev_lo < lo:
lo = ev_lo
if ev_hi > hi:
hi = ev_hi
return(lo, hi)
class Timelines:
def __init__(self):
self.timelines = []
def get(self, name):
for timeline in self.timelines:
if timeline.name == name:
return timeline
timeline = Timeline(name)
self.timelines.append(timeline)
return timeline
def get_all(self):
return self.timelines
def sort(self):
for timeline in self.timelines:
timeline.sort()
def get_bounds(self):
lo = 0
hi = 0
for timeline in self.timelines:
(t_lo, t_hi) = timeline.get_bounds()
if t_lo < lo:
lo = t_lo
if t_hi > hi:
hi = t_hi
return(lo, hi)
def get_all_range_values(self):
range_values = {}
for timeline in self.timelines:
for ranges in timeline.get_ranges():
for ran in ranges.get_all():
range_values[ran.value] = 1
return range_values.keys()
class Color:
def __init__(self, r = 0.0, g = 0.0, b = 0.0):
self.r = r
self.g = g
self.b = b
def set(self, r, g, b):
self.r = r
self.g = g
self.b = b
class Colors:
# XXX add more
default_colors = [Color(1, 0, 0), Color(0, 1, 0), Color(0, 0, 1), Color(1, 1, 0), Color(1, 0, 1), Color(0, 1, 1)]
def __init__(self):
self.__colors = {}
def add(self, name, color):
self.__colors[name] = color
def lookup(self, name):
if not self.__colors.has_key(name):
self.add(name, self.default_colors.pop())
return self.__colors.get(name)
class TopLegendRenderer:
def __init__(self):
self.__padding = 10
def set_padding(self, padding):
self.__padding = padding
def set_legends(self, legends, colors):
self.__legends = legends
self.__colors = colors
def layout(self, width):
self.__width = width
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1)
ctx = cairo.Context(surface)
line_height = 0
total_height = self.__padding
line_used = self.__padding
for legend in self.__legends:
(t_width, t_height) = ctx.text_extents(legend)[2:4]
item_width = self.__padding + self.__padding + t_width + self.__padding
item_height = t_height + self.__padding
if item_height > line_height:
line_height = item_height
if line_used + item_width > self.__width:
line_used = self.__padding + item_width
total_height += line_height
else:
line_used += item_width
x = line_used - item_width
total_height += line_height
self.__height = total_height
def get_height(self):
return self.__height
def draw(self, ctx):
i = 0
line_height = 0
total_height = self.__padding
line_used = self.__padding
for legend in self.__legends:
(t_width, t_height) = ctx.text_extents(legend)[2:4]
item_width = self.__padding + self.__padding + t_width + self.__padding
item_height = t_height + self.__padding
if item_height > line_height:
line_height = item_height
if line_used + item_width > self.__width:
line_used = self.__padding + item_width
total_height += line_height
else:
line_used += item_width
x = line_used - item_width
ctx.rectangle(x, total_height, self.__padding, self.__padding)
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(2)
ctx.stroke_preserve()
ctx.set_source_rgb(self.__colors[i].r,
self.__colors[i].g,
self.__colors[i].b)
ctx.fill()
ctx.move_to(x + self.__padding*2, total_height + t_height)
ctx.set_source_rgb(0, 0, 0)
ctx.show_text(legend)
i += 1
return
class TimelinesRenderer:
def __init__(self):
self.padding = 10
return
def get_height(self):
return self.height
def set_timelines(self, timelines, colors):
self.timelines = timelines
self.colors = colors
def set_render_range(self, start, end):
self.start = start
self.end = end
def get_data_x_start(self):
return self.padding / 2 + self.left_width + self.padding + self.right_width + self.padding / 2
def layout(self, width):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1)
ctx = cairo.Context(surface)
max_text_height = ctx.text_extents("ABCDEFGHIJKLMNOPQRSTUVWXYZabcedefghijklmnopqrstuvwxyz0123456789")[3]
left_width = 0
right_width = 0
left_n_lines = 0
range_n = 0
eventint_n = 0
eventstr_n = 0
for timeline in self.timelines.get_all():
left_n_lines += 1
t_width = ctx.text_extents(timeline.name)[2]
left_width = max(left_width, t_width)
for rang in timeline.get_ranges():
t_width = ctx.text_extents(rang.name)[2]
right_width = max(right_width, t_width)
range_n += 1
for events_int in timeline.get_events_int():
t_width = ctx.text_extents(events_int.name)[2]
right_width = max(right_width, t_width)
eventint_n += 1
for events_str in timeline.get_events_str():
t_width = ctx.text_extents(events_str.name)[2]
right_width = max(right_width, t_width)
eventstr_n += 1
left_height = left_n_lines * max_text_height + (left_n_lines - 1) * self.padding
right_n_lines = range_n + eventint_n + eventstr_n
right_height = (right_n_lines - 1) * self.padding + right_n_lines * max_text_height
right_data_height = (eventint_n + eventstr_n) * (max_text_height + 5) + range_n * 10
right_data_height += (right_n_lines - 1) * self.padding
height = max(left_height, right_height)
height = max(height, right_data_height)
self.left_width = left_width
self.right_width = right_width
self.max_text_height = max_text_height
self.width = width
self.height = height + self.padding
def draw_line(self, ctx, x, y, width, height):
ctx.move_to(x, y)
ctx.rel_line_to(width, height)
ctx.close_path()
ctx.set_operator(cairo.OPERATOR_SOURCE)
ctx.set_line_width(1.0)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke()
def draw_events(self, ctx, events, x, y, width, height):
if (self.grey_background % 2) == 0:
ctx.rectangle(x, y - self.padding / 2,
width, height + self.padding)
ctx.set_source_rgb(0.9, 0.9, 0.9)
ctx.fill()
last_x_drawn = int(x)
(lo, hi) = events.get_events_bounds(self.start, self.end)
for event in events.events[lo:hi]:
real_x = int(x + (event.at - self.start) * width / (self.end - self.start))
if real_x > last_x_drawn + 2:
ctx.rectangle(real_x, y, 1, 1)
ctx.set_source_rgb(1, 0, 0)
ctx.stroke()
ctx.move_to(real_x, y + self.max_text_height)
ctx.set_source_rgb(0, 0, 0)
ctx.show_text(str(event.value))
last_x_drawn = real_x
self.grey_background += 1
def draw_ranges(self, ctx, ranges, x, y, width, height):
if (self.grey_background % 2) == 0:
ctx.rectangle(x, y - self.padding / 2,
width, height + self.padding)
ctx.set_source_rgb(0.9, 0.9, 0.9)
ctx.fill()
last_x_drawn = int(x - 1)
(lo, hi) = ranges.get_ranges_bounds(self.start, self.end)
for data_range in ranges.ranges[lo:hi]:
s = max(data_range.start, self.start)
e = min(data_range.end, self.end)
x_start = int(x + (s - self.start) * width / (self.end - self.start))
x_end = int(x + (e - self.start) * width / (self.end - self.start))
if x_end > last_x_drawn:
ctx.rectangle(x_start, y, x_end - x_start, 10)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke_preserve()
color = self.colors.lookup(data_range.value)
ctx.set_source_rgb(color.r, color.g, color.b)
ctx.fill()
last_x_drawn = x_end
self.grey_background += 1
def draw(self, ctx):
timeline_top = 0
top_y = self.padding / 2
left_x_start = self.padding / 2
left_x_end = left_x_start + self.left_width
right_x_start = left_x_end + self.padding
right_x_end = right_x_start + self.right_width
data_x_start = right_x_end + self.padding / 2
data_x_end = self.width
data_width = data_x_end - data_x_start
cur_y = top_y
self.draw_line(ctx, 0, 0, self.width, 0)
self.grey_background = 1
for timeline in self.timelines.get_all():
(y_bearing, t_width, t_height) = ctx.text_extents(timeline.name)[1:4]
ctx.move_to(left_x_start, cur_y + self.max_text_height - (t_height + y_bearing))
            ctx.show_text(timeline.name)
for events_int in timeline.get_events_int():
(y_bearing, t_width, t_height) = ctx.text_extents(events_int.name)[1:4]
ctx.move_to(right_x_start, cur_y + self.max_text_height - (t_height + y_bearing))
ctx.show_text(events_int.name)
self.draw_events(ctx, events_int, data_x_start, cur_y, data_width, self.max_text_height + 5)
cur_y += self.max_text_height + 5 + self.padding
self.draw_line(ctx, right_x_start - self.padding / 2, cur_y - self.padding / 2,
self.right_width + self.padding, 0)
for events_str in timeline.get_events_str():
(y_bearing, t_width, t_height) = ctx.text_extents(events_str.name)[1:4]
ctx.move_to(right_x_start, cur_y + self.max_text_height - (t_height + y_bearing))
ctx.show_text(events_str.name)
self.draw_events(ctx, events_str, data_x_start, cur_y, data_width, self.max_text_height + 5)
cur_y += self.max_text_height + 5 + self.padding
self.draw_line(ctx, right_x_start - self.padding / 2, cur_y - self.padding / 2,
self.right_width + self.padding, 0)
for ranges in timeline.get_ranges():
(y_bearing, t_width, t_height) = ctx.text_extents(ranges.name)[1:4]
ctx.move_to(right_x_start, cur_y + self.max_text_height - (t_height + y_bearing))
ctx.show_text(ranges.name)
self.draw_ranges(ctx, ranges, data_x_start, cur_y, data_width, 10)
cur_y += self.max_text_height + self.padding
self.draw_line(ctx, right_x_start - self.padding / 2, cur_y - self.padding / 2,
self.right_width + self.padding, 0)
self.draw_line(ctx, 0, cur_y - self.padding / 2,
self.width, 0)
bot_y = cur_y - self.padding / 2
self.draw_line(ctx, left_x_end + self.padding / 2, 0,
0, bot_y)
self.draw_line(ctx, right_x_end + self.padding / 2, 0,
0, bot_y)
return
class ScaleRenderer:
def __init__(self):
self.__top = 0
return
def set_bounds(self, lo, hi):
self.__lo = lo
self.__hi = hi
def get_position(self, x):
real_x = (x - self.__lo ) * self.__width / (self.__hi - self.__lo)
return real_x
def set_top(self):
self.__top = 1
def set_bot(self):
self.__top = 0
def layout(self, width):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1)
ctx = cairo.Context(surface)
# calculate scale delta
data_delta = self.__hi - self.__lo
closest = 1
while (closest*10) < data_delta:
closest *= 10
        if (data_delta / closest) == 0:
            delta = closest
        elif (data_delta / closest) == 1:
            delta = closest / 10
        else:
            delta = closest
start = self.__lo - (self.__lo % delta) + delta
end = self.__hi - (self.__hi % delta)
self.__delta = delta
self.__width = width
# calculate text height
max_text_height = ctx.text_extents("ABCDEFGHIJKLMNOPQRSTUVWXYZabcedefghijklmnopqrstuvwxyz0123456789")[3]
self.max_text_height = max_text_height
height = max_text_height + 10
self.__height = height
def get_height(self):
return self.__height
def draw(self, ctx):
delta = self.__delta
start = self.__lo - (self.__lo % delta) + delta
end = self.__hi - (self.__hi % delta)
if self.__top == 1:
s = -1
else:
s = 1
# print scale points
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(1.0)
ticks = range(int(start), int(end + delta), int(delta))
for x in ticks:
real_x = (x - self.__lo ) * self.__width / (self.__hi - self.__lo)
ctx.move_to(real_x, 0)
ctx.line_to(real_x, 5*s)
ctx.close_path()
ctx.stroke()
(t_y_bearing, t_width, t_height) = ctx.text_extents(str(x))[1:4]
if self.__top:
text_delta = t_height + t_y_bearing
else:
text_delta = -t_y_bearing
ctx.move_to(real_x - t_width / 2, (5 + 5 + text_delta)*s)
ctx.show_text(str(x))
# draw subticks
delta /= 10
if delta > 0:
start = self.__lo - (self.__lo % delta) + delta
end = self.__hi - (self.__hi % delta)
for x in range(int(start), int(end + delta), int(delta)):
real_x = (x - self.__lo ) * self.__width / (self.__hi - self.__lo)
ctx.move_to(real_x, 0)
ctx.line_to(real_x, 3*s)
ctx.close_path()
ctx.stroke()
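# --- Illustrative sketch (not part of this tool) ---
# ScaleRenderer picks its tick spacing by finding the largest power of ten
# below the data span, then walks from the first multiple of that delta inside
# the bounds. The same arithmetic as a standalone function, with made-up bounds
# in the example below:
def _tick_positions_sketch(lo, hi):
    data_delta = hi - lo
    closest = 1
    while (closest * 10) < data_delta:
        closest *= 10
    if (data_delta / closest) == 1:
        delta = closest / 10
    else:
        delta = closest
    start = lo - (lo % delta) + delta
    end = hi - (hi % delta)
    return range(int(start), int(end + delta), int(delta))
# e.g. _tick_positions_sketch(0, 250) uses delta 100: ticks at [100, 200]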
class GraphicRenderer:
def __init__(self, start, end):
self.__start = float(start)
self.__end = float(end)
self.__mid_scale = ScaleRenderer()
self.__mid_scale.set_top()
self.__bot_scale = ScaleRenderer()
self.__bot_scale.set_bounds(start, end)
self.__bot_scale.set_bot()
self.__width = 1
self.__height = 1
def get_width(self):
return self.__width
def get_height(self):
return self.__height
# return x, y, width, height
def get_data_rectangle(self):
y_start = self.__top_legend.get_height()
x_start = self.__data.get_data_x_start()
return(x_start, y_start, self.__width - x_start, self.__data.get_height())
def scale_data(self, x):
x_start = self.__data.get_data_x_start()
x_scaled = x / (self.__width - x_start) * (self.__r_end - self.__r_start)
return x_scaled
# return x, y, width, height
def get_selection_rectangle(self):
y_start = self.__top_legend.get_height() + self.__data.get_height() + self.__mid_scale.get_height() + 20
y_height = self.__bot_scale.get_height() + 20
x_start = self.__bot_scale.get_position(self.__r_start)
x_end = self.__bot_scale.get_position(self.__r_end)
return(x_start, y_start, x_end - x_start, y_height)
def scale_selection(self, x):
x_scaled = x / self.__width * (self.__end - self.__start)
return x_scaled
def set_range(self, start, end):
s = min(start, end)
e = max(start, end)
start = max(self.__start, s)
end = min(self.__end, e)
self.__r_start = start
self.__r_end = end
self.__data.set_render_range(start, end)
self.__mid_scale.set_bounds(start, end)
self.layout(self.__width, self.__height)
def get_range(self):
return(self.__r_start, self.__r_end)
def set_data(self, data):
self.__data = data
def set_top_legend(self, top_legend):
self.__top_legend = top_legend
def layout(self, width, height):
self.__width = width
self.__height = height
self.__top_legend.layout(width)
top_legend_height = self.__top_legend.get_height()
self.__data.layout(width)
self.__mid_scale.layout(width - self.__data.get_data_x_start())
self.__bot_scale.layout(width)
return
def __x_pixel(self, x, width):
new_x = (x - self.__start) * width / (self.__end - self.__start)
return new_x
def draw(self, ctx):
# default background is white
ctx.save()
ctx.set_source_rgb(1, 1, 1)
ctx.set_operator(cairo.OPERATOR_SOURCE)
ctx.rectangle(0, 0, self.__width, self.__height)
ctx.fill()
# top legend
ctx.save()
self.__top_legend.draw(ctx)
top_legend_height = self.__top_legend.get_height()
ctx.restore()
# separation line
ctx.move_to(0, top_legend_height)
ctx.line_to(self.__width, top_legend_height)
ctx.close_path()
ctx.set_line_width(2)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke()
# data
ctx.save()
ctx.translate(0,
top_legend_height)
self.__data.draw(ctx)
ctx.restore()
# scale below data
ctx.save()
ctx.translate(self.__data.get_data_x_start(),
top_legend_height + self.__data.get_height() + self.__mid_scale.get_height())
self.__mid_scale.draw(ctx)
ctx.restore()
height_used = top_legend_height + self.__data.get_height() + self.__mid_scale.get_height()
# separation between scale and left pane
ctx.move_to(self.__data.get_data_x_start(), height_used)
ctx.rel_line_to(0, -self.__mid_scale.get_height())
ctx.close_path()
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(2)
ctx.stroke()
# separation below scale
ctx.move_to(0, height_used)
ctx.line_to(self.__width, height_used)
ctx.close_path()
ctx.set_line_width(2)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke()
select_start = self.__bot_scale.get_position(self.__r_start)
select_end = self.__bot_scale.get_position(self.__r_end)
# left connection between top scale and bottom scale
        ctx.move_to(0, height_used)
ctx.line_to(self.__data.get_data_x_start(), height_used)
ctx.line_to(select_start, height_used + 20)
ctx.line_to(0, height_used + 20)
ctx.line_to(0, height_used)
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(1)
ctx.stroke_preserve()
ctx.set_source_rgb(0.9, 0.9, 0.9)
ctx.fill()
# right connection between top scale and bottom scale
ctx.move_to(self.__width, height_used)
ctx.line_to(self.__width, height_used + 20)
ctx.line_to(select_end, height_used + 20)
ctx.line_to(self.__width, height_used)
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(1)
ctx.stroke_preserve()
ctx.set_source_rgb(0.9, 0.9, 0.9)
ctx.fill()
height_used += 20
        # grey out the unselected area left and right of the selection
        unused_start = select_start
        unused_end = select_end
        unused_height = self.__bot_scale.get_height() + 20
        ctx.rectangle(0, height_used,
                      unused_start,
                      unused_height)
        ctx.rectangle(unused_end,
                      height_used,
                      self.__width - unused_end,
                      unused_height)
        ctx.set_source_rgb(0.9, 0.9, 0.9)
        ctx.fill()
# border line around bottom scale
ctx.move_to(unused_end, height_used)
ctx.line_to(self.__width, height_used)
ctx.line_to(self.__width, height_used + unused_height)
ctx.line_to(0, height_used + unused_height)
ctx.line_to(0, height_used)
ctx.line_to(unused_start, height_used)
ctx.close_path()
ctx.set_line_width(2)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke()
ctx.move_to(unused_start, height_used)
ctx.line_to(unused_end, height_used)
ctx.close_path()
ctx.set_line_width(1)
ctx.set_source_rgb(0.9, 0.9, 0.9)
ctx.stroke()
# unused area dot borders
ctx.save()
ctx.move_to(max(unused_start, 2), height_used)
ctx.rel_line_to(0, unused_height)
ctx.move_to(min(unused_end, self.__width - 2), height_used)
ctx.rel_line_to(0, unused_height)
ctx.set_dash([5], 0)
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(1)
ctx.stroke()
ctx.restore()
# bottom scale
ctx.save()
ctx.translate(0, height_used)
self.__bot_scale.draw(ctx)
ctx.restore()
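# GtkGraphicRenderer embeds a GraphicRenderer in a gtk.DrawingArea: it keeps
# a cached cairo buffer surface for fast repaints and translates mouse events
# into selection-handle drags, selection moves, and data-pane panning.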
class GtkGraphicRenderer(gtk.DrawingArea):
def __init__(self, data):
super(GtkGraphicRenderer, self).__init__()
self.__data = data
self.__moving_left = False
self.__moving_right = False
self.__moving_both = False
self.__moving_top = False
self.__force_full_redraw = True
self.add_events(gtk.gdk.POINTER_MOTION_MASK)
self.add_events(gtk.gdk.BUTTON_PRESS_MASK)
self.add_events(gtk.gdk.BUTTON_RELEASE_MASK)
self.connect("expose_event", self.expose)
self.connect('size-allocate', self.size_allocate)
self.connect('motion-notify-event', self.motion_notify)
self.connect('button-press-event', self.button_press)
self.connect('button-release-event', self.button_release)
    def set_smaller_zoom(self):
        # Zoom out: double the width of the selected range.
        (start, end) = self.__data.get_range()
        self.__data.set_range(start, start + (end - start) * 2)
        self.__force_full_redraw = True
        self.queue_draw()
    def set_bigger_zoom(self):
        # Zoom in: halve the width of the selected range.
        (start, end) = self.__data.get_range()
        self.__data.set_range(start, start + (end - start) / 2)
        self.__force_full_redraw = True
        self.queue_draw()
    def output_png(self, filename):
        # Render the graphic into an offscreen surface and save it as PNG.
        surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,
                                     self.__data.get_width(),
                                     self.__data.get_height())
        ctx = cairo.Context(surface)
        self.__data.draw(ctx)
        surface.write_to_png(filename)
    def button_press(self, widget, event):
        # Hit-test against the selection rectangle (left edge, right edge,
        # or interior) and against the data pane to start a drag.
        (x, y, width, height) = self.__data.get_selection_rectangle()
        (d_x, d_y, d_width, d_height) = self.__data.get_data_rectangle()
if event.y > y and event.y < y + height:
            if abs(event.x - x) < 5:
                self.__moving_left = True
                self.__moving_left_cur = event.x
                return True
            if abs(event.x - (x + width)) < 5:
                self.__moving_right = True
                self.__moving_right_cur = event.x
                return True
if event.x > x and event.x < x + width:
self.__moving_both = True
self.__moving_both_start = event.x
self.__moving_both_cur = event.x
return True
if event.y > d_y and event.y < (d_y + d_height):
if event.x > d_x and event.x < (d_x + d_width):
self.__moving_top = True
self.__moving_top_start = event.x
self.__moving_top_cur = event.x
return True
return False
    def button_release(self, widget, event):
        # Commit the active drag: convert the final handle position into
        # time units and apply the new range.
        if self.__moving_left:
self.__moving_left = False
left = self.__data.scale_selection(self.__moving_left_cur)
right = self.__data.get_range()[1]
self.__data.set_range(left, right)
self.__force_full_redraw = True
self.queue_draw()
return True
if self.__moving_right:
self.__moving_right = False
right = self.__data.scale_selection(self.__moving_right_cur)
left = self.__data.get_range()[0]
self.__data.set_range(left, right)
self.__force_full_redraw = True
self.queue_draw()
return True
if self.__moving_both:
self.__moving_both = False
delta = self.__data.scale_selection(self.__moving_both_cur - self.__moving_both_start)
(left, right) = self.__data.get_range()
self.__data.set_range(left + delta, right + delta)
self.__force_full_redraw = True
self.queue_draw()
return True
if self.__moving_top:
self.__moving_top = False
return False
    def motion_notify(self, widget, event):
        # Update the active drag, or just adjust the mouse cursor when
        # hovering over a draggable region.
        (x, y, width, height) = self.__data.get_selection_rectangle()
if self.__moving_left:
if event.x <= 0:
self.__moving_left_cur = 0
elif event.x >= x + width:
self.__moving_left_cur = x + width
else:
self.__moving_left_cur = event.x
self.queue_draw_area(0, int(y), int(self.__width), int(height))
return True
if self.__moving_right:
            if event.x >= self.__width:
                self.__moving_right_cur = self.__width
elif event.x < x:
self.__moving_right_cur = x
else:
self.__moving_right_cur = event.x
self.queue_draw_area(0, int(y), int(self.__width), int(height))
return True
if self.__moving_both:
cur_e = self.__width - (x + width - self.__moving_both_start)
cur_s = (self.__moving_both_start - x)
if event.x < cur_s:
self.__moving_both_cur = cur_s
elif event.x > cur_e:
self.__moving_both_cur = cur_e
else:
self.__moving_both_cur = event.x
self.queue_draw_area(0, int(y), int(self.__width), int(height))
return True
if self.__moving_top:
self.__moving_top_cur = event.x
delta = self.__data.scale_data(self.__moving_top_start - self.__moving_top_cur)
(left, right) = self.__data.get_range()
self.__data.set_range(left + delta, right + delta)
self.__force_full_redraw = True
self.__moving_top_start = event.x
self.queue_draw()
return True
(d_x, d_y, d_width, d_height) = self.__data.get_data_rectangle()
if event.y > y and event.y < y + height:
if abs(event.x - x) < 5 or abs(event.x - (x + width)) < 5:
widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.SB_H_DOUBLE_ARROW))
return True
if event.x > x and event.x < x + width:
widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
return True
if event.y > d_y and event.y < (d_y + d_height):
if event.x > d_x and event.x < (d_x + d_width):
widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
return True
widget.window.set_cursor(None)
return False
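    # Re-run the layout whenever the widget is resized.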
def size_allocate(self, widget, allocation):
self.__width = allocation.width
self.__height = allocation.height
self.__data.layout(allocation.width, allocation.height)
self.__force_full_redraw = True
self.queue_draw()
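    # Repaint from the cached buffer surface, regenerating it only when a
    # full redraw was requested, then overlay guides for any active drag.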
def expose(self, widget, event):
if self.__force_full_redraw:
self.__buffer_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,
self.__data.get_width(),
self.__data.get_height())
ctx = cairo.Context(self.__buffer_surface)
self.__data.draw(ctx)
self.__force_full_redraw = False
ctx = widget.window.cairo_create()
ctx.rectangle(event.area.x, event.area.y,
event.area.width, event.area.height)
ctx.clip()
ctx.set_source_surface(self.__buffer_surface)
ctx.paint()
(x, y, width, height) = self.__data.get_selection_rectangle()
if self.__moving_left:
ctx.move_to(max(self.__moving_left_cur, 2), y)
ctx.rel_line_to(0, height)
ctx.close_path()
ctx.set_line_width(1)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke()
if self.__moving_right:
ctx.move_to(min(self.__moving_right_cur, self.__width - 2), y)
ctx.rel_line_to(0, height)
ctx.close_path()
ctx.set_line_width(1)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke()
if self.__moving_both:
delta_x = self.__moving_both_cur - self.__moving_both_start
left_x = x + delta_x
ctx.move_to(x + delta_x, y)
ctx.rel_line_to(0, height)
ctx.close_path()
ctx.move_to(x + width + delta_x, y)
ctx.rel_line_to(0, height)
ctx.close_path()
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(1)
ctx.stroke()
return False
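# MainWindow assembles the GTK UI: the renderer plus zoom and PNG-export
# buttons.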
class MainWindow:
def __init__(self):
return
def run(self, graphic):
window = gtk.Window()
self.__window = window
window.set_default_size(200, 200)
vbox = gtk.VBox()
window.add(vbox)
render = GtkGraphicRenderer(graphic)
self.__render = render
vbox.pack_end(render, True, True, 0)
hbox = gtk.HBox()
vbox.pack_start(hbox, False, False, 0)
smaller_zoom = gtk.Button("Zoom Out")
smaller_zoom.connect("clicked", self.__set_smaller_cb)
hbox.pack_start(smaller_zoom)
bigger_zoom = gtk.Button("Zoom In")
bigger_zoom.connect("clicked", self.__set_bigger_cb)
hbox.pack_start(bigger_zoom)
output_png = gtk.Button("Output Png")
output_png.connect("clicked", self.__output_png_cb)
hbox.pack_start(output_png)
window.connect('destroy', gtk.main_quit)
window.show_all()
#gtk.bindings_activate(gtk.main_quit, 'q', 0)
gtk.main()
def __set_smaller_cb(self, widget):
self.__render.set_smaller_zoom()
def __set_bigger_cb(self, widget):
self.__render.set_bigger_zoom()
def __output_png_cb(self, widget):
dialog = gtk.FileChooserDialog("Output Png", self.__window,
gtk.FILE_CHOOSER_ACTION_SAVE, ("Save", 1))
self.__dialog = dialog
dialog.set_default_response(1)
dialog.connect("response", self.__dialog_response_cb)
dialog.show()
return
def __dialog_response_cb(self, widget, response):
if response == 1:
filename = self.__dialog.get_filename()
self.__render.output_png(filename)
widget.hide()
return
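# read_data parses a simple line-oriented trace format. Example lines the
# four regexes below accept (the names and values here are only
# illustrative):
#
#   range cpu0 state busy 10 250
#   event-str cpu0 syscall read 42
#   event-int cpu0 irq 7 42
#   color busy #ff0000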
def read_data(filename):
timelines = Timelines()
colors = Colors()
fh = open(filename)
m1 = re.compile('range ([^ ]+) ([^ ]+) ([^ ]+) ([0-9]+) ([0-9]+)')
m2 = re.compile('event-str ([^ ]+) ([^ ]+) ([^ ]+) ([0-9]+)')
m3 = re.compile('event-int ([^ ]+) ([^ ]+) ([0-9]+) ([0-9]+)')
    m4 = re.compile('color ([^ ]+) #([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})')
for line in fh.readlines():
m = m1.match(line)
if m:
            timeline = timelines.get(m.group(1))
rang = timeline.get_range(m.group(2))
data_range = DataRange()
data_range.value = m.group(3)
data_range.start = int(m.group(4))
data_range.end = int(m.group(5))
rang.add_range(data_range)
continue
m = m2.match(line)
if m:
            timeline = timelines.get(m.group(1))
ev = timeline.get_event_str(m.group(2))
event = EventString()
event.value = m.group(3)
event.at = int(m.group(4))
ev.add_event(event)
continue
m = m3.match(line)
if m:
            timeline = timelines.get(m.group(1))
ev = timeline.get_event_int(m.group(2))
event = EventInt()
event.value = int(m.group(3))
event.at = int(m.group(4))
ev.add_event(event)
continue
m = m4.match(line)
if m:
r = int(m.group(2), 16)
g = int(m.group(3), 16)
b = int(m.group(4), 16)
            color = Color(r / 255.0, g / 255.0, b / 255.0)  # float components in [0, 1]
colors.add(m.group(1), color)
continue
    fh.close()
    timelines.sort()
    return (colors, timelines)
def main():
(colors, timelines) = read_data(sys.argv[1])
(lower_bound, upper_bound) = timelines.get_bounds()
graphic = GraphicRenderer(lower_bound, upper_bound)
top_legend = TopLegendRenderer()
range_values = timelines.get_all_range_values()
range_colors = []
for range_value in range_values:
range_colors.append(colors.lookup(range_value))
top_legend.set_legends(range_values,
range_colors)
graphic.set_top_legend(top_legend)
data = TimelinesRenderer()
data.set_timelines(timelines, colors)
graphic.set_data(data)
    # default range: a window one tenth of the data wide, centered on it
    range_mid = lower_bound + (upper_bound - lower_bound) / 2
    range_width = (upper_bound - lower_bound) / 10
    range_lo = range_mid - range_width / 2
    range_hi = range_mid + range_width / 2
    graphic.set_range(range_lo, range_hi)
main_window = MainWindow()
main_window.run(graphic)
if __name__ == '__main__':
    # Usage: python grid.py <trace-file>
    main()
|
venzozhang/GProject
|
utils/grid.py
|
Python
|
gpl-2.0
|
39708
|
["FLEUR"]
|
460955e77c4505b1a5a45f9cdb1096a6cb93657df3ccebb6237c82d5f29c23bd
|