Dataset schema:
- text: string (12 to 1.05M chars)
- repo_name: string (5 to 86 chars)
- path: string (4 to 191 chars)
- language: string (1 class)
- license: string (15 values)
- size: int32 (12 to 1.05M)
- keyword: list (1 to 23 items)
- text_hash: string (64 chars)
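A minimal sketch of iterating over records with this schema, assuming the data is distributed as JSON Lines; the file name records.jsonl is hypothetical and not part of the schema above:

import json

# Read one record per line; each record mirrors the schema above.
with open("records.jsonl", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        # Provenance metadata accompanies every source file's text.
        print(record["repo_name"], record["path"], record["license"], record["size"])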
# coding: utf-8
"""
Acceptance tests for Studio's Settings pages
"""
from __future__ import unicode_literals
import os
import random
import string
import json
from textwrap import dedent
from bok_choy.promise import EmptyPromise
from mock import patch
from common.test.acceptance.tests.studio.base_studio_test import StudioCourseTest
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.common.utils import add_enrollment_course_modes
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage
from common.test.acceptance.pages.studio.settings import SettingsPage
from common.test.acceptance.pages.studio.settings_advanced import AdvancedSettingsPage
from common.test.acceptance.pages.studio.settings_group_configurations import GroupConfigurationsPage
from common.test.acceptance.pages.studio.utils import get_input_value, type_in_codemirror
from common.test.acceptance.tests.helpers import create_user_partition_json, element_has_text
from openedx.core.lib.tests import attr
from xmodule.partitions.partitions import Group
@attr(shard=19)
class ContentGroupConfigurationTest(StudioCourseTest):
"""
Tests for content groups in the Group Configurations Page.
There are tests for the experiment groups in test_studio_split_test.
"""
def setUp(self):
super(ContentGroupConfigurationTest, self).setUp()
self.group_configurations_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.outline_page = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def populate_course_fixture(self, course_fixture):
"""
Populates the test course with a chapter, a sequential, and a vertical.
Tests that need a problem (for example, one visible only to the "alpha"
content group) add it themselves.
"""
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit')
)
)
)
def create_and_verify_content_group(self, name, existing_groups):
"""
Creates a new content group and verifies that it was properly created.
"""
self.assertEqual(existing_groups, len(self.group_configurations_page.content_groups))
if existing_groups == 0:
self.group_configurations_page.create_first_content_group()
else:
self.group_configurations_page.add_content_group()
config = self.group_configurations_page.content_groups[existing_groups]
config.name = name
# Save the content group
self.assertEqual(config.get_text('.action-primary'), "Create")
self.assertFalse(config.delete_button_is_present)
config.save()
self.assertIn(name, config.name)
return config
def test_no_content_groups_by_default(self):
"""
Scenario: Ensure that the message telling me to create a new content group is
shown when no content groups exist.
Given I have a course without content groups
When I go to the Group Configuration page in Studio
Then I see the message "You have not created any content groups yet."
"""
self.group_configurations_page.visit()
self.assertTrue(self.group_configurations_page.no_content_groups_message_is_present)
self.assertIn(
"You have not created any content groups yet.",
self.group_configurations_page.no_content_groups_message_text
)
def test_can_create_and_edit_content_groups(self):
"""
Scenario: Ensure that the content groups can be created and edited correctly.
Given I have a course without content groups
When I click the button 'Add your first Content Group'
And I set the new name and click the button 'Create'
Then I see the new content group is added and has correct data
And I click 'New Content Group' button
And I set the name and click the button 'Create'
Then I see the second content group is added and has correct data
When I edit the second content group
And I change the name and click the button 'Save'
Then I see the second content group is saved successfully and has the new name
"""
self.group_configurations_page.visit()
self.create_and_verify_content_group("New Content Group", 0)
second_config = self.create_and_verify_content_group("Second Content Group", 1)
# Edit the second content group
second_config.edit()
second_config.name = "Updated Second Content Group"
self.assertEqual(second_config.get_text('.action-primary'), "Save")
second_config.save()
self.assertIn("Updated Second Content Group", second_config.name)
def test_cannot_delete_used_content_group(self):
"""
Scenario: Ensure that the user cannot delete a used content group.
Given I have a course with 1 Content Group
And I go to the Group Configuration page
When I try to delete the Content Group with name "New Content Group"
Then I see the delete button is disabled.
"""
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Configuration alpha,',
'Content Group Partition',
[Group("0", 'alpha')],
scheme="cohort"
)
],
},
})
problem_data = dedent("""
<problem markdown="Simple Problem" max_attempts="" weight="">
<p>Choose Yes.</p>
<choiceresponse>
<checkboxgroup>
<choice correct="true">Yes</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""")
vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
self.course_fixture.create_xblock(
vertical.locator,
XBlockFixtureDesc('problem', "VISIBLE TO ALPHA", data=problem_data, metadata={"group_access": {0: [0]}}),
)
self.group_configurations_page.visit()
config = self.group_configurations_page.content_groups[0]
self.assertTrue(config.delete_button_is_disabled)
def test_can_delete_unused_content_group(self):
"""
Scenario: Ensure that the user can delete an unused content group.
Given I have a course with 1 Content Group
And I go to the Group Configuration page
When I delete the Content Group with name "New Content Group"
Then I see that there is no Content Group
When I refresh the page
Then I see that the content group has been deleted
"""
self.group_configurations_page.visit()
config = self.create_and_verify_content_group("New Content Group", 0)
self.assertTrue(config.delete_button_is_present)
self.assertEqual(len(self.group_configurations_page.content_groups), 1)
# Delete content group
config.delete()
self.assertEqual(len(self.group_configurations_page.content_groups), 0)
self.group_configurations_page.visit()
self.assertEqual(len(self.group_configurations_page.content_groups), 0)
def test_must_supply_name(self):
"""
Scenario: Ensure that validation of the content group works correctly.
Given I have a course without content groups
And I create a new content group without specifying a name and click the button 'Create'
Then I see the error message "Group name is required"
When I set a name and click the button 'Create'
Then I see the content group is saved successfully
"""
self.group_configurations_page.visit()
self.group_configurations_page.create_first_content_group()
config = self.group_configurations_page.content_groups[0]
config.save()
self.assertEqual(config.mode, 'edit')
self.assertEqual("Group name is required", config.validation_message)
config.name = "Content Group Name"
config.save()
self.assertIn("Content Group Name", config.name)
def test_can_cancel_creation_of_content_group(self):
"""
Scenario: Ensure that creation of a content group can be canceled correctly.
Given I have a course without content groups
When I click the button 'Add your first Content Group'
And I set the new name and click the button 'Cancel'
Then I see that there are no content groups in the course
"""
self.group_configurations_page.visit()
self.group_configurations_page.create_first_content_group()
config = self.group_configurations_page.content_groups[0]
config.name = "Content Group"
config.cancel()
self.assertEqual(0, len(self.group_configurations_page.content_groups))
def test_content_group_empty_usage(self):
"""
Scenario: When a content group is not used, ensure that the link to the outline page works correctly.
Given I have a course without content groups
And I create new content group
Then I see a link to the outline page
When I click on the outline link
Then I see the outline page
"""
self.group_configurations_page.visit()
config = self.create_and_verify_content_group("New Content Group", 0)
config.toggle()
config.click_outline_anchor()
# Wait for the page to load and verify that we've landed on the course outline page
self.outline_page.wait_for_page()
@attr(shard=17)
class EnrollmentTrackModeTest(StudioCourseTest):
"""
Tests for the enrollment tracks section
"""
def setUp(self, is_staff=True, test_xss=True):
super(EnrollmentTrackModeTest, self).setUp(is_staff=is_staff)
self.audit_track = "Audit"
self.verified_track = "Verified"
self.staff_user = self.user
def test_all_course_modes_present(self):
"""
Ensure that all course modes show up as groups on the Group Configurations
page within the Enrollment Tracks section, and that the edit buttons are
not available.
"""
add_enrollment_course_modes(self.browser, self.course_id, ['audit', 'verified'])
group_configurations_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
group_configurations_page.visit()
self.assertTrue(group_configurations_page.enrollment_track_section_present)
# Make sure the edit buttons are not available.
self.assertFalse(group_configurations_page.enrollment_track_edit_present)
groups = group_configurations_page.get_enrollment_groups()
for g in [self.audit_track, self.verified_track]:
self.assertIn(g, groups)
def test_one_course_mode(self):
"""
The purpose of this test is to ensure that the enrollment track section
is not shown when there is one course mode or fewer.
"""
add_enrollment_course_modes(self.browser, self.course_id, ['audit'])
group_configurations_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
group_configurations_page.visit()
self.assertFalse(group_configurations_page.enrollment_track_section_present)
groups = group_configurations_page.get_enrollment_groups()
self.assertEqual(len(groups), 0)
@attr(shard=19)
class AdvancedSettingsValidationTest(StudioCourseTest):
"""
Tests for validation feature in Studio's advanced settings tab
"""
course_name_key = 'Course Display Name'
course_name_value = 'Test Name'
def setUp(self):
super(AdvancedSettingsValidationTest, self).setUp()
self.advanced_settings = AdvancedSettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.type_fields = ['Course Display Name', 'Advanced Module List', 'Discussion Topic Mapping',
'Maximum Attempts', 'Course Announcement Date']
# Before every test, make sure to visit the page first
self.advanced_settings.visit()
def test_course_author_sees_default_advanced_settings(self):
"""
Scenario: Test that advanced settings have the default settings
Given a staff user logs in to Studio
When this user goes to the advanced settings page
Then this user sees 'Allow Anonymous Discussion Posts' as true
And 'Enable Timed Exams' as false
And 'Maximum Attempts' as null
"""
anonymous_discussion_setting = self.advanced_settings.get('Allow Anonymous Discussion Posts')
timed_exam_settings = self.advanced_settings.get('Enable Timed Exams')
max_attempts = self.advanced_settings.get('Maximum Attempts')
page_default_settings = [
anonymous_discussion_setting,
timed_exam_settings,
max_attempts
]
default_anonymous_discussion_setting = 'true'
default_timed_exam_settings = 'false'
default_max_attempts = 'null'
expected_default_settings = [
default_anonymous_discussion_setting,
default_timed_exam_settings,
default_max_attempts
]
self.assertEqual(
page_default_settings,
expected_default_settings
)
def test_keys_appear_alphabetically(self):
"""
Scenario: Test that advanced settings shows all keys in alphabetical order
Given a staff user logs in to Studio
When this user goes to the advanced settings page
Then this user sees all the advanced setting keys in alphabetical order
"""
key_names = self.advanced_settings.key_names
self.assertEqual(key_names, sorted(key_names))
def test_cancel_editing_key_value(self):
"""
Scenario: Test that advanced settings does not save the key value if cancel
is clicked from the notification bar
Given a staff user logs in to Studio
When this user goes to the advanced settings page and enters a new course name
And he clicks the 'Cancel' button when asked to save changes
When this user reloads the page
Then he does not see any change in the original course name
"""
original_course_display_name = self.advanced_settings.get(self.course_name_key)
new_course_name = 'New Course Name'
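# The CodeMirror editor at index 16 holds the 'Course Display Name' value on
# this page, so typing here edits the course name without saving it.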
type_in_codemirror(self.advanced_settings, 16, new_course_name)
self.advanced_settings.cancel()
self.advanced_settings.refresh_and_wait_for_load()
self.assertNotEqual(
original_course_display_name,
new_course_name,
(
'original course name: {} must not be equal to the unsaved course name {}'.format(
original_course_display_name,
new_course_name
)
)
)
self.assertEqual(
self.advanced_settings.get(self.course_name_key),
original_course_display_name,
(
'course name from the page should be the same as original_course_display_name: {}'.format(
original_course_display_name
)
)
)
def test_editing_key_value(self):
"""
Scenario: Test that advanced settings saves the key value if the save button
is clicked from the notification bar after editing
Given a staff user logs in to Studio
When this user goes to the advanced settings page and enters a new course name
And he clicks the 'Save' button on the notification bar
Then he is able to see the updated course name
"""
new_course_name = ''.join(random.choice(string.ascii_uppercase) for _ in range(10))
self.advanced_settings.set(self.course_name_key, new_course_name)
self.assertEqual(
self.advanced_settings.get(self.course_name_key),
'"{}"'.format(new_course_name),
(
'course name from the page should be the same as new_course_name: {}'.format(
new_course_name
)
)
)
def test_confirmation_is_shown_on_save(self):
"""
Scenario: Test that advanced settings shows confirmation after editing a field successfully
Given a staff user logs in to Studio
When this user goes to the advanced settings page and edits any value
And he clicks the 'Save' button on the notification bar
Then he is able to see the confirmation message
"""
self.advanced_settings.set('Maximum Attempts', 5)
confirmation_message = self.advanced_settings.confirmation_message
self.assertEqual(
confirmation_message,
'Your policy changes have been saved.',
'The confirmation message should appear after the settings are saved successfully'
)
def test_deprecated_settings_can_be_toggled(self):
"""
Scenario: Test that advanced settings can toggle deprecated settings
Given I am on the Advanced Course Settings page in Studio
When I toggle the display of deprecated settings
Then deprecated settings are shown
And I toggle the display of deprecated settings
Then deprecated settings are not shown
"""
self.advanced_settings.toggle_deprecated_settings()
button_text = self.advanced_settings.deprecated_settings_button_text
self.assertEqual(
button_text,
'Hide Deprecated Settings',
"Button text should change to 'Hide Deprecated Settings' after the click"
)
self.assertTrue(self.advanced_settings.is_deprecated_setting_visible())
self.advanced_settings.toggle_deprecated_settings()
self.assertFalse(self.advanced_settings.is_deprecated_setting_visible())
self.assertEqual(
self.advanced_settings.deprecated_settings_button_text,
'Show Deprecated Settings',
"Button text should change to 'Show Deprecated Settings' after the click"
)
def test_multi_line_input(self):
"""
Scenario: Test that advanced settings correctly shows the multi-line input
Given I am on the Advanced Course Settings page in Studio
When I create a JSON object as a value for "Discussion Topic Mapping"
Then it is displayed as formatted
"""
inputs = {
"key": "value",
"key_2": "value_2"
}
json_input = json.dumps(inputs)
self.advanced_settings.set('Discussion Topic Mapping', json_input)
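# The settings editor re-serializes saved values, so the compact JSON input
# is read back pretty-printed with newlines and indentation.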
self.assertEqual(
self.advanced_settings.get('Discussion Topic Mapping'),
'{\n "key": "value",\n "key_2": "value_2"\n}'
)
def test_automatic_quoting_of_non_json_value(self):
"""
Scenario: Test that advanced settings automatically quotes the field input
upon saving
Given I am on the Advanced Course Settings page in Studio
When I create a non-JSON value not in quotes
Then it is displayed as a string
"""
self.advanced_settings.set(self.course_name_key, self.course_name_value)
self.assertEqual(
self.advanced_settings.get(self.course_name_key),
'"Test Name"'
)
def test_validation_error_for_wrong_input_type(self):
"""
Scenario: Test error if value supplied is of the wrong type
Given I am on the Advanced Course Settings page in Studio
When I create a JSON object as a value for "Course Display Name"
Then I get an error on save
And I reload the page
Then the policy key value is unchanged
"""
course_display_name = self.advanced_settings.get('Course Display Name')
inputs = {
"key": "value",
"key_2": "value_2"
}
json_input = json.dumps(inputs)
self.advanced_settings.set('Course Display Name', json_input)
self.advanced_settings.wait_for_modal_load()
self.check_modal_shows_correct_contents(['Course Display Name'])
self.advanced_settings.refresh_and_wait_for_load()
self.assertEqual(
self.advanced_settings.get('Course Display Name'),
course_display_name,
'Wrong input for Course Display Name must not change its value'
)
def test_modal_shows_one_validation_error(self):
"""
Test that advanced settings don't save if there's a single wrong input,
and that it shows the correct error message in the modal.
"""
# Feed an integer value for String field.
# .set method saves automatically after setting a value
course_display_name = self.advanced_settings.get('Course Display Name')
self.advanced_settings.set('Course Display Name', 1)
self.advanced_settings.wait_for_modal_load()
# Test Modal
self.check_modal_shows_correct_contents(['Course Display Name'])
self.advanced_settings.refresh_and_wait_for_load()
self.assertEqual(
self.advanced_settings.get('Course Display Name'),
course_display_name,
'Wrong input for Course Display Name must not change its value'
)
def test_modal_shows_multiple_validation_errors(self):
"""
Test that advanced settings don't save with multiple wrong inputs
"""
# Save original values and feed wrong inputs
original_values_map = self.get_settings_fields_of_each_type()
self.set_wrong_inputs_to_fields()
self.advanced_settings.wait_for_modal_load()
# Test Modal
self.check_modal_shows_correct_contents(self.type_fields)
self.advanced_settings.refresh_and_wait_for_load()
for key, val in original_values_map.items():
self.assertEqual(
self.advanced_settings.get(key),
val,
'Wrong input for Advanced Settings fields must not change their values'
)
def test_undo_changes(self):
"""
Test that the 'Undo Changes' button in the modal resets all settings changes
"""
# Save original values and feed wrong inputs
original_values_map = self.get_settings_fields_of_each_type()
self.set_wrong_inputs_to_fields()
# Let modal popup
self.advanced_settings.wait_for_modal_load()
# Click Undo Changes button
self.advanced_settings.undo_changes_via_modal()
# Check that changes are undone
for key, val in original_values_map.items():
self.assertEqual(
self.advanced_settings.get(key),
val,
'Undoing should revert to the original value'
)
def test_manual_change(self):
"""
Test that the manual-changes button in the modal leaves the entered values unchanged
"""
inputs = {"Course Display Name": 1,
"Advanced Module List": 1,
"Discussion Topic Mapping": 1,
"Maximum Attempts": '"string"',
"Course Announcement Date": '"string"',
}
self.set_wrong_inputs_to_fields()
self.advanced_settings.wait_for_modal_load()
self.advanced_settings.trigger_manual_changes()
# Check that the validation modal went away.
self.assertFalse(self.advanced_settings.is_validation_modal_present())
# Iterate through the wrong values and make sure they're still displayed
for key, val in inputs.items():
self.assertEqual(
str(self.advanced_settings.get(key)),
str(val),
'manual change should keep: {}, but is: {}'.format(val, self.advanced_settings.get(key))
)
def check_modal_shows_correct_contents(self, wrong_settings_list):
"""
Helper function that checks that the validation modal contains the correct
error messages.
"""
# Check presence of modal
self.assertTrue(self.advanced_settings.is_validation_modal_present())
# The list of wrong settings and the error items presented in the modal should match
error_item_names = self.advanced_settings.get_error_item_names()
self.assertEqual(set(wrong_settings_list), set(error_item_names))
error_item_messages = self.advanced_settings.get_error_item_messages()
self.assertEqual(len(error_item_names), len(error_item_messages))
def get_settings_fields_of_each_type(self):
"""
Get one of each field type:
- String: Course Display Name
- List: Advanced Module List
- Dict: Discussion Topic Mapping
- Integer: Maximum Attempts
- Date: Course Announcement Date
"""
return {
"Course Display Name": self.advanced_settings.get('Course Display Name'),
"Advanced Module List": self.advanced_settings.get('Advanced Module List'),
"Discussion Topic Mapping": self.advanced_settings.get('Discussion Topic Mapping'),
"Maximum Attempts": self.advanced_settings.get('Maximum Attempts'),
"Course Announcement Date": self.advanced_settings.get('Course Announcement Date'),
}
def set_wrong_inputs_to_fields(self):
"""
Set wrong values for the chosen fields
"""
self.advanced_settings.set_values(
{
"Course Display Name": 1,
"Advanced Module List": 1,
"Discussion Topic Mapping": 1,
"Maximum Attempts": '"string"',
"Course Announcement Date": '"string"',
}
)
def test_only_expected_fields_are_displayed(self):
"""
Scenario: The Advanced Settings screen displays settings/fields not specifically hidden from
view by a developer.
Given I have a set of CourseMetadata fields defined for the course
When I view the Advanced Settings screen for the course
The total number of fields displayed matches the number I expect
And the actual fields displayed match the fields I expect to see
"""
expected_fields = self.advanced_settings.expected_settings_names
displayed_fields = self.advanced_settings.displayed_settings_names
self.assertEqual(set(displayed_fields), set(expected_fields))
@attr(shard=16)
class ContentLicenseTest(StudioCourseTest):
"""
Tests for course-level licensing (that is, setting the license for an
entire course's content to All Rights Reserved or Creative Commons).
"""
def setUp(self): # pylint: disable=arguments-differ
super(ContentLicenseTest, self).setUp()
self.outline_page = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.settings_page = SettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.lms_courseware = CoursewarePage(
self.browser,
self.course_id,
)
self.settings_page.visit()
def test_empty_license(self):
"""
When I visit the Studio settings page,
I see that the course license is "All Rights Reserved" by default.
Then I visit the LMS courseware page,
and I see that the default course license is displayed.
"""
self.assertEqual(self.settings_page.course_license, "All Rights Reserved")
self.lms_courseware.visit()
self.assertEqual(self.lms_courseware.course_license, "© All Rights Reserved")
def test_arr_license(self):
"""
When I visit the Studio settings page,
and I set the course license to "All Rights Reserved",
and I refresh the page,
I see that the course license is "All Rights Reserved".
Then I visit the LMS courseware page,
and I see that the course license is "All Rights Reserved".
"""
self.settings_page.course_license = "All Rights Reserved"
self.settings_page.save_changes()
self.settings_page.refresh_and_wait_for_load()
self.assertEqual(self.settings_page.course_license, "All Rights Reserved")
self.lms_courseware.visit()
self.assertEqual(self.lms_courseware.course_license, "© All Rights Reserved")
def test_cc_license(self):
"""
When I visit the Studio settings page,
and I set the course license to "Creative Commons",
and I refresh the page,
I see that the course license is "Creative Commons".
Then I visit the LMS courseware page,
and I see that the course license is "Some Rights Reserved".
"""
self.settings_page.course_license = "Creative Commons"
self.settings_page.save_changes()
self.settings_page.refresh_and_wait_for_load()
self.assertEqual(self.settings_page.course_license, "Creative Commons")
self.lms_courseware.visit()
# The course_license text will include a bunch of screen reader text to explain
# the selected options
self.assertIn("Some Rights Reserved", self.lms_courseware.course_license)
@attr('a11y')
class StudioSettingsA11yTest(StudioCourseTest):
"""
Class to test Studio pages accessibility.
"""
def setUp(self): # pylint: disable=arguments-differ
super(StudioSettingsA11yTest, self).setUp()
self.settings_page = SettingsPage(self.browser, self.course_info['org'], self.course_info['number'],
self.course_info['run'])
def test_studio_settings_page_a11y(self):
"""
Check accessibility of SettingsPage.
"""
self.settings_page.visit()
self.settings_page.wait_for_page()
self.settings_page.a11y_audit.config.set_rules({
"ignore": [
'link-href', # TODO: AC-590
'aria-allowed-role', # TODO: AC-936
'landmark-complementary-is-top-level', # TODO: AC-939
'radiogroup', # TODO: AC-941
'region', # TODO: AC-932
],
})
self.settings_page.a11y_audit.check_for_accessibility_errors()
@attr('a11y')
class StudioSubsectionSettingsA11yTest(StudioCourseTest):
"""
Class to test accessibility on the subsection settings modals.
"""
def setUp(self): # pylint: disable=arguments-differ
browser = os.environ.get('SELENIUM_BROWSER', 'firefox')
# This test will fail if run using phantomjs < 2.0, due to an issue with bind()
# See https://github.com/ariya/phantomjs/issues/10522 for details.
# The course_outline uses this function, and as such will not fully load when run
# under phantomjs 1.9.8. So, to prevent this test from timing out at course_outline.visit(),
# force the use of Firefox instead of phantomjs 1.9.8, which a11y tests normally use.
# TODO: remove this block once https://openedx.atlassian.net/browse/TE-1047 is resolved.
if browser == 'phantomjs':
browser = 'firefox'
with patch.dict(os.environ, {'SELENIUM_BROWSER': browser}):
super(StudioSubsectionSettingsA11yTest, self).setUp(is_staff=True)
self.course_outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def populate_course_fixture(self, course_fixture):
course_fixture.add_advanced_settings({
"enable_proctored_exams": {"value": "true"}
})
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1')
)
)
)
def test_special_exams_menu_a11y(self):
"""
Given that I am a staff member
And I am editing settings on the special exams menu
Then that menu is accessible
"""
self.course_outline.visit()
self.course_outline.open_subsection_settings_dialog()
self.course_outline.select_advanced_tab()
self.course_outline.a11y_audit.config.set_rules({
"ignore": [
'section', # TODO: AC-491
],
})
# limit the scope of the audit to the special exams tab on the modal dialog
self.course_outline.a11y_audit.config.set_scope(
include=['section.edit-settings-timed-examination']
)
self.course_outline.a11y_audit.check_for_accessibility_errors()
@attr(shard=16)
class StudioSettingsImageUploadTest(StudioCourseTest):
"""
Class to test course settings image uploads.
"""
def setUp(self): # pylint: disable=arguments-differ
super(StudioSettingsImageUploadTest, self).setUp()
self.settings_page = SettingsPage(self.browser, self.course_info['org'], self.course_info['number'],
self.course_info['run'])
self.settings_page.visit()
# Ensure jQuery is loaded before running a jQuery query
self.settings_page.wait_for_ajax()
# This value appears towards the end of the work that jQuery performs on the page
self.settings_page.wait_for_jquery_value('input#course-name:text', 'test_run')
def test_upload_course_card_image(self):
# upload image
file_to_upload = 'image.jpg'
self.settings_page.upload_image('#upload-course-image', file_to_upload)
self.assertIn(file_to_upload, self.settings_page.get_uploaded_image_path('#course-image'))
def test_upload_course_banner_image(self):
# upload image
file_to_upload = 'image.jpg'
self.settings_page.upload_image('#upload-banner-image', file_to_upload)
self.assertIn(file_to_upload, self.settings_page.get_uploaded_image_path('#banner-image'))
def test_upload_course_video_thumbnail_image(self):
# upload image
file_to_upload = 'image.jpg'
self.settings_page.upload_image('#upload-video-thumbnail-image', file_to_upload)
self.assertIn(file_to_upload, self.settings_page.get_uploaded_image_path('#video-thumbnail-image'))
@attr(shard=16)
class CourseSettingsTest(StudioCourseTest):
"""
Class to test course settings.
"""
COURSE_START_DATE_CSS = "#course-start-date"
COURSE_END_DATE_CSS = "#course-end-date"
ENROLLMENT_START_DATE_CSS = "#course-enrollment-start-date"
ENROLLMENT_END_DATE_CSS = "#course-enrollment-end-date"
COURSE_START_TIME_CSS = "#course-start-time"
COURSE_END_TIME_CSS = "#course-end-time"
ENROLLMENT_START_TIME_CSS = "#course-enrollment-start-time"
ENROLLMENT_END_TIME_CSS = "#course-enrollment-end-time"
course_start_date = '12/20/2013'
course_end_date = '12/26/2013'
enrollment_start_date = '12/01/2013'
enrollment_end_date = '12/10/2013'
dummy_time = "15:30"
def setUp(self, is_staff=False, test_xss=True):
super(CourseSettingsTest, self).setUp()
self.settings_page = SettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# Before every test, make sure to visit the page first
self.settings_page.visit()
self.ensure_input_fields_are_loaded()
def set_course_dates(self):
"""
Set dates for the course.
"""
dates_dictionary = {
self.COURSE_START_DATE_CSS: self.course_start_date,
self.COURSE_END_DATE_CSS: self.course_end_date,
self.ENROLLMENT_START_DATE_CSS: self.enrollment_start_date,
self.ENROLLMENT_END_DATE_CSS: self.enrollment_end_date
}
self.settings_page.set_element_values(dates_dictionary)
def ensure_input_fields_are_loaded(self):
"""
Ensures values in input fields are loaded.
"""
EmptyPromise(
lambda: self.settings_page.q(css='#course-organization').attrs('value')[0],
"Waiting for input fields to be loaded"
).fulfill()
def test_user_can_set_course_date(self):
"""
Scenario: User can set course dates
Given I have opened a new course in Studio
When I select Schedule and Details
And I set course dates
And I press the "Save" notification button
And I reload the page
Then I see the set dates
"""
# Set dates
self.set_course_dates()
# Set times
time_dictionary = {
self.COURSE_START_TIME_CSS: self.dummy_time,
self.ENROLLMENT_END_TIME_CSS: self.dummy_time
}
self.settings_page.set_element_values(time_dictionary)
# Save changes
self.settings_page.save_changes()
self.settings_page.refresh_and_wait_for_load()
self.ensure_input_fields_are_loaded()
css_selectors = [self.COURSE_START_DATE_CSS, self.COURSE_END_DATE_CSS,
self.ENROLLMENT_START_DATE_CSS, self.ENROLLMENT_END_DATE_CSS,
self.COURSE_START_TIME_CSS, self.ENROLLMENT_END_TIME_CSS]
expected_values = [self.course_start_date, self.course_end_date,
self.enrollment_start_date, self.enrollment_end_date,
self.dummy_time, self.dummy_time]
# Assert the changes have persisted.
self.assertEqual(
[get_input_value(self.settings_page, css_selector) for css_selector in css_selectors],
expected_values
)
def test_clear_previously_set_course_dates(self):
"""
Scenario: User can clear previously set course dates (except start date)
Given I have set course dates
And I clear all the dates except start
And I press the "Save" notification button
And I reload the page
Then I see cleared dates
"""
# Set dates
self.set_course_dates()
# Clear all dates except start date
values_to_set = {
self.COURSE_END_DATE_CSS: '',
self.ENROLLMENT_START_DATE_CSS: '',
self.ENROLLMENT_END_DATE_CSS: ''
}
self.settings_page.set_element_values(values_to_set)
# Save changes and refresh the page
self.settings_page.save_changes()
self.settings_page.refresh_and_wait_for_load()
self.ensure_input_fields_are_loaded()
css_selectors = [self.COURSE_START_DATE_CSS, self.COURSE_END_DATE_CSS,
self.ENROLLMENT_START_DATE_CSS, self.ENROLLMENT_END_DATE_CSS]
expected_values = [self.course_start_date, '', '', '']
# Assert the changes have persisted.
self.assertEqual(
[get_input_value(self.settings_page, css_selector) for css_selector in css_selectors],
expected_values
)
def test_cannot_clear_the_course_start_date(self):
"""
Scenario: User cannot clear the course start date
Given I have set course dates
And I press the "Save" notification button
And I clear the course start date
Then I receive a warning about the course start date
And I reload the page
And the previously set start date is shown
"""
# Set dates
self.set_course_dates()
# Save changes
self.settings_page.save_changes()
# Get default start date
default_start_date = get_input_value(self.settings_page, self.COURSE_START_DATE_CSS)
# Set course start date to empty
self.settings_page.set_element_values({self.COURSE_START_DATE_CSS: ''})
# Make sure the error message is shown with the appropriate text
error_message_css = '.message-error'
self.settings_page.wait_for_element_presence(error_message_css, 'Error message is present')
self.assertTrue(element_has_text(self.settings_page, error_message_css,
"The course must have an assigned start date."))
# Refresh the page and assert start date has not changed.
self.settings_page.refresh_and_wait_for_load()
self.ensure_input_fields_are_loaded()
self.assertEqual(
get_input_value(self.settings_page, self.COURSE_START_DATE_CSS),
default_start_date
)
def test_user_can_correct_course_start_date_warning(self):
"""
Scenario: User can correct the course start date warning
Given I have tried to clear the course start date
And I have entered a new course start date
And I press the "Save" notification button
Then the warning about the course start date goes away
And I reload the page
Then my new course start date is shown
"""
# Set course start date to empty
self.settings_page.set_element_values({self.COURSE_START_DATE_CSS: ''})
# Make sure we get error message
error_message_css = '.message-error'
self.settings_page.wait_for_element_presence(error_message_css, 'Error message is present')
self.assertTrue(element_has_text(self.settings_page, error_message_css,
"The course must have an assigned start date."))
# Set new course start value
self.settings_page.set_element_values({self.COURSE_START_DATE_CSS: self.course_start_date})
self.settings_page.un_focus_input_field()
# Error message disappears
self.settings_page.wait_for_element_absence(error_message_css, 'Error message is not present')
# Save the changes and refresh the page.
self.settings_page.save_changes()
self.settings_page.refresh_and_wait_for_load()
self.ensure_input_fields_are_loaded()
# Assert changes are persistent.
self.assertEqual(
get_input_value(self.settings_page, self.COURSE_START_DATE_CSS),
self.course_start_date
)
def test_settings_are_only_persisted_when_saved(self):
"""
Scenario: Settings are only persisted when saved
Given I have set course dates
And I press the "Save" notification button
When I change fields
And I reload the page
Then I do not see the changes
"""
# Set course dates.
self.set_course_dates()
# Save changes.
self.settings_page.save_changes()
default_value_enrollment_start_date = get_input_value(self.settings_page,
self.ENROLLMENT_START_TIME_CSS)
# Set the value of enrollment start time and
# reload the page without saving.
self.settings_page.set_element_values({self.ENROLLMENT_START_TIME_CSS: self.dummy_time})
self.settings_page.refresh_and_wait_for_load()
self.ensure_input_fields_are_loaded()
css_selectors = [self.COURSE_START_DATE_CSS, self.COURSE_END_DATE_CSS,
self.ENROLLMENT_START_DATE_CSS, self.ENROLLMENT_END_DATE_CSS,
self.ENROLLMENT_START_TIME_CSS]
expected_values = [self.course_start_date, self.course_end_date,
self.enrollment_start_date, self.enrollment_end_date,
default_value_enrollment_start_date]
# Assert that the value of enrollment start time
# was not saved.
self.assertEqual(
[get_input_value(self.settings_page, css_selector) for css_selector in css_selectors],
expected_values
)
def test_settings_are_reset_on_cancel(self):
"""
Scenario: Settings are reset on cancel
Given I have set course dates
And I press the "Save" notification button
When I change fields
And I press the "Cancel" notification button
Then I do not see the changes
"""
# Set course dates
self.set_course_dates()
# Save changes
self.settings_page.save_changes()
default_value_enrollment_start_date = get_input_value(self.settings_page,
self.ENROLLMENT_START_TIME_CSS)
# Set value but don't save it.
self.settings_page.set_element_values({self.ENROLLMENT_START_TIME_CSS: self.dummy_time})
self.settings_page.click_button("cancel")
# Make sure changes are not saved after cancel.
css_selectors = [self.COURSE_START_DATE_CSS, self.COURSE_END_DATE_CSS,
self.ENROLLMENT_START_DATE_CSS, self.ENROLLMENT_END_DATE_CSS,
self.ENROLLMENT_START_TIME_CSS]
expected_values = [self.course_start_date, self.course_end_date,
self.enrollment_start_date, self.enrollment_end_date,
default_value_enrollment_start_date]
self.assertEqual(
[get_input_value(self.settings_page, css_selector) for css_selector in css_selectors],
expected_values
)
def test_confirmation_is_shown_on_save(self):
"""
Scenario: Confirmation is shown on save
Given I have opened a new course in Studio
When I select Schedule and Details
And I change the "<field>" field to "<value>"
And I press the "Save" notification button
Then I see a confirmation that my changes have been saved
"""
# Set date
self.settings_page.set_element_values({self.COURSE_START_DATE_CSS: self.course_start_date})
# Confirmation is shown upon save.
# The save_changes function ensures that the save
# confirmation is shown.
self.settings_page.save_changes()
def test_changes_in_course_overview_show_a_confirmation(self):
"""
Scenario: Changes in Course Overview show a confirmation
Given I have opened a new course in Studio
When I select Schedule and Details
And I change the course overview
And I press the "Save" notification button
Then I see a confirmation that my changes have been saved
"""
# Change the value of course overview
self.settings_page.change_course_description('Changed overview')
# Save changes
# The save_changes function ensures that the save
# confirmation is shown.
self.settings_page.save_changes()
def test_user_cannot_save_invalid_settings(self):
"""
Scenario: User cannot save invalid settings
Given I have opened a new course in Studio
When I select Schedule and Details
And I change the "Course Start Date" field to ""
Then the save notification button is disabled
"""
# Change the course start date to an invalid (empty) value.
self.settings_page.set_element_values({self.COURSE_START_DATE_CSS: ''})
# Confirm that the save button is disabled.
self.assertTrue(self.settings_page.is_element_present(".action-primary.action-save.is-disabled"))
repo_name: jolyonb/edx-platform
path: common/test/acceptance/tests/studio/test_studio_settings.py
language: Python
license: agpl-3.0
size: 48807
keyword: ["VisIt"]
text_hash: 5e5ea32e17f2bc11a209b00dcce60d5d0eaad9619ccca8e8f7cd9d9f73251c03
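The text_hash column is a 64-character hex digest. A sketch of verifying it, reusing a record from the loop above and assuming (the schema does not say) that the digest is SHA-256 over the UTF-8 encoded text field:

import hashlib

# Recompute the digest; equality holds only if SHA-256 is indeed the scheme.
digest = hashlib.sha256(record["text"].encode("utf-8")).hexdigest()
print(digest == record["text_hash"])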
# pt.po
val = {" days." : " dias.",
"(all)" : "(todos)",
"(any)" : "(qualquer)",
"(anyone)" : "(qualquer pessoa)",
"(available)" : "(disponíveis)",
"(blank)" : "(em branco)",
"(both)" : "(ambos)",
"(depleted)" : "(esgotados)",
"(everyone)" : "(todas as pessoas)",
"(master user, not editable)" : "(utilizador principal, não editável)",
"(no change)" : "(sem alterações)",
"(no deduction)" : "(sem deduções)",
"(none)" : "(nenhum)",
"(unknown)" : "(desconhecido)",
"(use system)" : "(utilizar sistema)",
"({0} given)" : "({0} administradas)",
"({0} given, {1} remaining)" : "({0} administradas, {1} restantes)",
"1 treatment" : "1 tratamento",
"1 week" : "1 semana",
"1 year" : "1 ano",
"2 weeks" : "2 semanas",
"3 months" : "3 meses",
"4 weeks" : "4 semanas",
"5 Year" : "5 anos",
"6 months" : "6 meses",
"8 weeks" : "8 semanas",
"9 months" : "9 meses",
"A (Stray Dog)" : "A (Cão Errante)",
"A description or other information about the animal" : "Uma descrição ou outras informações sobre o animal",
"A list of areas this person will homecheck - eg: S60 S61" : "Uma lista das áreas onde esta pessoa faz inspeções de domicílios, por exemplo 1000-002",
"A movement must have a reservation date or type." : "Um movimento tem de ter uma data ou tipo de reserva.",
"A person is required for this movement type." : "É necessária uma pessoa para este tipo de movimento.",
"A publish job is already running." : "Já está em curso um trabalho de publicação.",
"A short version of the reference number" : "Uma versão abreviada do número de referência",
"A task is already running." : "Já existe uma tarefa em curso.",
"A unique number to identify this movement" : "Um número exclusivo para identificar este movimento",
"A unique reference for this litter" : "Uma referência exclusiva para esta ninhada",
"A4" : "A4",
"ACO" : "Agente de Controlo Animal",
"AM" : "AM",
"ASM" : "ASM",
"ASM 3 is compatible with your iPad and other tablets." : "O ASM 3 é compatível com iPad e outros tablets.",
"ASM News" : "Notícias do ASM",
"ASM can talk to payment processors and request payment from your customers and donors." : "O ASM pode comunicar com processadores de pagamentos e solicitar-lhes pagamentos por parte dos seus clientes e autores de donativos.",
"ASM can track detailed monthly and annual figures for your shelter. Install the Monthly Figures and Annual Figures reports from Settings-Reports-Browse sheltermanager.com" : "O ASM permite gerir os relatórios mensais e anuais detalhados do seu abrigo. Instale os relatórios Valores Mensais e Valores Anuais através do menu Definições-Relatórios-Procurar sheltermanager.com",
"ASM comes with a dictionary of 4,000 animal names. Just click the generate random name button when adding an animal." : "O ASM inclui um dicionário com 4000 nomes de animais. Basta clicar no botão para gerar um nome aleatório ao adicionar um animal.",
"ASM will remove this animal from the waiting list after a set number of weeks since the last owner contact date." : "O ASM irá remover este animal da lista de espera após um determinado número de semanas após a data do último contacto do dono.",
"Abandoned" : "Abandonado",
"About" : "Sobre...",
"Abuse" : "Abuso",
"Abyssinian" : "Abissínio",
"Access Settings Menu" : "Aceder ao Menu Definições",
"Account" : "Conta",
"Account Types" : "Tipos de Conta",
"Account code '{0}' has already been used." : "O código de conta '{0}' já está a ser utilizado.",
"Account code '{0}' is not valid." : "O código de conta '{0}' não é válido.",
"Account code cannot be blank." : "Código de conta não pode estar vazio.",
"Account disabled." : "Conta desativada.",
"Accountant" : "Contabilista",
"Accounts" : "Contas",
"Accounts need a code." : "As contas precisam de um código.",
"Action" : "Ação",
"Active" : "Ativo",
"Active Incidents" : "Incidentes ativos",
"Active Trap Loans" : "Empréstimos de Armadilhas Ativos",
"Active license held" : "Titular de licença válida",
"Active users: {0}" : "Utilizadores ativos: {0}",
"Add" : "Adicionar",
"Add Accounts" : "Adicionar Contas",
"Add Animal" : "Adicionar Animal",
"Add Animals" : "Adicionar Animais",
"Add Appointment" : "Adicionar Agendamento",
"Add Call" : "Adicionar Chamada",
"Add Citations" : "Adicionar Advertências",
"Add Clinic Appointment" : "Adicionar Agendamento Clínico",
"Add Cost" : "Adicionar Custo",
"Add Diary" : "Adicionar Diário",
"Add Diets" : "Adicionar Dietas",
"Add Document to Repository" : "Adicionar Documento ao Repositório",
"Add Flag" : "Adicionar Sinalização",
"Add Found Animal" : "Adicionar Animal Encontrado",
"Add Incidents" : "Adicionar Incidentes",
"Add Investigation" : "Adicionar Investigação",
"Add Invoice Item" : "Adicionar Item de Fatura",
"Add Licenses" : "Adicionar Licenças",
"Add Litter" : "Adicionar Ninhada",
"Add Log" : "Adicionar Registo",
"Add Log to Animal" : "Adicionar Registo ao Animal",
"Add Lost Animal" : "Adicionar Animal Perdido",
"Add Media" : "Adicionar Imagens",
"Add Medical Records" : "Adicionar Registos Clínicos",
"Add Message" : "Adicionar Mensagem",
"Add Movement" : "Adicionar Movimento",
"Add Online Forms" : "Adicionar Formulários Online",
"Add Payments" : "Adicionar Pagamentos",
"Add Person" : "Adicionar Pessoa",
"Add Report" : "Adicionar Relatório",
"Add Rota" : "Adicionar Turno",
"Add Stock" : "Adicionar Stock",
"Add Tests" : "Adicionar Testes",
"Add Transport" : "Adicionar Transporte",
"Add Trap Loans" : "Adicionar Empréstimos de Armadilhas",
"Add Users" : "Adicionar Utilizadores",
"Add Vaccinations" : "Adicionar Vacinações",
"Add Vouchers" : "Adicionar Vouchers",
"Add Waiting List" : "Adicionar Lista de Espera",
"Add a diary note" : "Adicionar nota ao diário",
"Add a found animal" : "Adicionar um animal encontrado",
"Add a log entry" : "Adicionar entrada de registo",
"Add a lost animal" : "Adicionar um animal perdido",
"Add a medical regimen" : "Adicionar um regime clínico",
"Add a new animal" : "Adicionar um novo animal",
"Add a new log" : "Adicionar novo registo",
"Add a new person" : "Adicionar uma nova pessoa",
"Add a person" : "Adicionar uma pessoa",
"Add a photo" : "Adicionar uma foto",
"Add a test" : "Adicionar um teste",
"Add a vaccination" : "Adicionar uma vacinação",
"Add account" : "Adicionar conta",
"Add additional field" : "Adicionar campo extra",
"Add an animal to the waiting list" : "Adicionar um animal à lista de espera",
"Add an extra message to the fosterer email" : "Adicionar uma mensagem extra ao email da FAT",
"Add citation" : "Adicionar advertência",
"Add cost" : "Adicionar custo",
"Add details of this email to the log after sending" : "Adicionar detalhes deste email ao registo após o envio",
"Add diary" : "Adicionar diário",
"Add diary task" : "Adicionar tarefa diária",
"Add diet" : "Adicionar dieta",
"Add extra images for use in reports and documents" : "Adicionar imagens extra para usar em relatórios e documentos",
"Add form field" : "Adicionar campo de formulário",
"Add found animal" : "Adicionar animal encontrado",
"Add investigation" : "Adicionar investigação",
"Add license" : "Adicionar licença",
"Add litter" : "Adicionar ninhada",
"Add log" : "Adicionar registo",
"Add lost animal" : "Adicionar animal perdido",
"Add medical profile" : "Adicionar perfil clínico",
"Add medical regimen" : "Adicionar regime clínico",
"Add message" : "Adicionar mensagem",
"Add movement" : "Adicionar movimento",
"Add online form" : "Adicionar formulário online",
"Add payment" : "Adicionar pagamento",
"Add person" : "Adicionar pessoa",
"Add report" : "Adicionar relatório",
"Add role" : "Adicionar função",
"Add rota item" : "Adicionar item de turno",
"Add stock" : "Adicionar stock",
"Add template" : "Adicionar modelo",
"Add test" : "Adicionar teste",
"Add this text to all animal descriptions" : "Adicionar este texto a todas as descrições de animais",
"Add to log" : "Adicionar ao registo",
"Add transport" : "Adicionar transporte",
"Add trap loan" : "Adicionar empréstimo de armadilha",
"Add user" : "Adicionar utilizador",
"Add vaccination" : "Adicionar vacinação",
"Add voucher" : "Adicionar voucher",
"Add waiting list" : "Adicionar lista de espera",
"Add {0}" : "Adicionar {0}",
"Added" : "Adicionado",
"Added by {0} on {1}" : "Adicionado por {0} em {1}",
"Additional" : "Adicional",
"Additional Fields" : "Campos Adicionais",
"Additional fields" : "Campos adicionais",
"Additional fields need a name, label and type." : "Os campos extra precisam de um nome, etiqueta e tipo.",
"Address" : "Morada",
"Address Contains" : "Morada Contém",
"Address contains" : "Morada contém",
"Administered" : "Administrado",
"Administering Vet" : "Veterinário que Administra",
"Adopt" : "Adotar",
"Adopt an animal" : "Adotar um animal",
"Adoptable" : "Adotável",
"Adoptable Animal" : "Animal Adotável",
"Adoptable and published for the first time" : "Adotável e publicado pela primeira vez",
"Adopted" : "Adotado",
"Adopted Animals" : "Animais Adotados",
"Adopted Transferred In {0}" : "Transferência de Adoção em {0}",
"Adopter" : "Adotante",
"Adoption" : "Adoção",
"Adoption Coordinator" : "Coordenador de Adoção",
"Adoption Coordinator and Fosterer" : "Coordenador de Adoção e FAT",
"Adoption Event" : "Evento de Adoção",
"Adoption Fee" : "Taxa de Adoção",
"Adoption fee donations" : "Donativos para taxa de adoção",
"Adoption movements must have a valid adoption date." : "Os movimentos de adoção precisam de ter uma data de adoção válida.",
"Adoption successfully created." : "Adoção criada com êxito.",
"Adoptions {0}" : "Adoções {0}",
"Adult" : "Adulto",
"Advanced" : "Avançado",
"Advanced find animal screen defaults to on shelter" : "O ecrã de pesquisa avançada de animais utiliza como padrão os animais disponíveis no abrigo",
"Affenpinscher" : "Affenpinscher",
"Afghan Hound" : "Galgo Afegão",
"African Grey" : "Cinzento Africano",
"After the user presses submit and ASM has accepted the form, redirect the user to this URL" : "Após o utilizador submeter e o ASM aceitar o formulário, redirecionar o utilizador para este link",
"Age" : "Idade",
"Age Group" : "Grupo Etário",
"Age Group 1" : "Grupo Etário 1",
"Age Group 2" : "Grupo Etário 2",
"Age Group 3" : "Grupo Etário 3",
"Age Group 4" : "Grupo Etário 4",
"Age Group 5" : "Grupo Etário 5",
"Age Group 6" : "Grupo Etário 6",
"Age Group 7" : "Grupo Etário 7",
"Age Group 8" : "Grupo Etário 8",
"Age Groups" : "Grupos Etários",
"Age groups are assigned based on the age of an animal. The figure in the left column is the upper limit in years for that group." : "Os grupos etários são atribuídos com base na idade do animal. Os valores da coluna da esquerda correspondem ao limite máximo de anos para cada grupo.",
"Aged Between" : "Idade Entre",
"Aged From" : "Idade Desde",
"Aged To" : "Idade Até",
"Aged over 6 months" : "Idade superior a 6 meses",
"Aged under 6 months" : "Idade inferior a 6 meses",
"Aggression" : "Agressividade",
"Airedale Terrier" : "Airedale Terrier",
"Akbash" : "Akbash",
"Akita" : "Akita",
"Alaskan Malamute" : "Malamute do Alasca",
"Alerts" : "Alertas",
"All Animals" : "Todos os Animais",
"All On-Shelter Animals" : "Todos os Animais do Abrigo",
"All Publishers" : "Todos os Editores",
"All accounts" : "Todas as contas",
"All animal care officers on file." : "Todos os agentes de proteção animal em arquivo.",
"All animal shelters on file." : "Todos os abrigos animais em arquivo.",
"All animals matching current publishing options." : "Todos os animais que correspondem às opções de publicação atuais.",
"All animals on the shelter." : "Todos os animais no abrigo.",
"All animals where the hold ends today." : "Todos os animais cuja retenção termina hoje.",
"All animals who are currently held in case of reclaim." : "Todos os animais atualmente retidos para a eventualidade de reivindicação.",
"All animals who are currently quarantined." : "Todos os animais atualmente em quarentena.",
"All animals who are flagged as not for adoption." : "Todos os animais que se encontram sinalizados como não adotáveis.",
"All animals who have been on the shelter longer than {0} months." : "Todos os animais que estiveram no abrigo por mais de {0} meses.",
"All animals who have not been microchipped" : "Todos os animais que não foram microchipados",
"All animals who have not received a rabies vaccination" : "Todos os animais que não foram vacinados contra a raiva",
"All banned owners on file." : "Todos os donos em lista negra no arquivo.",
"All diary notes" : "Todas as anotações de diário",
"All donors on file." : "Todos os doadores em arquivo.",
"All drivers on file." : "Todos os condutores em arquivo.",
"All existing data and media in your database will be REMOVED before importing the CSV file." : "Todos os dados existentes na base de dados serão REMOVIDOS antes da importação do ficheiro CSV.",
"All fields should be completed." : "Todos os campos têm de ser preenchidos.",
"All fosterers on file." : "Todas as FAT em arquivo.",
"All homechecked owners on file." : "Todos os donos inspecionados em arquivo.",
"All homecheckers on file." : "Todos os inspetores de donos em arquivo.",
"All members on file." : "Todos os associados em arquivo.",
"All notes upto today" : "Todas as anotações até à data",
"All people on file." : "Todas as pessoas em arquivo.",
"All retailers on file." : "Todos os lojistas em arquivo.",
"All staff on file." : "Todo o pessoal em arquivo.",
"All time" : "Desde sempre",
"All vets on file." : "Todos os veterinários em arquivo.",
"All volunteers on file." : "Todos os voluntários em arquivo.",
"Allergies" : "Alergias",
"Allow a fosterer to be selected" : "Permitir a seleção de uma FAT",
"Allow an adoption coordinator to be selected" : "Permitir a seleção de um coordenador de adoção",
"Allow creation of payments on the Move-Reserve screen" : "Permitir a criação de pagamentos no ecrã Movimentos-Reservar",
"Allow drag and drop to move animals between locations" : "Permitir arrastar e soltar para mover animais entre locais",
"Allow duplicate license numbers" : "Permitir números de licença duplicados",
"Allow duplicate microchip numbers" : "Permitir números de microchip duplicados",
"Allow editing of latitude/longitude with minimaps" : "Permitir a edição da latitude/longitude nos mini mapas",
"Allow overriding of the movement number on the Move menu screens" : "Permitir a anulação do número de movimento nos ecrãs do menu Movimentos",
"Allow reservations to be created that are not linked to an animal" : "Permitir a criação de reservas que não estejam vinculadas a um animal",
"Allow use of OpenOffice document templates" : "Permitir a utilização de modelos de documentos OpenOffice",
"Allow use of tokens" : "Permitir a utilização de tokens",
"Alphabetically A-Z" : "Alfabética A-Z",
"Alphabetically Z-A" : "Alfabética Z-A",
"Already Signed" : "Já Assinado",
"Already fostered to this person." : "Já atribuído a esta FAT.",
"Altered" : "Esterilizado/Castrado",
"Altered Date" : "Data da Esterilização/Castração",
"Altered Dog - 1 year" : "Cão Esterilizado/Castrado - 1 ano",
"Altered Dog - 3 year" : "Cão Esterilizado/Castrado - 3 anos",
"Altered between" : "Esterilizado/Castrado entre",
"Altered between two dates" : "Esterilizado/Castrado entre duas datas",
"Altering Vet" : "Veterinário Responsável pela Esterilização/Castração",
"Always show an emblem to indicate the current location" : "Mostrar sempre um emblema para indicar a localização atual",
"Amazon" : "Amazonas",
"Amber" : "Âmbar",
"American" : "Americano",
"American Bulldog" : "Buldogue Americano",
"American Curl" : "Curl Americano",
"American Eskimo Dog" : "Cão Esquimó Americano",
"American Fuzzy Lop" : "Fuzzy Lop Americano",
"American Sable" : "Sable Americano",
"American Shorthair" : "Pelo Curto Americano",
"American Staffordshire Terrier" : "Staffordshire Terrier Americano",
"American Water Spaniel" : "Water Spaniel Americano",
"American Wirehair" : "Americano de Pelo Cerdoso",
"Amount" : "Montante",
"An age in years, eg: 1, 0.5" : "Idade em anos, p.ex 1 ou 0,5",
"An animal cannot have multiple open movements." : "Um animal não pode ter vários movimentos em aberto.",
"An optional comma separated list of email addresses to send the output of this report to" : "Uma lista opcional de endereços de email separados por vírgulas para enviar a saída deste relatório",
"Anatolian Shepherd" : "Pastor Anatólio",
"Angora Rabbit" : "Coelho Angorá",
"Animal" : "Animal",
"Animal '{0}' created with code {1}" : "Animal '{0}' criado com o código {1}",
"Animal '{0}' successfully marked deceased." : "Animal '{0}' corretamente registado como morto.",
"Animal (optional)" : "Animal (opcional)",
"Animal (via animalname field)" : "Animal (através do campo animalname)",
"Animal - Additional" : "Animal - Adicional",
"Animal - Death" : "Animal - Morte",
"Animal - Details" : "Animal - Detalhes",
"Animal - Entry" : "Animal - Entrada",
"Animal - Health and Identification" : "Animal - Saúde e Identificação",
"Animal - Notes" : "Animal - Anotações",
"Animal Codes" : "Códigos de Animais",
"Animal Control" : "Controlo Animal",
"Animal Control Caller" : "Denunciador de Controlo Animal",
"Animal Control Incident" : "Incidente de Controlo Animal",
"Animal Control Officer" : "Responsável de Controlo Animal",
"Animal Control Victim" : "Vítima de Controlo Animal",
"Animal Emblems" : "Emblemas de Animal",
"Animal Flags" : "Sinalizações de Animais",
"Animal Links" : "Links de Animais",
"Animal Name" : "Nome do Animal",
"Animal Selection" : "Seleção de Animal",
"Animal Shelter Manager" : "Animal Shelter Manager",
"Animal Shelter Manager Login" : "Login do Animal Shelter Manager",
"Animal Sponsorship" : "Apadrinhamento de Animais",
"Animal Type" : "Tipo de Animal",
"Animal Types" : "Tipos de Animais",
"Animal board costs" : "Custos com o alojamento do animal",
"Animal cannot be deceased before it was brought to the shelter" : "O animal não pode estar morto antes de ter sido trazido para o abrigo",
"Animal code format" : "Formato de código de animal",
"Animal comments MUST contain this phrase in order to match." : "Os comentários sobre TÊM de incluir esta frase para que haja uma correspondência.",
"Animal control calendar" : "Agenda de controlo de animais",
"Animal control incidents matching '{0}'." : "Incidentes de controlo animal correspondentes a '{0}'.",
"Animal defecation" : "Fezes de animais",
"Animal descriptions" : "Descrições de animais",
"Animal destroyed" : "Animal abatido",
"Animal emblems are the little icons that appear next to animal names in shelter view, the home page and search results." : "Os emblemas dos animais são os pequenos ícones que aparecem junto aos nomes dos animais na página do abrigo, na página de entrada e nos resultados de pesquisa.",
"Animal food costs" : "Custos com a alimentação dos animais",
"Animal picked up" : "Animal recolhido",
"Animal shortcode format" : "Formato de código abreviado de animal",
"Animals" : "Animais",
"Animals at large" : "Animais errantes",
"Animals left in vehicle" : "Animais deixados em veículo",
"Animals matching '{0}'." : "Animais correspondentes a '{0}'.",
"Animals per page" : "Animais por página",
"Annual" : "Anual",
"Annually" : "Anualmente",
"Anonymize" : "Anonimizar",
"Anonymize personal data after this many years" : "Anonimizar os dados pessoais após este número de anos",
"Any animal types, species, breeds, colors, locations, etc. in the CSV file that aren't already in the database will be created during the import." : "Todos os tipos de animais, espécies, raças, cores, localizações, etc. encontradas no ficheiro CSV que ainda não existirem na base de dados serão criados durante a importação.",
"Any health problems the animal has" : "Quaisquer problemas de saúde do animal",
"Any information about the animal" : "Qualquer informação sobre o animal",
"Any markings or distinguishing features the animal has" : "Marcas ou características distintivas do animal",
"Appaloosa" : "Appaloosa",
"Appenzell Mountain Dog" : "Appenzell Mountain Dog",
"Applehead Siamese" : "Siamês Applehead",
"Appointment" : "Agendamento",
"Appointment date must be a valid date" : "A data do agendamento ter de ser uma data válida",
"Appointment {0}. {1} on {2} for {3}" : "Agendamento {0}. {1} em {2} para {3}",
"Appointments need a date and time." : "Os agendamentos necessitam de uma data e hora.",
"Approved" : "Aprovado",
"Apr" : "Abr",
"April" : "Abril",
"Arabian" : "Árabe",
"Are you sure?" : "Tem a certeza?",
"Area" : "Área",
"Area Found" : "Área Onde Foi Encontrado",
"Area Lost" : "Área do Desaparecimento",
"Area Postcode" : "Código Postal da Área",
"Area where the animal was found" : "Área onde o animal foi encontrado",
"Area where the animal was lost" : "Área onde o animal foi perdido",
"Areas" : "Áreas",
"Arrived" : "Chegada",
"Ask the user for a city" : "Solicitar cidade ao utilizador",
"Ask the user for a flag" : "Solicitar sinalização ao utilizador",
"Ask the user for a location" : "Solicitar local ao utilizador",
"Ask the user for a species" : "Solicitar espécie ao utilizador",
"Ask the user for a type" : "Solicitar tipo ao utilizador",
"Asset" : "Ativo",
"Asset::Premises" : "Ativo::Instalações",
"At least the last name should be completed." : "Precisa de preencher pelo menos o apelido.",
"Attach" : "Anexar",
"Attach File" : "Anexar Ficheiro",
"Attach Link" : "Anexar Link",
"Attach a file" : "Anexar um ficheiro",
"Attach a link to a web resource" : "Anexar um link para um recurso na web",
"Attach link" : "Anexar link",
"Audit Trail" : "Histórico de Auditoria",
"Aug" : "Ago",
"August" : "Agosto",
"Australian Cattle Dog/Blue Heeler" : "Australian Cattle Dog/Blue Heeler",
"Australian Kelpie" : "Kelpie Australiano",
"Australian Shepherd" : "Pastor Australiano",
"Australian Terrier" : "Terrier Australiano",
"Auto log users out after this many minutes of inactivity" : "Terminar automaticamente a sessão dos utilizadores após estes minutos de inatividade",
"Auto remove on {0}" : "Remover automaticamente em {0}",
"Auto removed due to lack of owner contact." : "Automaticamente removido devido a falta de contacto com o dono.",
"Automatically cancel any outstanding reservations on an animal when it is adopted" : "Cancelar automaticamente quaisquer reservas do animal quando o mesmo for adotado",
"Automatically remove" : "Remover automaticamente",
"Automatically remove this media item on this date" : "Remover automaticamente este item de imagem nesta data",
"Automatically return any outstanding foster movements on an animal when it is adopted" : "Devolver automaticamente quaisquer movimentos de FAT associados a um animal quando o mesmo for adotado",
"Automatically return any outstanding foster movements on an animal when it is transferred" : "Devolver automaticamente quaisquer movimentos de FAT associados a um animal quando o mesmo for transferido",
"Available for adoption" : "Disponível para adoção",
"Available sheltermanager.com reports" : "Relatórios do sheltermanager.com disponíveis",
"B (Boarding Animal)" : "B (Alojamento de Animais)",
"BCC" : "BCC",
"Baby" : "Bebé",
"Balance" : "Balanço",
"Balinese" : "Balinês",
"Bank" : "Banco",
"Bank account interest" : "Juros da conta bancária",
"Bank current account" : "Conta bancária à ordem",
"Bank deposit account" : "Conta bancária de depósito",
"Bank savings account" : "Conta bancária de poupança",
"Bank::Current" : "Banco::À Ordem",
"Bank::Deposit" : "Banco::Depósito",
"Bank::Savings" : "Banco::Poupança",
"Banned" : "Em lista negra",
"Base Color" : "Cor Base",
"Basenji" : "Basenji",
"Basset Hound" : "Basset Hound",
"Batch" : "Lote",
"Batch Number" : "Número de lote",
"Beagle" : "Beagle",
"Bearded Collie" : "Bearded Collie",
"Beauceron" : "Beauceron",
"Bedlington Terrier" : "Bedlington Terrier",
"Beginning of month" : "Início do mês",
"Belgian Hare" : "Hare Belga",
"Belgian Shepherd Dog Sheepdog" : "Cão Pastor Belga",
"Belgian Shepherd Laekenois" : "Pastor Belga Laekenois",
"Belgian Shepherd Malinois" : "Pastor Belga Malinois",
"Belgian Shepherd Tervuren" : "Pastor Belga Tervuren",
"Bengal" : "Bengal",
"Bernese Mountain Dog" : "Boiadeiro de Berna",
"Beveren" : "Beveren",
"Bichon Frise" : "Bichon Frisé",
"Bird" : "Ave",
"Birman" : "Birmanês",
"Bite" : "Mordedura",
"Biting" : "Mordedor",
"Black" : "Preto",
"Black Labrador Retriever" : "Labrador Retriever Preto",
"Black Mouth Cur" : "Black Mouth Cur",
"Black Tortie" : "Tortie Preto",
"Black and Brindle" : "Preto e Tigrado",
"Black and Brown" : "Preto e Castanho",
"Black and Tan" : "Preto e Amarelo",
"Black and Tan Coonhound" : "Coonhound Preto e Amarelo",
"Black and White" : "Preto e Branco",
"Bloodhound" : "Perdigueiro",
"Blue" : "Azul",
"Blue Tortie" : "Tortie Azul",
"Bluetick Coonhound" : "Bluetick Coonhound",
"Board and Food" : "Alojamento e Alimentação",
"Boarding" : "Alojamento",
"Boarding Cost" : "Custos de Alojamento",
"Boarding cost type" : "Tipo de custo de alojamento",
"Bobtail" : "Bobtail",
"Body" : "Corpo",
"Bombay" : "Bombay",
"Bonded" : "Casal/Companheiro",
"Bonded With" : "Casal/Companheiro De",
"Books" : "Livros",
"Border Collie" : "Border Collie",
"Border Terrier" : "Border Terrier",
"Bordetella" : "Bordetella",
"Born in Shelter" : "Nascido no Abrigo",
"Born on Foster {0}" : "Nascido em FAT {0}",
"Born on Shelter {0}" : "Nascido no Abrigo {0}",
"Borzoi" : "Borzoi",
"Boston Terrier" : "Boston Terrier",
"Both" : "Ambos",
"Bouvier des Flanders" : "Bouvier des Flanders",
"Boxer" : "Boxer",
"Boykin Spaniel" : "Boykin Spaniel",
"Breed" : "Raça",
"Breed to use when publishing to third party services and adoption sites" : "Raça a utilizar ao publicar em serviços de terceiros e sites de adoção",
"Breeds" : "Raças",
"Briard" : "Briard",
"Brindle" : "Malhado",
"Brindle and Black" : "Preto com manchas",
"Brindle and White" : "Branco com manchas",
"Britannia Petite" : "Petite Britânico",
"British Shorthair" : "Pelo Curto Inglês",
"Brittany Spaniel" : "Brittany Spaniel",
"Brotogeris" : "Brotogeris",
"Brought In" : "Entregue no Abrigo",
"Brought In By" : "Entregue no Abrigo Por",
"Brown" : "Castanho",
"Brown and Black" : "Castanho e Preto",
"Brown and White" : "Castanho e Branco",
"Browse sheltermanager.com" : "Procurar em sheltermanager.com",
"Browse sheltermanager.com and install some reports, charts and mail merges into your new system." : "Procurar em sheltermanager.com para instalar alguns relatórios, gráficos e envios de emails no seu novo sistema.",
"Brussels Griffon" : "Brussels Griffon",
"Budgie/Budgerigar" : "Budgie/Budgerigar",
"Bulk Complete Diary" : "Preencher Diário em Lote",
"Bulk Complete Medical Records" : "Preencher Registos Clínicos em Lote",
"Bulk Complete Vaccinations" : "Preencher Vacinações em Lote",
"Bulk Complete Waiting List" : "Preencher Lista de Espera em Lote",
"Bulk Regimen" : "Regime em Lote",
"Bulk Test" : "Testes em Lote",
"Bulk Transport" : "Transporte em Lote",
"Bulk Vaccination" : "Vacinação em Lote",
"Bulk change animals" : "Alteração de Animais em Lote",
"Bull Terrier" : "Bull Terrier",
"Bullmastiff" : "Bullmastiff",
"Bunny Rabbit" : "Bunny Rabbit",
"Burmese" : "Birmanês",
"Burmilla" : "Burmilla",
"By" : "Por",
"CC" : "CC",
"CSV of animal/adopter data" : "CSV de dados de animais/adotantes",
"CSV of animal/medical data" : "CSV de dados de animais/clínicos",
"CSV of incident data" : "CSV de dados de incidentes",
"CSV of license data" : "CSV de dados de licença",
"CSV of media data" : "CSV dos dados de imagens",
"CSV of payment data" : "CSV de dados de pagamento",
"CSV of person data" : "CSV de dados de pessoas",
"Caique" : "Caique",
"Cairn Terrier" : "Cairn Terrier",
"Calendar View" : "Vista de Agenda",
"Calendar view" : "Vista de agenda",
"Calico" : "Calico",
"Californian" : "Californiano",
"Call" : "Chamada",
"Call Date/Time" : "Data/Hora da Chamada",
"Call between" : "Chamada entre",
"Call between two dates" : "Chamada entre duas datas",
"Caller" : "Autor da Chamada",
"Caller Name" : "Nome do Autor da Chamada",
"Caller Phone" : "Telefone do Autor da Chamada",
"Camel" : "Camelo",
"Can Login" : "Pode Aceder",
"Can afford donation?" : "Pode pagar o donativo?",
"Can't reserve an animal that has an active movement." : "Não é possível reservar um animal que tenha um movimento ativo.",
"Canaan Dog" : "Canaan",
"Canadian Hairless" : "Sem Pelo Canadiano",
"Canary" : "Canário",
"Cancel" : "Cancelar",
"Cancel holds on animals this many days after the brought in date, or 0 to never cancel" : "Cancelar retenções de animais este número de dias após a data de entrega no abrigo, ou indicar 0 para nunca cancelar",
"Cancel unadopted reservations after" : "Cancelar reservas sem adoção após",
"Cancel unadopted reservations after this many days, or 0 to never cancel" : "Cancelar reservas sem adoção após este número de dias, ou indicar 0 para nunca cancelar",
"Cancelled" : "Cancelado",
"Cancelled Reservation" : "Reserva Cancelada",
"Cane Corso Mastiff" : "Cane Corso Mastiff",
"Cardcom" : "Cardcom",
"Cardcom Document Type" : "Tipo de Documento Cardcom",
"Cardcom Error URL" : "URL de Erro Cardcom",
"Cardcom Payment Gateway" : "Intermediário de Pagamento Cardcom",
"Cardcom Success URL" : "URL de Êxito Cardcom",
"Cardcom Terminal Number" : "Número de Terminal Cardcom",
"Cardcom Token Charge" : "Débito de Token Cardcom",
"Cardcom User Name" : "Nome de Utilizador Cardcom",
"Carolina Dog" : "Carolina",
"Cash" : "Dinheiro",
"Cat" : "Gato",
"Catahoula Leopard Dog" : "Catahoula Leopardo",
"Category" : "Categoria",
"Cats" : "Gatos",
"Cattery" : "Gatil",
"Cattle Dog" : "Cão de Proteção de Rebanhos",
"Cavalier King Charles Spaniel" : "Cavalier King Charles Spaniel",
"Cell" : "Célula",
"Cell Phone" : "Telemóvel",
"Champagne D'Argent" : "Champagne D'Argent",
"Change" : "Alterar",
"Change Accounts" : "Alterar Contas",
"Change Animals" : "Alterar Animais",
"Change Citations" : "Alterar Advertências",
"Change Clinic Apointment" : "Alterar Agendamento Clínico",
"Change Cost" : "Alterar Custo",
"Change Date Required" : "Estipular Data para a Alteração",
"Change Diets" : "Alterar Dietas",
"Change Found Animal" : "Alterar Animal Encontrado",
"Change Incidents" : "Alterar Incidentes",
"Change Investigation" : "Alterar Investigação",
"Change Licenses" : "Alterar Licenças",
"Change Litter" : "Alterar Ninhada",
"Change Log" : "Registo de Alterações",
"Change Lost Animal" : "Alterar Animal Perdido",
"Change Media" : "Alterar Imagens",
"Change Medical Records" : "Alterar Registos Clínicos",
"Change Movement" : "Alterar Movimento",
"Change Online Forms" : "Alterar Formulários Online",
"Change Password" : "Alterar Senha",
"Change Payments" : "Alterar Pagamentos",
"Change Person" : "Alterar Pessoa",
"Change Publishing Options" : "Alterar Opções de Publicação",
"Change Report" : "Alterar Relatório",
"Change Rota" : "Alterar Turno",
"Change Stock" : "Alterar Stock",
"Change System Options" : "Alterar Opções do Sistema",
"Change Tests" : "Alterar Testes",
"Change Transactions" : "Alterar Transações",
"Change Transport" : "Alterar Transporte",
"Change Trap Loans" : "Alterar Empréstimos de Armadilhas",
"Change User Settings" : "Alterar Definições do Utilizador",
"Change Vaccinations" : "Alterar Vacinas",
"Change Vouchers" : "Alterar Vouchers",
"Change Waiting List" : "Alterar Lista de Espera",
"Change date required on selected treatments" : "Data de alteração necessária para os tratamentos selecionados",
"Changed Mind" : "Mudou de Ideias",
"Chart" : "Gráfico",
"Chart (Bar)" : "Gráfico (Barras)",
"Chart (Line)" : "Gráfico (Linhas)",
"Chart (Pie)" : "Gráfico (Circular)",
"Chart (Point)" : "Gráfico (Pontos)",
"Chart (Steps)" : "Gráfico (Passos)",
"Chartreux" : "Chartreux",
"Check" : "Cheque",
"Check License" : "Verificar Licença",
"Check No" : "Cheque N.º",
"Checkbox" : "Caixa de Seleção",
"Checkbox Group" : "Grupo de Caixas de Seleção",
"Checked By" : "Verificado Por",
"Checkered Giant" : "Checkered Giant",
"Cheque" : "Cheque",
"Chesapeake Bay Retriever" : "Retriever de Chesapeake Bay",
"Chicken" : "Galinha",
"Chihuahua" : "Chihuahua",
"Children" : "Filhos",
"Chinchilla" : "Chinchila",
"Chinese Crested Dog" : "Cão Chinês Crested",
"Chinese Foo Dog" : "Cão Chinês Foo",
"Chlamydophila" : "Clamidófila",
"Chocolate" : "Chocolate",
"Chocolate Labrador Retriever" : "Labrador Retriever Chocolate",
"Chocolate Tortie" : "Tortie Chocolate",
"Chow Chow" : "Chow Chow",
"Cinnamon" : "Canela",
"Cinnamon Tortoiseshell" : "Atartarugado Canela",
"Citation Type" : "Tipo de Advertência",
"Citation Types" : "Tipos de Advertências",
"Citations" : "Advertências",
"City" : "Cidade",
"City contains" : "Cidade contém",
"Class" : "Classe",
"Clear" : "Limpar",
"Clear and sign again" : "Limpar e assinar novamente",
"Clinic" : "Consultas Veterinárias",
"Clinic Calendar" : "Agenda de Consultas Veterinárias",
"Clinic Invoice - {0}" : "Fatura de Consulta Veterinária - {0}",
"Clinic Statuses" : "Estados de Consultas Veterinárias",
"Clone" : "Clonar",
"Clone Animals" : "Clonar Animais",
"Clone Rota" : "Clonar Turno",
"Clone the rota this week to another week" : "Clonar os turnos desta semana para outra semana",
"Cloning..." : "A clonar...",
"Close" : "Fechar",
"Clumber Spaniel" : "Clumber Spaniel",
"Clydesdale" : "Clydesdale",
"Coat" : "Pelagem",
"Coat Type" : "Tipo de Pelagem",
"Coat Types" : "Tipos de Pelagem",
"Cockapoo" : "Cockapoo",
"Cockatiel" : "Cockatiel",
"Cockatoo" : "Cockatoo",
"Cocker Spaniel" : "Cocker Spaniel",
"Code" : "Código",
"Code contains" : "Código contém",
"Code format tokens:" : "Tokens de formato de código:",
"Collie" : "Collie",
"Color" : "Cor",
"Color to use when publishing to third party services and adoption sites" : "Cor a utilizar ao publicar em serviços e sites de adoção de terceiros",
"Colors" : "Cores",
"Columns" : "Colunas",
"Columns displayed" : "Colunas apresentadas",
"Comma separated list of extra addresses that the From email field of send email dialogs will prompt with" : "Lista separada por vírgulas com os endereços extra que serão apresentados pelo campo \"De\" da caixa de diálogo de envio de e-mails",
"Comma separated list of extra addresses that the To and CC email fields of send email dialogs will prompt with" : "Lista separada por vírgulas com os endereços extra que serão apresentados pelos campos \"Para\" e \"CC\" da caixa de diálogo de envio de e-mails",
"Comma separated list of units for this location, eg: 1,2,3,4,Isolation,Pen 5" : "Lista separada por vírgulas das secções deste local, por exemplo: 1,2,3,4, Isolamento, Jaula 5",
"Comments" : "Comentários",
"Comments Contain" : "Comentários Contêm",
"Comments contain" : "Comentários contêm",
"Comments copied to web preferred media." : "Comentários copiados para as imagens preferenciais para a Web.",
"Complaint" : "Queixa",
"Complete" : "Concluído",
"Complete Tasks" : "Completar Tarefas",
"Completed" : "Concluído em",
"Completed Between" : "Concluído Entre",
"Completed Type" : "Tipo de Conclusão",
"Completed between" : "Concluído entre",
"Completed between two dates" : "Concluído entre duas datas",
"Completed notes upto today" : "Anotações concluídas até à data",
"Completed type {0}" : "Tipo de conclusão {0}",
"Completion Date/Time" : "Data/Hora de Conclusão",
"Completion Type" : "Tipo de Conclusão",
"Configuration" : "Configuração",
"Confirm" : "Confirmar",
"Confirm Password" : "Confirmar Senha",
"Confirmation message" : "Mensagem de confirmação",
"Confirmed" : "Confirmado",
"Consulting Room" : "Consultório",
"Consulting Room - {0}" : "Consultório - {0}",
"Consumed" : "Consumido",
"Contact" : "Contacto",
"Contact Contains" : "Contacto Contém",
"Conure" : "Periquito Conure",
"Convert this reservation to an adoption" : "Converter esta reserva para uma adoção",
"Coonhound" : "Coonhound",
"Coordinator" : "Coordenador",
"Copy absolute service URL to the clipboard (for external use in web pages and emails)" : "Copiar o URL absoluto do serviço para a área de transferência (para uso externo em páginas Web e emails)",
"Copy description to the notes field of the web preferred media for this animal" : "Copiar a descrição para o campo de anotações de imagens preferenciais da Web deste animal",
"Copy form URL to the clipboard" : "Copiar o URL do formulário para a área de transferência",
"Copy from animal comments" : "Copiar a partir dos comentários do animal",
"Copy of {0}" : "Cópia de {0}",
"Copy recipient list to the clipboard" : "Copiar lista de destinatários para a área de transferência",
"Copy relative URL to the clipboard (for use with documents and reports)" : "Copiar URL relativo para a área de transferência (para utilização em documentos e relatórios)",
"Corded" : "Encordoado",
"Corgi" : "Corgi",
"Cornish Rex" : "Cornish Rex",
"Cost" : "Custo",
"Cost For" : "Custo De",
"Cost Type" : "Tipo de Custo",
"Cost Types" : "Tipos de Custo",
"Cost date must be a valid date" : "A data do custo tem de ser uma data válida",
"Cost record" : "Registo do custo",
"Costs" : "Custos",
"Costs need a date and amount." : "Os custos necessitam de uma data e montante.",
"Coton de Tulear" : "Coton de Tulear",
"Could not find animal with name '{0}'" : "Não foi possível encontrar o animal com o nome '{0}'",
"Country" : "País",
"Courtesy Listing" : "Listagem de Cortesia",
"Cow" : "Vaca",
"Cream" : "Creme",
"Create" : "Criar",
"Create Animal" : "Criar Animal",
"Create Log" : "Criar Registo",
"Create Payment" : "Criar Pagamento",
"Create Waiting List" : "Criar Lista de Espera",
"Create a cost record" : "Criar um registo de custo",
"Create a due or received payment record from this appointment" : "Criar um registo de pagamento em falta ou recebido a partir deste agendamento",
"Create a new animal by copying this one" : "Criar um novo animal a partir de uma cópia deste animal",
"Create a new animal from this found animal record" : "Criar um novo animal a partir deste registo de animal encontrado",
"Create a new animal from this incident" : "Criar um novo animal a partir deste incidente",
"Create a new animal from this waiting list entry" : "Criar um novo animal a partir desta entrada de lista de espera",
"Create a new document" : "Criar um novo documento",
"Create a new template" : "Criar um novo modelo",
"Create a new template by copying the selected template" : "Criar um novo modelo a partir da cópia do modelo selecionado",
"Create a new waiting list entry from this found animal record" : "Criar uma nova entrada de lista de espera a partir deste registo de animal encontrado",
"Create and edit" : "Criar e editar",
"Create boarding cost record when animal is adopted" : "Criar um registo de custos de alojamento quando o animal for adotado",
"Create diary notes from a task" : "Criar anotações de diário a partir de uma tarefa",
"Create missing lookup values" : "Criar valores de consulta em falta",
"Create note this many days from today, or 9999 to ask" : "Criar uma anotação este número de dias após hoje, ou 9999 para perguntar",
"Create this message" : "Criar esta mensagem",
"Create this person" : "Criar esta pessoa",
"Created By" : "Criado Por",
"Created Date" : "Data de Criação",
"Created Since" : "Criado Desde",
"Created since" : "Criado desde",
"Creating cost and cost types creates matching accounts and transactions" : "A criação de custos e tipos de custo cria as contas e transações correspondentes",
"Creating payments and payments types creates matching accounts and transactions" : "A criação de pagamentos e tipos de pagamentos cria as contas e transações correspondentes",
"Creating..." : "A criar…",
"Credit" : "Crédito",
"Credit Card" : "Cartão de Crédito",
"Credit card" : "Cartão de crédito",
"Creme D'Argent" : "Creme D'Argent",
"Criteria" : "Critérios",
"Criteria:" : "Critérios:",
"Crossbreed" : "Arraçado",
"Cruelty Case" : "Caso de Crueldade",
"Culling" : "Abate",
"Curly" : "Encaracolado",
"Current" : "Atual",
"Current Owner" : "Dono Atual",
"Current Vet" : "Veterinário atual",
"Cymric" : "Cymric",
"D (Dog)" : "C (Cão)",
"DD = current day" : "DD = dia atual",
"DDL dump (DB2)" : "Dump DDL (DB2)",
"DDL dump (MySQL)" : "Dump DDL (MySQL)",
"DDL dump (PostgreSQL)" : "Dump DDL (PostgreSQL)",
"DHLPP" : "DHLPP",
"DO NOT use this field to store notes about what the person is looking for." : "NÃO UTILIZE este campo para guardar anotações sobre o que a pessoa pretende.",
"DOA {0}" : "Morto à Chegada {0}",
"DOB" : "Data de Nascimento",
"Dachshund" : "Dachshund",
"Daily Boarding Cost" : "Custo Diário de Alojamento",
"Dalmatian" : "Dálmata",
"Dandi Dinmont Terrier" : "Dandi Dinmont Terrier",
"Data" : "Dados",
"Data Protection" : "Proteção de Dados",
"Database" : "Base de Dados",
"Date" : "Data",
"Date '{0}' is not valid." : "A data '{0}' não é válida.",
"Date Brought In" : "Data de Admissão no Abrigo",
"Date Found" : "Data em que foi encontrado",
"Date Lost" : "Data em que foi perdido",
"Date Of Birth" : "Data de Nascimento",
"Date Put On" : "Data de Colocação",
"Date Removed" : "Data de Remoção",
"Date Reported" : "Data de Comunicação",
"Date and notes are mandatory." : "A data e as anotações são obrigatórias.",
"Date brought in between two dates" : "Data de admissão no abrigo entre duas datas",
"Date brought in cannot be blank" : "A data de admissão não pode estar vazia",
"Date brought in cannot be in the future." : "A data de admissão não pode ser futura.",
"Date brought in is not valid" : "A data de admissão não é válida",
"Date found cannot be blank" : "A data em que foi encontrado não pode estar vazia",
"Date found cannot be blank." : "A data em que foi encontrado não pode estar vazia.",
"Date lost cannot be blank" : "A data em que se perdeu não pode estar vazia",
"Date lost cannot be blank." : "A data em que se perdeu não pode estar vazia.",
"Date of Birth" : "Data de Nascimento",
"Date of birth cannot be blank" : "A data de nascimento não pode estar vazia",
"Date of birth cannot be in the future." : "A data de nascimento não pode ser futura.",
"Date of birth is not valid" : "A data de nascimento não é válida",
"Date of last owner contact" : "Data do último contacto com o dono",
"Date put on" : "Data de colocação",
"Date put on cannot be blank" : "A data de colocação não pode estar vazia",
"Date put on list" : "Data de colocação na lista",
"Date removed" : "Data da remoção",
"Date reported cannot be blank" : "A data de comunicação não pode estar vazia",
"Date reported cannot be blank." : "A data de comunicação não pode estar vazia.",
"Date/Time" : "Data/Hora",
"Day" : "Dia",
"Day Pivot" : "Dia Pivô",
"Days On Shelter" : "Dias no Abrigo",
"Dead On Arrival" : "Morto à Chegada",
"Dead animal" : "Animal Morto",
"Dead on arrival" : "Morto à chegada",
"Death" : "Morte",
"Death Comments" : "Comentários sobre a Morte",
"Death Reason" : "Motivo da Morte",
"Death Reasons" : "Motivos da Morte",
"Debit" : "Débito",
"Debit Card" : "Cartão de Débito",
"Dec" : "Dez",
"Deceased" : "Falecido",
"Deceased Date" : "Data de Falecimento",
"December" : "Dezembro",
"Declawed" : "Sem Unhas",
"Declined" : "Recusado",
"Default Breed" : "Raça Padrão",
"Default Brought In By" : "Trazido Por Padrão",
"Default Coat Type" : "Tipo de Pelagem Padrão",
"Default Color" : "Cor Padrão",
"Default Cost" : "Custo Padrão",
"Default Death Reason" : "Motivo da Morte Padrão",
"Default Diary Person" : "Pessoa de Diário Padrão",
"Default Entry Reason" : "Motivo de Admissão Padrão",
"Default Incident Type" : "Tipo de Incidente Padrão",
"Default Jurisdiction" : "Jurisdição Padrão",
"Default Location" : "Localização Padrão",
"Default Log Filter" : "Filtro de Registo Padrão",
"Default Log Type" : "Tipo de Registo Padrão",
"Default Payment Method" : "Método de Pagamento Padrão",
"Default Payment Type" : "Tipo de Pagamento Padrão",
"Default Reservation Status" : "Estado de Reserva Padrão",
"Default Return Reason" : "Motivo de Devolução Padrão",
"Default Rota Shift" : "Turno Padrão",
"Default Size" : "Tamanho Padrão",
"Default Species" : "Espécie Padrão",
"Default Test Type" : "Tipo de Teste Padrão",
"Default Type" : "Tipo Padrão",
"Default Vaccination Type" : "Tipo de Vacinação Padrão",
"Default Value" : "Valor Padrão",
"Default daily boarding cost" : "Custo diário de alojamento padrão",
"Default destination account for payments" : "Conta de destino padrão para pagamentos",
"Default image for documents" : "Imagem padrão para documentos",
"Default image for this record and the web" : "Imagem padrão para este registo e a Web",
"Default source account for costs" : "Conta de origem padrão para despesas",
"Default to advanced find animal screen" : "Padrão para o ecrã de pesquisa avançada de animais",
"Default to advanced find person screen" : "Padrão para o ecrã de pesquisa avançada de pessoas",
"Default transaction view" : "Vista de transação padrão",
"Default urgency" : "Urgência padrão",
"Default video for publishing" : "Vídeo padrão para publicação",
"Default view" : "Vista padrão",
"Default zoom level when converting documents to PDF" : "Nível de ampliação padrão ao converter documentos para PDF",
"Defaults" : "Predefinições",
"Defaults formats for code and shortcode are TYYYYNNN and NNT" : "Os formatos padrão de código e código abreviado são TYYYYNNN e NNT",
"Delete" : "Eliminar",
"Delete Accounts" : "Eliminar Contas",
"Delete Animals" : "Eliminar Animais",
"Delete Citations" : "Eliminar Advertências",
"Delete Clinic Appointment" : "Eliminar Agendamento Clínico",
"Delete Cost" : "Eliminar Custo",
"Delete Diary" : "Eliminar Diário",
"Delete Diets" : "Eliminar Dietas",
"Delete Document from Repository" : "Eliminar Documento do Repositório",
"Delete Found Animal" : "Eliminar Animal Encontrado",
"Delete Incidents" : "Eliminar Incidentes",
"Delete Incoming Forms" : "Eliminar Formulários Recebidos",
"Delete Investigation" : "Eliminar Investigação",
"Delete Licenses" : "Eliminar Licenças",
"Delete Litter" : "Eliminar Ninhada",
"Delete Log" : "Eliminar Registo",
"Delete Lost Animal" : "Eliminar Animal Perdido",
"Delete Media" : "Eliminar Imagens",
"Delete Medical Records" : "Eliminar Registos Clínicos",
"Delete Movement" : "Eliminar Movimento",
"Delete Online Forms" : "Eliminar Formulários Online",
"Delete Payments" : "Eliminar Pagamentos",
"Delete Person" : "Eliminar Pessoa",
"Delete Regimen" : "Eliminar Regime",
"Delete Report" : "Eliminar Relatório",
"Delete Rota" : "Eliminar Turno",
"Delete Stock" : "Eliminar Stock",
"Delete Tests" : "Eliminar Testes",
"Delete Transport" : "Eliminar Transporte",
"Delete Trap Loans" : "Eliminar Empréstimos de Armadilhas",
"Delete Treatments" : "Eliminar Tratamentos",
"Delete Vaccinations" : "Eliminar Vacinações",
"Delete Vouchers" : "Eliminar Vouchers",
"Delete Waiting List" : "Eliminar Lista de Espera",
"Delete all rota entries for this week" : "Eliminar todas as entradas de turno para esta semana",
"Delete database before importing" : "Eliminar base de dados antes da importação",
"Delete this animal" : "Eliminar este animal",
"Delete this incident" : "Eliminar este incidente",
"Delete this person" : "Eliminar esta pessoa",
"Delete this record" : "Eliminar este registo",
"Delete this waiting list entry" : "Eliminar esta entrada de lista de espera",
"Deleting..." : "A eliminar...",
"Denied" : "Recusado",
"Deposit" : "Depósito",
"Deposit Account" : "Conta à Ordem",
"Deposit Returned" : "Depósito Devolvido",
"Description" : "Descrição",
"Description Contains" : "Descrição Contém",
"Description cannot be blank" : "A descrição não pode estar vazia",
"Deselect" : "Desmarcar",
"Desktop/Tablet UI" : "Interface de PC/Tablet",
"Details" : "Detalhes",
"Devon Rex" : "Devon Rex",
"Dialog title" : "Título da caixa de diálogo",
"Diary" : "Diário",
"Diary Task" : "Tarefa do Diário",
"Diary Task: {0}" : "Tarefa do Diário: {0}",
"Diary Tasks" : "Tarefas do Diário",
"Diary and Messages" : "Diário e Mensagens",
"Diary calendar" : "Agenda do diário",
"Diary change triggered by {0} on {1}" : "Alteração ao diário desencadeada por {0} em {1}",
"Diary complete: {0}" : "Diário concluído: {0}",
"Diary completion triggered by {0} on {1}" : "Conclusão do diário desencadeada por {0} em {1}",
"Diary date cannot be blank" : "A data do diário não pode estar vazia",
"Diary date is not valid" : "A data do diário não é válida",
"Diary for {0}" : "Diário para {0}",
"Diary note cannot be blank" : "A nota de diário não pode estar vazia",
"Diary note {0} marked completed" : "Anotação de diário {0} assinalada como concluída",
"Diary note {0} rediarised for {1}" : "Anotação de diário {0} transferida para {1}",
"Diary notes for: {0}" : "Anotações de diário para: {0}",
"Diary notes need a date and subject." : "As anotações de diário necessitam de uma data e assunto.",
"Diary subject cannot be blank" : "O assunto do diário não pode estar vazio",
"Diary task items need a pivot, subject and note." : "Os itens de tarefas do diário necessitam de um pivô, de um assunto e de uma anotação.",
"Diary tasks need a name." : "As tarefas do diário necessitam de um nome.",
"Diary update: {0}" : "Atualização de diário: {0}",
"Did not ask" : "Não perguntou",
"Did you know?" : "Sabia que...",
"Died" : "Morreu",
"Died between" : "Morreu entre",
"Died between two dates" : "Morreu entre duas datas",
"Died in care" : "Morreu ao cuidado de FAT",
"Died off shelter" : "Morreu fora do abrigo",
"Died today" : "Morreu hoje",
"Died {0}" : "Morreu em {0}",
"Diet" : "Dieta",
"Diets" : "Dietas",
"Diets need a start date." : "As dietas necessitam de uma data de início.",
"Dispatch" : "Expedição",
"Dispatch Address" : "Morada de Expedição",
"Dispatch Between" : "Enviar Entre",
"Dispatch Date/Time" : "Data/Hora de Expedição",
"Dispatch {0}: {1}" : "Expedição {0}: {1}",
"Dispatched ACO" : "ACO Enviado",
"Display" : "Mostrar",
"Display Index" : "Mostrar Índice",
"Display a search button at the right side of the search box" : "Mostrar um botão de pesquisa à direita da caixa de pesquisa",
"Distemper" : "Cinomose",
"Do Not Publish" : "Não Publicar",
"Do Not Register Microchip" : "Não Registar Microchip",
"Do not send an email if there are no medical items due for animals in the care of this fosterer" : "Não enviar um e-mail se não houver itens médicos agendados para animais ao cuidado desta FAT",
"Do not send email" : "Não enviar email",
"Do not show" : "Não mostrar",
"Doberman Pinscher" : "Doberman Pinscher",
"Document" : "Documento",
"Document Link" : "Link do Documento",
"Document Repository" : "Repositório de Documentos",
"Document Templates" : "Modelos de Documentos",
"Document file" : "Ficheiro de documento",
"Document signed" : "Documento assinado",
"Document signing request" : "Pedido de assinatura de documento",
"Document signing requests issued in the last month that are unsigned" : "Pedidos de assinatura de documentos emitidos no mês passado que ainda não estão cumpridos",
"Document signing requests received in the last week" : "Pedidos de assinatura de documentos recebidos na semana passada",
"Document templates" : "Modelos de documento",
"Documents" : "Documentos",
"Dog" : "Cão",
"Dogo Argentino" : "Dogo Argentino",
"Dogs" : "Cães",
"Dogue de Bordeaux" : "Dogue de Bordéus",
"Domestic Long Hair" : "Pelo Comprido Doméstico",
"Domestic Medium Hair" : "Pelo Médio Doméstico",
"Domestic Short Hair" : "Pelo Curto Doméstico",
"Don't create a cost record" : "Não criar um registo de custo",
"Don't scale" : "Não redimensionar",
"Donated" : "Doado",
"Donation" : "Donativo",
"Donation?" : "Donativo?",
"Donations for animals entering the shelter" : "Donativos para animais que são admitidos no abrigo",
"Done" : "Concluído",
"Donkey" : "Burro",
"Donkey/Mule" : "Burro/Mula",
"Donor" : "Doador",
"Dosage" : "Dosagem",
"Dove" : "Pombo",
"Download" : "Download",
"Download File" : "Transferir Ficheiro",
"Draft" : "Rascunho",
"Driver" : "Condutor",
"Drop files here..." : "Solte os ficheiro aqui...",
"Dropoff" : "Entrega",
"Dropoff Address" : "Morada de Entrega",
"Duck" : "Pato",
"Due" : "Prazo",
"Due in next month" : "Prazo de entrada expira no próximo mês",
"Due in next week" : "Prazo de entrada expira na próxima semana",
"Due in next year" : "Prazo de entrada expira no próximo ano",
"Due today" : "Agendado para hoje",
"Duration" : "Duração",
"Dutch" : "Holandês",
"Dutch Shepherd" : "Pastor Holandês",
"Dwarf" : "Anão",
"Dwarf Eared" : "Coelho Anão de Orelhas Eretas",
"E = first letter of animal entry category" : "E = primeira letra da categoria de entrada de animais",
"EE = first and second letter of animal entry category" : "EE = primeira e segunda letra da categoria de entrada de animais",
"Eclectus" : "Eclectus",
"Edit" : "Editar",
"Edit All Diary Notes" : "Editar Todas as Anotações do Diário",
"Edit Appointment" : "Editar Agendamento",
"Edit Diary Tasks" : "Editar Tarefas do Diário",
"Edit HTML publishing templates" : "Editar modelos de publicação em HTML",
"Edit Header/Footer" : "Editar Cabeçalho/Rodapé",
"Edit Invoice Item" : "Editar Item de Fatura",
"Edit Lookups" : "Editar Consultas",
"Edit My Diary Notes" : "Editar as Minhas Anotações de Diário",
"Edit Online Forms" : "Editar Formulários Online",
"Edit Reports" : "Editar Relatórios",
"Edit Roles" : "Editar Funções",
"Edit Users" : "Editar Utilizadores",
"Edit account" : "Editar conta",
"Edit additional field" : "Editar campo extra",
"Edit citation" : "Editar advertência",
"Edit cost" : "Editar custo",
"Edit diary" : "Editar diário",
"Edit diary notes" : "Editar anotações de diário",
"Edit diary task" : "Editar tarefa do diário",
"Edit diary tasks" : "Editar tarefas do diário",
"Edit diet" : "Editar dieta",
"Edit form field" : "Editar campo de formulário",
"Edit investigation" : "Editar investigação",
"Edit invoice" : "Editar fatura",
"Edit license" : "Editar licença",
"Edit litter" : "Editar ninhada",
"Edit litters" : "Editar ninhadas",
"Edit log" : "Editar registo",
"Edit media" : "Editar imagens",
"Edit medical profile" : "Editar perfil clínico",
"Edit medical regimen" : "Editar regime clínico",
"Edit movement" : "Editar movimento",
"Edit my diary notes" : "Editar as minhas anotações do diário",
"Edit my diary notes" : "Editar as minhas anotações de diário",
"Edit online form" : "Editar formulário online",
"Edit online form HTML header/footer" : "Editar cabeçalho/rodapé HTML do formulário online",
"Edit payment" : "Editar pagamento",
"Edit report" : "Editar relatório",
"Edit report template HTML header/footer" : "Editar cabeçalho/rodapé HTML do modelo de relatório",
"Edit role" : "Editar função",
"Edit roles" : "Editar funções",
"Edit rota item" : "Editar item de turno",
"Edit stock" : "Editar stock",
"Edit system users" : "Editar utilizadores de sistema",
"Edit template" : "Editar modelo",
"Edit test" : "Editar teste",
"Edit the current waiting list" : "Editar a lista de espera atual",
"Edit transaction" : "Editar transação",
"Edit transport" : "Editar transporte",
"Edit trap loan" : "Editar empréstimo de armadilha",
"Edit user" : "Editar utilizador",
"Edit vaccination" : "Editar vacinação",
"Edit voucher" : "Editar voucher",
"Edit {0}" : "Editar {0}",
"Egyptian Mau" : "Mau Egípcio",
"Electricity Bills" : "Contas de Eletricidade",
"Email" : "Email",
"Email Address" : "Endereço de Email",
"Email PDF" : "Enviar PDF por Email",
"Email Person" : "Enviar Email a Pessoa",
"Email To" : "Enviar por Email a",
"Email a copy of the selected HTML documents as PDFs" : "Enviar por email uma cópia dos documentos HTML selecionados em formato PDF",
"Email a copy of the selected media files" : "Enviar por email uma cópia dos ficheiros de imagem selecionados",
"Email address" : "Endereço de email",
"Email diary note creators when a diary note is marked complete" : "Enviar aos criadores das anotações de diário um email sempre que uma anotação de diário for assinalada como concluída",
"Email document for electronic signature" : "Enviar por email o documento para assinatura eletrónica",
"Email incident notes to ACO" : "Enviar à OCA por email as anotações relativas a incidentes",
"Email incoming form submissions to this comma separated list of email addresses" : "Enviar por email os formulários recebidos para esta lista de endereços de e-mail separados por vírgulas",
"Email me a signed copy of the document at {0}" : "Enviar-me por email uma cópia assinada do documento para {0}",
"Email media" : "Imagens de emails",
"Email person" : "Enviar email a pessoa",
"Email request for payment" : "Enviar o pedido de pagamento por email",
"Email scheduled reports with no data" : "Enviar os relatórios agendados sem dados por email",
"Email signature" : "Assinatura de email",
"Email submissions to" : "Enviar requerimentos por email para",
"Email this message to all matching users" : "Enviar esta mensagem por email para todos os utilizadores que corresponderem",
"Email this person" : "Enviar por email a esta pessoa",
"Email users immediately when a diary note assigned to them is created or updated" : "Enviar de imediato um email aos utilizadores quando uma anotação de diário que lhes esteja atribuída for criada ou atualizada",
"Email users their outstanding diary notes once per day" : "Enviar aos utilizadores por email as respetivas anotações de diário em aberto uma vez por dia",
"Emu" : "Ema",
"Enable FTP uploading" : "Ativar o carregamento por FTP",
"Enable accounts functionality" : "Ativar a funcionalidade de contas",
"Enable location filters" : "Ativar os filtros de localização",
"Enable lost and found functionality" : "Ativar a funcionalidade de perdidos e encontrados",
"Enable multiple sites" : "Ativar vários abrigos",
"Enable the waiting list functionality" : "Ativar a funcionalidade de lista de espera",
"Enable visual effects" : "Ativar efeitos visuais",
"Enabled" : "Ativado",
"End Of Day" : "Fim do Dia",
"End Time" : "Hora de Fim",
"End at" : "Terminar em",
"End of month" : "Fim do mês",
"End of year" : "Fim do ano",
"Ends" : "Termina",
"Ends after" : "Termina após",
"English Bulldog" : "Buldogue Inglês",
"English Cocker Spaniel" : "Cocker Spaniel Inglês",
"English Coonhound" : "Coonhound Inglês",
"English Lop" : "Lop Inglês",
"English Pointer" : "Pointer Inglês",
"English Setter" : "Setter Inglês",
"English Shepherd" : "Cão Pastor Inglês",
"English Spot" : "Spot Inglês",
"English Springer Spaniel" : "Springer Spaniel Inglês",
"English Toy Spaniel" : "Toy Spaniel Inglês",
"Enter a city" : "Especifique uma cidade",
"Entered (newest first)" : "Admitido (mais recente primeiro)",
"Entered (oldest first)" : "Admitido (mais antigo primeiro)",
"Entered Between" : "Admitido Entre",
"Entered shelter" : "Admitido no abrigo",
"Entered the shelter between" : "Admitido no abrigo entre",
"Entered the shelter between two dates" : "Admitido no abrigo entre duas datas",
"Entered the shelter today" : "Admitido no abrigo hoje",
"Entering 'activelost' or 'activefound' in the search box will show you lost and found animals reported in the last 30 days." : "Introduza \"activelost\" ou \"activefound\" na caixa de pesquisa para ver os animais perdidos e encontrados comunicados nos últimos 30 dias.",
"Entering 'deceased' in the search box will show you recently deceased animals." : "Pode escrever \"deceased\" na caixa de pesquisa para ver os animais recentemente falecidos.",
"Entering 'fosterers', 'homecheckers', 'staff', 'volunteers', 'aco' or 'members' in the search box will show you those groups of people." : "Pode escrever \"FAT\", \"inspetores de domicílio\", \"pessoal\", \"voluntários\", \"aco\" ou \"membros\" na caixa de pesquisa para ver as pessoas pertencentes a cada um desses grupos.",
"Entering 'notforadoption' in the search box will show you all shelter animals with the not for adoption flag set." : "Pode escrever \"notforadoption\" na caixa de pesquisa para ver todos os animais do abrigo com a sinalização não para adoção.",
"Entering 'os' in the search box will show you all shelter animals." : "Pode escrever 'os' na caixa de pesquisa para ver todos os animais do abrigo.",
"Entlebucher" : "Entlebucher",
"Entry" : "Admissão",
"Entry Category" : "Categoria de Admissão",
"Entry Donation" : "Donativo de Admissão",
"Entry Reason" : "Motivo da Admissão",
"Entry Reason Category" : "Categoria de Motivo de Admissão",
"Entry Reasons" : "Motivos de Admissão",
"Entry category is {0}" : "A categoria de admissão é {0}",
"Entry reason" : "Motivo da entrada",
"Error contacting server." : "Erro ao contactar o servidor.",
"Escaped" : "Fugiu",
"Escaped {0}" : "Fugiu {0}",
"Eskimo Dog" : "Cão Esquimó",
"Estimate" : "Estimativa",
"Estimated age '{0}' is not valid." : "A idade estimada '{0}' não é válida.",
"Euthanized" : "Eutanasiado",
"Euthanized {0}" : "Eutanasiado {0}",
"Every day" : "Todos os dias",
"Exclude animals who are aged under" : "Excluir os animais de idade inferior a",
"Exclude from bulk email" : "Excluir do mailing",
"Exclude new animal photos from publishing" : "Excluir novas fotos de animais da publicação",
"Exclude this image when publishing" : "Excluir esta imagem ao publicar",
"Execute" : "Executar",
"Execute Script" : "Executar Script",
"Execute the SQL in the box below" : "Executar o SQL da caixa abaixo",
"Executing Task" : "A Executar Tarefa",
"Executing..." : "A executar...",
"Exotic Shorthair" : "Pelo Curto Exótico",
"Expense" : "Despesa",
"Expense account for transaction fees" : "Conta de despesas para taxas de transação",
"Expense::" : "Despesa::",
"Expenses::Board" : "Despesas::Alojamento",
"Expenses::Electricity" : "Despesas::Eletricidade",
"Expenses::Food" : "Despesas::Alimentação",
"Expenses::Gas" : "Despesas::Gás",
"Expenses::Phone" : "Despesas::Telefone",
"Expenses::Postage" : "Despesas::Correio",
"Expenses::Stationary" : "Despesas::Estacionário",
"Expenses::TransactionFee" : "Despesas::TaxaTransação",
"Expenses::Water" : "Despesas::Água",
"Expiration" : "Expiração",
"Expire in next month" : "Expira no próximo mês",
"Expired" : "Expirado",
"Expired in the last month" : "Expirou no mês passado",
"Expired in the last week" : "Expirou na semana passada",
"Expires" : "Expira em",
"Expiring in next month" : "Expiram no próximo mês",
"Expiry" : "Validade",
"Expiry date" : "Data de validade",
"Export" : "Exportar",
"Export Animals as CSV" : "Exportar Animais como CSV",
"Export Report" : "Exportar Relatório",
"Export Reports as CSV" : "Exportar Relatórios como CSV",
"Export a CSV file of animal records that ASM can import into another database." : "Exportar um ficheiro CSV com registos de animais que o ASM poderá importar para outra base de dados.",
"Export complete ({0} entries)." : "Exportação completa ({0} entradas).",
"Export this database in various formats" : "Exportar esta base de dados em vários formatos",
"Exporting the complete database can take some time and generate a very large file, are you sure?" : "A exportação da base de dados completa pode demorar algum tempo e gerar um ficheiro muito grande. Tem a certeza?",
"Extra Images" : "Imagens Extra",
"Extra images" : "Imagens extra",
"Extra-Toes Cat (Hemingway Polydactyl)" : "Gato Polidáctilo (Polidáctilo de Hemingway)",
"F (Feral Cat)" : "F (Gato Selvagem)",
"FECV/FeCoV" : "FECV/FeCoV",
"FIPV" : "FIPV",
"FIV" : "FIV",
"FIV Result" : "Resultado FIV",
"FIV Tested" : "Testado para FIV",
"FIV+" : "FIV+",
"FIV-" : "FIV-",
"FIV/L Test Date" : "Data do Teste FIV/L",
"FIV/L Tested" : "Testado para FIV/L",
"FLV" : "FLV",
"FLV Result" : "Resultado FLV",
"FLV+" : "FLV+",
"FLV-" : "FLV-",
"FTP hostname" : "Nome de anfitrião FTP",
"FTP password" : "Senha de FTP",
"FTP username" : "Nome de utilizador de FTP",
"FVRCP" : "FVRCP",
"Facebook" : "Facebook",
"Failed to create payment." : "Não foi possível criar o pagamento.",
"Failed to renew license." : "Não foi possível renovar a licença.",
"Fawn" : "Amarelo Torrado",
"Fawn Tortoiseshell" : "Atartarugado Amarelo Torrado",
"FeLV" : "FeLV",
"Features" : "Características",
"Feb" : "Fev",
"February" : "Fevereiro",
"Fee" : "Taxa",
"Fees" : "Taxas",
"Female" : "Fêmea",
"Feral" : "Selvagem",
"Ferret" : "Furão",
"Field Spaniel" : "Field Spaniel",
"Field names should not contain spaces." : "Os nomes dos campos não devem incluir espaços.",
"Fields" : "Campos",
"Fila Brasileiro" : "Fila Brasileiro",
"File" : "Ficheiro",
"Filter" : "Filtrar",
"Financial" : "Finanças",
"Finch" : "Tentilhão",
"Find Animal" : "Pesquisar Animal",
"Find Animal/Person" : "Pesquisar Animal/Pessoa",
"Find Found Animal" : "Pesquisar Animal Encontrado",
"Find Incident" : "Pesquisar Incidente",
"Find Lost Animal" : "Pesquisar Animal Perdido",
"Find Person" : "Encontrar Pessoa",
"Find a found animal" : "Pesquisar um animal encontrado",
"Find a lost animal" : "Pesquisar um animal perdido",
"Find aco" : "Pesquisar aco",
"Find an incident" : "Pesquisar um incidente",
"Find animal" : "Pesquisar animal",
"Find animal columns" : "Pesquisar colunas de animais",
"Find animal control incidents returned {0} results." : "A pesquisa de incidentes de controlo animal devolveu {0} resultados.",
"Find animals matching the looking for criteria of this person" : "Pesquisar animais que correspondam aos critérios de procura desta pessoa",
"Find donor" : "Pesquisar doador",
"Find driver" : "Pesquisar condutor",
"Find fosterer" : "Pesquisar FAT",
"Find found animal returned {0} results." : "A pesquisa de animais encontrados devolveu {0} resultados.",
"Find homechecked" : "Pesquisar inspecionados no domicílio",
"Find homechecker" : "Pesquisar inspetor de domicílio",
"Find incident" : "Pesquisar incidente",
"Find lost animal returned {0} results." : "A pesquisa de animais perdidos devolveu {0} resultados.",
"Find member" : "Pesquisar associado",
"Find person" : "Pesquisar pessoa",
"Find person columns" : "Pesquisar colunas de pessoa",
"Find retailer" : "Pesquisar lojista",
"Find shelter" : "Pesquisar abrigo",
"Find staff" : "Pesquisar pessoal",
"Find staff/volunteer" : "Pesquisar pessoal/voluntário",
"Find this address on a map" : "Pesquisar esta morada num mapa",
"Find vet" : "Pesquisar veterinário",
"Find volunteer" : "Pesquisar voluntário",
"Fine Amount" : "Valor da Multa",
"Finnish Lapphund" : "Lapphund Finlandês",
"Finnish Spitz" : "Spitz Finlandês",
"First Last" : "Primeiro Último",
"First Names" : "Primeiros Nomes",
"First name(s)" : "Primeiro(s) nome(s)",
"First offence" : "Primeira infração",
"Fish" : "Peixe",
"Flag" : "Sinalização",
"Flag missing {0}" : "Sinalização em falta {0}",
"Flag {0}" : "Sinalização {0}",
"Flags" : "Sinalizações",
"Flat-coated Retriever" : "Retriever de Pelagem Curta",
"Flemish Giant" : "Gigante Flamengo",
"Florida White" : "Branco Flórida",
"Followup" : "Seguimento",
"Followup Between" : "Seguimento Entre",
"Followup Date/Time" : "Data/Hora de Seguimento",
"Followup between" : "Seguimento entre",
"Followup between two dates" : "Seguimento entre duas datas",
"Footer" : "Rodapé",
"For" : "Para",
"Forbidden" : "Proibido",
"Forenames" : "Nomes próprios",
"Forget" : "Esquecer",
"Form URL" : "URL do formulário",
"Format telephone numbers according to my locale" : "Formatar números de telefone de acordo com a minha localização",
"Forms need a name." : "Os formulários necessitam de um nome.",
"Fortnightly" : "Quinzenalmente",
"Foster" : "FAT",
"Foster Book" : "Livro de FATs",
"Foster Capacity" : "Capacidade para FAT",
"Foster Transfer" : "Transferência para FAT",
"Foster an animal" : "Colocar animal em FAT",
"Foster book" : "Livro de FATs",
"Foster movements must have a valid foster date." : "Os movimentos de FAT necessitam de uma data de adoção válida.",
"Foster successfully created." : "FAT criada com êxito.",
"Fostered" : "Entregue a FAT",
"Fostered Animals" : "Animais em FAT",
"Fostered between" : "Colocado em FAT entre",
"Fostered between two dates" : "Entregues a FAT entre duas datas",
"Fostered to {0} since {1}" : "Colocado na FAT {0} desde {1}",
"Fosterer" : "FAT",
"Fosterer (Active Only)" : "FAT (Apenas Ativas)",
"Fosterer (Space Available)" : "FAT (Espaço Disponível)",
"Fosterer Medical Report" : "Registo Clínico da FAT",
"Found" : "Encontrado",
"Found Animal" : "Animal Encontrado",
"Found Animal - Additional" : "Animal Encontrado - Adicional",
"Found Animal - Details" : "Animal Encontrado - Detalhes",
"Found Animal Contact" : "Contacto do Animal Encontrado",
"Found Animal {0}" : "Animal Encontrado {0}",
"Found Animal: {0}" : "Animal Encontrado: {0}",
"Found animal - {0} {1} [{2}]" : "Animal encontrado - {0} {1} [{2}]",
"Found animal entries matching '{0}'." : "Entradas de animais encontrados correspondentes a '{0}'.",
"Found animals must have a contact" : "Os animais encontrados necessitam de ter um contacto",
"Found animals reported in the last 30 days." : "Animais encontrados comunicados nos últimos 30 dias.",
"Found from" : "Encontrados desde",
"Found to" : "Encontrados até",
"FoundLost animal entry {0} successfully created." : "Entrada de animal encontrado/perdido {0} criada com êxito.",
"Fox Terrier" : "Fox Terrier",
"Foxhound" : "Foxhound",
"Fr" : "Fr",
"French Bulldog" : "Bulldog Francês",
"French-Lop" : "Lop Francês",
"Frequency" : "Frequência",
"Frequently Asked Questions" : "Perguntas Mais Frequentes",
"Fri" : "Sex",
"Friday" : "Sexta-feira",
"From" : "De",
"From Fostering" : "De FAT",
"From Other" : "De Outros",
"From address book" : "Do livro de endereços",
"From retailer is only valid on adoption movements." : "De lojista apenas é válido para movimentos de adoção.",
"Future notes" : "Anotações futuras",
"GDPR Contact Opt-In" : "Consentimento de Contacto do RGPD",
"Gaited" : "Treinado para a Marcha",
"Gas Bills" : "Contas do Gás",
"Gecko" : "Osga",
"General" : "Geral",
"Generate" : "Gerar",
"Generate Documents" : "Gerar Documentos",
"Generate HTML from this SQL" : "Gerar HTML a partir deste SQL",
"Generate Report" : "Gerar Relatório",
"Generate a document from this animal" : "Gerar um documento a partir deste animal",
"Generate a document from this incident" : "Gerar um documento a partir deste incidente",
"Generate a document from this movement" : "Gerar um documento a partir deste movimento",
"Generate a document from this person" : "Gerar um documento a partir desta pessoa",
"Generate a document from this record" : "Gerar um documento a partir deste registo",
"Generate a javascript database for the search page" : "Gerar uma base de dados javascript para a página de pesquisa",
"Generate a new animal code" : "Gerar um código novo para o animal",
"Generate a random name for this animal" : "Gerar um nome aleatório para este animal",
"Generate a unique voucher code" : "Gerar um código de voucher exclusivo",
"Generate document from this appointment" : "Gerar um documento a partir deste agendamento",
"Generate document from this license" : "Gerar um documento a partir desta licença",
"Generate document from this payment" : "Gerar um documento a partir deste pagamento",
"Generate document from this transport" : "Gerar um documento a partir deste transporte",
"Generate document from this voucher" : "Gerar um documento a partir deste voucher",
"Generate documentation" : "Gerar documentação",
"Generate documents" : "Gerar documentos",
"Generate image thumbnails as tn_$$IMAGE$$" : "Gerar miniaturas de imagens como tn_$$IMAGE$$",
"Generated document '{0}'" : "Documento '{0}' gerado",
"Gerbil" : "Gerbil",
"German Pinscher" : "Pinscher Alemão",
"German Shepherd Dog" : "Cão Pastor Alemão",
"German Shorthaired Pointer" : "Pointer Alemão de Pelo Curto",
"German Wirehaired Pointer" : "Pointer Alemão de Pelo Cerdoso",
"Get more reports from sheltermanager.com" : "Obtenha mais relatórios em sheltermanager.com",
"Gift Aid" : "Donativo",
"GiftAid" : "Donativo",
"Ginger" : "Avermelhado",
"Ginger and White" : "Avermelhado e Branco",
"Give" : "Administrar",
"Give Treatments" : "Administrar Tratamentos",
"Give Vaccination" : "Administrar Vacinação",
"Given" : "Dada em",
"Given in last month" : "Administradas no mês passado",
"Given in last week" : "Administradas na semana passada",
"Given today" : "Administradas hoje",
"Glen of Imaal Terrier" : "Glen of Imaal Terrier",
"Go" : "Iniciar",
"Go to the lookup data screen and add/remove breeds, species and animal types according to the animals your shelter deals with." : "Aceda ao ecrã de dados de pesquisa e adicione/remova raças, espécies e tipos de animais de acordo com os animais que o seu abrigo acolhe.",
"Go to the options screen and set your shelter's contact details and other settings." : "Aceda ao ecrã de opções e defina os detalhes de contacto do seu abrigo, entre outras definições.",
"Go to the system users screen and add user accounts for your staff." : "Abra o ecrã de utilizadores de sistema e adicione contas de utilizador para a sua equipa.",
"Goat" : "Cabra",
"Golden" : "Dourado",
"Golden Retriever" : "Golden Retriever",
"Goldfish" : "Peixe dourado",
"Good With" : "Gentil Com",
"Good With Cats" : "Gentil com Gatos",
"Good With Children" : "Gentil com Crianças",
"Good With Dogs" : "Gentil com Cães",
"Good with" : "Gentil com",
"Good with Cats" : "Gentil com Gatos",
"Good with Children" : "Gentil com Crianças",
"Good with Dogs" : "Gentil com Cães",
"Good with cats" : "Gentil com gatos",
"Good with children" : "Gentil com crianças",
"Good with children over 12" : "Gentil com crianças com mais de 12 anos de idade",
"Good with children over 5" : "Gentil com crianças com mais de 5 anos de idade",
"Good with dogs" : "Gentil com cães",
"Good with kids" : "Gentil com crianças",
"Google+" : "Google+",
"Goose" : "Ganso",
"Gordon Setter" : "Gordon Setter",
"Grade" : "Nível",
"Great Dane" : "Dogue Alemão",
"Great Pyrenees" : "Pastor dos Pirinéus",
"Greater Swiss Mountain Dog" : "Greater Swiss Mountain Dog",
"Green" : "Verde",
"Grey" : "Cinzento",
"Grey and White" : "Cinzento e Branco",
"Greyhound" : "Galgo",
"Gross" : "Bruto",
"Guinea Pig" : "Porquinho da Guiné",
"Guinea fowl" : "Galinha da Guiné",
"HMRC Gift Aid Spreadsheet" : "Folha de Cálculo de Donativos HMRC",
"HTML" : "HTML",
"HTML Publishing Templates" : "Modelos de Publicação em HTML",
"HTML/FTP Publisher" : "Editor HTML/FTP",
"Hairless" : "Sem pelo",
"Half-Yearly" : "Semestralmente",
"Hamster" : "Hamster",
"Harlequin" : "Arlequim",
"Havana" : "Havana",
"Havanese" : "Havaneza",
"Header" : "Cabeçalho",
"Health Problems" : "Problemas de Saúde",
"Health and Identification" : "Saúde e Identificação",
"Healthy" : "Saudável",
"Heartworm" : "Dirofilariose",
"Heartworm Test Date" : "Data do Teste de Dirofilariose",
"Heartworm Test Result" : "Resultado do Teste de Dirofilariose",
"Heartworm Tested" : "Testado para Dirofilariose",
"Heartworm+" : "Dirofilariose+",
"Heartworm-" : "Dirofilariose-",
"Hedgehog" : "Ouriço",
"Held" : "Retido",
"Help" : "Ajuda",
"Hepatitis" : "Hepatite",
"Here are some things you should do before you start adding animals and people to your database." : "Eis alguns procedimentos que deve realizar antes de começar a adicionar animais e pessoas à sua base de dados.",
"Hidden" : "Oculto",
"Hidden Comments" : "Comentários Ocultos",
"Hidden comments about the animal" : "Comentários ocultos sobre o animal",
"Hidden comments are for staff information only and will never be used on any adoption websites" : "Os comentários ocultos destinam-se exclusivamente à comunicação entre pessoal e nunca serão enviadas para quaisquer sites de adoção",
"Hide deceased animals from the home page" : "Ocultar animais falecidos da página inicial",
"Hide financial stats from the home page" : "Ocultar estatísticas financeiras da página inicial",
"High" : "Alto",
"Highlight" : "Destacar",
"Highlight unadopted reservations after" : "Realçar reservas sem adoção após",
"Himalayan" : "Himalaia",
"History" : "Histórico",
"Hold" : "Reter",
"Hold the animal until this date or blank to hold indefinitely" : "Reter o animal até esta data ou deixar em branco para reter indefinidamente",
"Hold until" : "Reter até",
"Hold until {0}" : "Reter até {0}",
"Holland Lop" : "Lop Holandês",
"Home" : "Residência",
"Home Phone" : "Telefone Residencial",
"Home page" : "Página Inicial",
"Homecheck Areas" : "Áreas de Inspeção de Domicílios",
"Homecheck Date" : "Data da Inspeção de Domicílio",
"Homecheck History" : "Histórico da Inspeção de Domicílio",
"Homecheck areas" : "Áreas de inspeção de domicílios",
"Homechecked" : "Domicílio inspecionado",
"Homechecked By" : "Domicílio Inspecionado Por",
"Homechecked between" : "Domicílio inspecionado entre",
"Homechecked between two dates" : "Domicílio inspecionado entre duas datas",
"Homechecked by" : "Domicílio inspecionado por",
"Homechecker" : "Inspetor de domicílios",
"Horizontal Pitch" : "Ângulo Horizontal",
"Horse" : "Cavalo",
"Hotot" : "Hotot",
"Hound" : "Cão de caça",
"Hours" : "Horas",
"Housetrained" : "Treino de casa",
"Hovawart" : "Hovawart",
"How urgent is it that we take this animal?" : "Qual o grau de urgência da nossa retirada deste animal?",
"Husky" : "Husky",
"I've finished, Don't show me this popup again." : "Já terminei, não quero voltar a ver esta janela.",
"ID" : "ID",
"IP Restriction" : "Restrição de IP",
"IP restriction is a space-separated list of IPv4 addresses or IPv6 prefixes that this user is ONLY permitted to login from." : "A restrição de IP consiste numa lista de endereços IPv4 ou de prefixos IPv6 separados por espaços que são os ÚNICOS a partir dos quais o utilizador está autorizado a iniciar uma sessão.",
"Ibizan Hound" : "Galgo de Ibiza",
"If blank, the address from the Email tab will be used" : "Se estiver vazio, será utilizado o endereço do separador Email",
"If left blank, the user can login from any IP address." : "Se for deixada vazia, o utilizador poderá iniciar a sessão a partir de qualquer endereço IP.",
"If the shelter provides initial insurance cover to new adopters, the policy number" : "Caso o abrigo disponibilize uma cobertura inicial de seguro aos novos adotantes, o número da apólice",
"If this form has a populated emailaddress field during submission, send a confirmation email to it" : "Se este formulário tiver um campo emailaddress preenchido durante o envio, enviar um email de confirmação para o mesmo",
"If this person is a fosterer, the maximum number of animals they can care for." : "Se esta pessoa é uma FAT, o número máximo de animais que pode acolher.",
"If this person is a member, the date that membership expires." : "Se esta pessoa for um associado, a data de expiração da quota.",
"If this person is a member, their membership number" : "Se esta pessoa for um associado, o respetivo número de sócio",
"If this person is a member, their membership number." : "Se esta pessoa for um associado, o respetivo número de sócio.",
"If this stock record is for a drug, the batch number from the container" : "Se este registo de stock for relativo a um medicamento, o número de lote do contentor",
"If this stock record is for a perishable good, the expiry date on the container" : "Se este registo de stock for relativo a um bem perecível, a data de validade no contentor",
"If you assign view or edit roles, only users within those roles will be able to view and edit this account." : "Se atribuir funções de visualização ou de edição, apenas os utilizadores com essas funções poderão visualizar e editar esta conta.",
"If you don't select any locations, publishers will include animals in all locations." : "Se não selecionar qualquer local, os editores incluirão os animais de todos os locais.",
"If you were charged a transaction fee for receiving this payment, the amount" : "O montante, caso tenha lhe sido cobrada uma taxa de transação pela receção deste pagamento",
"Iguana" : "Iguana",
"Illyrian Sheepdog" : "Cão Pastor da Ilíria",
"Image" : "Imagem",
"Image file" : "Ficheiro de imagem",
"Import" : "Importar",
"Import a CSV file" : "Importar um ficheiro CSV",
"Import a PayPal CSV file" : "Importar um ficheiro CSV do PayPal",
"Import from file" : "Importar de ficheiro",
"Important" : "Importante",
"In" : "Entradas",
"In SubTotal" : "Subtotal de Entradas",
"In the Stripe dashboard, create a webhook to send 'checkout.session.completed' events to {0}" : "No painel de controlo do Stripe, crie um webhook de destino para o envio de eventos 'checkout.session.completed' a {0}",
"In the last month" : "No último mês",
"In the last quarter" : "No último trimestre",
"In the last week" : "Na última semana",
"In the last year" : "No último ano",
"In your PayPal account, enable Instant Payment Notifications with a URL of {0}" : "Na sua conta PayPal, ative a opção Instant Payment Notifications (Notificações Instantâneas de Pagamentos) com um URL {0}",
"In-Kind Donation" : "Donativo em Espécie",
"Inactive" : "Inativo",
"Inactive - do not include" : "Inativo - não incluir",
"Incident" : "Incidente",
"Incident - Additional" : "Incidente - Adicional",
"Incident - Citation" : "Incidente - Advertência",
"Incident - Details" : "Incidente - Detalhes",
"Incident - Dispatch" : "Incidente - Processar",
"Incident - Owner" : "Incidente - Dono",
"Incident Between" : "Incidente Entre",
"Incident Completed Types" : "Tipos de Incidentes Concluídos",
"Incident Date/Time" : "Data/Hora do Incidente",
"Incident Type" : "Tipo de Incidente",
"Incident Types" : "Tipos de Incidentes",
"Incident between" : "Incidente entre",
"Incident between two dates" : "Incidente entre duas datas",
"Incident date cannot be blank" : "A data do incidente não pode estar vazia",
"Incident followup" : "Seguimento do incidente",
"Incident {0} successfully created." : "O incidente {0} foi criado com êxito.",
"Incident {0}, {1}: {2}" : "Incidente {0}, {1}: {2}",
"Incidents" : "Incidentes",
"Incidents Requiring Followup" : "Incidentes que Necessitam de Seguimento",
"Include CSV header line" : "Incluir linha de cabeçalho CSV",
"Include Removed" : "Incluir Removidos",
"Include animals in the following locations" : "Incluir animais nos seguintes locais",
"Include animals on trial adoption" : "Incluir animais em adoção experimental",
"Include animals who don't have a description" : "Incluir animais que não têm uma descrição",
"Include animals who don't have a picture" : "Incluir animais que não têm uma imagem",
"Include cruelty case animals" : "Incluir animais provenientes de casos de crueldade",
"Include deceased animals" : "Incluir animais falecidos",
"Include fostered animals" : "Incluir animais em FAT",
"Include found" : "Incluir encontrados",
"Include held animals" : "Incluir animais retidos",
"Include incomplete medical records when generating document templates" : "Incluir registos clínicos incompletos ao gerar modelos de documentos",
"Include non-shelter animals" : "Incluir animais que não se encontram no abrigo",
"Include off-shelter animals in medical calendar and books" : "Incluir animais que não se encontram no abrigo na agenda e nos livros clínicos",
"Include preferred photo" : "Incluir foto preferencial",
"Include quarantined animals" : "Incluir animais em quarentena",
"Include reserved animals" : "Incluir animais reservados",
"Include retailer animals" : "Incluir animais de lojistas",
"Include returned" : "Incluir devolvidos",
"Include this image when publishing" : "Incluir esta imagem ao publicar",
"Include unaltered animals" : "Incluir animais não esterilizados/castrados",
"Income" : "Receitas",
"Income account for sales tax" : "Conta de receitas para imposto sobre vendas",
"Income from an on-site shop" : "Receitas de uma loja online",
"Income::" : "Receitas::",
"Income::Adoption" : "Receita::Adoção",
"Income::Donation" : "Receitas::Donativo",
"Income::EntryDonation" : "Receitas::DonativoEntrada",
"Income::Interest" : "Receitas::Juros",
"Income::OpeningBalances" : "Receitas::SaldosAbertura",
"Income::SalesTax" : "Receitas::ImpostoVendas",
"Income::Shop" : "Receitas::Loja",
"Income::Sponsorship" : "Receita::Apadrinhamento",
"Income::WaitingList" : "Receitas::ListaDeEspera",
"Incoming" : "Recebido",
"Incoming Forms" : "Formulários recebidos",
"Incoming donations (misc)" : "Donativos recebidos (diversos)",
"Incoming forms are online forms that have been completed and submitted by people on the web." : "Os formulários recebidos são formulários online que foram preenchidos e submetidos por pessoas na Web.",
"Incoming forms that have been used to create records will be automatically removed when you leave this screen." : "Os formulários recebidos que tenham sido utilizados para criar registos serão automaticamente removidos após abandonar este ecrã.",
"Incoming forms will be automatically removed after {0} days." : "Os formulários recebidos serão automaticamente removidos após {0} dias.",
"Incomplete incidents" : "Incidentes incompletos",
"Incomplete notes upto today" : "Anotações por concluir até à data",
"Indefinitely" : "Indefinidamente",
"Index" : "Índice",
"Individual/Couple" : "Pessoa Individual/Casal",
"Induct a new animal" : "Integrar um novo animal",
"Information" : "Informação",
"Initials" : "Iniciais",
"Install" : "Instalar",
"Install the selected reports to your database" : "Instalar os relatórios selecionados na sua base de dados",
"Insurance" : "Seguro",
"Insurance No" : "N.º de Seguro",
"Intake" : "Admissão",
"Intakes {0}" : "Admissões {0}",
"Internal Location" : "Localização Interna",
"Internal Locations" : "Localizações Internas",
"Invalid email address '{0}'" : "Endereço de email '{0}' inválido",
"Invalid microchip number length" : "Comprimento do número de microchip inválido",
"Invalid time '{0}', times should be in 00:00 format" : "Hora inválida '{0}': as horas devem estar no formato 00:00",
"Invalid time, times should be in HH:MM format" : "Hora inválida, as horas devem estar no formato HH:MM",
"Invalid username or password." : "Nome de utilizador ou senha inválidos.",
"Investigation" : "Investigação",
"Investigations" : "Investigações",
"Investigator" : "Responsável pela Investigação",
"Invoice Only" : "Faturar apenas",
"Invoice items need a description and amount." : "Os itens de fatura necessitam de uma descrição e montante.",
"Irish Setter" : "Setter Irlandês",
"Irish Terrier" : "Terrier Irlandês",
"Irish Water Spaniel" : "Spaniel de Água Irlandês",
"Irish Wolfhound" : "Wolfhound Irlandês",
"Is this a permanent foster?" : "Esta colocação em FAT é definitiva?",
"Is this a trial adoption?" : "Esta adoção é experimental?",
"Issue a new insurance number for this animal/adoption" : "Emitir um novo número de seguro para este animal/adoção",
"Issue date and expiry date must be valid dates." : "A data de emissão e a data de expiração têm de ser datas válidas.",
"Issued" : "Emitida em",
"Issued in last month" : "Emitidos no mês passado",
"Issued in the last month" : "Emitida no último mês",
"Issued in the last week" : "Emitida na última semana",
"Italian Greyhound" : "Galgo Italiano",
"Italian Spinone" : "Spinone Italiano",
"Item" : "Item",
"Jack Russell Terrier" : "Jack Russell Terrier",
"Jan" : "Jan",
"January" : "Janeiro",
"Japanese Bobtail" : "Bobtail Japonês",
"Japanese Chin" : "Chin Japonês",
"Javanese" : "Javanês",
"Jersey Wooly" : "Jersey Wooly",
"Jindo" : "Jindo",
"Jul" : "Jul",
"July" : "Julho",
"Jump to diary" : "Aceder ao diário",
"Jump to donations" : "Aceder aos donativos",
"Jump to media" : "Aceder às imagens",
"Jump to movements" : "Aceder aos movimentos",
"Jun" : "Jun",
"June" : "Junho",
"Jurisdiction" : "Jurisdição",
"Jurisdiction is {0}" : "A jurisdição é {0}",
"Jurisdictions" : "Jurisdições",
"Kai Dog" : "Kai Dog",
"Kakariki" : "Kakariki",
"Karelian Bear Dog" : "Cão de Ursos da Carélia",
"Keep table headers visible when scrolling" : "Manter cabeçalhos de tabela visíveis ao deslocar",
"Keeshond" : "Keeshond",
"Kennel" : "Canil",
"Kerry Blue Terrier" : "Kerry Blue Terrier",
"Kishu" : "Kishu",
"Kittens (under {0} months)" : "Gatinhos (com menos de {0} meses)",
"Km" : "Kms",
"Komondor" : "Komondor",
"Korat" : "Korat",
"Kuvasz" : "Kuvasz",
"Kyi Leo" : "Kyi Leo",
"Label" : "Etiqueta",
"Labrador Retriever" : "Labrador Retriever",
"Lakeland Terrier" : "Lakeland Terrier",
"Lancashire Heeler" : "Lancashire Heeler",
"Large" : "Grande",
"Last First" : "Último Primeiro",
"Last Location" : "Última Localização",
"Last Month" : "Último Mês",
"Last Movement" : "Último Movimento",
"Last Name" : "Apelido",
"Last Week" : "Última Semana",
"Last changed by {0} on {1}" : "Última alteração por {0} em {1}",
"Last name" : "Apelido",
"Last, First" : "Último, Primeiro",
"Latency" : "Latência",
"Latency Tester" : "Testador de Latência",
"Latitude/Longitude" : "Latitude/Longitude",
"Least recently changed" : "Alterado há menos tempo",
"Leave" : "Sair",
"Leave of absence" : "Licença",
"Left Margin" : "Margem Esquerda",
"Left shelter" : "Saiu do Abrigo",
"Left the shelter between" : "Saiu do abrigo entre",
"Left the shelter between two dates" : "Deixados no abrigo entre duas datas",
"Left the shelter today" : "Saiu do abrigo hoje",
"Leonberger" : "Leonberger",
"Leptospirosis" : "Leptospirose",
"Letter" : "Ninhada",
"Lhasa Apso" : "Lhasa Apso",
"Liability" : "Responsabilidade Legal",
"Licence for {0} successfully renewed {1} - {2}" : "Licença de {0} renovada com êxito {1} - {2}",
"License" : "Licença",
"License Number" : "Número de Licença",
"License Types" : "Tipos de Licença",
"License number '{0}' has already been issued." : "O número de licença '{0}' já foi emitido.",
"License numbers matching '{0}'." : "Números de licença correspondentes a '{0}'.",
"License requires a number" : "A licença requer um número",
"License requires a person" : "A licença requer uma pessoa",
"License requires issued and expiry dates" : "A licença requer datas de emissão e expiração",
"Licensed" : "Titular de licença",
"Licenses" : "Licenças",
"Licensing" : "Licenciamento",
"Lifetime" : "Vitalícia",
"Light Amber" : "Âmbar Claro",
"Lilac" : "Lilás",
"Lilac Tortie" : "Tortie Lilás",
"Limited to {0} matches" : "Limitado a {0} correspondências",
"Link" : "Link",
"Link an animal" : "Criar link para um animal",
"Link this user account to a staff person record." : "Vincular esta conta de utilizador ao registo de um elemento do pessoal.",
"Link to a photo of this animal" : "Link para uma fotografia deste animal",
"Link to an external web resource" : "Criar link para um recurso web externo",
"Link to this animal" : "Criar link para este animal",
"Links" : "Links",
"List" : "Lista",
"Litter" : "Ninhada",
"Litter Ref" : "Ref. da Ninhada",
"Litter Reference" : "Referência da Ninhada",
"Littermates" : "Companheiros de Ninhada",
"Litters" : "Ninhadas",
"Litters need at least a required date and number." : "As ninhadas necessitam de pelo menos uma data prevista e de um número.",
"Live Releases {0}" : "Edições Diretas {0}",
"Liver" : "Fígado",
"Liver and White" : "Fígado e Branco",
"Lizard" : "Lagarto",
"Llama" : "Lama",
"Loading..." : "A carregar...",
"Loan" : "Empréstimo",
"Local" : "Local",
"Locale" : "Localização",
"Location" : "Localização",
"Location (No Virtual)" : "Localização (Não Virtual)",
"Location Filter" : "Filtro de Localização",
"Location and Breed" : "Localização e Raça",
"Location and Species" : "Localização e Espécie",
"Location and Type" : "Localização e Tipo",
"Location and Unit" : "Localização e Unidade",
"Location is {0}" : "A localização é {0}",
"Locations" : "Localizações",
"Locked" : "Bloqueado",
"Log" : "Registo",
"Log Text" : "Texto do Registo",
"Log Type" : "Tipo de Relatório",
"Log Types" : "Tipos de Registos",
"Log date must be a valid date" : "A data do registo tem de ser uma data válida",
"Log entries need a date and text." : "As entradas de registo necessitam de uma data e texto.",
"Log requires a date." : "O registo necessita de uma data.",
"Log requires a person." : "O registo necessita de uma pessoa.",
"Log requires an animal." : "O registo necessita de um animal.",
"Log successfully added." : "Registo adicionado com êxito.",
"Login" : "Iniciar Sessão",
"Logout" : "Terminar Sessão",
"Long" : "Longo",
"Long term" : "Longo prazo",
"Longest On Shelter" : "Há Mais Tempo no Abrigo",
"Looking For" : "À Procura De",
"Looking for" : "À procura de",
"Lookup" : "Consulta",
"Lookup (Multiple Select)" : "Consulta (Seleção Múltipla)",
"Lookup Values" : "Valores de Consulta",
"Lookup data" : "Consultar dados",
"Lookups" : "Consultas",
"Lop Eared" : "Coelho de Orelhas Caídas",
"Lory/Lorikeet" : "Lory/Lorikeet",
"Lost" : "Perdido",
"Lost Animal" : "Animal Perdido",
"Lost Animal - Additional" : "Animal Perdido - Adicional",
"Lost Animal - Details" : "Animal Perdido - Detalhes",
"Lost Animal Contact" : "Contacto de Animal Perdido",
"Lost Animal: {0}" : "Animal Perdido: {0}",
"Lost and Found" : "Perdidos e Encontrados",
"Lost and found entries must have a contact" : "As entradas de perdidos e encontrados necessitam de um contacto",
"Lost animal - {0} {1} [{2}]" : "Animal perdido - {0} {1} [{2}]",
"Lost animal entries matching '{0}'." : "Entradas de animais perdidos correspondentes a '{0}'.",
"Lost animal entry {0} successfully created." : "Entrada de animal perdido {0} criada com êxito.",
"Lost animals must have a contact" : "Os animais perdidos necessitam de um contacto",
"Lost animals reported in the last 30 days." : "Animais perdidos comunicados nos últimos 30 dias.",
"Lost from" : "Perdido de",
"Lost to" : "Perdido para",
"Lost/Found" : "Perdidos/Encontrados",
"Lots of reports installed? Clean up the Reports menu with Settings, Options, Display, Show report menu items in collapsed categories." : "Tem demasiados relatórios instalados? Limpe o menu Relatórios através da opção Definições/Opções/Mostrar/Mostrar itens de menu de relatório em categorias reduzidas.",
"Lovebird" : "Periquito",
"Low" : "Baixo",
"Lowchen" : "Lowchen",
"Lowest" : "Mais Baixa",
"M (Miscellaneous)" : "D (Diversos)",
"MM = current month" : "MM = mês atual",
"Macaw" : "Arara",
"Mail" : "Mailing",
"Mail Merge" : "Mailing",
"Mail Merge - {0}" : "Mailing - {0}",
"Maine Coon" : "Maine Coon",
"Make this the default image when creating documents" : "Definir esta imagem como padrão ao criar documentos",
"Make this the default image when viewing this record and publishing to the web" : "Definir esta imagem como padrão ao visualizar este registo e publicar na web",
"Make this the default video link when publishing to the web" : "Definir este link de vídeo como padrão ao publicar na web",
"Male" : "Macho",
"Maltese" : "Maltês",
"Manchester Terrier" : "Manchester Terrier",
"Mandatory" : "Obrigatório",
"Manual" : "Manual",
"Manually enter codes (do not generate)" : "Inserir códigos manualmente (não gerar)",
"Manufacturer" : "Fabricante",
"Manx" : "Manês",
"Map" : "Mapa",
"Map of active incidents" : "Mapa de incidentes ativos",
"Mar" : "Mar",
"March" : "Março",
"Maremma Sheepdog" : "Cão Pastor Maremano",
"Mark Deceased" : "Assinalar como Falecido",
"Mark an animal deceased" : "Assinalar um animal como falecido",
"Mark dispatched now" : "Assinalar como processado agora",
"Mark new animals as not for adoption" : "Assinalar os novos animais como não adotáveis",
"Mark responded now" : "Assinalar como respondido agora",
"Mark selected payments received" : "Assinalar os pagamentos selecionados como recebidos",
"Mark this owner homechecked" : "Assinalar este proprietário como inspecionado no domicílio",
"Mark treatments given" : "Assinalar tratamentos como administrados",
"Marketer" : "Comerciante",
"Markings" : "Marcas",
"Markup" : "Código",
"Marriage/Relationship split" : "Fim de casamento/relação",
"Mastiff" : "Mastim",
"Match" : "Confrontar",
"Match Lost and Found" : "Confrontar Perdidos e Encontrados",
"Match against other lost/found animals" : "Confrontar com outros animais perdidos/encontrados",
"Match lost and found animals" : "Confrontar animais perdidos e encontrados",
"Match this animal with the lost and found database" : "Confrontar este animal com a base de dados de perdidos e encontrados",
"Maternity" : "Maternidade",
"May" : "Maio",
"McNab" : "McNab",
"Media" : "Imagens",
"Media Notes" : "Anotações de Imagens",
"Media notes contain" : "Anotações de imagens contêm",
"Medical" : "Clínica",
"Medical Book" : "Livro Clínico",
"Medical Profiles" : "Perfis Clínicos",
"Medical book" : "Livro clínico",
"Medical calendar" : "Agenda Clínica",
"Medical profiles" : "Perfis clínicos",
"Medical profiles need a profile name, treatment, dosage and frequencies." : "Os perfis clínicos necessitam de um nome de perfil, tratamento, dosagem e frequências.",
"Medical regimens need an animal, name, dosage, a start date and frequencies." : "Os regimes clínicos necessitam de um animal, nome, dosagem, data de início e frequências.",
"Medicate" : "Medicar",
"Medicate Animal" : "Medicar Animal",
"Medium" : "Médio",
"Member" : "Associado",
"Membership Expiry" : "Expiração da Quota",
"Membership Number" : "Número de Associado",
"Merge" : "Juntar",
"Merge Animals" : "Juntar Animais",
"Merge Person" : "Juntar Pessoa",
"Merge another animal into this one" : "Juntar outro registo de animal a este",
"Merge another person into this one" : "Juntar outro registo de pessoa a este",
"Merge bonded animals into a single record" : "Juntar animais que são par/companheiros num único registo",
"Merge duplicate records" : "Juntar registos duplicados",
"Message" : "Mensagem",
"Message Board" : "Quadro de Mensagens",
"Message from {0}" : "Mensagem de {0}",
"Message successfully sent to {0}" : "Mensagem enviada com êxito a {0}",
"Messages" : "Mensagens",
"Messages successfully sent" : "Mensagens enviadas com êxito",
"Method" : "Método",
"Microchip" : "Microchip",
"Microchip Date" : "Data do Microchip",
"Microchip Number" : "Número do Microchip",
"Microchip number {0} has already been allocated to another animal." : "O número de microchip {0} já foi atribuído a outro animal.",
"Microchipped" : "Microchipado",
"Microchips Implanted In {0}" : "Microchips Implantados em {0}",
"Miles" : "Milhas",
"Mini Rex" : "Mini Rex",
"Mini-Lop" : "Mini-Lop",
"Miniature Pinscher" : "Pinscher Miniatura",
"Minutes" : "Minutos",
"Missouri Foxtrotter" : "Foxtrotter do Missouri",
"Mixed Breed" : "Raça Mista",
"Mo" : "Seg",
"Mobile signing pad" : "Bloco de assinatura móvel",
"Modify Additional Fields" : "Modificar Campos Adicionais",
"Modify Document Templates" : "Alterar Modelos de Documentos",
"Modify Lookups" : "Alterar Vistas",
"Mon" : "Seg",
"Monday" : "Segunda-feira",
"Money" : "Dinheiro",
"Month" : "Mês",
"Monthly" : "Mensal",
"More Info Needed" : "Necessárias Mais Informações",
"More Medications" : "Mais Medicações",
"More Tests" : "Mais Testes",
"More Vaccinations" : "Mais Vacinações",
"More diary notes" : "Mais anotações de diário",
"Morgan" : "Morgan",
"Most browsers let you search in dropdowns by typing the first few letters of the item you want." : "A maioria dos browsers permite pesquisar nas listas suspensas através da digitação das primeiras letras do item pretendido.",
"Most browsers will let you visit a record you have been to in this session by typing part of its name in the address bar." : "A maioria dos browsers permitem-lhe consultar um registo que tenha visitado durante esta sessão escrevendo parte do respetivo nome na barra de endereços.",
"Most recently changed" : "Alterado há menos tempo",
"Most relevant" : "Mais relevante",
"Mother" : "Mãe",
"Mountain Cur" : "Cur da Montanha",
"Mountain Dog" : "Cão da Montanha",
"Mouse" : "Rato",
"Move" : "Movimentos",
"Move an animal to a retailer" : "Mover um animal para um lojista",
"Moved to animal record {0}" : "Movido para o registo de animal {0}",
"Movement" : "Movimento",
"Movement Date" : "Data do Movimento",
"Movement Number" : "Número do Movimento",
"Movement Type" : "Tipo de Movimento",
"Movement Types" : "Tipos de Movimentos",
"Movement date cannot be before brought in date." : "A data do movimento não pode ser anterior à data de admissão.",
"Movement dates clash with an existing movement." : "As datas de movimento estão em conflito com um movimento existente.",
"Movement numbers must be unique." : "Os números de movimento têm de ser exclusivos.",
"Movements" : "Movimentos",
"Movements require an animal" : "Os movimentos necessitam de um animal",
"Movements require an animal." : "Os movimentos necessitam de um animal.",
"Moving..." : "A mover...",
"Multi-Lookup" : "Consulta múltipla",
"Multiple Treatments" : "Tratamentos Múltiplos",
"Munchkin" : "Munchkin",
"Munsterlander" : "Munsterlander",
"Mustang" : "Mustang",
"My Fosters" : "As Minhas FAT",
"My Incidents" : "Os Meus Incidentes",
"My Undispatched Incidents" : "Os Meus Incidentes Não Processados",
"My diary notes" : "As minhas anotações de diário",
"My sheltermanager.com account" : "A minha conta no sheltermanager.com",
"Mynah" : "Mynah",
"N (Non-Shelter Animal)" : "N (Animal não Residente no Abrigo)",
"NNN or NN = number unique for this type of animal for this year" : "NNN ou NN = número exclusivo para este tipo de animal para este ano",
"Name" : "Nome",
"Name Contains" : "Código Contém",
"Name and Address" : "Nome e Morada",
"Name cannot be blank" : "O nome não pode estar em branco",
"Name contains" : "Nome contém",
"Neapolitan Mastiff" : "Mastim Napolitano",
"Negative" : "Negativo",
"Neglect" : "Negligência",
"Net" : "Líquido",
"Netherland Dwarf" : "Anão Holandês",
"Neuter/Spay" : "Esterilizar/Castrar",
"Neutered" : "Castrado",
"Neutered/Spayed Non-Shelter Animals In {0}" : "Animais não residentes no abrigo esterilizados/castrados em {0}",
"Neutered/Spayed Shelter Animals In {0}" : "Animais residentes no abrigo esterilizados/castrados em {0}",
"Never" : "Nunca",
"New" : "Novo",
"New Account" : "Nova Conta",
"New Appointment" : "Novo Agendamento",
"New Citation" : "Nova Advertência",
"New Cost" : "Novo Custo",
"New Diary" : "Novo Diário",
"New Diet" : "Nova Dieta",
"New Document" : "Novo Documento",
"New Field" : "Novo Campo",
"New Fosterer" : "Nova FAT",
"New Guinea Singing Dog" : "Cão Cantor da Nova Guiné",
"New Item" : "Novo Item",
"New License" : "Nova Licença",
"New Litter" : "Nova Ninhada",
"New Log" : "Novo Registo",
"New Movement" : "Novo Movimento",
"New Owner" : "Novo Dono",
"New Password" : "Nova Senha",
"New Payment" : "Novo Pagamento",
"New Profile" : "Novo Perfil",
"New Record" : "Novo Registo",
"New Regimen" : "Novo Regime",
"New Report" : "Novo Relatório",
"New Role" : "Nova Função",
"New Stock" : "Novo Stock",
"New Task" : "Nova Tarefa",
"New Template" : "Novo Modelo",
"New Test" : "Novo Teste",
"New Transport" : "Novo Transporte",
"New Trap Loan" : "Novo Empréstimo de Armadilha",
"New User" : "Novo Utilizador",
"New Vaccination" : "Nova Vacinação",
"New Voucher" : "Novo Voucher",
"New Waiting List Entry" : "Nova Entrada de Lista de Espera",
"New Zealand" : "Nova Zelândia",
"New diary task" : "Nova tarefa de diário",
"New form field" : "Novo campo de formulário",
"New name" : "Novo nome",
"New online form" : "Novo formulário online",
"New password and confirmation password don't match." : "A nova senha e a senha de confirmação não correspondem.",
"New task detail" : "Detalhes da nova tarefa",
"New template" : "Novo modelo",
"Newfoundland Dog" : "Cão Terra Nova",
"Next" : "Seguinte",
"No" : "Não",
"No active license held" : "Não titular de licença válida",
"No adjustment" : "Nenhum ajuste",
"No data to show on the report." : "Nenhum dado para apresentar no relatório.",
"No data." : "Nenhum dado.",
"No description" : "Nenhuma descrição",
"No license" : "Sem licença",
"No longer retained" : "Já não se encontra retido",
"No matches found." : "Nenhum resultado encontrado.",
"No picture" : "Nenhuma imagem",
"No publishers are running." : "Nenhum editor está em execução.",
"No results found." : "Nenhum resultado encontrado.",
"No results." : "Nenhum resultado.",
"No tasks are running." : "Nenhuma tarefa está a ser executada.",
"No tattoo" : "Sem tatuagem",
"No view permission for this report" : "Nenhuma permissão de visualização para este relatório",
"Noise" : "Ruído",
"Non-Shelter" : "Não Residente no Abrigo",
"Non-Shelter Animal" : "Animal não Residente no Abrigo",
"Non-Shelter Animals" : "Animais não Residentes no Abrigo",
"Non-shelter" : "Não residente no abrigo",
"Non-shelter Animals" : "Animais não residentes no abrigo",
"None" : "Nenhum",
"Norfolk Terrier" : "Norfolk Terrier",
"Normal user" : "Utilizador normal",
"Norwegian Buhund" : "Buhund Norueguês",
"Norwegian Elkhound" : "Elkhound Norueguês",
"Norwegian Forest Cat" : "Gato da Floresta Norueguês",
"Norwegian Lundehund" : "Lundehund Norueguês",
"Norwich Terrier" : "Terrier Norueguês",
"Not Arrived" : "Não chegou",
"Not Available For Adoption" : "Não Disponível para Adoção",
"Not Available for Adoption" : "Não Disponível para Adoção",
"Not For Adoption" : "Não Adotável",
"Not Microchipped" : "Não Microchipado",
"Not Reconciled" : "Não Reconciliado",
"Not adoptable" : "Não adotável",
"Not altered" : "Não esterilizado/castrado",
"Not available for adoption" : "Não disponível para adoção",
"Not deceased" : "Não falecido",
"Not dispatched" : "Não processado",
"Not for adoption" : "Não adotáveis",
"Not for adoption flag set" : "Sinalização Não Adotável Definida",
"Not in chosen publisher location" : "Noutro local que não escolhido para o editor",
"Not microchipped" : "Não microchipado",
"Not non-shelter" : "Não residente no abrigo",
"Not reconciled" : "Não reconciliado",
"Not {0}" : "Não {0}",
"Note" : "Nota",
"Notes" : "Anotações",
"Notes about the death of the animal" : "Anotações sobre a morte do animal",
"Nov" : "Nov",
"Nova Scotia Duck-Tolling Retriever" : "Retriever Cobrador de Patos da Nova Escócia",
"November" : "Novembro",
"Now" : "Agora",
"Number" : "Número",
"Number in litter" : "Número na ninhada",
"Number of Tasks" : "Número de Tarefas",
"Number of animal links to show" : "Número de links de animais a mostrar",
"Number of fields" : "Número de campos",
"Number of pets" : "Número de animais de estimação",
"Ocicat" : "Ocicat",
"Oct" : "Out",
"October" : "Outubro",
"Office" : "Office",
"Ok" : "Ok",
"Old English Sheepdog" : "Cão Pastor Tradicional Inglês",
"Old Password" : "Senha Antiga",
"Omit criteria" : "Omitir critérios",
"Omit header/footer" : "Omitir Cabeçalho/Rodapé",
"On Foster (in figures)" : "Em FAT (em números)",
"On Shelter" : "No Abrigo",
"On foster" : "Entregue a FAT",
"On permanent foster" : "Entregue a FAT definitiva",
"On shelter" : "Residente no abrigo",
"On shelter (no fosters)" : "Residente no abrigo (sem FAT)",
"On shelter for {0} days, daily cost {1}, cost record total <b>{2}</b>" : "No abrigo há {0} dias, custo diário {1}, custo total em arquivo <b>{2}</b>",
"On shelter for {0} days. Total cost: {1}" : "No abrigo há {0} dias. Custo total: {1}",
"On trial adoption" : "Em adoção experimental",
"Once assigned, codes cannot be changed" : "Uma vez atribuídos, os códigos não podem ser alterados",
"Once linked, a user account cannot access and edit its linked person record." : "Após a criação do vínculo, uma conta de utilizador não poderá abrir e editar o registo de pessoa a ela vinculado.",
"Once signed, this document cannot be edited or tampered with." : "Uma vez assinado, este documento não pode ser editado ou adulterado.",
"One Off" : "Único",
"One-Off" : "Único",
"Online Form: {0}" : "Formulário Online: {0}",
"Online Forms" : "Formulários Online",
"Online form fields need a name and label." : "Os campos de formulário online necessitam de um nome e de uma etiqueta.",
"Online forms can be linked to from your website and used to take information from visitors for applications, etc." : "Os formulários online podem ser acedidos através de links publicados no seu website e utilizados para recolher informações de visitantes para candidaturas, etc.",
"Only PDF, HTML and JPG image files can be attached." : "Apenas podem ser anexados ficheiros de imagem em PDF, HTML e JPG.",
"Only active accounts" : "Apenas contas ativas",
"Only allow users with one of these roles to view this incident" : "Apenas permitir que utilizadores com uma destas funções visualizem este incidente",
"Only show account totals for the current period, which starts on " : "Apenas mostrar os totais da conta para o período atual, o qual tem início em ",
"Only show declawed" : "Apenas mostrar com unhas removidas",
"Only show pickups" : "Mostrar apenas recolhas",
"Only show special needs" : "Mostrar apenas necessidades especiais",
"Only show this field based on a conditional expression, eg: field1=dog" : "Apenas mostrar este campo com base numa expressão condicional, por exemplo campo1=cão",
"Only show transfers" : "Mostrar apenas transferências",
"Open Incidents" : "Incidentes em Aberto",
"Open records in a new browser tab" : "Abrir registos num novo separador do browser",
"Open reports in a new browser tab" : "Abrir os relatórios num novo separador do browser",
"Opening balances" : "Saldos iniciais",
"Optional, the date the vaccination \"wears off\" and needs to be administered again" : "Opcionalmente, a data em que a vacina perde efeito e precisa de ser administrada novamente",
"Options" : "Opções",
"Or move this diary on to" : "Ou transferir este diário para",
"Order published animals by" : "Ordenar animais publicados por",
"Organisation" : "Organização",
"Organization" : "Organização",
"Organization name" : "Nome da organização",
"Oriental Long Hair" : "Pelo Longo Oriental",
"Oriental Short Hair" : "Pelo Curto Oriental",
"Oriental Tabby" : "Tabby Oriental",
"Original Owner" : "Dono Original",
"Ostrich" : "Avestruz",
"Other Account" : "Outra Conta",
"Other Organisation" : "Outra Organização",
"Other Shelter" : "Outro Abrigo",
"Otterhound" : "Otterhound",
"Our shelter does soft releases, allow us to mark these on movement screens" : "O nosso abrigo realiza libertações controladas, permita-nos que as identifiquemos nos ecrãs de movimentos",
"Our shelter does trial adoptions, allow us to mark these on movement screens" : "O nosso abrigo aceita adoções experimentais, permita-nos que as identifiquemos nos ecrãs de movimentos",
"Out" : "Saída",
"Out Between" : "Saída Entre",
"Out SubTotal" : "Subtotal de Saída",
"Output a deceased animals page" : "Gerar uma página de animais falecidos",
"Output a page with links to available online forms" : "Gerar uma página com links para formulários online disponíveis",
"Output a separate page for each animal type" : "Gerar uma página separada para cada tipo de animal",
"Output a separate page for each species" : "Gerar uma página separada para cada espécie",
"Output an adopted animals page" : "Gerar uma página de animais adotados",
"Output an rss.xml page" : "Gerar uma página rss.xml",
"Over 12" : "Mais de 12",
"Over 5" : "Mais de 5",
"Overdue" : "Em atraso",
"Overdue medical items" : "Itens clínicos em atraso",
"Overtime" : "Horas extra",
"Owl" : "Mocho",
"Owner" : "Dono",
"Owner Vet" : "Veterinário do Dono",
"Owner given citation" : "Advertência ao dono",
"Owners Vet" : "Veterinário dos Donos",
"PM" : "PM",
"Page extension" : "Extensão da página",
"Paid" : "Pago",
"Paint/Pinto" : "Paint/Pinto",
"Palomino" : "Palomino",
"Paper Size" : "Tamanho de Papel",
"Papillon" : "Papillon",
"Parainfluenza" : "Parainfluenza",
"Parakeet (Other)" : "Periquito (Outro)",
"Parent" : "Pai",
"Parrot (Other)" : "Papagaio (Outro)",
"Parrotlet" : "Tuim",
"Parvovirus" : "Parvovirose",
"Paso Fino" : "Paso Fino",
"Pass Homecheck" : "Aprovação de Inspeção Domiciliária",
"Password" : "Senha",
"Password for '{0}' has been reset." : "A senha de '{0}' foi reposta.",
"Password is incorrect." : "A senha está incorreta.",
"Password reset information has been sent to your email." : "Foram enviadas para o seu email informações sobre a reposição da sua senha.",
"Password successfully changed." : "Senha alterada com êxito.",
"Passwords cannot be blank." : "As senhas não podem ficar em branco.",
"Path" : "Caminho",
"Patterdale Terrier (Fell Terrier)" : "Patterdale Terrier (Fell Terrier)",
"PayPal" : "PayPal",
"PayPal Business Email" : "PayPal Business Email",
"Payment" : "Pagamento",
"Payment Book" : "Livro de Pagamentos",
"Payment From" : "Formulário de Pagamento",
"Payment Methods" : "Métodos de Pagamento",
"Payment Processors" : "Processadores de Pagamentos",
"Payment Type" : "Tipo de Pagamento",
"Payment Types" : "Tipos de Pagamento",
"Payment book" : "Livro de pagamentos",
"Payment calendar" : "Calendário de pagamentos",
"Payment of {0} successfully received ({1})." : "Pagamento de {0} recebido com êxito ({1}).",
"Payments" : "Pagamentos",
"Payments need at least one date, an amount and a person." : "Os pagamentos precisam de, pelo menos, uma data, um montante e uma pessoa.",
"Payments of type" : "Pagamentos do tipo",
"Payments require a person" : "Os pagamentos necessitam de uma pessoa",
"Payments require a received date" : "Os pagamentos necessitam de uma data de recebimento",
"Peacock/Pea fowl" : "Pavão",
"Pekingese" : "Pequinês",
"Pending Adoption" : "Aguarda Adoção",
"Pending Apartment Verification" : "Aguarda Inspeção de Apartamento",
"Pending Home Visit" : "Aguarda Visita Domiciliária",
"Pending Vet Check" : "Aguarda Inspeção pelo Veterinário",
"Pension" : "Pensão",
"People" : "Pessoas",
"People Looking For" : "Pessoas que Procuram",
"People matching '{0}'." : "Pessoas que correspondem a '{0}'.",
"People or animal records that already exist in the database will not be imported again and movement/payment data will be attached to the existing records instead." : "Os registos de pessoas ou animais que já existam na base de dados não serão novamente importados e os dados de movimentos/pagamentos serão em vez disso anexados aos registos existentes.",
"People with active reservations, but no homecheck has been done." : "Pessoas com reservas ativas, mas sem uma inspeção domiciliária realizada.",
"People with overdue donations." : "Pessoas com donativos em atraso.",
"Percheron" : "Percheron",
"Perform" : "Realizar",
"Perform Homecheck" : "Realizar Inspeção Domiciliária",
"Perform Test" : "Realizar Teste",
"Performed" : "Realizado em",
"Permanent Foster" : "FAT Definitiva",
"Persian" : "Persa",
"Person" : "Pessoa",
"Person - Additional" : "Pessoa - Adicional",
"Person - Name and Address" : "Pessoa - Nome e Morada",
"Person - Type" : "Pessoa - Tipo",
"Person Flags" : "Sinalizações de Pessoa",
"Person looking for report" : "Relatório de pessoas que procuram",
"Person must have a surname." : "A pessoa necessita de um apelido.",
"Person successfully created" : "Pessoa criada com êxito.",
"Personal" : "Pessoal",
"Peruvian Inca Orchid" : "Pelado Peruano",
"Peruvian Paso" : "Paso Peruano",
"Petit Basset Griffon Vendeen" : "Pequeno Basset Griffon Vendeen",
"Pharaoh Hound" : "Cão de Caça do Faraó",
"Pheasant" : "Faisão",
"Phone" : "Telefone",
"Phone contains" : "Telefone contém",
"Photo successfully uploaded." : "Foto carregada com êxito.",
"Picked Up" : "Recolhido",
"Picked Up By" : "Recolhido Por",
"Pickup" : "Recolha",
"Pickup Address" : "Morada de Recolha",
"Pickup Location" : "Local de Recolha",
"Pickup Locations" : "Locais de Recolha",
"Pig" : "Porco",
"Pig (Farm)" : "Porco (quinta)",
"Pigeon" : "Pombo",
"Pinterest" : "Pinterest",
"Pionus" : "Pionus",
"Pit Bull Terrier" : "Pit Bull Terrier",
"Pixie-Bob" : "Pixie-Bob",
"Please click the Sign button when you are finished." : "Clique no botão Assinar após terminar.",
"Please see the manual for more information." : "Consulte o manual para mais informações.",
"Please select a PDF, HTML or JPG image file to attach" : "Selecione um ficheiro PDF, HTML ou imagem JPG para anexar",
"Please tighten the scope of your email campaign to {0} emails or less." : "Limite o âmbito da sua campanha de email a {0} emails ou menos.",
"Please use the link below to pay." : "Utilize o seguinte link para efetuar o pagamento.",
"Please use the links below to electronically sign these documents." : "Utilize os links abaixo para assinar eletronicamente estes documentos.",
"Plott Hound" : "Plott Hound",
"Poicephalus/Senegal" : "Poicephalus/Senegal",
"Pointer" : "Pointer",
"Points for being found within 2 weeks of being lost" : "Pontos por ter sido encontrado menos de 2 semanas após se ter perdido",
"Points for matching age group" : "Pontos por correspondência de grupo etário",
"Points for matching breed" : "Pontos por correspondência de raça",
"Points for matching color" : "Pontos por correspondência de cores",
"Points for matching features" : "Pontos por correspondência de características",
"Points for matching lost/found area" : "Pontos por correspondência de área onde foi perdido/encontrado",
"Points for matching microchip" : "Pontos por correspondência de microchip",
"Points for matching sex" : "Pontos por correspondência de sexo",
"Points for matching species" : "Pontos por correspondência de espécie",
"Points for matching zipcode" : "Pontos por correspondência de código postal",
"Points required to appear on match report" : "Pontos necessários para figurar no relatório de correspondências",
"Polish" : "Polaco",
"Polish Lowland Sheepdog" : "Cão Pastor Lowland Polaco",
"Pomeranian" : "Pomerânio",
"Pony" : "Pónei",
"Poodle" : "Caniche",
"Portugese Podengo" : "Podengo Português",
"Portuguese Water Dog" : "Cão D'Água Português",
"Positive" : "Positivo",
"Positive for Heartworm, FIV or FLV" : "Positivo para Dirofilariose, FIV ou FLV",
"Positive/Negative" : "Positivo/Negativo",
"Post" : "Correio",
"Postage costs" : "Despesas de correspondência",
"Pot Bellied" : "Porco Barrigudo",
"Prairie Dog" : "Cão-da-pradaria",
"Prefill new media notes for animal images with animal comments if left blank" : "Pré-preencher as novas anotações de imagens de animais com os comentários sobre o animal, caso sejam deixadas em branco",
"Prefill new media notes with the filename if left blank" : "Pré-preencher as novas anotações de imagens com o nome do ficheiro, caso sejam deixadas em branco",
"Premises" : "Instalações",
"Presa Canario" : "Dogue Canário",
"Press F11 in HTML or SQL code editing boxes to edit in fullscreen mode" : "Prima F11 nas caixas de edição de código HTML ou SQL para editar no modo de ecrã completo",
"Preview" : "Pré-visualizar",
"Preview allows you to test your HTML templates while bypassing server side caching and without making your animals adoptable" : "A pré-visualização permite-lhe testar os seus modelos HTML contornando a cache em servidor e sem tornar os seus animais adotáveis",
"Previous" : "Anterior",
"Previous Adopter" : "Adotante Anterior",
"Print" : "Imprimir",
"Print Preview" : "Pré-visualizar Impressão",
"Print selected forms" : "Imprimir formulários selecionados",
"Printable Manual" : "Manual Imprimível",
"Printing word processor documents uses hidden iframe and window.print" : "A impressão de documentos do processador de texto utiliza iframe e window.print ocultos",
"Priority" : "Prioridade",
"Priority Floor" : "Piso Prioritário",
"Processed by {0} on {1}" : "Processado por {0} em {1}",
"Produce a CSV File" : "Produzir um Ficheiro CSV",
"Produce a PDF of printable labels" : "Produzir um Ficheiro PDF de etiquetas imprimíveis",
"Profile" : "Perfil",
"Profile name cannot be blank" : "O nome do perfil não pode estar vazio",
"Public Holiday" : "Feriado",
"Publish Animals to the Internet" : "Publicar Animais na Internet",
"Publish HTML via FTP" : "Publicar HTML através de FTP",
"Publish now" : "Publicar agora",
"Publish to folder" : "Publicar em pasta",
"Published to Website" : "Publicado no site",
"Publisher" : "Editor",
"Publisher Breed" : "Raça do Editor",
"Publisher Color" : "Cor do Editor",
"Publisher Logs" : "Registros do Editor",
"Publisher Species" : "Espécie do Editor",
"Publishing" : "Publicação",
"Publishing History" : "Histórico de Publicações",
"Publishing Logs" : "Histórico de Publicações",
"Publishing Options" : "Opções de Publicação",
"Publishing complete." : "Publicação concluída.",
"Publishing template" : "Modelo de publicação",
"Pug" : "Pug",
"Puli" : "Puli",
"Pumi" : "Pumi",
"Puppies (under {0} months)" : "Cachorros (com menos de {0} meses)",
"Purchased" : "Comprado",
"Qty" : "Unids",
"Quaker Parakeet" : "Quaker Parakeet",
"Quantity" : "Quantidade",
"Quarantine" : "Quarentena",
"Quarterhorse" : "Cavalo quarto de milha",
"Quarterly" : "Trimestral",
"Query Builder" : "Criador de Consultas",
"Quick Links" : "Links Rápidos",
"Quicklinks" : "Links Rápidos",
"Quicklinks are shown on the home page and allow quick access to areas of the system." : "Os links rápidos são apresentados na página inicial e permitem acesso rápido às áreas do sistema.",
"R" : "R",
"Rabbit" : "Coelho",
"Rabies" : "Raiva",
"Rabies Tag" : "Etiqueta da Raiva",
"Rabies not given" : "Raiva não administrada",
"RabiesTag" : "EtiquetaRaiva",
"Radio Buttons" : "Botões de Opção",
"Ragamuffin" : "Ragamuffin",
"Ragdoll" : "Ragdoll",
"Rank" : "Classificação",
"Rat" : "Rato",
"Rat Terrier" : "Terrier Rato",
"Raw Markup" : "Marcação Básica",
"Read the manual for more information about Animal Shelter Manager." : "Leia o manual para obter mais informações sobre o Animal Shelter Manager.",
"Real name" : "Nome real",
"Reason" : "Motivo",
"Reason For Appointment" : "Motivo do Agendamento",
"Reason For Entry" : "Motivo da Entrada",
"Reason Not From Owner" : "Motivo Alheio ao Dono",
"Reason for Entry" : "Motivo da Entrada",
"Reason not from Owner" : "Motivo alheio ao dono",
"Reason the owner did not bring in the animal themselves" : "Motivo pelo qual o dono não trouxe ele próprio o animal",
"Recalculate ALL animal ages/times" : "Recalcular TODAS as idades/horas dos animais",
"Recalculate ALL animal locations" : "Recalcular TODOS os locais dos animais",
"Recalculate on-shelter animal locations" : "Recalcular os locais de animais no abrigo",
"Receipt" : "Recibo",
"Receipt No" : "N.º de Recibo",
"Receipt/Invoice" : "Recibo/Fatura",
"Receive" : "Receber",
"Receive a payment" : "Receber um pagamento",
"Received" : "Recebido em",
"Received in last day" : "Recebidos no último dia",
"Received in last month" : "Recebidos no mês passado",
"Received in last week" : "Recebidos na semana passada",
"Received in last year" : "Recebidos no ano passado",
"Received today" : "Recebidos hoje",
"Recently Adopted" : "Adotados Recentemente",
"Recently Changed" : "Recentemente Alterados",
"Recently Entered Shelter" : "Recentemente Admitidos no Abrigo",
"Recently Fostered" : "Recentemente Acolhidos por FAT",
"Recently deceased" : "Recentemente falecidos",
"Recently deceased shelter animals (last 30 days)." : "Animais do abrigo recentemente falecidos (últimos 30 dias).",
"Reception" : "Receção",
"Reclaim" : "Reivindicar",
"Reclaim an animal" : "Reivindicar um animal",
"Reclaim movements must have a valid reclaim date." : "Os movimentos de reivindicação necessitam de uma data de reivindicação válida.",
"Reclaim successfully created." : "Reivindicação criada com êxito.",
"Reclaimed" : "Reivindicado",
"Reconcile" : "Reconciliar",
"Reconciled" : "Reconciliado",
"Records must match all of the selected criteria in order to appear on the report" : "Os registos precisam de cumprir todos os critérios selecionados para serem incluídos no relatório",
"Redbone Coonhound" : "Redbone Coonhound",
"Redeemed" : "Resgatados",
"Redeemed in last month" : "Resgatados no mês passado",
"Rediarised" : "Transferido para Outro Diário",
"Redirect to URL after POST" : "Redirecionar para URL após POST",
"Redirect to this URL after successful payment" : "Redirecionar para este URL após um pagamento com êxito",
"Reference" : "Referência",
"Refresh" : "Atualizar",
"Regenerate 'Match lost and found animals' report" : "Voltar a gerar relatório 'Confrontar animais perdidos e achados'",
"Regenerate 'Person looking for' report" : "Voltar a gerar relatório 'Pessoa que procura'",
"Regenerate annual animal figures for" : "Voltar a gerar valores anuais de animais para",
"Regenerate monthly animal figures for" : "Voltar a gerar valores mensais de animais para",
"Regenerate person flags column" : "Regenerar a coluna de sinalizações de pessoa",
"Regenerate person names in selected format" : "Voltar a gerar nomes de pessoas no formato selecionado",
"Register Microchip" : "Registar Microchip",
"Register microchips after" : "Registar microchips após",
"Released To Wild" : "Libertado na Natureza",
"Released To Wild {0}" : "Libertado na Natureza {0}",
"Released to Wild" : "Libertado na Natureza",
"Reload" : "Recarregar",
"Reload page ..." : "A recarregar página...",
"Reload the medical book/tab automatically after adding new medical items" : "Recarregar automaticamente o livro/separador Clínica após adicionar novos itens da categoria clínica",
"Remaining" : "Restantes",
"Remember me on this computer" : "Memorizar neste computador",
"Removal" : "Remoção",
"Removal Reason" : "Motivo da Remoção",
"Removal reason" : "Motivo da remoção",
"Remove" : "Remover",
"Remove HTML and PDF document media after this many years" : "Remover as imagens de documentos HTML e PDF após este número de anos",
"Remove clinic functionality from screens and menus" : "Remover a funcionalidade de clínica dos ecrãs e menus",
"Remove fine-grained animal control incident permissions" : "Remover as permissões avançadas de incidentes de controlo de animais",
"Remove holds after" : "Remover retenções após",
"Remove incoming forms after" : "Remover formulários recebidos após",
"Remove move menu and the movements tab from animal and person screens" : "Remover o menu Movimentos e o separador Movimentos dos ecrãs de animais e pessoas",
"Remove personally identifiable data" : "Remover dados pessoalmente identificáveis",
"Remove previously published files before uploading" : "Remover ficheiros anteriormente publicados antes de fazer o carregamento",
"Remove processed forms when I leave the incoming forms screens" : "Remover formulários processados após o utilizador fechar os ecrãs de formulários recebidos",
"Remove retailer functionality from the movement screens and menus" : "Remover a funcionalidade de lojista dos ecrãs e menus de movimentos",
"Remove short shelter code box from the animal details screen" : "Remover a caixa de código abreviado de abrigo do ecrã de detalhes sobre o animal",
"Remove the FIV/L test fields from animal health details" : "Remover os campos de teste FIV/L dos detalhes de saúde do animal",
"Remove the Litter ID field from animal details" : "Remover o campo de ID de Ninhada dos detalhes do animal",
"Remove the Rabies Tag field from animal health details" : "Remover o campo Identificador de Raiva dos detalhes de saúde do animal",
"Remove the adoption coordinator field from animal entry details" : "Remover o campo de coordenador de adoção dos detalhes de entrada de animais",
"Remove the adoption fee field from animal details" : "Remover o campo de taxa de adoção dos detalhes do animal",
"Remove the animal control functionality from menus and screens" : "Remover a funcionalidade de gestão de animais dos menus e ecrãs",
"Remove the bonded with fields from animal entry details" : "Remover os campos de companheiro dos detalhes de entrada de animais",
"Remove the city/state fields from person details" : "Remover os campos de cidade/distrito dos detalhes da pessoa",
"Remove the coat type field from animal details" : "Remover o campo de tipo de pelagem dos detalhes do animal",
"Remove the country field from person details" : "Remover o campo de país dos detalhes da pessoa",
"Remove the declawed box from animal health details" : "Remover a caixa de remoção de unhas dos detalhes de saúde do animal",
"Remove the document repository functionality from menus" : "Remover a funcionalidade de repositório de documentos dos menus",
"Remove the good with fields from animal notes" : "Remova os campos Gentil Com das anotações de animais",
"Remove the heartworm test fields from animal health details" : "Remover os campos de teste de dirofilariose dos detalhes de saúde do animal",
"Remove the homechecked/by fields from person type according to the homechecked flag" : "Remover os campos de inspecionado no domicílio/por do tipo de pessoa, de acordo com a sinalização inspecionado no domicílio",
"Remove the insurance number field from the movement screens" : "Remover o campo do número de seguro dos ecrãs de movimentos",
"Remove the jurisdiction field from animal entry details" : "Remover o campo de jurisdição dos detalhes de entrada do animal",
"Remove the location unit field from animal details" : "Remover o campo de secção do abrigo dos detalhes do animal",
"Remove the microchip fields from animal identification details" : "Remover os campos de microchip dos detalhes de identificação do animal",
"Remove the neutered fields from animal health details" : "Remover os campos de castração dos detalhes de saúde do animal",
"Remove the online form functionality from menus" : "Remover dos menus a funcionalidade de formulários online",
"Remove the picked up fields from animal entry details" : "Remover os campos de recolha dos detalhes de introdução de animais",
"Remove the rota functionality from menus and screens" : "Remover a funcionalidade de turnos dos menus e ecrãs",
"Remove the size field from animal details" : "Remover o campo de tamanho dos detalhes do animal",
"Remove the stock control functionality from menus and screens" : "Remover a funcionalidade de gestão de stocks dos menus e ecrãs",
"Remove the tattoo fields from animal identification details" : "Remover os campos de tatuagem dos detalhes de identificação de animais",
"Remove the transport functionality from menus and screens" : "Remover a funcionalidade de transporte dos menus e ecrãs",
"Remove the trap loan functionality from menus and screens" : "Remover a funcionalidade de empréstimo de armadilhas dos menus e ecrãs",
"Remove the weight field from animal details" : "Remover o campo de peso dos detalhes dos animais",
"Removed" : "Removido",
"Rename" : "Mudar o nome",
"Renew License" : "Renovar Licença",
"Renew licence" : "Renovar licença",
"Renew license" : "Renovar licença",
"Replies to the fosterer email should go to" : "As respostas ao email da FAT devem ser encaminhadas para",
"Report" : "Relatório",
"Report Title" : "Título do Relatório",
"Report a new incident" : "Comunicar um novo incidente",
"Reports" : "Relatórios",
"Request Payment" : "Pedir Pagamento",
"Request payments in" : "Pedir pagamento em",
"Request signature by email" : "Solicitar assinatura por email",
"Requested" : "Solicitado",
"Require followup" : "Solicitar seguimento",
"Required" : "Toma em",
"Required date must be a valid date" : "A data prevista tem de ser uma data válida",
"Reschedule" : "Remarcar",
"Reschedule for" : "Remarcar para",
"Reservation" : "Reserva",
"Reservation Book" : "Livro de Reservas",
"Reservation Cancelled" : "Reserva Cancelada",
"Reservation Date" : "Data de Reserva",
"Reservation For" : "Reservado Para",
"Reservation Status" : "Estado de Reserva",
"Reservation Statuses" : "Estados de Reserva",
"Reservation book" : "Livro de reservas",
"Reservation date cannot be after cancellation date." : "A data de reserva não pode ser posterior à data de cancelamento.",
"Reservation successfully created." : "Reserva criada com êxito.",
"Reservations must have a valid reservation date." : "As reservas necessitam de uma data de reserva válida.",
"Reserve" : "Reservar",
"Reserve an animal" : "Reservar um animal",
"Reserved" : "Reservado",
"Reset" : "Repor",
"Reset Password" : "Repor a Senha",
"Reset password request" : "Pedido de reposição da senha",
"Respond" : "Responder",
"Responded" : "Respondido",
"Responded Between" : "Respondido Entre",
"Responded Date/Time" : "Data/Hora da Resposta",
"Restore" : "Restaurar",
"Result" : "Resultado",
"Results" : "Resultados",
"Results for '{0}'." : "Resultados para '{0}'.",
"Retailer" : "Lojista",
"Retailer Animals" : "Animais do Lojista",
"Retailer Book" : "Livro de Lojistas",
"Retailer book" : "Livro de lojistas",
"Retailer movement successfully created." : "Movimento de lojista criado com êxito.",
"Retailer movements must have a valid movement date." : "Os movimentos de lojista necessitam de uma data de movimento válida.",
"Retain Until" : "Reter Até",
"Retain for" : "Reter durante",
"Retain until {0}" : "Reter até {0}",
"Retest" : "Repetir teste",
"Retriever" : "Retriever",
"Return" : "Recuperação",
"Return Category" : "Categoria de Recuperação",
"Return Date" : "Data de Recuperação",
"Return a transferred animal" : "Recuperar um animal transferido",
"Return an animal from adoption" : "Recuperar um animal de uma adoção",
"Return an animal from another movement" : "Recuperar um animal de outro movimento",
"Return an animal from transfer" : "Recuperar um animal de uma transferência",
"Return date cannot be before the movement date." : "A data de recuperação não pode ser anterior à data do movimento.",
"Return this movement and bring the animal back to the shelter" : "Recuperar este movimento e trazer o animal de volta para o abrigo",
"Returned" : "Devolvido",
"Returned By" : "Devolvido Por",
"Returned To Owner" : "Devolvido ao Dono",
"Returned from" : "Recuperado de",
"Returned to" : "Devolvido a",
"Returned to Owner {0}" : "Devolvido ao Dono {0}",
"Returning" : "A devolver",
"Returns {0}" : "Devoluções {0}",
"Reupload animal images every time" : "Reenviar sempre as imagens dos animais",
"Rex" : "Rex",
"Rhea" : "Rhea",
"Rhinelander" : "Rhinelander",
"Rhodesian Ridgeback" : "Leão da Rodésia",
"Right-click on the map to change the marker location" : "Clique com o botão direito no mapa para alterar a localização do marcador",
"Ringneck/Psittacula" : "Ringneck/Psittacula",
"Role is in use and cannot be deleted." : "A função está a ser utilizada e não pode ser eliminada.",
"Roles" : "Funções",
"Roles need a name." : "As funções necessitam de um nome.",
"Rosella" : "Rosella",
"Rostered day off" : "Dia de folga",
"Rota" : "Turnos",
"Rota Types" : "Tipos de Turnos",
"Rota cloned successfully." : "Turno clonado com êxito.",
"Rotate image 90 degrees anticlockwise" : "Rodar a imagem 90 graus no sentido contrário ao dos ponteiros do relógio",
"Rotate image 90 degrees clockwise" : "Rodar a imagem 90 graus no sentido dos ponteiros do relógio",
"Rottweiler" : "Rottweiler",
"Rough" : "Áspero",
"Rows" : "Linhas",
"Ruddy" : "Avermelhado",
"Russian Blue" : "Azul Russo",
"S (Stray Cat)" : "V (Gato Vadio)",
"S = first letter of animal species" : "S = primeira letra da espécie animal",
"SM Account" : "Conta SM",
"SMS" : "SMS",
"SQL" : "SQL",
"SQL Interface" : "Interface SQL",
"SQL dump" : "Dump SQL",
"SQL dump (ASM2 HSQLDB Format)" : "Dump SQL (Formato HSQLDB do ASM2)",
"SQL editor: Press F11 to go full screen and press CTRL+SPACE to autocomplete table and column names" : "Editor SQL: prima F11 para mudar para ecrã completo e prima CTRL + ESPAÇO para preencher automaticamente os nomes das tabelas e colunas",
"SQL interface" : "Interface SQL",
"SQL is syntactically correct." : "SQL sintaticamente correto.",
"SS = first and second letter of animal species" : "SS = primeira e segunda letras da espécie animal",
"Sa" : "Sáb",
"Saddlebred" : "Saddlebred Americano",
"Saint Bernard St. Bernard" : "São Bernardo",
"Sales Tax" : "IVA",
"Saluki" : "Saluki",
"Samoyed" : "Samoiedo",
"Sat" : "Sáb",
"Satin" : "Cetim",
"Saturday" : "Sábado",
"Save" : "Guardar",
"Save and leave" : "Guardar e sair",
"Save this animal" : "Guardar este animal",
"Save this incident" : "Guardar este incidente",
"Save this person" : "Guardar esta pessoa",
"Save this record" : "Guardar este registo",
"Save this waiting list entry" : "Guardar esta entrada de lista de espera",
"Saving..." : "A guardar...",
"Scale published animal images to" : "Redimensionar as imagens de animais publicadas para",
"Scheduled" : "Agendado",
"Schipperke" : "Schipperke",
"Schnauzer" : "Schnauzer",
"Scottish Deerhound" : "Deerhound Escocês",
"Scottish Fold" : "Fold Escocês",
"Scottish Terrier Scottie" : "Terrier Scottie Escocês",
"Script" : "Script",
"Seal" : "Cinzento Acastanhado",
"Sealyham Terrier" : "Sealyham Terrier",
"Search" : "Pesquisar",
"Search Results for '{0}'" : "Resultados de Pesquisa para '{0}'",
"Search returned {0} results." : "A pesquisa devolveu {0} resultados.",
"Search sort order" : "Ordenação da pesquisa",
"Searchable" : "Pesquisável",
"Second offence" : "Segunda infração",
"Select" : "Selecionar",
"Select a person" : "Selecionar uma pessoa",
"Select a person to attach this form to." : "Selecionar uma pessoa à qual anexar este formulário.",
"Select a person to merge into this record. The selected person will be removed, and their movements, diary notes, log entries, etc. will be reattached to this record." : "Selecionar uma pessoa para juntar a este registo. A pessoa selecionada será removida e os respetivos movimentos, anotações de diário, entradas de registo, etc. serão redirecionados para este registo.",
"Select all" : "Selecionar tudo",
"Select an animal" : "Selecionar um animal",
"Select an animal to attach this form to." : "Selecione um animal ao qual anexar este formulário.",
"Select an animal to merge into this record. The selected animal will be removed, and their movements, diary notes, log entries, etc. will be reattached to this record." : "Selecionar um animal para juntar a este registo. O animal selecionado será removido e os respetivos movimentos, anotações de diário, entradas de registo, etc. serão redirecionados para este registo.",
"Select animal to merge" : "Selecione um animal para juntar",
"Select animals" : "Selecionar animais",
"Select date for diary task" : "Selecionar a data da tarefa do diário",
"Select person to merge" : "Selecionar a pessoa a juntar",
"Select recommended" : "Selecionar recomendados",
"Selected On-Shelter Animals" : "Animais do Abrigo Selecionados",
"Selkirk Rex" : "Selkirk Rex",
"Send" : "Enviar",
"Send Emails" : "Enviar Emails",
"Send a weekly email to fosterers with medical information about their animals" : "Enviar um email semanal às FATs com informações clínicas sobre os respetivos animais",
"Send an email relating to this animal" : "Enviar um email relativo a este animal",
"Send an email request for payment via a payment processor" : "Enviar um email com um pedido de pagamento através de um processador de pagamentos",
"Send and include a copy of the form submission" : "Enviar e incluir uma cópia do formulário submetido",
"Send confirmation email to form submitter" : "Enviar email de confirmação ao remetente do formulário",
"Send confirmation message only" : "Enviar apenas mensagem de confirmação",
"Send email" : "Enviar email",
"Send emails" : "Enviar emails",
"Send mass emails and perform mail merges" : "Enviar emails em lote e elaborar mailings",
"Send via email" : "Enviar por email",
"Sending {0} emails is considered abusive and will damage the reputation of the email server." : "O envio de {0} emails é considerado abusivo e irá prejudicar a reputação do servidor de email.",
"Sending..." : "A enviar...",
"Senior" : "Sénior",
"Sent to mobile signing pad." : "Enviado para o bloco de assinatura móvel.",
"Sep" : "Set",
"Separate waiting list rank by species" : "Separar posição na lista de espera por espécies",
"September" : "Setembro",
"Server clock adjustment" : "Ajuste do relógio do servidor",
"Set publishing options" : "Definir opções de publicação",
"Set this to 0 to never automatically remove." : "Defina esta opção como 0 para nunca remover automaticamente.",
"Set to 0 to never update urgencies." : "Defina como 0 para nunca atualizar urgências.",
"Set wether or not this user account can log in to the user interface." : "Definir se esta conta de utilizador pode iniciar uma sessão na interface de utilizador.",
"Setter" : "Setter",
"Setting a location filter will prevent this user seeing animals who are not in these locations on shelterview, find animal and search." : "Pode definir um filtro de localização para evitar que este utilizador veja animais que não se encontram nestes locais da Vista de Abrigo, encontre animais e utilize a pesquisa.",
"Settings" : "Definições",
"Sex" : "Sexo",
"Sex and Species" : "Sexo e Espécie",
"Sex is female" : "Sexo feminino",
"Sex is male" : "Sexo masculino",
"Sexes" : "Sexos",
"Shar Pei" : "Shar Pei",
"Share" : "Partilhar",
"Shared email" : "Email partilhado",
"Shared photo" : "Foto partilhada",
"Shared weblink" : "Link de página partilhado",
"Shares" : "Partilhas",
"Sheep" : "Ovelha",
"Sheep Dog" : "Cão Pastor",
"Shelter" : "Abrigo",
"Shelter Animal" : "Animal do Abrigo",
"Shelter Animals" : "Animais do Abrigo",
"Shelter Details" : "Detalhes do Abrigo",
"Shelter animal {0} '{1}'" : "Animal do abrigo {0} '{1}'",
"Shelter animals" : "Animais do abrigo",
"Shelter code cannot be blank" : "O código do abrigo não pode estar vazio",
"Shelter code {0} has already been allocated to another animal." : "O código de abrigo {0} já foi atribuído a outro animal.",
"Shelter stats (all time)" : "Estatísticas do abrigo (desde sempre)",
"Shelter stats (this month)" : "Estatísticas do abrigo (este mês)",
"Shelter stats (this week)" : "Estatísticas do abrigo (esta semana)",
"Shelter stats (this year)" : "Estatísticas do abrigo (este ano)",
"Shelter stats (today)" : "Estatísticas do abrigo (hoje)",
"Shelter view" : "Vista de abrigo",
"Shepherd" : "Pastor",
"Shetland Sheepdog Sheltie" : "Cão Pastor Shetland Sheltie",
"Shiba Inu" : "Shiba Inu",
"Shift" : "Mudança",
"Shih Tzu" : "Shih Tzu",
"Short" : "Curto",
"Show GDPR Contact Opt-In field on person screens" : "Mostrar campo de Consentimento de Contacto do RGPD em ecrãs de pessoas",
"Show ID numbers when editing lookup data" : "Mostrar números de ID ao editar dados de consultas",
"Show If" : "Mostrar Se",
"Show PDF files inline instead of sending them as attachments" : "Mostrar os ficheiros PDF no corpo da mensagem em vez de enviá-los como anexos",
"Show a cost field on medical/test/vaccination screens" : "Mostrar um campo de custo nos ecrãs clínica/testes/vacinações",
"Show a minimap of the address on person screens" : "Mostrar um mini mapa do endereço nos ecrãs de pessoas",
"Show a separate paid date field with costs" : "Mostrar um campo de data de pagamento separado com os custos",
"Show a warning when viewing this animal" : "Mostrar um aviso ao apresentar este animal",
"Show alerts on the home page" : "Mostrar alertas na página inicial",
"Show an alert when these species of animals are not altered" : "Mostrar um alerta se estas espécies de animais não forem esterilizadas/castradas",
"Show an alert when these species of animals are not microchipped" : "Mostrar um alerta se estas espécies de animais não forem microchipadas",
"Show an alert when these species of animals do not have a rabies vaccination" : "Mostrar um aviso se esta espécie animal não tiver uma vacinação contra a raiva",
"Show animal thumbnails in movement and medical books" : "Mostrar miniaturas dos animais em movimento e os livros clínicos",
"Show animals adopted" : "Mostrar animais adotados",
"Show codes on the shelter view screen" : "Mostrar códigos no ecrã da vista de abrigo",
"Show complete comments in table views" : "Mostrar comentários completos nas vistas de tabela",
"Show empty locations" : "Mostrar locais vazios",
"Show on new record screens" : "Mostrar em novos ecrãs de registo",
"Show pink and blue borders around animal thumbnails to indicate sex" : "Mostrar molduras cor de rosa e azuis em torno das miniaturas dos animais para indicar o sexo",
"Show quick links on all pages" : "Mostrar links rápidos em todas as páginas",
"Show quick links on the home page" : "Mostrar links rápidos na página inicial",
"Show record views in the audit trail" : "Mostrar vistas de registo no registo de auditoria",
"Show report menu items in collapsed categories" : "Mostrar itens de menu de relatório em categorias reduzidas",
"Show short shelter codes on screens" : "Mostrar códigos de abrigo abreviados nos ecrãs",
"Show the adoption fee field" : "Não existem adoções em ficheiro",
"Show the altered fields" : "Mostrar os campos de esterilização/castração",
"Show the breed fields" : "Mostrar os campos da raça",
"Show the brought in by field" : "Mostrar o campo trazido por",
"Show the color field" : "Mostrar o campo de cor",
"Show the date brought in field" : "Mostrar o campo de data em que foi trazido",
"Show the entry category field" : "Mostrar o campo de categoria de entrada",
"Show the full diary (instead of just my notes) on the home page" : "Mostrar o diário completo (em vez de apenas as minhas anotações) na página inicial",
"Show the hold fields" : "Mostrar os campos de retenção",
"Show the internal location field" : "Mostrar o campo de localização interna",
"Show the jurisdiction field" : "Mostrar o campo de jurisdição",
"Show the litter ID field" : "Mostrar o campo de ID da ninhada",
"Show the location unit field" : "Mostrar o campo da secção de localização",
"Show the microchip fields" : "Mostrar os campos de microchip",
"Show the original owner field" : "Mostrar o campo de dono original",
"Show the size field" : "Mostrar o campo de tamanho",
"Show the tattoo fields" : "Mostrar os campos de tatuagem",
"Show the time brought in field" : "Mostrar o campo de hora em que foi trazido",
"Show the transfer in field" : "Mostrar o campo de transferência para o abrigo",
"Show the weight field" : "Mostrar o campo de peso",
"Show timeline on the home page" : "Mostrar a cronologia na página inicial",
"Show tips on the home page" : "Mostrar dicas na página inicial",
"Show transactions from" : "Mostrar transações desde",
"Show weights as decimal lb" : "Mostrar os pesos em libras decimais",
"Show weights as lb and oz" : "Mostrar pesos como libras e onças",
"Showing {0} timeline events." : "A mostrar {0} eventos da cronologia",
"Siamese" : "Siamês",
"Siberian" : "Siberiano",
"Siberian Husky" : "Husky Siberiano",
"Sick leave" : "Atestado médico",
"Sick/Injured" : "Doente/Ferido",
"Sick/injured animal" : "Animal doente/ferido",
"Sign" : "Assinar",
"Sign document" : "Assinar documento",
"Sign on screen" : "Assine no ecrã",
"Signature" : "Assinatura",
"Signed" : "Assinado",
"Signed Document" : "Documento Assinado",
"Signing" : "A assinar",
"Signing Pad" : "Bloco de Assinatura",
"Signup" : "Inscrição",
"Silky Terrier" : "Terrier Sedoso",
"Silver" : "Prata",
"Silver Fox" : "Fox Prata",
"Silver Marten" : "Marten Prata",
"Similar Animal" : "Animal Semelhante",
"Similar Person" : "Pessoa Semelhante",
"Simple" : "Simples",
"Singapura" : "Singapura",
"Single Treatment" : "Tratamento Único",
"Site" : "Local",
"Site matches current user" : "Local corresponde ao utilizador atual",
"Sites" : "Locais",
"Size" : "Tamanho",
"Size is {0}" : "O tamanho é {0}",
"Sizes" : "Tamanhos",
"Skunk" : "Skunk",
"Skye Terrier" : "Skye Terrier",
"Sloughi" : "Sloughi",
"Small" : "Pequeno",
"SmartTag PETID" : "SmartTag PETID",
"Smooth Fox Terrier" : "Smooth Fox Terrier",
"Snake" : "Cobra",
"Snowshoe" : "Raquete de neve",
"Social" : "Social",
"Soft release" : "Libertação controlada",
"Soft release book" : "Livro de libertações controladas",
"Soft release ends on" : "A libertação controlada termina em",
"Softbill (Other)" : "Pássaro que come frutas (Outro)",
"Sold" : "Vendido",
"Somali" : "Somali",
"Some batch processes may take a few minutes to run and could prevent other users being able to use the system for a short time." : "Alguns processos em lote podem demorar vários minutos a serem executados e podem impedir a utilização do sistema pelos outros utilizadores por um curto período de tempo.",
"Some browsers allow shortcut keys, press SHIFT+ALT+A in Chrome or Firefox to jump to the animal adoption screen." : "Alguns browsers permitem a utilização de teclas de atalho, prima SHIFT + ALT + A no Chrome ou no Firefox para abrir o ecrã de adoção de animais.",
"Some info text" : "Algum texto de informação",
"Sorrel" : "Castanho Avermelhado",
"Sorrel Tortoiseshell" : "Atartarugado Castanho Avermelhado",
"Sorry, this document has already been signed" : "Este documento já foi assinado",
"Sort" : "Ordenar",
"South Russian Ovcharka" : "Ovcharka do Sul da Rússia",
"Spaniel" : "Spaniel",
"Special Needs" : "Necessidades Especiais",
"Species" : "Espécie",
"Species A-Z" : "Espécies A-Z",
"Species Z-A" : "Espécies Z-A",
"Species and Breed" : "Espécie e Raça",
"Species and Code" : "Espécie e Código",
"Species and Color" : "Espécie e Cor",
"Species is {0}" : "A espécie é {0}",
"Species to use when publishing to third party services and adoption sites" : "Espécie a utilizar ao publicar informações em serviços e sites de adoção de terceiros",
"Specify a unique code to identify this voucher" : "Especificar um código exclusivo para identificar este voucher",
"Specifying a reschedule date will make copies of the selected vaccinations and mark them to be given on the reschedule date. Example: If this vaccination needs to be given every year, set the reschedule date to be 1 year from today." : "Pode especificar uma data de remarcação para criar cópias das vacinas selecionadas e assinalá-las para serem administradas na data de remarcação. Por exemplo, se esta vacina precisar ser administrada todos os anos, defina a data de remarcação para daqui a 1 ano.",
"Sphynx (hairless cat)" : "Sphynx (gato sem pelo)",
"Spitz" : "Spitz",
"Split baby/adult age at" : "Separar idade de bebé/adulto em",
"Split species pages with a baby/adult prefix" : "Separar páginas de espécies com um prefixo de bebé/adulto",
"Sponsorship donations" : "Donativos de padrinhos",
"Staff" : "Equipa",
"Staff Rota" : "Turnos da Equipa",
"Staff record" : "Registo de pessoal",
"Staff rota" : "Turnos de pessoal",
"Staffordshire Bull Terrier" : "Staffordshire Bull Terrier",
"Standard" : "Padrão",
"Standardbred" : "Raça Padrão",
"Start Date" : "Data de Início",
"Start Of Day" : "Início do Dia",
"Start Time" : "Hora de Início",
"Start at" : "Iniciar em",
"Start date" : "Data de início",
"Start date must be a valid date" : "A data de início tem de ser uma data válida",
"Start of year" : "Início do ano",
"Started" : "Iniciado",
"Starts" : "Tem início em",
"State" : "Distrito",
"State contains" : "Distrito contém",
"Stationary costs" : "Custos com estacionário",
"Stats" : "Estatísticas",
"Stats period" : "Período das estatísticas",
"Stats show running figures for the selected period of animals entering and leaving the shelter on the home page." : "As estatísticas apresentam os valores acumulados para o período selecionado de animais que entram e saem do abrigo na página inicial.",
"Status" : "Estado",
"Status and Species" : "Estado e Espécie",
"Stay" : "Permanecer",
"Stock" : "Stock",
"Stock Control" : "Controlo de Stocks",
"Stock Levels" : "Níveis de Stocks",
"Stock Locations" : "Locais de Armazenamento de Stocks",
"Stock Take" : "Retirada de Stock",
"Stock Usage Type" : "Tipo de Utilização de Stock",
"Stock level must have a name" : "O nível de stock necessita de um nome",
"Stock level must have a unit" : "O nível de stock necessita de uma secção",
"Stock needs a name and unit." : "O stock necessita de um nome e de uma unidade.",
"Stocktake" : "Retirada de stock",
"Stolen" : "Roubado",
"Stolen {0}" : "Roubado {0}",
"Stop" : "Parar",
"Stop Publishing" : "Parar a Publicação",
"Stores" : "Lojas",
"Stray" : "Errante",
"Stripe" : "Stripe",
"Stripe Key" : "Chave do Stripe",
"Stripe Secret Key" : "Chave Secreta do Stripe",
"Su" : "Dom",
"SubTotal" : "Subtotal",
"Subject" : "Assunto",
"Submission received: {0}" : "Requerimento recebido: {0}",
"Success" : "Êxito",
"Successful token charge." : "Débito com token efetuado com êxito.",
"Successfully attached to {0}" : "Anexado com êxito a {0}",
"Successfully copied to the clipboard." : "Copiado com êxito para a área de transferência.",
"Sugar Glider" : "Sugar Glider",
"Sun" : "Dom",
"Sunday" : "Domingo",
"Super user" : "Super utilizador",
"Superuser" : "Superutilizador",
"Surname" : "Apelido",
"Surrender" : "Restituição",
"Surrender Pickup" : "Recolha de Restituição",
"Suspect" : "Suspeito",
"Suspect 1" : "Suspeito 1",
"Suspect 2" : "Suspeito 2",
"Suspect 3" : "Suspeito 3",
"Suspect/Animal" : "Suspeito/Animal",
"Swan" : "Cisne",
"Swedish Vallhund" : "Vallhund Sueco",
"Syntax check this SQL" : "Verificar a sintaxe deste SQL",
"System" : "Sistema",
"System Admin" : "Administrador de Sistema",
"System Options" : "Opções de Sistema",
"System user accounts" : "Contas de utilizador de sistema",
"T = first letter of animal type" : "T = primeira letra do tipo de animal",
"TNR" : "CCL",
"TNR - Trap/Neuter/Release" : "CCL - Capturar/Castrar/Libertar",
"TT = first and second letter of animal type" : "TT = primeira e segunda letras do tipo de animal",
"Tabby" : "Malhado",
"Tabby and White" : "Malhado e Branco",
"Table" : "Tabela",
"Take another payment" : "Receber outro pagamento",
"Taken By" : "Recebido Por",
"Tan" : "Bronze",
"Tan and Black" : "Bronze e Preto",
"Tan and White" : "Bronze e Branco",
"Task complete." : "Tarefa concluída.",
"Task items are executed in order of index, lowest to highest" : "Os itens da tarefa são executados pela ordem do índice, do menor para o maior",
"Tattoo" : "Tatuagem",
"Tattoo Date" : "Data da Tatuagem",
"Tattoo Number" : "Número de Tatuagem",
"Tax" : "Imposto",
"Tax Amount" : "Total de Impostos",
"Tax Rate %" : "Taxa de Imposto (%)",
"Telephone" : "Telefone",
"Telephone Bills" : "Contas de Telefone",
"Template" : "Modelo",
"Template Name" : "Nome do Modelo",
"Template names can include a path portion with /, eg: Vets/Rabies Certificate" : "Os nomes de modelos podem incluir um caminho com /, por exemplo: Certificado de Veterinários/Raiva",
"Tennessee Walker" : "Walker do Tennessee",
"Terrapin" : "Terrapin",
"Terrier" : "Terrier",
"Test" : "Testes",
"Test Animal" : "Testar Animal",
"Test Book" : "Livro de Testes",
"Test Performed" : "Teste Realizado",
"Test Results" : "Resultado do Teste",
"Test Types" : "Tipos de Teste",
"Test book" : "Livro de testes",
"Test marked as performed for {0} - {1}" : "Teste sinalizado como executado para {0} - {1}",
"Tests" : "Testes",
"Tests need an animal and at least a required date." : "Os testes necessitam de um animal e de, pelo menos, uma data para a realização dos mesmos.",
"Text" : "Texto",
"Text Encoding" : "Codificação do Texto",
"Th" : "Qui",
"Thai Ridgeback" : "Thai Ridgeback",
"Thank you for choosing Animal Shelter Manager for your shelter!" : "Obrigado por ter escolhido o Animal Shelter Manager para o seu abrigo!",
"Thank you, the document is now signed." : "Obrigado, o documento agora está assinado.",
"That animal is already linked to the incident" : "O animal em questão já se encontra associado ao incidente",
"The ASM password for {0} has been reset to:" : "A senha do ASM para {0} foi reposta para:",
"The CSV file should be created by PayPal's \"All Activity\" report." : "O ficheiro CSV deverá ser criado através do relatório \"All Activity\" do PayPal.",
"The SmartTag PETID number" : "O número SmartTag PETID",
"The SmartTag type" : "O tipo de SmartTag",
"The URL is the address of a web resource, eg: www.youtube.com/watch?v=xxxxxx" : "O URL corresponde ao endereço de um recurso Web, por exemplo: www.youtube.com/watch?v=xxxxxx",
"The animal name" : "O nome do animal",
"The animal record to merge must be different from the original." : "O registo de animal a juntar deve ser diferente do original.",
"The animal sex" : "O sexo do animal",
"The base color of this animal" : "A cor base deste animal",
"The coat type of this animal" : "O tipo de pelagem deste animal",
"The confirmation email message to send to the form submitter." : "A mensagem de email de confirmação que é enviada ao remetente do formulário.",
"The database will be inaccessible to all users while the export is in progress." : "A base de dados vai ficar inacessível a todos os utilizadores enquanto a exportação estiver em curso.",
"The date reported to the shelter" : "A data comunicada ao abrigo",
"The date the animal died" : "A data em que o animal morreu",
"The date the animal was FIV/L tested" : "Data em que o animal foi testado para FIV/L",
"The date the animal was adopted" : "A data em que o animal foi adotado",
"The date the animal was altered" : "A data em que o animal foi esterilizado/castrado",
"The date the animal was born" : "A data de nascimento do animal",
"The date the animal was brought into the shelter" : "A data em que o animal foi admitido no abrigo",
"The date the animal was heartworm tested" : "Data em que o animal foi testado para dirofilariose",
"The date the animal was microchipped" : "A data em que o animal foi microchipado",
"The date the animal was reclaimed" : "A data em que o animal foi reivindicado",
"The date the animal was tattooed" : "A data em que o animal foi tatuado",
"The date the foster animal will be returned if known" : "A data em que o animal em FAT vai ser devolvido, se for conhecida",
"The date the foster is effective from" : "A data de entrada em vigor do acolhimento por FAT",
"The date the litter entered the shelter" : "A data em que a ninhada foi admitida no abrigo",
"The date the owner last contacted the shelter" : "A data do último contacto do dono com o abrigo",
"The date the payment was received" : "A data em que o pagamento foi recebido",
"The date the reservation is effective from" : "A data de entrada em vigor da reserva",
"The date the retailer movement is effective from" : "A data de entrada em vigor do movimento de lojista",
"The date the transfer is effective from" : "A data de entrada em vigor da transferência",
"The date the trial adoption is over" : "A data de fim da adoção experimental",
"The date the vaccination is required/due to be administered" : "Data em que a vacina precisa de ser administrada/deve ser administrada",
"The date the vaccination was administered" : "Data em que a vacina foi administrada",
"The date this animal was found" : "A data em que este animal foi encontrado",
"The date this animal was lost" : "A data em que este animal foi perdido",
"The date this animal was put on the waiting list" : "A data em que este animal foi colocado na lista de espera",
"The date this animal was removed from the waiting list" : "A data em que este animal foi retirado da lista de espera",
"The date this animal was reserved" : "A data em que este animal foi reservado",
"The date this animal was returned to its owner" : "A data em que este animal foi devolvido ao respetivo dono",
"The date this person was homechecked." : "A data em que o domicílio desta pessoa foi inspecionado.",
"The date this voucher was used" : "A data em que o voucher foi utilizado",
"The default username is 'user' with the password 'letmein'" : "O nome de utilizador padrão é 'user' e a senha 'letmein'",
"The entry reason for this animal" : "O motivo da admissão deste animal",
"The litter this animal belongs to" : "A ninhada à qual pertence este animal",
"The locale determines the language ASM will use when displaying text, dates and currencies." : "A localização define o idioma que o ASM irá utilizar para apresentar o texto, as datas e a moeda.",
"The location where the animal was picked up" : "O local onde o animal foi recolhido",
"The microchip number" : "O número do microchip",
"The movement number '{0}' is not unique." : "O número de movimento '{0}' não é exclusivo.",
"The number of stock records to create" : "O número de registos de stock a criar",
"The period in days before waiting list urgency is increased" : "O período, em dias, antes de aumentar a urgência da lista de espera",
"The person record to merge must be different from the original." : "O registo de pessoa a juntar tem de ser diferente do original.",
"The primary breed of this animal" : "A raça principal deste animal",
"The reason the owner wants to part with the animal" : "O motivo pelo qual o dono se quer separar do animal",
"The reason this animal was removed from the waiting list" : "O motivo pelo qual este animal foi removido da lista de espera",
"The remaining units in the container" : "As unidades restantes no contentor",
"The result of the FIV test" : "O resultado do teste FIV",
"The result of the FLV test" : "O resultado do teste FLV",
"The result of the heartworm test" : "O resultado do teste de dirofilariose",
"The retail/resale price per unit" : "O preço de retalho/revenda, por unidade",
"The secondary breed of this animal" : "A raça secundária deste animal",
"The selected file is not an image." : "O ficheiro selecionado não é uma imagem.",
"The shelter category for this animal" : "A categoria de abrigo para este animal",
"The shelter reference number" : "O número de referência do abrigo",
"The sheltermanager.com admin account password cannot be changed here, please visit {0}" : "A senha da conta de administrador do sheltermanager.com não pode ser alterada aqui, por favor visite {0}",
"The sheltermanager.com admin account password cannot be reset here, please visit {0}" : "A senha da conta de administrador do sheltermanager.com não pode ser reposta aqui, visite {0}",
"The size of this animal" : "O tamanho deste animal",
"The species of this animal" : "A espécie deste animal",
"The tattoo number" : "O número de tatuagens",
"The type of unit in the container, eg: tablet, vial, etc." : "O tipo de unidade do contentor, por exemplo: carteira, frasco, etc.",
"The veterinary license number." : "O número de referência do veterinário.",
"The wholesale/trade price the container was bought for" : "O preço de venda em quantidade/de mercado ao qual o contentor foi adquirido",
"There is not enough information in the form to attach to a shelter animal record (need an animal name)." : "Não existem informações suficientes no formulário para que possa ser anexado a um registo de animal do abrigo (é necessário um nome de animal).",
"There is not enough information in the form to create a found animal record (need a description and area found)." : "Não existe informação suficiente no formulário para a criação de um registo de animal encontrado (são necessárias uma descrição e uma área onde foi encontrado).",
"There is not enough information in the form to create a lost animal record (need a description and area lost)." : "Não existe informação suficiente no formulário para a criação de um registo de animal perdido (são necessárias uma descrição e uma área onde foi perdido).",
"There is not enough information in the form to create a person record (need a surname)." : "Não existem informações suficientes no formulário para criar um registo de pessoa (é necessário um apelido).",
"There is not enough information in the form to create a transport record (need animalname)." : "Não existem informações suficientes no formulário para criar um registo de transporte (é necessário o nome do animal).",
"There is not enough information in the form to create a transport record (need pickupdate and dropoffdate)." : "Não existem informações suficientes no formulário para criar um registo de transporte (são necessárias a data de recolha e a data de entrega).",
"There is not enough information in the form to create a waiting list record (need a description)." : "Não existem informações suficientes no formulário para criar um registo de lista de espera (é necessária uma descrição).",
"There is not enough information in the form to create an animal record (need animalname)." : "Não existem informações suficientes no formulário para criar um registo de animal (é necessário o nome do animal).",
"There is not enough information in the form to create an incident record (need call notes and dispatch address)." : "Não existem informações suficientes no formulário para criar um registo de incidente (são necessárias as anotações da chamada e a morada de comparência).",
"These are the HTML headers and footers used when displaying online forms." : "Estes são os cabeçalhos e rodapés de HTML utilizados para apresentar os formulários online.",
"These are the HTML headers and footers used when generating reports." : "Estes são os cabeçalhos e rodapés de HTML utilizados para gerar relatórios.",
"These are the default values for these fields when creating new records." : "Estes são os valores padrão destes campos para a criação de novos registos.",
"These batch processes are run each night by the system and should not need to be run manually." : "Estes processos em lote são executados todas as noites pelo sistema e não devem precisar de ser executados manualmente.",
"These fields allow you to deduct stock for the test(s) given. This single deduction should cover the selected tests being performed." : "Estes campos permitem-lhe deduzir stocks para o(s) teste(s) administrado(s). Esta dedução única deve cobrir os testes selecionados a realizar.",
"These fields allow you to deduct stock for the treatment(s) given. This single deduction should cover the selected treatments being administered." : "Estes campos permitem-lhe deduzir stocks para o(s) tratamento(s) administrado(s). Esta dedução única deve cobrir os tratamentos selecionados a administrar.",
"These fields allow you to deduct stock for the vaccination(s) given. This single deduction should cover the selected vaccinations being administered." : "Estes campos permitem-lhe deduzir stocks para a(s) vacinação(ões) administrada(s). Esta dedução única deve cobrir as vacinações selecionadas a administrar.",
"These fields determine which columns are shown on the find animal and find person screens." : "Estes campos determinam as colunas a apresentar nos ecrãs de pesquisa de animais e de pessoas.",
"These numbers are for shelters who have agreements with insurance companies and are given blocks of policy numbers to allocate." : "Estes números referem-se a abrigos que disponham de acordos com seguradoras e que recebam blocos de números de apólices para atribuir.",
"These options change the behaviour of the search box at the top of the page." : "Estas opções alteram o comportamento da caixa de pesquisa, localizada na parte superior da página.",
"These values are required for correct operation of the system. ONLY change them if you are translating to another language." : "Estes valores são necessários ao correto funcionamento do sistema. APENAS as deve alterar se estiver a traduzir o software para outro idioma.",
"Third offence" : "Terceira infração",
"This Month" : "Este Mês",
"This Week" : "Esta Semana",
"This Year" : "Este Ano",
"This animal already has an active reservation." : "Este animal já tem uma reserva ativa.",
"This animal has a SmartTag PETID" : "Este animal tem um SmartTag PETID",
"This animal has a tattoo" : "Este animal tem uma tatuagem",
"This animal has active reservations, they will be cancelled." : "Este animal tem reservas ativas, as quais vão ser canceladas.",
"This animal has an adoption fee of {0}" : "Este animal tem uma taxa de adoção de {0}",
"This animal has been FIV/L tested" : "Este animal foi testado para FIV/L",
"This animal has been altered" : "Este animal foi esterilizado/castrado",
"This animal has been declawed" : "Este animal foi submetido a remoção de garras",
"This animal has been heartworm tested" : "Este animal foi testado para dirofilariose",
"This animal has movements and cannot be removed." : "Este animal tem movimentos e não pode ser removido.",
"This animal has not been altered." : "Este animal não foi esterilizado/castrado.",
"This animal has not been microchipped." : "Este animal não tem microchip.",
"This animal has special needs" : "Este animal tem necessidades especiais",
"This animal has the same name as another animal recently added to the system." : "Este animal tem o mesmo nome que outro animal recentemente adicionado ao sistema.",
"This animal is a crossbreed" : "Este animal é arraçado",
"This animal is bonded with {0}" : "Este animal é par/companheiro de {0}",
"This animal is bonded with {0}. Adoption movement records will be created for all bonded animals." : "Este animal é par/companheiro de {0}. Serão criados registos de movimentos de adoção para todos os animais que são pares/companheiros.",
"This animal is currently at a retailer, it will be automatically returned first." : "Este animal encontra-se de momento numa loja de animais, será automaticamente recuperado em primeiro lugar.",
"This animal is currently fostered, it will be automatically returned first." : "Este animal encontra-se atualmente numa FAT, será automaticamente recuperado em primeiro lugar.",
"This animal is currently held and cannot be adopted." : "Este animal encontra-se atualmente retido e não pode ser adotado.",
"This animal is currently quarantined and should not leave the shelter." : "Este animal encontra-se atualmente em quarentena e não deve sair do abrigo.",
"This animal is marked not for adoption." : "Este animal está sinalizado como não para adoção.",
"This animal is microchipped" : "Este animal está microchipado",
"This animal is not on the shelter." : "Este animal não se encontra no abrigo.",
"This animal is part of a cruelty case and should not leave the shelter." : "Este animal provém de um caso de crueldade e não deve sair do abrigo.",
"This animal should be held in case it is reclaimed" : "Este animal deverá ser retido caso seja reivindicado",
"This animal should not be shown in figures and is not in the custody of the shelter" : "Este animal não deve ser incluído nos valores e não se encontra sob a custódia do abrigo",
"This animal was dead on arrival to the shelter" : "Este animal estava morto quando chegou ao abrigo",
"This animal was euthanized" : "Este animal foi eutanasiado",
"This animal was picked up" : "Este animal foi recolhido",
"This animal was transferred from another shelter" : "Este animal foi transferido de outro abrigo",
"This code has already been used." : "Este código já foi utilizado.",
"This database is locked and in read-only mode. You cannot add, change or delete records." : "Esta base de dados está bloqueada e encontra-se no modo apenas de leitura. Não pode adicionar, alterar ou eliminar registos.",
"This database is locked." : "Esta base de dados está bloqueada.",
"This date of birth is an estimate" : "Esta data de nascimento é aproximada",
"This email address is the default From address when sending emails" : "Este endereço de email é o endereço \"De\" padrão para o envio de emails",
"This expense account is the source for costs of this type" : "Esta conta de despesas é a base para este tipo de custos",
"This income account is the source for payments received of this type" : "Esta conta de receitas é a base para pagamentos recebidos deste tipo",
"This is the gross payment amount, inclusive of any fees and taxes" : "Este é o montante bruto do pagamento, incluindo taxas e impostos",
"This item is referred to in the database ({0}) and cannot be deleted until it is no longer in use." : "Este elemento está registado na base de dados ({0}) e não pode ser eliminado até já não estar a ser utilizado.",
"This link will remain active for 10 minutes." : "Este link permanecerá ativo durante 10 minutos.",
"This many years after creation of a person record, the name, address and telephone data will be anonymized." : "Este número de anos após a criação de um registo de pessoa, o nome, morada e dados telefónicos serão anonimizados.",
"This month" : "Este mês",
"This movement cannot be from a retailer when the animal has no prior retailer movements." : "Este movimento não pode ter origem num lojista se o animal não tiver movimentos anteriores num lojista.",
"This payment has already been received" : "Este pagamento já foi recebido",
"This person has an animal control incident against them." : "Esta pessoa tem um incidente de controlo animal associado.",
"This person has been banned from adopting animals." : "Esta pessoa foi colocada na lista negra de adotantes de animais.",
"This person has been under investigation." : "Esta pessoa tem estado submetida a investigação",
"This person has movements and cannot be removed." : "Esta pessoa tem movimentos e não pode ser removida.",
"This person has not passed a homecheck." : "Esta pessoa não passou na inspeção domiciliária.",
"This person has payments and cannot be removed." : "Esta pessoa tem pagamentos e não pode ser removida.",
"This person has previously surrendered an animal." : "Esta pessoa restituiu previamente um animal.",
"This person is linked to a waiting list record and cannot be removed." : "Esta pessoa está associada a um registo da lista de espera e não pode ser removida.",
"This person is linked to an animal and cannot be removed." : "Esta pessoa está associada a um animal e não pode ser removida.",
"This person is linked to an investigation and cannot be removed." : "Esta pessoa está associada a uma investigação e não pode ser removida.",
"This person is linked to animal control and cannot be removed." : "Esta pessoa está associada a um controlo animal e não pode ser removida.",
"This person is linked to animal licenses and cannot be removed." : "Esta pessoa está associada a licenças de animais e não pode ser removida.",
"This person is linked to animal transportation and cannot be removed." : "Esta pessoa está associada ao transporte de animais e não pode ser removida.",
"This person is linked to citations and cannot be removed." : "Esta pessoa está associada a advertências e não pode ser removida.",
"This person is linked to found animals and cannot be removed." : "Esta pessoa está associada a animais encontrados e não pode ser removida.",
"This person is linked to lost animals and cannot be removed." : "Esta pessoa está associada a animais perdidos e não pode ser removida.",
"This person is linked to trap loans and cannot be removed." : "Esta pessoa está associada a empréstimos de armadilhas e não pode ser removida.",
"This person is not flagged as a fosterer and cannot foster animals." : "Esta pessoa não está assinalada como FAT e não poder acolher animais.",
"This person is not flagged as a retailer and cannot handle retailer movements." : "Esta pessoa não está assinalada como lojista e não pode gerir movimentos de retalho.",
"This person is very similar to another person on file, carry on creating this record?" : "Esta pessoa tem muitas semelhanças com outra pessoa em arquivo, deseja prosseguir com a criação deste registo?",
"This person lives in the same area as the person who brought the animal to the shelter." : "Esta pessoa habita na mesma área que a pessoa que trouxe o animal para o abrigo.",
"This record has been changed by another user, please reload." : "Este registo foi alterado por outro utilizador, precisa de recarregar.",
"This removal is permanent and cannot be reversed, are you absolutely sure you wish to do this?" : "Esta remoção é permanente e não pode ser revertida. Tem a certeza absoluta de que deseja prosseguir?",
"This report cannot be sent by email as it requires criteria to run." : "Este relatório não pode ser enviado por email porque requer a execução de critérios.",
"This screen allows you to add extra documents to your database, for staff training, reference materials, etc." : "Este ecrã permite-lhe adicionar documentos extra à sua base de dados para efeitos de formação de pessoal, materiais de referência, etc.",
"This screen allows you to add extra images to your database, for use in reports and documents." : "Este ecrã permite adicionar imagens extra à base de dados para efeitos de relatórios e documentos.",
"This text will be added to the bottom of all send email dialogs" : "Este texto será adicionado no final de todas as caixas de diálogo de envio de emails",
"This type of movement requires a date." : "Este tipo de movimento requer uma data.",
"This type of movement requires a person." : "Este tipo de movimento requer uma pessoa.",
"This week" : "Esta semana",
"This will permanently remove the selected animals, are you sure?" : "Este procedimento irá remover definitivamente os animais selecionados. Tem a certeza?",
"This will permanently remove the selected records, are you sure?" : "Este procedimento irá remover definitivamente os registos selecionados. Tem a certeza?",
"This will permanently remove the selected roles, are you sure?" : "Este procedimento irá Este procedimento irá remover definitivamente as funções selecionadas. Tem a certeza?",
"This will permanently remove the selected user accounts. Are you sure?" : "Este procedimento irá remover definitivamente as contas de utilizador selecionadas. Tem a certeza?",
"This will permanently remove this account and ALL TRANSACTIONS HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "Este procedimento irá remover definitivamente esta conta e TODAS AS TRANSAÇÕES SOBRE A MESMA e será irreversível. Tem a certeza de que quer prosseguir?",
"This will permanently remove this additional field and ALL DATA CURRENTLY HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "Este procedimento irá remover definitivamente este campo extra e TODOS OS DADOS A ELE ASSOCIADOS e será irreversível. Tem a certeza de que quer prosseguir?",
"This will permanently remove this animal, are you sure?" : "Este procedimento irá remover definitivamente este animal. Tem a certeza?",
"This will permanently remove this incident, are you sure?" : "Este procedimento irá remover definitivamente este incidente. Tem a certeza?",
"This will permanently remove this person, are you sure?" : "Este procedimento irá remover definitivamente esta pessoa. Tem a certeza?",
"This will permanently remove this record, are you sure?" : "Este procedimento irá remover definitivamente este registo. Tem a certeza?",
"This will permanently remove this waiting list entry, are you sure?" : "Este procedimento irá remover definitivamente esta entrada da lista de espera. Tem a certeza?",
"This will remove ALL rota entries for the week beginning {0}. This action is irreversible, are you sure?" : "Este procedimento irá remover TODAS as entradas de turnos para a semana com início em {0} e será irreversível. Tem a certeza?",
"This year" : "Este ano",
"Thoroughbred" : "Puro-sangue",
"Thu" : "Qui",
"Thumbnail size" : "Tamanho da miniatura",
"Thursday" : "Quinta-feira",
"Tibetan Mastiff" : "Mastim Tibetano",
"Tibetan Spaniel" : "Spaniel Tibetano",
"Tibetan Terrier" : "Terrier Tibetano",
"Tiger" : "Tigre",
"Time" : "Hora",
"Time Brought In" : "Hora de Admissão no Abrigo",
"Time On List" : "Tempo na Lista",
"Time On Shelter" : "Tempo no Abrigo",
"Time on list" : "Tempo na Lista",
"Time on shelter" : "Tempo no abrigo",
"Timeline" : "Cronologia",
"Timeline ({0})" : "Cronologia ({0})",
"Times should be in HH:MM format, eg: 09:00, 16:30" : "Os horários devem estar no formato HH:MM, por exemplo: 09:00, 16:30",
"Title" : "Título",
"Title First Last" : "Título Primeiro Último",
"Title Initials Last" : "Título Iniciais Último",
"To" : "Para",
"To Adoption" : "Para Adoção",
"To Fostering" : "Para Entrega a FAT",
"To Other" : "Para Outros",
"To Retailer" : "Para Lojista",
"To add people to the rota, create new person records with the staff or volunteer flag." : "Para adicionar pessoas ao turno, crie novos registos de pessoas com a sinalização de pessoal ou voluntário.",
"To address book" : "Para livro de endereços",
"To continue using ASM, please renew {0}" : "Para continuar a utilizar o ASM precisa de renovar {0}",
"To reset your ASM password, please follow this link:" : "Para repor a sua senha do ASM, siga o link abaixo:",
"To week beginning" : "Até à semana com início em",
"Today" : "Hoje",
"Toggle table/icon view" : "Alternar vista de tabela/ícone",
"Tonkinese" : "Tonkinese",
"Too Many Animals" : "Demasiados Animais",
"Tooltip" : "Balão explicativo",
"Top Margin" : "Margem Superior",
"Tortie" : "Tortie",
"Tortie and White" : "Tortie e Branco",
"Tortoise" : "Tartaruga",
"Tosa Inu" : "Tosa Inu",
"Total" : "Total",
"Total number of units in the container" : "Número total de unidades no contentor",
"Toucan" : "Tucano",
"Toy Fox Terrier" : "Toy Fox Terrier",
"Training" : "Formação",
"Transaction Fee" : "Taxa de Transação",
"Transaction fees" : "Taxas de transação",
"Transactions" : "Transações",
"Transactions need a date and description." : "As transações precisam de uma data e descrição.",
"Transfer" : "Transferir",
"Transfer In" : "Transferência de Entrada",
"Transfer To" : "Transferência Para",
"Transfer an animal" : "Transferir um animal",
"Transfer from Municipal Shelter" : "Transferência do Canil Municipal",
"Transfer from Other Shelter" : "Transferência de Outro Abrigo",
"Transfer successfully created." : "Adoção criada com êxito.",
"Transfer?" : "Transferir?",
"Transferred" : "Transferido",
"Transferred From" : "Transferido De",
"Transferred In" : "Transferido para o Abrigo",
"Transferred In {0}" : "Transferido para o Abrigo {0}",
"Transferred Out" : "Transferido para o Exterior",
"Transferred Out {0}" : "Transferido para o Exterior {0}",
"Transfers must have a valid transfer date." : "As transferências precisam de uma data de transferência válida.",
"Transport" : "Transporte",
"Transport Book" : "Livro de Transportes",
"Transport Statuses" : "Estados dos Transportes",
"Transport Types" : "Tipos de Transporte",
"Transport book" : "Livro de transportes",
"Transport requires an animal" : "O transporte requer um animal",
"Transports must have valid pickup and dropoff dates and times." : "Os transportes precisam de datas e horas de recolha e entrega válidas.",
"Trap Loans" : "Empréstimos de Armadilhas",
"Trap Number" : "Número de Armadilha",
"Trap Types" : "Tipos de Armadilhas",
"Trap loan" : "Empréstimo de armadilha",
"Trap loans" : "Empréstimos de armadilhas",
"Treat animals at retailers as part of the shelter inventory" : "Tratar animais em lojas como parte do inventário do abrigo",
"Treat foster animals as part of the shelter inventory" : "Tratar animais em FAT como parte do inventário do abrigo",
"Treat soft releases as part of the shelter inventory" : "Processar as libertações controladas como parte do inventário do abrigo",
"Treat trial adoptions as part of the shelter inventory" : "Tratar as adoções de teste como parte do inventário de abrigos",
"Treatment" : "Tratamento",
"Treatment Given" : "Tratamento Administrado",
"Treatment marked as given for {0} - {1}" : "Tratamento assinalado com administrado para {0} - {1}",
"Treatment name cannot be blank" : "O nome do tratamento não pode estar vazio",
"Treatments" : "Tratamentos",
"Treeing Walker Coonhound" : "Treeing Walker Coonhound",
"Trial" : "Experiência",
"Trial Adoption" : "Adoção Experimental",
"Trial adoption" : "Adoção experimental",
"Trial adoption book" : "Livro de adoções experimentais",
"Trial ends on" : "A experiência termina em",
"Tricolour" : "Tricolor",
"Trigger Batch Processes" : "Desencadear Processos em Lote",
"Tu" : "Ter",
"Tue" : "Ter",
"Tuesday" : "Terça-feira",
"Tumblr" : "Tumblr",
"Turkey" : "Turquia",
"Turkish Angora" : "Angorá Turco",
"Turkish Van" : "Van Turco",
"Turtle" : "Tartaruga",
"Twitter" : "Twitter",
"Type" : "Tipo",
"Type is {0}" : "O tipo é {0}",
"Type of animal links to show" : "Tipo de links de animais a apresentar",
"U (Unwanted Cat)" : "I (Gato indesejado)",
"UK Giftaid" : "UK Giftaid",
"URL" : "URL",
"UUUUUUUUUU or UUUU = unique number" : "UUUUUUUUUU ou UUUU = número exclusivo",
"Unable to Afford" : "Sem Capacidade Financeira",
"Unable to Cope" : "Não se Consegue Adaptar",
"Unaltered" : "Não esterilizado/castrado",
"Unaltered Adopted Animals" : "Animais Adotados Não Esterilizados/Castrados",
"Unaltered Dog - 1 year" : "Cão Não Esterilizado/Castrado - 1 ano",
"Unaltered Dog - 3 year" : "Cão Não Esterilizado/Castrado - 3 anos",
"Unavailable" : "Indisponível",
"Undelete" : "Reverter eliminação",
"Under {0} weeks old" : "Menos de {0} semanas de idade",
"Undo" : "Anular",
"Undo given treatments" : "Anular tratamentos administrados",
"Unit" : "Secção",
"Unit Price" : "Preço Unitário",
"Unit and Species" : "Unidade e Espécie",
"Unit within the location, eg: pen or cage number" : "Secção no perímetro do local, por exemplo gaiola ou jaula",
"Units" : "Unidades",
"Unknown" : "Desconhecido",
"Unknown microchip brand" : "Marca de microchip desconhecida",
"Unpaid Fines" : "Multas por Pagar",
"Unredeemed" : "Não Resgatado",
"Unreserved" : "Não Reservado",
"Unsaved Changes" : "Alterações Não Guardadas",
"Unspecified" : "Não Especificado",
"Unsuitable Accomodation" : "Alojamento Inadequado",
"Up for adoption" : "Disponíveis para adoção",
"Upcoming medical items" : "Itens clínicos para breve",
"Update" : "Atualizar",
"Update adoption websites every" : "Atualizar websites de adoções a cada",
"Update publishing options" : "Atualizar opções de publicação",
"Update system options" : "Atualizar opções do sistema",
"Update the current owner of this animal without returning and creating movements that affect your figures." : "Atualizar o dono atual deste animal sem uma devolução nem a criação de movimentos que alterem as suas contas.",
"Update the daily boarding cost for this animal" : "Atualizar o custo diário de alojamento para este animal",
"Updated" : "Atualizado",
"Updated database to version {0}" : "Base de dados atualizada para a versão {0}",
"Updated existing record" : "Atualizar registo existente",
"Updated." : "Atualizado.",
"Updating..." : "A atualizar...",
"Upload" : "Carregar",
"Upload Document" : "Carregar Documento",
"Upload ODT" : "Carregar ODT",
"Upload Photo" : "Carregar Foto",
"Upload a new OpenOffice template" : "Carregar um novo modelo do OpenOffice",
"Upload all available images for animals" : "Carregar todas as imagens disponíveis de animais",
"Upload an SQL script" : "Carregar um script SQL",
"Upload splash.jpg and logo.jpg to override the login screen image and logo at the top left of ASM." : "Carregue os ficheiros splash.jpg e logo.jpg para substituir a imagem do ecrã de início de sessão e o logótipo apresentado no canto superior esquerdo do ASM.",
"Uploading..." : "A carregar...",
"Urgencies" : "Urgências",
"Urgency" : "Urgência",
"Urgent" : "Urgente",
"Usage Date" : "Data de Uso",
"Usage Type" : "Tipo de Uso",
"Usage explains why this stock record was created or adjusted. Usage records will only be created if the balance changes." : "A utilização explica por que motivo este registo de stock foi criado ou ajustado. Os registos de utilização só serão criados se houver uma alteração de saldo.",
"Use Automatic Insurance Numbers" : "Utilizar Números de Seguro Automáticos",
"Use HTML5 client side image scaling where available to speed up image uploads" : "Utilizar o redimensionamento de imagem do lado do cliente em HTML5, se estiver disponível, para acelerar os carregamentos de imagens",
"Use SQL Interface" : "Utilizar Interface SQL",
"Use a single breed field" : "Utilizar um campo de raça única",
"Use animal description" : "Utilizar a descrição do animal",
"Use fancy tooltips" : "Utilizar balões de explicação sofisticados",
"Use notes from preferred photo" : "Utilizar anotações da foto preferida",
"Use the icon in the lower right of notes fields to view them in a separate window." : "Utilizar o ícone no canto inferior direito dos campos de anotações para visualizá-los numa janela separada.",
"Use the query builder" : "Utilizar o criador de consultas",
"User" : "Utilizador",
"User Accounts" : "Contas de Utilizador",
"User Roles" : "Funções do Utilizador",
"User accounts that will only ever call the Service API should set this to No." : "As contas de utilizador que poderão eventualmente chamar a API de serviço devem definir esta opção como Não.",
"User does not exist or have a valid email address." : "O utilizador não existe ou tem um endereço de email inválido.",
"User roles" : "Funções de Utilizador",
"Username" : "Nome de utilizador",
"Username '{0}' already exists" : "O nome de utilizador '{0}' já existe",
"Users" : "Utilizadores",
"Users need a username, password and at least one role or the superuser flag setting." : "Os utilizadores necessitam de um nome de utilizador, senha e de, pelo menos, uma função ou a sinalização de superutilizador.",
"Vacation" : "Férias",
"Vaccinate" : "Vacinar",
"Vaccinate Animal" : "Vacinar Animal",
"Vaccinated Non-Shelter Animals In {0}" : "Animais não residentes no abrigo vacinados em {0}",
"Vaccinated Shelter Animals In {0}" : "Animais residentes no abrigo vacinados em {0}",
"Vaccination" : "Vacinação",
"Vaccination Book" : "Livro de Vacinações",
"Vaccination Given" : "Vacinação Administrada",
"Vaccination Types" : "Tipos de Vacinações",
"Vaccination book" : "Livro de Vacinações",
"Vaccination due between" : "Vacinação a realizar entre",
"Vaccination due {0}" : "Vacinação a realizar em {0}",
"Vaccination given {0}" : "Vacinação administrada em {0}",
"Vaccination marked as given for {0} - {1}" : "Vacinação assinalada como administrada para {0} - {1}",
"Vaccination not given {0}" : "Vacinação não administrada em {0}",
"Vaccinations" : "Vacinações",
"Vaccinations need an animal and at least a required date." : "As vacinações necessitam de um animal e de, pelo menos, uma data prevista para a realização.",
"Vaccinations require an animal" : "As vacinações necessitam de um animal",
"Vaccinations: {0}, Tests: {1}, Medical Treatments: {2}, Transport: {3}, Costs: {4}, Total Costs: {5} Total Payments: {6}, Balance: {7}" : "Vacinações: {0}, Testes: {1}, Tratamentos Clínicos: {2}, Transporte: {3}, Custos: {4}, Custos Totais: {5} Pagamentos Totais: {6}, Saldo: {7}",
"Valid tokens for the subject and text" : "Tokens válidos para o assunto e texto",
"Various" : "Vários",
"Vertical Pitch" : "Ângulo Vertical",
"Very Large" : "Muito Grande",
"Vet" : "Veterinário",
"Vet Visit" : "Visita ao Veterinário",
"Victim" : "Vítima",
"Victim Name" : "Nome da Vítima",
"Video Link" : "Link de Vídeo",
"Vietnamese Pot Bellied" : "Pot Bellied Vietnamita",
"View" : "Ver",
"View Accounts" : "Ver Contas",
"View Animals" : "Ver Animais",
"View Audit Trail" : "Ver Histórico de Auditoria",
"View Citations" : "Ver Advertências",
"View Clinic Appointment" : "Ver Agendamento Clínico",
"View Cost" : "Ver Custo",
"View Diary" : "Ver Diário",
"View Diets" : "Ver Dietas",
"View Document" : "Ver Documento",
"View Document Repository" : "Ver Repositório de Documentos",
"View Form" : "Ver Formulário",
"View Found Animal" : "Ver Animal Encontrado",
"View Incidents" : "Ver Incidentes",
"View Incoming Forms" : "Ver Formulários Recebidos",
"View Investigations" : "Ver Investigações",
"View Licenses" : "Ver Licenças",
"View Litter" : "Ver Ninhada",
"View Log" : "Ver Registo",
"View Lost Animal" : "Ver Animal Perdido",
"View Manual" : "Ver Manual",
"View Media" : "Ver Imagens",
"View Medical Records" : "Ver Registos Clínicos",
"View Movement" : "Ver Movimento",
"View Online Forms" : "Ver Formulários Online",
"View Payments" : "Ver Pagamentos",
"View Person" : "Ver Pessoa",
"View Person Links" : "Ver Links de Pessoas",
"View Report" : "Ver Relatório",
"View Roles" : "Ver Funções",
"View Rota" : "Ver Turnos",
"View Shelter Animals" : "Ver Animais do Abrigo",
"View Staff Person Records" : "Ver Registos Individuais de Pessoal",
"View Staff Rota" : "Ver Turnos da Equipa",
"View Stock" : "Ver Stock",
"View Tests" : "Ver Testes",
"View Training Videos" : "Ver Vídeos de Formação",
"View Transport" : "Ver Transporte",
"View Trap Loans" : "Ver Empréstimos de Armadilhas",
"View Vaccinations" : "Ver Vacinações",
"View Volunteer Person Records" : "Ver Registos de Pessoas Voluntárias",
"View Vouchers" : "Ver Vouchers",
"View Waiting List" : "Ver Lista de Espera",
"View animals matching publishing options" : "Visualizar animais que correspondem às opções de publicação",
"View email recipient list" : "Ver lista de destinatários de emails",
"View littermates" : "Ver companheiros de ninhada",
"View matching records" : "Ver registos correspondentes",
"View publishing logs" : "Ver registos de publicação",
"View the form in development mode without caching" : "Ver o formulário no modo de programação sem cache",
"Visual Theme" : "Tema Visual",
"Vizsla" : "Vizsla",
"Volunteer" : "Voluntário",
"Voucher" : "Voucher",
"Voucher Book" : "Livro de Vouchers",
"Voucher Types" : "Tipos de Vouchers",
"Voucher book" : "Livro de vouchers",
"Voucher codes matching '{0}'." : "Códigos de voucher correspondentes a '{0}'.",
"Vouchers" : "Vouchers",
"Vouchers need an issue and expiry date." : "Os vouchers precisam de uma data de emissão e expiração.",
"WARNING: This animal has not been microchipped" : "AVISO: Este animal não tem microchip",
"WARNING: This animal is over 6 months old and has not been neutered/spayed" : "AVISO: Este animal tem mais de 6 meses de idade e não foi esterilizado/castrado",
"Waiting" : "Em espera",
"Waiting List" : "Lista de Espera",
"Waiting List - Additional" : "Lista de Espera - Adicional",
"Waiting List - Details" : "Lista de Espera - Detalhes",
"Waiting List - Removal" : "Lista de Espera - Remoção",
"Waiting List Contact" : "Contacto da Lista de Espera",
"Waiting List Donation" : "Donativo de Lista de Espera",
"Waiting List {0}" : "Lista de Espera {0}",
"Waiting List: {0}" : "Lista de Espera: {0}",
"Waiting Room" : "Sala de Espera",
"Waiting for documents..." : "A aguardar documentos...",
"Waiting list donations" : "Donativos de lista de espera",
"Waiting list entries matching '{0}'." : "Entradas da lista de espera correspondentes a '{0}'.",
"Waiting list entries must have a contact" : "As entradas de lista de espera precisam de ter um contacto",
"Waiting list entry for {0} ({1})" : "Entrada de lista de espera para {0} ({1})",
"Waiting list entry successfully added." : "Entrada de lista de espera adicionada com êxito.",
"Waiting list urgency update period in days" : "Período de atualização da urgência da lista de espera, em dias",
"Warmblood" : "Cavalo de sangue quente",
"Warn if the name of the new animal is similar to one entered recently" : "Avisar se o nome do novo animal for semelhante a outro recentemente introduzido",
"Warn when adopting an animal who has not been microchipped" : "Avisar ao adotar um animal que não tenha sido microchipado",
"Warn when adopting an unaltered animal" : "Avisar ao adotar um animal não esterilizado/castrado",
"Warn when adopting to a person who has been banned from adopting animals" : "Avisar ao autorizar a adoção por uma pessoa que tenha sido colocada na lista negra de adoção de animais",
"Warn when adopting to a person who has not been homechecked" : "Avisar ao autorizar a adoção por uma pessoa cujo domicílio não tenha sido inspecionado",
"Warn when adopting to a person who has previously brought an animal to the shelter" : "Avisar ao autorizar a adoção por uma pessoa que tenha trazido previamente um animal para o abrigo",
"Warn when adopting to a person who lives in the same area as the original owner" : "Avisar ao autorizar a adoção por uma pessoa que resida na mesma área que o dono original",
"Warn when creating multiple reservations on the same animal" : "Avisar ao criar várias reservas para o mesmo animal",
"Warning" : "Aviso",
"Warnings" : "Avisos",
"Wasted" : "Desperdiçado",
"Water Bills" : "Contas da Água",
"Watermark image with name and logo" : "Imagem de fundo com nome e logótipo",
"We" : "Qua",
"Wed" : "Qua",
"Wednesday" : "Quarta-feira",
"Week" : "Semana",
"Week beginning {0}" : "Semana com início em {0}",
"Weekly" : "Semanal",
"Weekly Fosterer Email" : "Email Semanal para FATs",
"Weight" : "Peso",
"Weimaraner" : "Weimaraner",
"Welcome!" : "Bem-vindo!",
"Welsh Corgi" : "Welsh Corgi",
"Welsh Springer Spaniel" : "Welsh Springer Spaniel",
"Welsh Terrier" : "Welsh Terrier",
"West Highland White Terrier Westie" : "West Highland White Terrier Westie",
"Wheaten Terrier" : "Wheaten Terrier",
"When" : "Quando",
"When ASM should stop showing this message" : "Altura em que o ASM deve parar de apresentar esta mensagem",
"When I change the location of an animal, make a note of it in the log with this type" : "Após eu alterar a localização de um animal, criar a respetiva anotação no registo com este tipo",
"When I change the weight of an animal, make a note of it in the log with this type" : "Após eu alterar o peso de um animal, criar a respetiva anotação no registo com este tipo",
"When I generate a document, make a note of it in the log with this type" : "Após eu gerar um documento, criar a respetiva anotação no registo com este tipo",
"When I mark an animal held, make a note of it in the log with this type" : "Após eu sinalizar um animal como retido, criar a respetiva anotação no registo com este tipo",
"When I set a new GDPR Opt-In contact option, make a note of it in the log with this type" : "Após eu definir eu definir uma nova opção de contacto de Consentimento de RGPD, criar a respetiva anotação no registo com este tipo",
"When a message is created, email it to each matching user" : "Após a criação de uma mensagem, enviá-la por email a cada utilizador que cumpra os critérios",
"When calculating sales tax, assume the payment amount is net and add it" : "Ao calcular o imposto sobre vendas, assumir que o montante pago é líquido e adicioná-lo",
"When creating payments from the Move menu screens, mark them due instead of received" : "Ao criar pagamentos a partir dos ecrãs do menu Movimentos, sinalizá-los como estando em dívida em vez de recebidos",
"When displaying calendars, the first day of the week is" : "Para a apresentação de agendas, o primeiro dia da semana é",
"When displaying person names, use the format" : "Para a apresentação de nomes de pessoas, utilizar o formato",
"When entering addresses, restrict states to valid US 2 letter state codes" : "Ao introduzir moradas, limitar os estados a códigos de estado dos EUA válidos de 2 letras",
"When entering dates, hold down CTRL and use the cursor keys to move around the calendar. Press t to go to today." : "Ao introduzir datas, mantenha premida a tecla CTRL e utilize as teclas do cursor para percorrer a agenda. Prima t para ir para o dia de hoje.",
"When entering vaccinations, default the last batch number and manufacturer for that type" : "Ao introduzir vacinações, utilizar como predefinição o número do último lote e o fabricante para o tipo correspondente",
"When matching lost animals, include shelter animals" : "Ao procurar animais perdidos que correspondam, incluir animais de abrigos",
"When publishing to third party services, add this extra text to the bottom of all animal descriptions" : "Ao publicar em serviços de terceiros, adicionar este texto extra no final de todas as descrições de animais",
"When receiving multiple payments, allow the due and received dates to be set" : "Ao receber vários pagamentos, permitir o preenchimento das datas de vencimento e recebimento",
"When receiving payments, allow a quantity and unit price to be set" : "Ao receber pagamentos, permitir a definição de uma quantidade e preço unitário",
"When receiving payments, allow a transaction fee to be set" : "Ao receber pagamentos, permitir a definição de uma taxa de transação",
"When receiving payments, allow recording of sales tax with a default rate of" : "Ao receber pagamentos, permitir o registo de um imposto sobre vendas com uma taxa padrão de",
"When receiving payments, allow the deposit account to be overridden" : "Ao receber pagamentos, permitir a seleção de uma conta à ordem diferente",
"When storing processed forms as media, apply tamper proofing and make them read only" : "Ao guardar formulários processados como imagens, aplicar a proteção contra adulteração e a propriedade apenas de leitura",
"When you use Move > Adopt an animal, ASM will automatically return any open foster or retailer movement before creating the adoption." : "Quando seleciona Movimentos > Adotar um animal, o ASM apresenta automaticamente todos os movimentos de FAT ou de lojistas em aberto antes de criar a adoção.",
"When you use Move > Foster an animal, ASM will automatically return any open foster movement before moving the animal to its new home." : "Quando seleciona Movimentos > Colocar animal em FAT, o ASM apresenta automaticamente todos os movimentos de adoção antes de mover o animal para a sua nova casa.",
"Where this animal is located within the shelter" : "A localização deste animal no abrigo",
"Whippet" : "Whippet",
"White" : "Branco",
"White German Shepherd" : "Pastor Alemão Branco",
"White and Black" : "Branco e Preto",
"White and Brindle" : "Branco e Tigrado",
"White and Brown" : "Branco e Castanho",
"White and Grey" : "Branco e Cinzento",
"White and Liver" : "Branco e Fígado",
"White and Tabby" : "Branco e Malhado",
"White and Tan" : "Branco e Afogueado",
"White and Torti" : "Branco e Tortie",
"Will this owner give a donation?" : "Este dono vai fazer um donativo?",
"Wire-haired Pointing Griffon" : "Pointing Griffon de Pelo Cerdoso",
"Wirehaired Terrier" : "Terrier de Pelo Cerdoso",
"With Vet" : "Com veterinário",
"With overnight batch" : "Com lote de pernoita",
"Wk" : "Wk",
"Work" : "Trabalho",
"Work Phone" : "Telefone do Emprego",
"Work Types" : "Tipos de Trabalho",
"XXX or XX = number unique for this year" : "XXX ou XX = número exclusivo para este ano",
"Xoloitzcuintle/Mexican Hairless" : "Xoloitzcuintle/Mexican Hairless",
"YY or YYYY = current year" : "AA ou AAAA = ano atual",
"Yellow Labrador Retriever" : "Labrador Retriever Amarelo",
"Yellow and Grey" : "Amarelo e Cinzento",
"Yes" : "Sim",
"Yes/No" : "Sim/Não",
"Yes/No/Unknown" : "Sim/Não/Desconhecido",
"Yorkshire Terrier Yorkie" : "Yorkshire Terrier Yorkie",
"You can assign a custom emblem to your additional animal flags" : "Pode atribuir um emblema personalizado às suas sinalizações de animal extra",
"You can bookmark search results, animals, people and most data entry screens." : "Pode adicionar marcadores para resultados de pesquisa, animais, pessoas e a maioria dos ecrãs de entrada de dados.",
"You can drag and drop animals in shelter view to change their locations." : "Pode arrastar e soltar animais na vista de abrigo para alterar os respetivos locais.",
"You can middle click a link to open it in a new browser tab (push the wheel on most modern mice)." : "Pode clicar com o botão do meio do rato num link para abri-lo num novo separador do browser (na maioria dos ratos mais recentes, pode clicar com a roda).",
"You can override the search result sort by adding one of the following to the end of your search - sort:az, sort:za, sort:mr, sort:lr" : "Pode ignorar a ordenação dos resultados da pesquisa se adicionar uma das seguintes opções ao final da pesquisa - sort:az, sort:za, sort:mr, sort:lr",
"You can prefix your term in the search box with a: to search only animals, p: to search only people, wl: to search waiting list entries, la: to search lost animals and fa: to search found animals." : "Pode adicionar o prefixo a: ao termo da caixa de pesquisa para pesquisar apenas animais, p: para pesquisar apenas pessoas, wl: para pesquisar entradas da lista de espera, la: para pesquisar animais perdidos e fa: para pesquisar animais encontrados.",
"You can set a default amount for different payment types in the Settings, Lookup Data screen. Very handy when creating adoptions." : "Pode definir um valor padrão para os diferentes tipos de pagamentos no ecrã Definições/Consultar Dados. Muito útil ao criar adoções.",
"You can sort tables by clicking on the column headings." : "Pode ordenar as tabelas clicando nos títulos das colunas.",
"You can upload images called logo.jpg and splash.jpg to the Settings, Reports, Extra Images screen to override the login splash screen and logo in the upper left corner of the application." : "Pode carregar imagens com os nomes logo.jpg e splash.jpg para o ecrã Definições/Relatórios/Imagens Extra para personalizar o ecrã de entrada de início de sessão e o logótipo localizado no canto superior esquerdo da aplicação.",
"You can use incoming forms to create new records or attach them to existing records." : "Pode utilizar formulários recebidos para criar novos registos ou anexá-los a registos existentes.",
"You can't have a return without a movement." : "Não pode ter uma devolução sem um movimento.",
"You didn't specify any search criteria, so an on-shelter search was assumed." : "Não especificou qualquer critério de pesquisa, por isso foi utilizada uma pesquisa de abrigo.",
"You have unsaved changes, are you sure you want to leave this page?" : "Tem alterações não guardadas. Tem a certeza de que deseja sair desta página?",
"You must supply a code." : "Precisa de especificar um código.",
"Young Adult" : "Jovem Adulto",
"Your CSV file should have a header row with field names ASM recognises." : "O seu ficheiro CSV deve ter uma linha de cabeçalho com nomes de campo reconhecidos pelo ASM.",
"Your sheltermanager.com account is due to expire on {0}, please renew {1}" : "A sua conta no sheltermanager.com vai expirar em {0}. Proceda à respetiva renovação {1}",
"Zipcode" : "Código postal",
"Zipcode contains" : "Código postal contém",
"[None]" : "[Nenhum]",
"after connecting, chdir to" : "após estabelecer a ligação, chdir para",
"and" : "e",
"are sent to" : "são enviados para",
"at" : "às",
"auto adjust for daylight savings" : "ajustar automaticamente para a hora de verão",
"cm" : "cm",
"days" : "dias",
"estimate" : "estimar",
"filters: a:animal, p:person, wl:waitinglist, la:lostanimal, fa:foundanimal keywords: onshelter/os, notforadoption, aco, banned, donors, deceased, vets, retailers, staff, fosterers, volunteers, homecheckers, members, activelost, activefound" : "filtros: a:animal, p:person, wl:waitinglist, la:lostanimal, fa:foundanimal keywords: onshelter/os, notforadoption, aco, banned, donors, deceased, vets, retailers, staff, fosterers, volunteers, homecheckers, members, activelost, activefound",
"if animal does not have" : "se o animal não possuir",
"if animal has" : "se o animal possuir",
"inches" : "polegadas",
"invalid" : "inválido",
"kg" : "kg",
"lb" : "lb",
"less" : "menos",
"mins" : "mins",
"months" : "meses",
"more" : "mais",
"on" : "em",
"or" : "ou",
"or estimated age in years" : "ou idade estimada, em anos",
"oz" : "oz",
"to" : "até",
"today" : "hoje",
"treatments" : "tratamentos",
"treatments, every" : "tratamentos, a cada",
"weekdays" : "dias úteis",
"weeks" : "semanas",
"weeks after last contact." : "semanas após o último contacto.",
"years" : "anos",
"yesterday" : "ontem",
"{0} (under {1} months)" : "{0} (com menos de {1} meses)",
"{0} - {1} ({2} {3} aged {4})" : "{0} - {1} ({2} {3} com {4} de idade)",
"{0} - {1} {2}" : "{0} - {1} {2}",
"{0} - {1} {2} ({3}), contact {4} ({5}) - lost in {6}, postcode {7}, on {8}" : "{0} - {1} {2} ({3}), contacto {4} ({5}) - perdido em {6}, código postal {7}, em {8}",
"{0} animals successfully deleted." : "{0} animais eliminados com êxito.",
"{0} animals successfully updated." : "{0} animais atualizados com êxito.",
"{0} cannot be blank" : "{0} não pode estar vazio",
"{0} fine, paid" : "{0} multa, paga",
"{0} fine, unpaid" : "{0} multa, não paga",
"{0} hours" : "{0} horas",
"{0} incurred in costs" : "{0} incorreu em custos",
"{0} is running ({1}% complete)." : "{0} em execução ({1}% concluído).",
"{0} payment records created." : "{0} registos de pagamento criados.",
"{0} received" : "{0} recebido",
"{0} record(s) match the mail merge." : "{0} registo(s) correspondem ao mailing.",
"{0} records displayed, use the 'Medical History' report to see all records for this animal." : "{0} registos apresentados, utilize o relatório 'Histórico Clínico' para ver todos os registos deste animal.",
"{0} results." : "{0} resultados.",
"{0} rows affected." : "{0} linhas abrangidas.",
"{0} selected" : "{0} selecionado",
"{0} treatments every {1} days" : "{0} tratamentos a cada {1} dias",
"{0} treatments every {1} months" : "{0} tratamentos a cada {1} meses",
"{0} treatments every {1} weekdays" : "{0} tratamentos a cada {1} dias úteis",
"{0} treatments every {1} weeks" : "{0} tratamentos a cada {1} semanas",
"{0} treatments every {1} years" : "Tratamento(s) {0} a cada {1} ano(s)",
"{0} weeks" : "{0} semanas",
"{0} years" : "{0} anos",
"{0} {1} ({2} treatments)" : "{0} {1} ({2} tratamentos)",
"{0} {1} aged {2}" : "{1} {0} com {2} de idade",
"{0} {1} {2} aged {3}" : "{1} {0} {2} com {3} de idade",
"{0} {1}: Moved from {2} to {3}" : "{0} {1}: movido de {2} para {3}",
"{0} {1}: adopted by {2}" : "{0} {1}: adotado por {2}",
"{0} {1}: altered" : "{0} {1}: esterilizado/castrado",
"{0} {1}: available for adoption" : "{0} {1}: disponível para adoção",
"{0} {1}: cancelled reservation to {2}" : "{0} {1}: reserva cancelada para {2}",
"{0} {1}: died ({2})" : "{0} {1}: morreu ({2})",
"{0} {1}: end of trial adoption to {2}" : "{0} {1}: fim de adoção experimental por {2}",
"{0} {1}: entered the shelter" : "{0} {1}: entrou no abrigo",
"{0} {1}: escaped" : "{0} {1}: fugiu",
"{0} {1}: euthanised ({2})" : "{0} {1}: eutanasiado ({2})",
"{0} {1}: fostered to {2}" : "{0} {1}: entregue à FAT {2}",
"{0} {1}: held" : "{0} {1}: retido",
"{0} {1}: microchipped" : "{0} {1}: microchipado",
"{0} {1}: not available for adoption" : "{0} {1}: não disponível para adoção",
"{0} {1}: quarantined" : "{0} {1}: em quarentena",
"{0} {1}: received {2}" : "{0} {1}: recebeu {2}",
"{0} {1}: reclaimed by {2}" : "{0} {1}: reivindicado por {2}",
"{0} {1}: released" : "{0} {1}: libertado",
"{0} {1}: reserved by {2}" : "{0} {1}: reservado por {2}",
"{0} {1}: returned by {2}" : "{0} {1}: devolvido por {2}",
"{0} {1}: sent to retailer {2}" : "{0} {1}: enviado para o lojista {2}",
"{0} {1}: stolen" : "{0} {1}: roubado",
"{0} {1}: tested positive for FIV" : "{0} {1}: testou positivo para FIV",
"{0} {1}: tested positive for FeLV" : "{0} {1}: testou positivo para FeLV",
"{0} {1}: tested positive for Heartworm" : "{0} {1}: testou positivo para dirofilariose",
"{0} {1}: transferred to {2}" : "{0} {1}: transferido para {2}",
"{0} {1}: trial adoption to {2}" : "{0} {1}: adoção experimental por {2}",
"{0}, Week {1}" : "{0}, Semana {1}",
"{0}: Entered shelter {1}, Last changed on {2} by {3}. {4} {5} {6} aged {7}" : "{0}: Entrada no abrigo {1}, Última alteração em {2} por {3}. {4} {5} {6} idade {7}",
"{0}: closed {1} ({2})" : "{0}: fechado {1} ({2})",
"{0}: opened {1}" : "{0}: aberto {1}",
"{0}: waiting list - {1}" : "{0}: lista de espera - {1}",
"{0}: {1} {2} - {3} {4}" : "{0}: {1} {2} - {3} {4}",
"{2}: found in {1}: {0}" : "{2}: encontrados em {1}: {0}",
"{2}: lost in {1}: {0}" : "{2}: perdidos em {1}: {0}",
"{browser} version {version}, running on {os}." : "{browser} version {version}, executada em {os}.",
"{plural0} animal control call due for followup today" : "{plural0} chamada de controlo animal agendada para seguimento hoje",
"{plural0} animal died" : "{plural0} animal morreu",
"{plural0} animal entered the shelter" : "{plural0} animal entrou no abrigo",
"{plural0} animal has a hold ending today" : "{plural0} animal com uma retenção que termina hoje",
"{plural0} animal has been on the shelter longer than {0} months" : "{plural0} animal permaneceu no abrigo mais do que {0} meses",
"{plural0} animal has not had a rabies vaccination" : "{plural0} animal não recebeu vacinação contra a raiva",
"{plural0} animal is not available for adoption" : "{plural0} animal não está disponível para adoção",
"{plural0} animal was TNR" : "{plural0} animal foi capturado/esterilizado/libertado",
"{plural0} animal was adopted" : "{plural0} animal foi adotado",
"{plural0} animal was dead on arrival" : "{plural0} animal estava morto à chegada",
"{plural0} animal was euthanized" : "{plural0} animal foi eutanasiado",
"{plural0} animal was reclaimed by its owner" : "{plural0} animal foi reivindicado por respetivo dono",
"{plural0} animal was released to wild" : "{plural0} animal foi libertado na natureza",
"{plural0} animal was transferred to another shelter" : "{plural0} animal foi transferido para outro abrigo",
"{plural0} clinic appointment today" : "{plural0} agendamento clínico para hoje",
"{plural0} day." : "{plural0} dia.",
"{plural0} document signing request has been received in the last week" : "{plural0} pedido de assinatura de documentos recebido na semana passada",
"{plural0} document signing request issued in the last month is unsigned" : "{plural0} pedido de assinatura de documentos emitidos no mês passado ainda não está cumprido",
"{plural0} incomplete animal control call" : "{plural0} chamada de controlo animal incompleta",
"{plural0} item of stock expires in the next month" : "{plural0} item de stock expira no próximo mês",
"{plural0} item of stock has expired" : "{plural0} item de stock expirou",
"{plural0} medical treatment needs to be administered today" : "{plural0} tratamento clínico precisa de ser administrado hoje",
"{plural0} month." : "{plural0} mês.",
"{plural0} new online form submission" : "{plural0} novo requerimento por formulário online",
"{plural0} person has an overdue payment" : "{plural0} pessoa tem um pagamento em atraso",
"{plural0} person with an active reservation has not been homechecked" : "{plural0} pessoa com uma reserva ativa não foi inspecionada no domicílio",
"{plural0} potential match for a lost animal" : "{plural0} potencial correspondência para um animal perdido",
"{plural0} recent publisher run had errors" : "{plural0} execução recente do editor gerou erros",
"{plural0} reservation has been active over a week without adoption" : "{plural0} reserva tem estado ativa por mais de uma semana sem adoção",
"{plural0} result found in {1} seconds. Order: {2}" : "{plural0} resultado encontrado em {1} segundos. Pedido: {2}",
"{plural0} shelter animal has not been microchipped" : "{plural0} animal do abrigo não foi microchipado",
"{plural0} shelter animal has people looking for them" : "{plural0} animal do abrigo tem pessoas que o procuram",
"{plural0} test needs to be performed today" : "{plural0} teste precisa de ser realizado hoje",
"{plural0} transport does not have a driver assigned" : "{plural0} transporte não tem um condutor atribuído",
"{plural0} trap is overdue for return" : "{plural0} armadilha já devia ter sido devolvida",
"{plural0} trial adoption has ended" : "{plural0} adoção experimental terminou",
"{plural0} unaltered animal has been adopted in the last month" : "{plural0} animal não esterilizado/castrado foi adotado no último mês",
"{plural0} undispatched animal control call" : "{plural0} chamada de controlo animal não processada",
"{plural0} unpaid fine" : "{plural0} multa não paga",
"{plural0} urgent entry on the waiting list" : "{plural0} entrada urgente na lista de espera",
"{plural0} vaccination has expired" : "{plural0} vacinação expirou",
"{plural0} vaccination needs to be administered today" : "{plural0} vacinação precisa de ser administrada hoje",
"{plural0} week." : "{plural0} semana.",
"{plural0} year." : "{plural0} ano.",
"{plural1} animal control calls due for followup today" : "{plural1} chamadas de controlo animal com seguimento agendado para hoje",
"{plural1} animals are not available for adoption" : "{plural1} animais não estão disponíveis para adoção",
"{plural1} animals died" : "{plural1} animais morreram",
"{plural1} animals entered the shelter" : "{plural1} animais entraram no abrigo",
"{plural1} animals have been on the shelter longer than {0} months" : "{plural1} animais encontram-se no abrigo há mais de {0} meses",
"{plural1} animals have holds ending today" : "{plural1} animais têm retenções que terminam hoje",
"{plural1} animals have not had a rabies vaccination" : "{plural1} animais não receberam vacinação contra a raiva",
"{plural1} animals were TNR" : "{plural1} animais foram capturados/esterilizados/libertados",
"{plural1} animals were adopted" : "{plural1} animais foram adotados",
"{plural1} animals were dead on arrival" : "{plural1} animais estavam mortos à chegada",
"{plural1} animals were euthanized" : "{plural1} animais foram eutanasiados",
"{plural1} animals were reclaimed by their owners" : "{plural1} animais foram reivindicados por respetivos donos",
"{plural1} animals were released to wild" : "{plural1} animais foram libertados na natureza",
"{plural1} animals were transferred to other shelters" : "{plural1} animais foram transferidos para outros abrigos",
"{plural1} clinic appointments today" : "{plural1} agendamentos clínicos para hoje",
"{plural1} days." : "{plural1} dias.",
"{plural1} document signing requests have been received in the last week" : "{plural1} pedidos de assinatura de documentos recebidos na semana passada",
"{plural1} document signing requests issued in the last month are unsigned" : "{plural1} pedidos de assinatura de documentos emitidos no mês passado ainda não estão cumpridos",
"{plural1} incomplete animal control calls" : "{plural1} chamadas de controlo animal incompletas",
"{plural1} items of stock expire in the next month" : "{plural1} itens de stock expiram no próximo mês",
"{plural1} items of stock have expired" : "{plural1} itens de stock expiraram",
"{plural1} medical treatments need to be administered today" : "{plural1} tratamentos clínicos precisam de ser administrados hoje",
"{plural1} months." : "{plural1} meses.",
"{plural1} new online form submissions" : "{plural1} novos requerimentos por de formulários online",
"{plural1} people have overdue payments" : "{plural1} pessoas têm pagamentos em atraso",
"{plural1} people with active reservations have not been homechecked" : "{plural1} pessoas com reservas ativas não foram inspecionadas no domicílio",
"{plural1} potential matches for lost animals" : "{plural1} potenciais correspondências para animais perdidos",
"{plural1} recent publisher runs had errors" : "{plural1} execuções recentes do editor geraram erros",
"{plural1} reservations have been active over a week without adoption" : "{plural1} reservas têm estado ativas por mais de uma semana sem adoção",
"{plural1} results found in {1} seconds. Order: {2}" : "{plural1} resultados encontrados em {1} segundos. Pedido: {2}",
"{plural1} shelter animals have not been microchipped" : "{plural1} animais do abrigo não foram microchipados",
"{plural1} shelter animals have people looking for them" : "{plural1} animais de abrigo têm pessoas que os procuram",
"{plural1} tests need to be performed today" : "{plural1} testes precisam de ser realizados hoje",
"{plural1} transports do not have a driver assigned" : "{plural1} transportes não têm um condutor atribuído",
"{plural1} traps are overdue for return" : "{plural1} armadilhas já deviam ter sido devolvidas",
"{plural1} trial adoptions have ended" : "{plural1} adoções experimentais terminaram",
"{plural1} unaltered animals have been adopted in the last month" : "{plural1} animais não esterilizados/castrados foram adotados no último mês",
"{plural1} undispatched animal control calls" : "{plural1} chamadas de controlo animal não processadas",
"{plural1} unpaid fines" : "{plural1} multas não pagas",
"{plural1} urgent entries on the waiting list" : "{plural1} entradas urgentes na lista de espera",
"{plural1} vaccinations have expired" : "{plural1} vacinas expiraram",
"{plural1} vaccinations need to be administered today" : "{plural1} vacinas precisam de ser administradas hoje",
"{plural1} weeks." : "{plural1} semanas.",
"{plural1} years." : "{plural1} anos.",
"{plural2} animal control calls due for followup today" : "{plural2} chamadas de controlo animal com seguimento agendado para hoje",
"{plural2} animals are not available for adoption" : "{plural2} animais não estão disponíveis para adoção",
"{plural2} animals died" : "{plural2} animais morreram",
"{plural2} animals entered the shelter" : "{plural2} animais entraram no abrigo",
"{plural2} animals have been on the shelter longer than {0} months" : "{plural2} animais encontram-se no abrigo há mais de {0} meses",
"{plural2} animals have holds ending today" : "{plural2} animais têm retenções que terminam hoje",
"{plural2} animals have not had a rabies vaccination" : "{plural2} animais não receberam vacinação contra a raiva",
"{plural2} animals were TNR" : "{plural2} animais foram capturados/esterilizados/libertados",
"{plural2} animals were adopted" : "{plural2} animais foram adotados",
"{plural2} animals were dead on arrival" : "{plural2} animais estavam mortos à chegada",
"{plural2} animals were euthanized" : "{plural2} animais foram eutanasiados",
"{plural2} animals were reclaimed by their owners" : "{plural2} animais foram reivindicados por respetivos donos",
"{plural2} animals were released to wild" : "{plural2} animais foram libertados na natureza",
"{plural2} animals were transferred to other shelters" : "{plural2} animais foram transferidos para outros abrigos",
"{plural2} clinic appointments today" : "{plural2} agendamentos clínicos para hoje",
"{plural2} days." : "{plural2} dias.",
"{plural2} document signing requests have been received in the last week" : "{plural2} pedidos de assinatura de documentos recebidos na semana passada",
"{plural2} document signing requests issued in the last month are unsigned" : "{plural2} pedidos de assinatura de documentos emitidos no mês passado ainda não estão cumpridos",
"{plural2} incomplete animal control calls" : "{plural2} chamadas de controlo animal incompletas",
"{plural2} items of stock expire in the next month" : "{plural2} itens de stock expiram no próximo mês",
"{plural2} items of stock have expired" : "{plural2} itens de stock expiraram",
"{plural2} medical treatments need to be administered today" : "{plural2} tratamentos clínicos precisam de ser administrados hoje",
"{plural2} months." : "{plural2} meses.",
"{plural2} new online form submissions" : "{plural2} novos requerimentos por de formulários online",
"{plural2} people have overdue payments" : "{plural2} pessoas têm pagamentos em atraso",
"{plural2} people with active reservations have not been homechecked" : "{plural2} pessoas com reservas ativas não foram inspecionadas no domicílio",
"{plural2} potential matches for lost animals" : "{plural2} potenciais correspondências para animais perdidos",
"{plural2} recent publisher runs had errors" : "{plural2} execuções recentes do editor geraram erros",
"{plural2} reservations have been active over a week without adoption" : "{plural2} reservas têm estado ativas por mais de uma semana sem adoção",
"{plural2} results found in {1} seconds. Order: {2}" : "{plural2} resultados encontrados em {1} segundos. Pedido: {2}",
"{plural2} shelter animals have not been microchipped" : "{plural2} animais do abrigo não foram microchipados",
"{plural2} shelter animals have people looking for them" : "{plural2} animais de abrigo têm pessoas que os procuram",
"{plural2} tests need to be performed today" : "{plural2} testes precisam de ser realizados hoje",
"{plural2} transports do not have a driver assigned" : "{plural2} transportes não têm um condutor atribuído",
"{plural2} traps are overdue for return" : "{plural2} armadilhas já deviam ter sido devolvidas",
"{plural2} trial adoptions have ended" : "{plural2} adoções experimentais terminaram",
"{plural2} unaltered animals have been adopted in the last month" : "{plural2} animais não esterilizados/castrados foram adotados no último mês",
"{plural2} undispatched animal control calls" : "{plural2} chamadas de controlo animal não processadas",
"{plural2} unpaid fines" : "{plural2} multas não pagas",
"{plural2} urgent entries on the waiting list" : "{plural2} entradas urgentes na lista de espera",
"{plural2} vaccinations have expired" : "{plural2} vacinas expiraram",
"{plural2} vaccinations need to be administered today" : "{plural2} vacinas precisam de ser administradas hoje",
"{plural2} weeks." : "{plural2} semanas.",
"{plural2} years." : "{plural2} anos.",
"{plural3} animal control calls due for followup today" : "{plural3} chamadas de controlo animal com seguimento agendado para hoje",
"{plural3} animals are not available for adoption" : "{plural3} animais não estão disponíveis para adoção",
"{plural3} animals died" : "{plural3} animais morreram",
"{plural3} animals entered the shelter" : "{plural3} animais entraram no abrigo",
"{plural3} animals have been on the shelter longer than {0} months" : "{plural3} animais encontram-se no abrigo há mais de {0} meses",
"{plural3} animals have holds ending today" : "{plural3} animais têm retenções que terminam hoje",
"{plural3} animals have not had a rabies vaccination" : "{plural3} animais não receberam vacinação contra a raiva",
"{plural3} animals were TNR" : "{plural3} animais foram capturados/esterilizados/libertados",
"{plural3} animals were adopted" : "{plural3} animais foram adotados",
"{plural3} animals were dead on arrival" : "{plural3} animais estavam mortos à chegada",
"{plural3} animals were euthanized" : "{plural3} animais foram eutanasiados",
"{plural3} animals were reclaimed by their owners" : "{plural3} animais foram reivindicados por respetivos donos",
"{plural3} animals were released to wild" : "{plural3} animais foram libertados na natureza",
"{plural3} animals were transferred to other shelters" : "{plural3} animais foram transferidos para outros abrigos",
"{plural3} clinic appointments today" : "{plural3} agendamentos clínicos para hoje",
"{plural3} days." : "{plural3} dias.",
"{plural3} document signing requests have been received in the last week" : "{plural3} pedidos de assinatura de documentos recebidos na semana passada",
"{plural3} document signing requests issued in the last month are unsigned" : "{plural3} pedidos de assinatura de documentos emitidos no mês passado ainda não estão cumpridos",
"{plural3} incomplete animal control calls" : "{plural3} chamadas de controlo animal incompletas",
"{plural3} items of stock expire in the next month" : "{plural3} itens de stock expiram no próximo mês",
"{plural3} items of stock have expired" : "{plural3} itens de stock expiraram",
"{plural3} medical treatments need to be administered today" : "{plural3} tratamentos clínicos precisam de ser administrados hoje",
"{plural3} months." : "{plural3} meses.",
"{plural3} new online form submissions" : "{plural3} novos requerimentos por de formulários online",
"{plural3} people have overdue payments" : "{plural3} pessoas têm pagamentos em atraso",
"{plural3} people with active reservations have not been homechecked" : "{plural3} pessoas com reservas ativas não foram inspecionadas no domicílio",
"{plural3} potential matches for lost animals" : "{plural3} potenciais correspondências para animais perdidos",
"{plural3} recent publisher runs had errors" : "{plural3} execuções recentes do editor geraram erros",
"{plural3} reservations have been active over a week without adoption" : "{plural3} reservas têm estado ativas por mais de uma semana sem adoção",
"{plural3} results found in {1} seconds. Order: {2}" : "{plural3} resultados encontrados em {1} segundos. Pedido: {2}",
"{plural3} shelter animals have not been microchipped" : "{plural3} animais do abrigo não foram microchipados",
"{plural3} shelter animals have people looking for them" : "{plural3} animais de abrigo têm pessoas que os procuram",
"{plural3} tests need to be performed today" : "{plural3} testes precisam de ser realizados hoje",
"{plural3} transports do not have a driver assigned" : "{plural3} transportes não têm um condutor atribuído",
"{plural3} traps are overdue for return" : "{plural3} armadilhas já deviam ter sido devolvidas",
"{plural3} trial adoptions have ended" : "{plural3} adoções experimentais terminaram",
"{plural3} unaltered animals have been adopted in the last month" : "{plural3} animais não esterilizados/castrados foram adotados no último mês",
"{plural3} undispatched animal control calls" : "{plural3} chamadas de controlo animal não processadas",
"{plural3} unpaid fines" : "{plural3} multas não pagas",
"{plural3} urgent entries on the waiting list" : "{plural3} entradas urgentes na lista de espera",
"{plural3} vaccinations have expired" : "{plural3} vacinas expiraram",
"{plural3} vaccinations need to be administered today" : "{plural3} vacinas precisam de ser administradas hoje",
"{plural3} weeks." : "{plural3} semanas.",
"{plural3} years." : "{plural3} anos."
}
|
bobintetley/asm3
|
src/asm3/locales/locale_pt.py
|
Python
|
gpl-3.0
| 229,844
|
[
"Amber",
"VisIt"
] |
08da536b7c3bd7f6d4a296d08571b55928c6df5bc26039609a7497735788b229
|
#!/usr/bin/env python
"""
# =============================================================================
Copyright Government of Canada 2015-2017
Written by: Eric Marinier, Public Health Agency of Canada,
National Microbiology Laboratory
Funded by the National Microbiology Laboratory and the Genome Canada / Alberta
Innovates Bio Solutions project "Listeria Detection and Surveillance
using Next Generation Genomics"
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
# =============================================================================
"""
"""
# =============================================================================
This script consolidates multiple signatures files, produced by Neptune, into
a single file containing the best signatures from all files. This script
attempts to avoid overlapping signatures. However, this is not guaranteed.
# =============================================================================
"""
import argparse
import os
import Signature
import Database
import Utility
"""
# =============================================================================
GLOBALS
# =============================================================================
"""
PROGRAM_DESCRIPTION = 'Consolidates signatures from several Neptune signature \
files (FASTA format) into a single representative signature file, \
determined by signature score and sequence similarity.'
# DEFAULTS #
SEED_SIZE_DEFAULT = 11
# ARGUMENTS #
LONG = "--"
SHORT = "-"
# REQUIRED ARGUMENTS #
# Signatures
SIGNATURES = "signatures"
SIGNATURES_LONG = LONG + SIGNATURES
SIGNATURES_SHORT = SHORT + "s"
SIGNATURES_HELP = "The file locations of all signatures to consolidate."
# Output
OUTPUT = "output"
OUTPUT_LONG = LONG + OUTPUT
OUTPUT_SHORT = SHORT + "o"
OUTPUT_HELP = "The output directory to place the consolidate signatures and \
any additional files."
# OPTIONAL ARGUMENTS #
# Seed Size
SEED_SIZE = "seed-size"
SEED_SIZE_LONG = LONG + SEED_SIZE
SEED_SIZE_SHORT = SHORT + "ss"
SEED_SIZE_HELP = "The seed size used during sequence alignment."
# OTHER #
COMPILED_SIGNATURES = "compiled.fasta"
COMPILED_DATABASE = "compiled.db"
COMPILED_DATABASE_QUERY = COMPILED_DATABASE + ".query"
CONSOLIDATED_SIGNATURES = "consolidated.fasta"
"""
# =============================================================================
COMPILE SIGNATURES
------------------
PURPOSE
-------
Compiles the signatures from several Neptune signature files (FASTA format)
into a single dictionary containing all signatures. This may result in repeated
signatures.
INPUT
-----
[(STRING ID) -> (SIGNATURE) DICTIONARY] [compiledSignatures]
    An initially empty dictionary that will be filled with the signatures
    located within the [signatureLocations] files.
[(FILE LOCATION) LIST] [signatureLocations]
    A list of signature file locations from which to compile signatures.
RETURN
------
[(STRING ID) -> (SIGNATURE) DICTIONARY] [compiledSignatures]
A dictionary containing all compiled signatures. This dictionary is the
same object as the initially passed [compiledSignatures] dictionary.
POST
----
The [compiledSignatures] dictionary will be filled with the signatures.
# =============================================================================
"""
def compileSignatures(compiledSignatures, signatureLocations):
fileID = 0
# -- Read Files -- #
for location in signatureLocations:
signatures = Signature.readSignatures(location)
for signatureID in signatures:
compileID = str(fileID) + "." + signatureID
compiledSignatures[compileID] = signatures[signatureID]
compiledSignatures[compileID].ID = compileID
fileID += 1
return compiledSignatures
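# A hedged illustration (hypothetical file contents, not part of Neptune):
# because compileSignatures prefixes each ID with a file index, a signature
# named "17" appearing in both the first and second input files is stored
# under the distinct keys "0.17" and "1.17" rather than colliding.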
"""
# =============================================================================
PRODUCE SIGNATURES
------------------
PURPOSE
-------
Produces a list of consolidated signatures by outputting signatures to a file,
while attempting to avoid outputting duplicate signatures.
INPUT
-----
[SIGNATURE LIST] [sortedSignatures]
A list of signatures, sorted by their corresponding Neptune signature
scores. This list of signatures may contain apparently-duplicate
signatures.
[FILE] [blastOutputFile]
    A readable BLASTN output file. This file is the output of aligning all
[sortedSignatures] against themselves.
[FILE] [destination]
A writable file-like object to write the consolidated signatures.
RETURN
------
[NONE]
POST
----
The list of consolidated signatures will be written to the [destination].
# =============================================================================
"""
def produceSignatures(sortedSignatures, blastOutputFile, destination):
hits = {} # [SIGNATURE ID] -> [(SIGNATURE ID) LIST] // (alignments)
outputSignatures = {} # Collection of already-output signatures.
# Build a list of all query hits.
# This creates a dictionary mapping signatures that align to each other.
# [SIGNATURE ID] -> [(SIGNATURE ID) LIST]
for line in blastOutputFile:
hit = Database.Hit(line)
        # We only keep the hit if the alignment covers at least half of the
        # signature length (alignment-to-signature length ratio >= 0.50).
if (float(hit.alignmentLength) / float(hit.length) < float(0.50)):
continue
# Append the signature ID to the existing list of IDs.
if hit.ID in hits:
hits[hit.ID].append(hit.reference)
# Create a new list of signature IDs associated with specific
# signature ID.
else:
hits[hit.ID] = [hit.reference]
    # Write the signatures to output, while maintaining a dictionary of
    # signatures that were previously written to output. This attempts to
    # avoid writing signatures that appear to be duplicates or to overlap
    # significantly.
for signature in sortedSignatures:
# Is the signature close to anything already written to output?
if(all((ID not in outputSignatures) for ID in hits[signature.ID])):
# The signature appears to be sufficiently unique.
            # Write the signature to output and record it as already output.
outputSignatures[signature.ID] = signature
Signature.writeSignature(signature, destination)
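# A minimal sketch (assumed names, not part of the Neptune API) of the greedy
# rule implemented above: visit signature IDs best-first and keep one only
# when none of its alignment partners has already been kept.
def _greedySelectSketch(sortedIDs, partners):
    kept = set()
    for signatureID in sortedIDs:
        # partners maps each ID to the IDs it aligned against (including itself)
        if all(ID not in kept for ID in partners.get(signatureID, [])):
            kept.add(signatureID)
    return kept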
"""
# =============================================================================
CONSOLIDATE SIGNATURES
----------------------
PURPOSE
-------
Consolidates signatures from several Neptune signature files into a single
representative Neptune signature file, determined by the signature score and
sequence similarity of all the contributing signatures.
The function compiles all the signatures into a single dictionary file, sorts
these signatures according to their Neptune signature score, and writes these
sorted signatures to a file. It then uses BLAST to query the signatures against
themselves and uses this information to report signatures in a greedy manner.
The signatures are reported in an order according to their signature score and
only if there has been no other similar signature (determined by BLAST) that
has already been reported.
INPUT
-----
[(FILE LOCATION) LIST] [signatureLocations]
A list of Neptune signature file locations corresponding to files to
consolidate.
[4 <= INT] [seedSize]
The seed size used in alignments to determine similarity.
[(FILE DIRECTORY) LOCATION] [outputDirectoryLocation]
The directory to write the output files.
RETURN
------
[NONE]
POST
----
The signatures and associated files will be written to several locations within
the [outputDirectoryLocation].
# =============================================================================
"""
def consolidateSignatures(
signatureLocations, seedSize, outputDirectoryLocation):
# --- Compile Signatures --- #
compiledSignatures = {}
compileSignatures(compiledSignatures, signatureLocations)
# -- Sort Signatures -- #
sortedSignatures = Signature.sortSignatures(compiledSignatures)
# -- Write Signatures -- #
compiledSignatureLocation = os.path.join(
outputDirectoryLocation, COMPILED_SIGNATURES)
compiledSignatureFile = open(compiledSignatureLocation, 'w')
Signature.writeSignatures(sortedSignatures, compiledSignatureFile)
compiledSignatureFile.close()
# --- Build and Query Database --- #
databaseLocation = os.path.join(
outputDirectoryLocation, COMPILED_DATABASE)
queryLocation = os.path.join(
outputDirectoryLocation, COMPILED_DATABASE_QUERY)
Database.createDatabaseJob(compiledSignatureLocation, databaseLocation)
Database.queryDatabase(
databaseLocation, compiledSignatureLocation,
queryLocation, 0.50, seedSize)
# --- Produce Signatures --- #
outputLocation = os.path.join(
outputDirectoryLocation, CONSOLIDATED_SIGNATURES)
outputFile = open(outputLocation, 'w')
queryFile = open(queryLocation, 'r')
produceSignatures(sortedSignatures, queryFile, outputFile)
outputFile.close()
queryFile.close()
# --- Clean Output --- #
filelist = [f for f in os.listdir(outputDirectoryLocation)
if f.startswith(COMPILED_DATABASE)]
for f in filelist:
os.remove(os.path.join(outputDirectoryLocation, f))
os.remove(os.path.join(outputDirectoryLocation, COMPILED_SIGNATURES))
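# Hedged usage sketch (file and directory names are hypothetical): calling
#   consolidateSignatures(["a.fasta", "b.fasta"], SEED_SIZE_DEFAULT, "out")
# leaves out/consolidated.fasta behind, with the intermediate compiled FASTA
# and BLAST database files removed by the cleanup step above.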
"""
# =============================================================================
PARSE
# =============================================================================
"""
def parse(parameters):
signatureLocations = []
Utility.expandInput(parameters[SIGNATURES], signatureLocations)
outputDirectoryLocation = parameters[OUTPUT]
seedSize = parameters[SEED_SIZE] \
if parameters[SEED_SIZE] else SEED_SIZE_DEFAULT
consolidateSignatures(
signatureLocations, seedSize, outputDirectoryLocation)
"""
# =============================================================================
MAIN
# =============================================================================
"""
def main():
# --- Parser ---
parser = argparse.ArgumentParser(description=PROGRAM_DESCRIPTION)
parser.add_argument(
SIGNATURES_SHORT,
SIGNATURES_LONG,
dest=SIGNATURES,
help=SIGNATURES_HELP,
type=str, required=True, nargs='+')
parser.add_argument(
OUTPUT_SHORT,
OUTPUT_LONG,
dest=OUTPUT,
help=OUTPUT_HELP,
type=str, required=True)
parser.add_argument(
SEED_SIZE_SHORT,
SEED_SIZE_LONG,
dest=SEED_SIZE,
help=SEED_SIZE_HELP,
type=int, required=False)
args = parser.parse_args()
parameters = vars(args)
parse(parameters)
"""
# =============================================================================
# =============================================================================
"""
if __name__ == '__main__':
main()
|
phac-nml/neptune
|
neptune/ConsolidateSignatures.py
|
Python
|
apache-2.0
| 11,504
|
[
"BLAST"
] |
65f1e3f19bd10a1ca0d2c5857172f842316a1ef3c0c7e23bc7e9759bc2388028
|
from __future__ import print_function
from __future__ import division
try:
from builtins import str
except:
print("Warning: No str in builtins")
try:
from builtins import range
except:
print("Warning: No range in builtins")
# Requires:
# - boututils
# - NumPy
try:
from boututils.datafile import DataFile
except ImportError:
print("ERROR: boututils.DataFile couldn't be loaded")
raise
try:
import os
import sys
import glob
except ImportError:
print("ERROR: os, sys or glob modules not available")
raise
try:
import numpy as np
except ImportError:
print("ERROR: NumPy module not available")
    raise
class CollectError(Exception):
    """Raised when collect() cannot read the requested data."""
    pass
def findVar(varname, varlist):
"""
Find variable name in a list
First does case insensitive comparison, then
checks for abbreviations.
Returns the matched string, or raises a ValueError
"""
# Try a variation on the case
v = [name for name in varlist if name.lower() == varname.lower()]
if len(v) == 1:
# Found case match
print("Variable '%s' not found. Using '%s' instead" % (varname, v[0]))
return v[0]
elif len(v) > 1:
print("Variable '"+varname+"' not found, and is ambiguous. Could be one of: "+str(v))
        raise ValueError("Variable '"+varname+"' is ambiguous")
# None found. Check if it's an abbreviation
v = [name for name in varlist if name[:len(varname)].lower() == varname.lower()]
if len(v) == 1:
print("Variable '%s' not found. Using '%s' instead" % (varname, v[0]))
return v[0]
    if len(v) > 1:
        print("Variable '"+varname+"' not found, and is ambiguous. Could be one of: "+str(v))
        raise ValueError("Variable '"+varname+"' is ambiguous")
    # No matches at all: fail explicitly rather than returning None
    raise ValueError("Variable '"+varname+"' not found")
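# Hedged example (hypothetical variable list): findVar("n", ["Ni", "Te"])
# matches "Ni" as an unambiguous abbreviation and returns it after printing
# a notice, while findVar("x", ["Ni", "Te"]) raises ValueError.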
def collect(varname, xind=None, yind=None, zind=None, tind=None, path=".",yguards=False, xguards=True, info=True,prefix="BOUT.dmp"):
"""Collect a variable from a set of BOUT++ outputs.
data = collect(name)
name Name of the variable (string)
Optional arguments:
xind = [min,max] Range of X indices to collect
yind = [min,max] Range of Y indices to collect
zind = [min,max] Range of Z indices to collect
tind = [min,max] Range of T indices to collect
path = "." Path to data files
prefix = "BOUT.dmp" File prefix
yguards = False Collect Y boundary guard cells?
xguards = True Collect X boundary guard cells?
(Set to True to be consistent with the
definition of nx)
info = True Print information about collect?
"""
# Search for BOUT++ dump files in NetCDF format
file_list_nc = glob.glob(os.path.join(path, prefix+".nc"))
file_list_h5 = glob.glob(os.path.join(path, prefix+".hdf5"))
if file_list_nc != [] and file_list_h5 != []:
raise IOError("Error: Both NetCDF and HDF5 files are present: do not know which to read.")
elif file_list_h5 != []:
suffix = ".hdf5"
file_list = file_list_h5
else:
suffix = ".nc"
file_list = file_list_nc
if file_list != []:
print("Single (parallel) data file")
        f = DataFile(file_list[0]) # Open the file
        data = f.read(varname)
        f.close()
        return data
file_list_nc = glob.glob(os.path.join(path, prefix+".*nc"))
file_list_h5 = glob.glob(os.path.join(path, prefix+".*hdf5"))
if file_list_nc != [] and file_list_h5 != []:
raise IOError("Error: Both NetCDF and HDF5 files are present: do not know which to read.")
elif file_list_h5 != []:
suffix = ".hdf5"
file_list = file_list_h5
else:
suffix = ".nc"
file_list = file_list_nc
file_list.sort()
if file_list == []:
raise ValueError("ERROR: No data files found")
nfiles = len(file_list)
# Read data from the first file
f = DataFile(file_list[0])
    try:
        dimens = f.dimensions(varname)
        ndims = f.ndims(varname)
    except:
        # Find the variable
        varname = findVar(varname, f.list())
        dimens = f.dimensions(varname)
        ndims = f.ndims(varname)
if ndims < 2:
# Just read from file
data = f.read(varname)
f.close()
return data
if ndims > 4:
print("ERROR: Too many dimensions")
raise CollectError
mxsub = f.read("MXSUB")
if mxsub is None:
raise CollectError("Missing MXSUB variable")
mysub = f.read("MYSUB")
mz = f.read("MZ")
myg = f.read("MYG")
t_array = f.read("t_array")
if t_array is None:
nt = 1
t_array = np.zeros(1)
else:
nt = len(t_array)
if info:
print("mxsub = %d mysub = %d mz = %d\n" % (mxsub, mysub, mz))
# Get the version of BOUT++ (should be > 0.6 for NetCDF anyway)
try:
v = f.read("BOUT_VERSION")
# 2D decomposition
nxpe = f.read("NXPE")
mxg = f.read("MXG")
nype = f.read("NYPE")
npe = nxpe * nype
if info:
print("nxpe = %d, nype = %d, npe = %d\n" % (nxpe, nype, npe))
if npe < nfiles:
print("WARNING: More files than expected (" + str(npe) + ")")
elif npe > nfiles:
print("WARNING: Some files missing. Expected " + str(npe))
if xguards:
nx = nxpe * mxsub + 2*mxg
else:
nx = nxpe * mxsub
except KeyError:
print("BOUT++ version : Pre-0.2")
# Assume number of files is correct
# No decomposition in X
nx = mxsub
mxg = 0
nxpe = 1
nype = nfiles
if yguards:
ny = mysub * nype + 2*myg
else:
ny = mysub * nype
    f.close()
# Check ranges
    def check_range(r, low, up, name="range"):
        r2 = r
        if r is not None:
            try:
                n = len(r2)
            except TypeError:
                # No len attribute, so probably a single number
                r2 = [r2,r2]
            if (len(r2) < 1) or (len(r2) > 2):
                print("WARNING: "+name+" must be [min, max]")
                r2 = [low, up]
            else:
                if len(r2) == 1:
                    r2 = [r2[0],r2[0]]
                # Clamp both ends into [low, up], then order them
                if r2[0] < low:
                    r2[0] = low
                if r2[0] > up:
                    r2[0] = up
                if r2[1] < low:
                    r2[1] = low
                if r2[1] > up:
                    r2[1] = up
                if r2[0] > r2[1]:
                    r2[0], r2[1] = r2[1], r2[0]
        else:
            r2 = [low, up]
        return r2
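    # For example (assumed values): check_range(5, 0, 10) returns [5, 5],
    # check_range([8, 2], 0, 10) returns [2, 8] after clamping and swapping,
    # and check_range(None, 0, 10) falls back to the full range [0, 10].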
xind = check_range(xind, 0, nx-1, "xind")
yind = check_range(yind, 0, ny-1, "yind")
zind = check_range(zind, 0, mz-2, "zind")
tind = check_range(tind, 0, nt-1, "tind")
xsize = xind[1] - xind[0] + 1
ysize = yind[1] - yind[0] + 1
zsize = zind[1] - zind[0] + 1
tsize = tind[1] - tind[0] + 1
# Map between dimension names and output size
sizes = {'x':xsize, 'y':ysize, 'z':zsize, 't':tsize}
# Create a list with size of each dimension
ddims = [sizes[d] for d in dimens]
# Create the data array
data = np.zeros(ddims)
for i in range(npe):
# Get X and Y processor indices
pe_yind = int(i/nxpe)
pe_xind = i % nxpe
inrange = True
if yguards:
# Get local ranges
ymin = yind[0] - pe_yind*mysub
ymax = yind[1] - pe_yind*mysub
# Check lower y boundary
if pe_yind == 0:
# Keeping inner boundary
if ymax < 0: inrange = False
if ymin < 0: ymin = 0
else:
if ymax < myg: inrange = False
if ymin < myg: ymin = myg
# Upper y boundary
if pe_yind == (nype - 1):
# Keeping outer boundary
if ymin >= (mysub + 2*myg): inrange = False
if ymax > (mysub + 2*myg - 1): ymax = (mysub + 2*myg - 1)
else:
if ymin >= (mysub + myg): inrange = False
if ymax >= (mysub + myg): ymax = (mysub+myg-1)
# Calculate global indices
ygmin = ymin + pe_yind * mysub
ygmax = ymax + pe_yind * mysub
else:
# Get local ranges
ymin = yind[0] - pe_yind*mysub + myg
ymax = yind[1] - pe_yind*mysub + myg
if (ymin >= (mysub + myg)) or (ymax < myg):
inrange = False # Y out of range
if ymin < myg:
ymin = myg
if ymax >= mysub+myg:
ymax = myg + mysub - 1
# Calculate global indices
ygmin = ymin + pe_yind * mysub - myg
ygmax = ymax + pe_yind * mysub - myg
if xguards:
# Get local ranges
xmin = xind[0] - pe_xind*mxsub
xmax = xind[1] - pe_xind*mxsub
# Check lower x boundary
if pe_xind == 0:
# Keeping inner boundary
if xmax < 0: inrange = False
if xmin < 0: xmin = 0
else:
if xmax < mxg: inrange = False
if xmin < mxg: xmin = mxg
# Upper x boundary
if pe_xind == (nxpe - 1):
# Keeping outer boundary
if xmin >= (mxsub + 2*mxg): inrange = False
if xmax > (mxsub + 2*mxg - 1): xmax = (mxsub + 2*mxg - 1)
else:
if xmin >= (mxsub + mxg): inrange = False
if xmax >= (mxsub + mxg): xmax = (mxsub+mxg-1)
# Calculate global indices
xgmin = xmin + pe_xind * mxsub
xgmax = xmax + pe_xind * mxsub
else:
# Get local ranges
xmin = xind[0] - pe_xind*mxsub + mxg
xmax = xind[1] - pe_xind*mxsub + mxg
if (xmin >= (mxsub + mxg)) or (xmax < mxg):
inrange = False # X out of range
if xmin < mxg:
xmin = mxg
if xmax >= mxsub+mxg:
xmax = mxg + mxsub - 1
# Calculate global indices
xgmin = xmin + pe_xind * mxsub - mxg
xgmax = xmax + pe_xind * mxsub - mxg
# Number of local values
nx_loc = xmax - xmin + 1
ny_loc = ymax - ymin + 1
if not inrange:
continue # Don't need this file
filename = os.path.join(path, prefix+"." + str(i) + suffix)
if info:
sys.stdout.write("\rReading from " + filename + ": [" + \
str(xmin) + "-" + str(xmax) + "][" + \
str(ymin) + "-" + str(ymax) + "] -> [" + \
str(xgmin) + "-" + str(xgmax) + "][" + \
str(ygmin) + "-" + str(ygmax) + "]")
f = DataFile(filename)
if ndims == 4:
d = f.read(varname, ranges=[tind[0],tind[1]+1,
xmin, xmax+1,
ymin, ymax+1,
zind[0],zind[1]+1])
data[:, (xgmin-xind[0]):(xgmin-xind[0]+nx_loc), (ygmin-yind[0]):(ygmin-yind[0]+ny_loc), :] = d
elif ndims == 3:
# Could be xyz or txy
if dimens[2] == 'z': # xyz
d = f.read(varname, ranges=[xmin, xmax+1,
ymin, ymax+1,
zind[0],zind[1]+1])
data[(xgmin-xind[0]):(xgmin-xind[0]+nx_loc), (ygmin-yind[0]):(ygmin-yind[0]+ny_loc), :] = d
else: # txy
d = f.read(varname, ranges=[tind[0],tind[1]+1,
xmin, xmax+1,
ymin, ymax+1])
data[:, (xgmin-xind[0]):(xgmin-xind[0]+nx_loc), (ygmin-yind[0]):(ygmin-yind[0]+ny_loc)] = d
elif ndims == 2:
# xy
d = f.read(varname, ranges=[xmin, xmax+1,
ymin, ymax+1])
data[(xgmin-xind[0]):(xgmin-xind[0]+nx_loc), (ygmin-yind[0]):(ygmin-yind[0]+ny_loc)] = d
elif ndims == 1:
if dimens[0] == 't':
# t
d = f.read(varname, ranges=[tind[0],tind[1]+1])
data[:] = d
f.close()
# Force the precision of arrays of dimension>1
    if ndims > 1:
try:
data = data.astype(t_array.dtype, copy=False)
except TypeError:
data = data.astype(t_array.dtype)
# Finished looping over all files
if info:
sys.stdout.write("\n")
return data
|
kevinpetersavage/BOUT-dev
|
tools/pylib/boutdata/collect.py
|
Python
|
gpl-3.0
| 12,834
|
[
"NetCDF"
] |
c424ee8703f3a3a7b7e180290d20d03af08262ceb10c489364f23b0afb641f37
|
"""
This sample simulates an equal number of positively and negatively charged particles using the P3M solver. The system is kept at a constant temperature by a Langevin thermostat.
"""
#
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import espressomd
required_features = ["ELECTROSTATICS", "LENNARD_JONES"]
espressomd.assert_features(required_features)
from espressomd import electrostatics
import numpy as np
# System parameters
#############################################################
box_l = 10.7437
density = 0.7
# Interaction parameters (repulsive Lennard-Jones)
#############################################################
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 1.12246
lj_cap = 20
# Integration parameters
#############################################################
system = espressomd.System(box_l=[box_l] * 3)
system.set_random_state_PRNG()
#system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
np.random.seed(seed=system.seed)
system.time_step = 0.01
system.cell_system.skin = 0.4
system.thermostat.set_langevin(kT=1.0, gamma=1.0)
# warmup integration (with capped LJ potential)
warm_steps = 100
warm_n_times = 30
# do the warmup until the particles are at least min_dist apart
min_dist = 0.9
# integration
int_steps = 1000
int_n_times = 10
# Non-Bonded Interaction setup
#############################################################
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig,
cutoff=lj_cut, shift="auto")
system.force_cap = lj_cap
# Particle setup
#############################################################
volume = box_l * box_l * box_l
n_part = int(volume * density)
for i in range(n_part):
system.part.add(id=i, pos=np.random.random(3) * system.box_l)
# Assign charges to particles
for i in range(n_part // 2 - 1):
system.part[2 * i].q = -1.0
system.part[2 * i + 1].q = 1.0
# Warmup
#############################################################
lj_cap = 20
system.force_cap = lj_cap
i = 0
act_min_dist = system.analysis.min_dist()
while (i < warm_n_times and act_min_dist < min_dist):
system.integrator.run(warm_steps)
# Warmup criterion
act_min_dist = system.analysis.min_dist()
i += 1
lj_cap = lj_cap + 10
system.force_cap = lj_cap
lj_cap = 0
system.force_cap = lj_cap
# P3M setup after charge assigned
#############################################################
p3m = electrostatics.P3M(prefactor=1.0, accuracy=1e-2)
system.actors.add(p3m)
#############################################################
# Integration #
#############################################################
for i in range(0, int_n_times):
system.integrator.run(int_steps)
energies = system.analysis.energy()
print(energies)
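# A minimal sketch (assuming the standard espressomd analysis API): the
# returned energy object can be indexed by contribution, e.g.
# energies['total'], energies['kinetic'] and energies['coulomb'].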
|
hmenke/espresso
|
samples/minimal-charged-particles.py
|
Python
|
gpl-3.0
| 3,528
|
[
"ESPResSo"
] |
3c772e11b059bb2a3142fbe5a2d436ffd19c8b2e0cff6051c56c263d710a738c
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Discogs album search support to the autotagger. Requires the
python3-discogs-client library.
"""
import beets.ui
from beets import config
from beets.autotag.hooks import AlbumInfo, TrackInfo
from beets.plugins import MetadataSourcePlugin, BeetsPlugin, get_distance
import confuse
from discogs_client import Release, Master, Client
from discogs_client.exceptions import DiscogsAPIError
from requests.exceptions import ConnectionError
import http.client
import beets
import re
import time
import json
import socket
import os
import traceback
from string import ascii_lowercase
USER_AGENT = f'beets/{beets.__version__} +https://beets.io/'
API_KEY = 'rAzVUQYRaoFjeBjyWuWZ'
API_SECRET = 'plxtUTqoCzwxZpqdPysCwGuBSmZNdZVy'
# Exceptions that discogs_client should really handle but does not.
CONNECTION_ERRORS = (ConnectionError, socket.error, http.client.HTTPException,
ValueError, # JSON decoding raises a ValueError.
DiscogsAPIError)
class DiscogsPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
'apikey': API_KEY,
'apisecret': API_SECRET,
'tokenfile': 'discogs_token.json',
'source_weight': 0.5,
'user_token': '',
'separator': ', ',
'index_tracks': False,
'append_style_genre': False,
})
self.config['apikey'].redact = True
self.config['apisecret'].redact = True
self.config['user_token'].redact = True
self.discogs_client = None
self.register_listener('import_begin', self.setup)
def setup(self, session=None):
"""Create the `discogs_client` field. Authenticate if necessary.
"""
c_key = self.config['apikey'].as_str()
c_secret = self.config['apisecret'].as_str()
# Try using a configured user token (bypassing OAuth login).
user_token = self.config['user_token'].as_str()
if user_token:
# The rate limit for authenticated users goes up to 60
# requests per minute.
self.discogs_client = Client(USER_AGENT, user_token=user_token)
return
# Get the OAuth token from a file or log in.
try:
with open(self._tokenfile()) as f:
tokendata = json.load(f)
except OSError:
# No token yet. Generate one.
token, secret = self.authenticate(c_key, c_secret)
else:
token = tokendata['token']
secret = tokendata['secret']
self.discogs_client = Client(USER_AGENT, c_key, c_secret,
token, secret)
def reset_auth(self):
"""Delete token file & redo the auth steps.
"""
os.remove(self._tokenfile())
self.setup()
def _tokenfile(self):
"""Get the path to the JSON file for storing the OAuth token.
"""
return self.config['tokenfile'].get(confuse.Filename(in_app_dir=True))
def authenticate(self, c_key, c_secret):
# Get the link for the OAuth page.
auth_client = Client(USER_AGENT, c_key, c_secret)
try:
_, _, url = auth_client.get_authorize_url()
except CONNECTION_ERRORS as e:
self._log.debug('connection error: {0}', e)
raise beets.ui.UserError('communication with Discogs failed')
beets.ui.print_("To authenticate with Discogs, visit:")
beets.ui.print_(url)
# Ask for the code and validate it.
code = beets.ui.input_("Enter the code:")
try:
token, secret = auth_client.get_access_token(code)
except DiscogsAPIError:
raise beets.ui.UserError('Discogs authorization failed')
except CONNECTION_ERRORS as e:
self._log.debug('connection error: {0}', e)
raise beets.ui.UserError('Discogs token request failed')
# Save the token for later use.
self._log.debug('Discogs token {0}, secret {1}', token, secret)
with open(self._tokenfile(), 'w') as f:
json.dump({'token': token, 'secret': secret}, f)
return token, secret
def album_distance(self, items, album_info, mapping):
"""Returns the album distance.
"""
return get_distance(
data_source='Discogs',
info=album_info,
config=self.config
)
def track_distance(self, item, track_info):
"""Returns the track distance.
"""
return get_distance(
data_source='Discogs',
info=track_info,
config=self.config
)
def candidates(self, items, artist, album, va_likely, extra_tags=None):
"""Returns a list of AlbumInfo objects for discogs search results
matching an album and artist (if not various).
"""
if not self.discogs_client:
return
if not album and not artist:
self._log.debug('Skipping Discogs query. Files missing album and '
'artist tags.')
return []
if va_likely:
query = album
else:
query = f'{artist} {album}'
try:
return self.get_albums(query)
except DiscogsAPIError as e:
self._log.debug('API Error: {0} (query: {1})', e, query)
if e.status_code == 401:
self.reset_auth()
return self.candidates(items, artist, album, va_likely)
else:
return []
except CONNECTION_ERRORS:
self._log.debug('Connection error in album search', exc_info=True)
return []
@staticmethod
def extract_release_id_regex(album_id):
"""Returns the Discogs_id or None."""
# Discogs-IDs are simple integers. In order to avoid confusion with
# other metadata plugins, we only look for very specific formats of the
# input string:
# - plain integer, optionally wrapped in brackets and prefixed by an
# 'r', as this is how discogs displays the release ID on its webpage.
# - legacy url format: discogs.com/<name of release>/release/<id>
# - current url format: discogs.com/release/<id>-<name of release>
# See #291, #4080 and #4085 for the discussions leading up to these
# patterns.
# Regex has been tested here https://regex101.com/r/wyLdB4/2
for pattern in [
r'^\[?r?(?P<id>\d+)\]?$',
r'discogs\.com/release/(?P<id>\d+)-',
r'discogs\.com/[^/]+/release/(?P<id>\d+)',
]:
match = re.search(pattern, album_id)
if match:
return int(match.group('id'))
return None
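    # Illustrative inputs (not from the original source):
    #   '[r123456]'                                        -> 123456
    #   'https://www.discogs.com/release/123456-The-Album' -> 123456
    #   'not-an-id'                                        -> None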
def album_for_id(self, album_id):
"""Fetches an album by its Discogs ID and returns an AlbumInfo object
or None if the album is not found.
"""
if not self.discogs_client:
return
self._log.debug('Searching for release {0}', album_id)
discogs_id = self.extract_release_id_regex(album_id)
if not discogs_id:
return None
result = Release(self.discogs_client, {'id': discogs_id})
# Try to obtain title to verify that we indeed have a valid Release
try:
getattr(result, 'title')
except DiscogsAPIError as e:
if e.status_code != 404:
self._log.debug('API Error: {0} (query: {1})', e,
result.data['resource_url'])
if e.status_code == 401:
self.reset_auth()
return self.album_for_id(album_id)
return None
except CONNECTION_ERRORS:
self._log.debug('Connection error in album lookup',
exc_info=True)
return None
return self.get_album_info(result)
def get_albums(self, query):
"""Returns a list of AlbumInfo objects for a discogs search query.
"""
# Strip non-word characters from query. Things like "!" and "-" can
# cause a query to return no results, even if they match the artist or
# album title. Use `re.UNICODE` flag to avoid stripping non-english
# word characters.
query = re.sub(r'(?u)\W+', ' ', query)
        # Strip medium information from the query. Things like "CD1" and
        # "disc 1" can also negate an otherwise positive result.
        query = re.sub(r'(?i)\b(CD|disc)\s*\d+', '', query)
try:
releases = self.discogs_client.search(query,
type='release').page(1)
except CONNECTION_ERRORS:
self._log.debug("Communication error while searching for {0!r}",
query, exc_info=True)
return []
return [album for album in map(self.get_album_info, releases[:5])
if album]
def get_master_year(self, master_id):
"""Fetches a master release given its Discogs ID and returns its year
or None if the master release is not found.
"""
self._log.debug('Searching for master release {0}', master_id)
result = Master(self.discogs_client, {'id': master_id})
try:
year = result.fetch('year')
return year
except DiscogsAPIError as e:
if e.status_code != 404:
self._log.debug('API Error: {0} (query: {1})', e,
result.data['resource_url'])
if e.status_code == 401:
self.reset_auth()
return self.get_master_year(master_id)
return None
except CONNECTION_ERRORS:
self._log.debug('Connection error in master release lookup',
exc_info=True)
return None
def get_album_info(self, result):
"""Returns an AlbumInfo object for a discogs Release object.
"""
# Explicitly reload the `Release` fields, as they might not be yet
# present if the result is from a `discogs_client.search()`.
if not result.data.get('artists'):
result.refresh()
# Sanity check for required fields. The list of required fields is
# defined at Guideline 1.3.1.a, but in practice some releases might be
# lacking some of these fields. This function expects at least:
# `artists` (>0), `title`, `id`, `tracklist` (>0)
# https://www.discogs.com/help/doc/submission-guidelines-general-rules
if not all([result.data.get(k) for k in ['artists', 'title', 'id',
'tracklist']]):
self._log.warning("Release does not contain the required fields")
return None
artist, artist_id = MetadataSourcePlugin.get_artist(
[a.data for a in result.artists]
)
album = re.sub(r' +', ' ', result.title)
album_id = result.data['id']
# Use `.data` to access the tracklist directly instead of the
# convenient `.tracklist` property, which will strip out useful artist
# information and leave us with skeleton `Artist` objects that will
# each make an API call just to get the same data back.
tracks = self.get_tracks(result.data['tracklist'])
# Extract information for the optional AlbumInfo fields, if possible.
va = result.data['artists'][0].get('name', '').lower() == 'various'
year = result.data.get('year')
mediums = [t.medium for t in tracks]
country = result.data.get('country')
data_url = result.data.get('uri')
style = self.format(result.data.get('styles'))
base_genre = self.format(result.data.get('genres'))
if self.config['append_style_genre'] and style:
genre = self.config['separator'].as_str().join([base_genre, style])
else:
genre = base_genre
discogs_albumid = self.extract_release_id_regex(result.data.get('uri'))
# Extract information for the optional AlbumInfo fields that are
# contained on nested discogs fields.
albumtype = media = label = catalogno = labelid = None
if result.data.get('formats'):
albumtype = ', '.join(
result.data['formats'][0].get('descriptions', [])) or None
media = result.data['formats'][0]['name']
if result.data.get('labels'):
label = result.data['labels'][0].get('name')
catalogno = result.data['labels'][0].get('catno')
labelid = result.data['labels'][0].get('id')
# Additional cleanups (various artists name, catalog number, media).
if va:
artist = config['va_name'].as_str()
if catalogno == 'none':
catalogno = None
# Explicitly set the `media` for the tracks, since it is expected by
# `autotag.apply_metadata`, and set `medium_total`.
for track in tracks:
track.media = media
track.medium_total = mediums.count(track.medium)
# Discogs does not have track IDs. Invent our own IDs as proposed
# in #2336.
track.track_id = str(album_id) + "-" + track.track_alt
# Retrieve master release id (returns None if there isn't one).
master_id = result.data.get('master_id')
# Assume `original_year` is equal to `year` for releases without
# a master release, otherwise fetch the master release.
original_year = self.get_master_year(master_id) if master_id else year
return AlbumInfo(album=album, album_id=album_id, artist=artist,
artist_id=artist_id, tracks=tracks,
albumtype=albumtype, va=va, year=year,
label=label, mediums=len(set(mediums)),
releasegroup_id=master_id, catalognum=catalogno,
country=country, style=style, genre=genre,
media=media, original_year=original_year,
data_source='Discogs', data_url=data_url,
discogs_albumid=discogs_albumid,
discogs_labelid=labelid, discogs_artistid=artist_id)
def format(self, classification):
if classification:
return self.config['separator'].as_str() \
.join(sorted(classification))
else:
return None
def get_tracks(self, tracklist):
"""Returns a list of TrackInfo objects for a discogs tracklist.
"""
try:
clean_tracklist = self.coalesce_tracks(tracklist)
except Exception as exc:
# FIXME: this is an extra precaution for making sure there are no
# side effects after #2222. It should be removed after further
# testing.
self._log.debug('{}', traceback.format_exc())
self._log.error('uncaught exception in coalesce_tracks: {}', exc)
clean_tracklist = tracklist
tracks = []
index_tracks = {}
index = 0
# Distinct works and intra-work divisions, as defined by index tracks.
divisions, next_divisions = [], []
for track in clean_tracklist:
# Only real tracks have `position`. Otherwise, it's an index track.
if track['position']:
index += 1
if next_divisions:
# End of a block of index tracks: update the current
# divisions.
divisions += next_divisions
del next_divisions[:]
track_info = self.get_track_info(track, index, divisions)
track_info.track_alt = track['position']
tracks.append(track_info)
else:
next_divisions.append(track['title'])
# We expect new levels of division at the beginning of the
# tracklist (and possibly elsewhere).
try:
divisions.pop()
except IndexError:
pass
index_tracks[index + 1] = track['title']
# Fix up medium and medium_index for each track. Discogs position is
# unreliable, but tracks are in order.
medium = None
medium_count, index_count, side_count = 0, 0, 0
sides_per_medium = 1
        # If a medium has two sides (i.e. vinyl or cassette), each pair of
        # consecutive sides should belong to the same medium.
if all([track.medium is not None for track in tracks]):
m = sorted({track.medium.lower() for track in tracks})
# If all track.medium are single consecutive letters, assume it is
# a 2-sided medium.
if ''.join(m) in ascii_lowercase:
sides_per_medium = 2
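        # Illustrative (not in the original source): mediums {'A', 'B', 'C',
        # 'D'} sort and join to 'abcd', a substring of ascii_lowercase, so
        # each pair of sides (A/B, C/D) is treated as one two-sided medium.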
for track in tracks:
# Handle special case where a different medium does not indicate a
# new disc, when there is no medium_index and the ordinal of medium
# is not sequential. For example, I, II, III, IV, V. Assume these
# are the track index, not the medium.
# side_count is the number of mediums or medium sides (in the case
# of two-sided mediums) that were seen before.
medium_is_index = track.medium and not track.medium_index and (
len(track.medium) != 1 or
# Not within standard incremental medium values (A, B, C, ...).
ord(track.medium) - 64 != side_count + 1
)
if not medium_is_index and medium != track.medium:
side_count += 1
if sides_per_medium == 2:
if side_count % sides_per_medium:
# Two-sided medium changed. Reset index_count.
index_count = 0
medium_count += 1
else:
# Medium changed. Reset index_count.
medium_count += 1
index_count = 0
medium = track.medium
index_count += 1
medium_count = 1 if medium_count == 0 else medium_count
track.medium, track.medium_index = medium_count, index_count
# Get `disctitle` from Discogs index tracks. Assume that an index track
# before the first track of each medium is a disc title.
for track in tracks:
if track.medium_index == 1:
if track.index in index_tracks:
disctitle = index_tracks[track.index]
else:
disctitle = None
track.disctitle = disctitle
return tracks
def coalesce_tracks(self, raw_tracklist):
"""Pre-process a tracklist, merging subtracks into a single track. The
title for the merged track is the one from the previous index track,
if present; otherwise it is a combination of the subtracks titles.
"""
def add_merged_subtracks(tracklist, subtracks):
"""Modify `tracklist` in place, merging a list of `subtracks` into
a single track into `tracklist`."""
# Calculate position based on first subtrack, without subindex.
idx, medium_idx, sub_idx = \
self.get_track_index(subtracks[0]['position'])
position = '{}{}'.format(idx or '', medium_idx or '')
if tracklist and not tracklist[-1]['position']:
# Assume the previous index track contains the track title.
if sub_idx:
# "Convert" the track title to a real track, discarding the
# subtracks assuming they are logical divisions of a
# physical track (12.2.9 Subtracks).
tracklist[-1]['position'] = position
else:
# Promote the subtracks to real tracks, discarding the
# index track, assuming the subtracks are physical tracks.
index_track = tracklist.pop()
# Fix artists when they are specified on the index track.
if index_track.get('artists'):
for subtrack in subtracks:
if not subtrack.get('artists'):
subtrack['artists'] = index_track['artists']
# Concatenate index with track title when index_tracks
# option is set
if self.config['index_tracks']:
for subtrack in subtracks:
subtrack['title'] = '{}: {}'.format(
index_track['title'], subtrack['title'])
tracklist.extend(subtracks)
else:
# Merge the subtracks, pick a title, and append the new track.
track = subtracks[0].copy()
track['title'] = ' / '.join([t['title'] for t in subtracks])
tracklist.append(track)
# Pre-process the tracklist, trying to identify subtracks.
subtracks = []
tracklist = []
prev_subindex = ''
for track in raw_tracklist:
# Regular subtrack (track with subindex).
if track['position']:
_, _, subindex = self.get_track_index(track['position'])
if subindex:
if subindex.rjust(len(raw_tracklist)) > prev_subindex:
# Subtrack still part of the current main track.
subtracks.append(track)
else:
# Subtrack part of a new group (..., 1.3, *2.1*, ...).
add_merged_subtracks(tracklist, subtracks)
subtracks = [track]
prev_subindex = subindex.rjust(len(raw_tracklist))
continue
# Index track with nested sub_tracks.
if not track['position'] and 'sub_tracks' in track:
# Append the index track, assuming it contains the track title.
tracklist.append(track)
add_merged_subtracks(tracklist, track['sub_tracks'])
continue
# Regular track or index track without nested sub_tracks.
if subtracks:
add_merged_subtracks(tracklist, subtracks)
subtracks = []
prev_subindex = ''
tracklist.append(track)
# Merge and add the remaining subtracks, if any.
if subtracks:
add_merged_subtracks(tracklist, subtracks)
return tracklist
def get_track_info(self, track, index, divisions):
"""Returns a TrackInfo object for a discogs track.
"""
title = track['title']
if self.config['index_tracks']:
prefix = ', '.join(divisions)
if prefix:
title = f'{prefix}: {title}'
track_id = None
medium, medium_index, _ = self.get_track_index(track['position'])
artist, artist_id = MetadataSourcePlugin.get_artist(
track.get('artists', [])
)
length = self.get_track_length(track['duration'])
return TrackInfo(title=title, track_id=track_id, artist=artist,
artist_id=artist_id, length=length, index=index,
medium=medium, medium_index=medium_index)
def get_track_index(self, position):
"""Returns the medium, medium index and subtrack index for a discogs
track position."""
# Match the standard Discogs positions (12.2.9), which can have several
# forms (1, 1-1, A1, A1.1, A1a, ...).
match = re.match(
r'^(.*?)' # medium: everything before medium_index.
r'(\d*?)' # medium_index: a number at the end of
# `position`, except if followed by a subtrack
# index.
# subtrack_index: can only be matched if medium
# or medium_index have been matched, and can be
r'((?<=\w)\.[\w]+' # - a dot followed by a string (A.1, 2.A)
r'|(?<=\d)[A-Z]+' # - a string that follows a number (1A, B2a)
r')?'
r'$',
position.upper()
)
if match:
medium, index, subindex = match.groups()
if subindex and subindex.startswith('.'):
subindex = subindex[1:]
else:
self._log.debug('Invalid position: {0}', position)
medium = index = subindex = None
return medium or None, index or None, subindex or None
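    # Illustrative get_track_index results (positions are upper-cased first):
    #   'A1'   -> ('A', '1', None)
    #   '12.2' -> (None, '12', '2')
    #   'A1a'  -> ('A', '1', 'A')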
def get_track_length(self, duration):
"""Returns the track length in seconds for a discogs duration.
"""
try:
length = time.strptime(duration, '%M:%S')
except ValueError:
return None
return length.tm_min * 60 + length.tm_sec
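    # For example (illustrative): get_track_length('3:45') -> 225, while
    # malformed durations such as '' or '1:02:03' yield None.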
|
beetbox/beets
|
beetsplug/discogs.py
|
Python
|
mit
| 25,902
|
[
"VisIt"
] |
502fb6fade8b7638af1d3e6141509d6556490fff55b7e9edda32d66df9c30873
|
#!/usr/bin/env python
""" create rst files for documentation of DIRAC """
import os
import shutil
import socket
import sys
import logging
import glob
from diracdoctools.Utilities import writeLinesToFile, mkdir, makeLogger
from diracdoctools.Config import Configuration, CLParser as clparser
LOG = makeLogger('CodeReference')
# global used inside the CustomizedDocs modules
CUSTOMIZED_DOCSTRINGS = {}
class CLParser(clparser):
"""Extension to CLParser to also parse buildType."""
def __init__(self):
super(CLParser, self).__init__()
self.log = LOG.getChild('CLParser')
self.clean = False
self.parser.add_argument('--buildType', action='store', default='full',
choices=['full', 'limited'],
help='Build full or limited code reference',
)
self.parser.add_argument('--clean', action='store_true',
help='Remove rst files and exit',
)
def parse(self):
super(CLParser, self).parse()
self.log.info('Parsing options')
self.buildType = self.parsed.buildType
self.clean = self.parsed.clean
def optionDict(self):
oDict = super(CLParser, self).optionDict()
oDict['buildType'] = self.buildType
oDict['clean'] = self.clean
return oDict
class CodeReference(object):
"""Module to create rst files containing autodoc for sphinx."""
def __init__(self, configFile='docs.conf'):
self.config = Configuration(configFile, sections=['Code'])
self.orgWorkingDir = os.getcwd()
def end(self):
"""Make sure we are back in the original working directory."""
LOG.info('Done with creating code reference')
os.chdir(self.orgWorkingDir)
def getCustomDocs(self):
"""Import the dynamically created docstrings from the files in CustomizedDocs.
Use 'exec' to avoid a lot of relative import, pylint errors, etc.
"""
customizedPath = os.path.join(self.config.code_customDocsPath, '*.py')
LOG.info('Looking for custom strings in %s', customizedPath)
for filename in glob.glob(customizedPath):
LOG.info('Found customization: %s', filename)
exec(open(filename).read(), globals()) # pylint: disable=exec-used
def mkPackageRst(self, filename, modulename, fullmodulename, subpackages=None, modules=None):
"""Make a rst file for module containing other modules."""
if modulename == 'scripts':
return
else:
modulefinal = modulename
lines = []
lines.append('%s' % modulefinal)
lines.append('=' * len(modulefinal))
lines.append('.. module:: %s ' % fullmodulename)
lines.append('')
if subpackages or modules:
lines.append('.. toctree::')
lines.append(' :maxdepth: 1')
lines.append('')
subpackages = [s for s in subpackages if not s.endswith(('scripts', ))]
if subpackages:
LOG.info('Module %r with subpackages: %r', fullmodulename, ', '.join(subpackages))
lines.append('SubPackages')
lines.append('...........')
lines.append('')
lines.append('.. toctree::')
lines.append(' :maxdepth: 1')
lines.append('')
for package in sorted(subpackages):
lines.append(' %s/%s_Module.rst' % (package, package.split('/')[-1]))
lines.append('')
# remove CLI etc. because we drop them earlier
modules = [m for m in modules if not m.endswith('CLI') and '-' not in m]
if modules:
lines.append('Modules')
lines.append('.......')
lines.append('')
lines.append('.. toctree::')
lines.append(' :maxdepth: 1')
lines.append('')
for module in sorted(modules):
lines.append(' %s.rst' % (module.split('/')[-1],))
lines.append('')
writeLinesToFile(filename, lines)
def mkDummyRest(self, classname, fullclassname):
"""Create a dummy rst file for files that behave badly."""
filename = classname + '.rst'
lines = []
lines.append('%s' % classname)
lines.append('=' * len(classname))
lines.append('')
lines.append('.. py:module:: %s' % fullclassname)
lines.append('')
lines.append('This is an empty file, because we cannot parse this file correctly or it causes problems.')
    lines.append('Please look at the source code directly.')
writeLinesToFile(filename, lines)
def mkModuleRst(self, classname, fullclassname, buildtype='full'):
"""Create rst file for module."""
LOG.info('Creating rst file for %r, aka %r', classname, fullclassname)
filename = classname + '.rst'
lines = []
lines.append('%s' % classname)
lines.append('=' * len(classname))
lines.append('.. automodule:: %s' % fullclassname)
if buildtype == 'full':
lines.append(' :members:')
if classname not in self.config.code_noInherited:
lines.append(' :inherited-members:')
lines.append(' :undoc-members:')
lines.append(' :show-inheritance:')
if classname in self.config.code_privateMembers:
lines.append(' :special-members:')
lines.append(' :private-members:')
else:
lines.append(' :special-members: __init__')
if classname.startswith('_'):
lines.append(' :private-members:')
if fullclassname in CUSTOMIZED_DOCSTRINGS:
ds = CUSTOMIZED_DOCSTRINGS[fullclassname]
if ds.replace:
lines = ds.doc_string
else:
lines.append(ds.doc_string)
writeLinesToFile(filename, lines)
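  # Sketch of the generated rst (illustrative, module names assumed): for a
  # module 'Foo' with full name 'DIRAC.Core.Foo' and buildtype 'full',
  # Foo.rst would contain roughly:
  #
  #   Foo
  #   ===
  #   .. automodule:: DIRAC.Core.Foo
  #      :members:
  #      :inherited-members:
  #      :undoc-members:
  #      :show-inheritance:
  #      :special-members: __init__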
def getsubpackages(self, abspath, direc):
"""return list of subpackages with full path"""
packages = []
for dire in direc:
if dire.lower() == 'test' or dire.lower() == 'tests' or '/test' in dire.lower():
LOG.debug('Skipping test directory: %s/%s', abspath, dire)
continue
if dire.lower() == 'docs' or '/docs' in dire.lower():
LOG.debug('Skipping docs directory: %s/%s', abspath, dire)
continue
if os.path.exists(os.path.join(self.config.sourcePath, abspath, dire, '__init__.py')):
packages.append(os.path.join(dire))
return packages
def getmodules(self, abspath, _direc, files):
"""Return list of subpackages with full path."""
packages = []
for filename in files:
if filename.lower().startswith('test') or filename.lower().endswith('test') or \
any(f.lower() in filename.lower() for f in self.config.code_ignoreFiles):
LOG.debug('Skipping file: %s/%s', abspath, filename)
continue
if 'test' in filename.lower():
        LOG.warning("File contains 'test', but is kept: %s/%s", abspath, filename)
if filename != '__init__.py':
packages.append(filename.split('.py')[0])
return packages
def cleanDoc(self):
"""Remove the code output folder."""
LOG.info('Removing existing code documentation: %r', self.config.code_targetPath)
if os.path.exists(self.config.code_targetPath):
shutil.rmtree(self.config.code_targetPath)
def createDoc(self, buildtype="full"):
"""create the rst files for all the things we want them for"""
LOG.info('self.config.sourcePath: %s', self.config.sourcePath)
LOG.info('self.config.targetPath: %s', self.config.code_targetPath)
LOG.info('Host: %s', socket.gethostname())
# we need to replace existing rst files so we can decide how much code-doc to create
if os.path.exists(self.config.code_targetPath) and os.environ.get('READTHEDOCS', 'False') == 'True':
self.cleanDoc()
mkdir(self.config.code_targetPath)
os.chdir(self.config.code_targetPath)
self.getCustomDocs()
LOG.info('Now creating rst files: starting in %r', self.config.sourcePath)
firstModule = True
for root, direc, files in os.walk(self.config.sourcePath):
configTemplate = [os.path.join(root, _) for _ in files if _ == 'ConfigTemplate.cfg']
files = [_ for _ in files if _.endswith('.py')]
if '__init__.py' not in files:
continue
elif any(f.lower() in root.lower() for f in self.config.code_ignoreFolders):
LOG.debug('Skipping folder: %s', root)
continue
modulename = root.split('/')[-1].strip('.')
codePath = root.split(self.config.sourcePath)[1].strip('/.')
docPath = codePath
if docPath.startswith(self.config.moduleName):
docPath = docPath[len(self.config.moduleName) + 1:]
fullmodulename = '.'.join(codePath.split('/')).strip('.')
if not fullmodulename.startswith(self.config.moduleName):
fullmodulename = ('.'.join([self.config.moduleName, fullmodulename])).strip('.')
packages = self.getsubpackages(codePath, direc)
if docPath:
LOG.debug('Trying to create folder: %s', docPath)
mkdir(docPath)
os.chdir(docPath)
if firstModule:
firstModule = False
self.createCodeDocIndex(
subpackages=packages,
modules=self.getmodules(
codePath,
direc,
files),
buildtype=buildtype)
elif buildtype == 'limited':
os.chdir(self.config.code_targetPath)
return 0
else:
self.mkPackageRst(
modulename + '_Module.rst',
modulename,
fullmodulename,
subpackages=packages,
modules=self.getmodules(
docPath,
direc,
files))
for filename in files:
# Skip things that call parseCommandLine or similar issues
fullclassname = '.'.join(docPath.split('/') + [filename])
if not fullclassname.startswith(self.config.moduleName):
fullclassname = '.'.join([self.config.moduleName, fullclassname])
if any(f in filename for f in self.config.code_dummyFiles):
LOG.debug('Creating dummy for file %r', filename)
self.mkDummyRest(filename.split('.py')[0], fullclassname.split('.py')[0])
continue
elif not filename.endswith('.py') or \
filename.endswith('CLI.py') or \
filename.lower().startswith('test') or \
filename == '__init__.py' or \
any(f in filename for f in self.config.code_ignoreFiles) or \
'-' in filename: # not valid python identifier, e.g. dirac-pilot
LOG.debug('Ignoring file %r', filename)
continue
self.mkModuleRst(filename.split('.py')[0], fullclassname.split('.py')[0], buildtype)
# copy configTemplate files to code doc so we can import them in the agent docstrings
if configTemplate:
shutil.copy(configTemplate[0], os.path.join(self.config.code_targetPath, docPath))
os.chdir(self.config.code_targetPath)
return 0
def createCodeDocIndex(self, subpackages, modules, buildtype="full"):
"""create the main index file"""
LOG.info('Creating base index file')
filename = 'index.rst'
lines = []
lines.append('.. _code_documentation:')
lines.append('')
lines.append('Code Documentation (|release|)')
lines.append('------------------------------')
# for limited builds we only create the most basic code documentation so
# we let users know there is more elsewhere
if buildtype == 'limited':
lines.append('')
lines.append('.. warning::')
lines.append(
' This a limited build of the code documentation, for the full code documentation '
'please look at the website')
lines.append('')
else:
if subpackages or modules:
lines.append('.. toctree::')
lines.append(' :maxdepth: 1')
lines.append('')
if subpackages:
systemPackages = sorted([pck for pck in subpackages if pck.endswith('System')])
otherPackages = sorted([pck for pck in subpackages if not pck.endswith('System')])
lines.append('=======')
lines.append('Systems')
lines.append('=======')
lines.append('')
lines.append('.. toctree::')
lines.append(' :maxdepth: 1')
lines.append('')
for package in systemPackages:
lines.append(' %s/%s_Module.rst' % (package, package.split('/')[-1]))
lines.append('')
lines.append('=====')
lines.append('Other')
lines.append('=====')
lines.append('')
lines.append('.. toctree::')
lines.append(' :maxdepth: 1')
lines.append('')
for package in otherPackages:
lines.append(' %s/%s_Module.rst' % (package, package.split('/')[-1]))
if modules:
for module in sorted(modules):
lines.append(' %s.rst' % (module.split('/')[-1],))
writeLinesToFile(filename, lines)
def checkBuildTypeAndRun(self, buildType='full'):
"""Check for input argument and then create the doc rst files."""
buildTypes = ('full', 'limited')
if buildType not in buildTypes:
LOG.error('Unknown build type: %s use %s ', buildType, ' '.join(buildTypes))
return 1
LOG.info('Buildtype: %s', buildType)
return self.createDoc(buildType)
def run(configFile='docs.conf', logLevel=logging.INFO, debug=False, buildType='full', clean=False):
"""Create the code reference.
:param str configFile: path to the configFile
:param logLevel: logging level to use
:param bool debug: if true even more debug information is printed
:param str buildType: 'full' or 'limited', use limited only when memory is limited
:param bool clean: Remove rst files and exit
:returns: return value 1 or 0
"""
logging.getLogger().setLevel(logLevel)
code = CodeReference(configFile=configFile)
if clean:
code.cleanDoc()
return 0
retVal = code.checkBuildTypeAndRun(buildType=buildType)
code.end()
return retVal
if __name__ == '__main__':
sys.exit(run(**(CLParser().optionDict())))
|
fstagni/DIRAC
|
docs/diracdoctools/cmd/codeReference.py
|
Python
|
gpl-3.0
| 13,798
|
[
"DIRAC"
] |
465e12f980a4528e430f5de4d6b61aacccea2922abf9b1c9232c90c309fed1e7
|
import sys
import os.path
#sys.path.insert(0, '/home/andy/theano/tool_examples/theano-lstm-0.0.15')
from theano_lstm import Embedding, LSTM, RNN, StackedCells, Layer, create_optimization_updates, masked_loss
from utilities import *
import dill
import argparse
#import cPickle
import pickle
import numpy as np  # the code below uses the conventional np alias
from collections import OrderedDict
import theano, theano.tensor as T
import turing_model
from theano_toolkit.parameters import Parameters
from theano.compile.nanguardmode import NanGuardMode
DESCRIPTION = """
Recurrent neural network based statistical language modelling toolkit
(based on LSTM algorithm)
Implemented by Daniel Soutner,
Department of Cybernetics, University of West Bohemia, Plzen, Czech rep.
dsoutner@kky.zcu.cz, 2013
"""
def parse_args(parser):
parser.add_argument('--train', nargs=1, action="store", metavar="FILE",
help='training file !')
parser.add_argument('--valid', nargs=1, action="store", metavar="FILE",
help='valid file !')
parser.add_argument('--test', nargs=1, action="store", metavar="FILE",
help='testing file for ppl!')
parser.add_argument('--neuron-type', action="store", dest='celltype',
help='type of hidden neurons, RNN/LSTM, default: RNN', type=str, default='RNN')
parser.add_argument('--train-method', action="store", dest='train_method',
help='training method LSTM/TURING/ALL, default: ALL', type=str, default='ALL')
parser.add_argument('--projection-size', action="store", dest='n_projection',
help='Number of neurons in projection layer, default: 100', type=int, default=100)
parser.add_argument('--hidden-size', action="store", dest='n_hidden',
help='Number of neurons in hidden layer, default: 100', type=int, default=100)
parser.add_argument('--stack', action="store", dest='n_stack',
help='Number of hidden neurons, default: 1 ', type=int, default=1)
parser.add_argument('--learning-rate', action="store", dest='lr',
help='learing rate at begining, default: 0.01 ', type=float, default=0.01)
parser.add_argument('--improvement-rate', action="store", dest='improvement_rate',
help='relative improvement for early stopping on ppl , default: 0.005 ', type=float, default=0.005)
parser.add_argument('--minibatch-size', action="store", dest='minibatch_size',
help='minibatch size for training, default: 100', type=int, default=100)
parser.add_argument('--max-epoch', action="store", dest='max_epoch',
help='maximum number of epoch if not early stopping, default: 1000', type=int, default=1000)
parser.add_argument('--early-stop', action="store", dest='early_stop',
help='1 for early-stopping, 0 for not', type=int, default=1)
parser.add_argument('--save-net', action="store", dest="save_net", default=None, metavar="FILE",
help="Save RNN to file")
parser.add_argument('--load-net', action="store", dest="load_net", default=None, metavar="FILE",
help="Load RNN from file")
return parser.parse_args()
def build_vocab(data_file_str):
lines = []
data_file = open(data_file_str)
for line in data_file:
tokens = line.replace('\n','.')
lines.append(tokens)
data_file.close()
vocab = Vocab()
for line in lines:
vocab.add_words(line.split(" "))
return vocab
def load_data(data_file_str, vocab, data_type):
lines = []
data_file = open(data_file_str)
for line in data_file:
tokens = line.replace('\n','.')
        # Skip overly long sentences in the training set: they slow
        # training considerably and degrade performance.
tokens_for_count = line.replace('\n','').split(' ')
if len(tokens_for_count) > 50 and data_type == 'train':
continue
lines.append(tokens)
data_file.close()
# transform into big numerical matrix of sentences:
numerical_lines = []
for line in lines:
numerical_lines.append(vocab(line))
numerical_lines, numerical_lengths = pad_into_matrix(numerical_lines)
return numerical_lines, numerical_lengths
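# Illustrative shapes (a sketch; pad_into_matrix comes from utilities and is
# assumed to zero-pad rows): for three sentences of lengths 4, 7 and 5,
# numerical_lines would have shape (3, 7) and numerical_lengths == [4, 7, 5].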
def softmax(x):
"""
Wrapper for softmax, helps with
pickling, and removing one extra
dimension that Theano adds during
its exponential normalization.
"""
return T.nnet.softmax(x.T)
def has_hidden(layer):
"""
Whether a layer has a trainable
initial hidden state.
"""
return hasattr(layer, 'initial_hidden_state')
def matrixify(vector, n):
return T.repeat(T.shape_padleft(vector), n, axis=0)
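# Illustrative (not in the original source): matrixify(v, 3) tiles a
# length-d vector v into a (3, d) matrix, one copy per row.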
def initial_state(layer, dimensions = None):
"""
Initalizes the recurrence relation with an initial hidden state
if needed, else replaces with a "None" to tell Theano that
the network **will** return something, but it does not need
to send it to the next step of the recurrence
"""
if dimensions is None:
return layer.initial_hidden_state if has_hidden(layer) else None
else:
return matrixify(layer.initial_hidden_state, dimensions) if has_hidden(layer) else None
def initial_state_with_taps(layer, dimensions = None):
"""Optionally wrap tensor variable into a dict with taps=[-1]"""
state = initial_state(layer, dimensions)
if state is not None:
return dict(initial=state, taps=[-1])
else:
return None
class Model:
"""
Simple predictive model for forecasting words from
sequence using LSTMs. Choose how many LSTMs to stack
what size their memory should be, and how many
words can be predicted.
"""
def __init__(self, hidden_size, input_size, vocab_size, stack_size=1, celltype=LSTM):
# core layer in RNN/LSTM
self.model = StackedCells(input_size, celltype=celltype, layers =[hidden_size] * stack_size)
# add an embedding
self.model.layers.insert(0, Embedding(vocab_size, input_size))
# add a classifier:
self.model.layers.append(Layer(hidden_size, vocab_size, activation = softmax))
self.turing_params = Parameters()
#init turing machine model
self.turing_updates , self.turing_predict = turing_model.build(self.turing_params , hidden_size , vocab_size)
self.hidden_size = hidden_size
# inputs are matrices of indices,
# each row is a sentence, each column a timestep
self._stop_word = theano.shared(np.int32(999999999), name="stop word")
self.for_how_long = T.ivector()
self.input_mat = T.imatrix()
self.priming_word = T.iscalar()
self.srng = T.shared_randomstreams.RandomStreams(np.random.randint(0, 1024))
# create symbolic variables for prediction:
#change by darong #issue : what is greedy
self.lstm_predictions = self.create_lstm_prediction()
self.final_predictions = self.create_final_prediction()
# create symbolic variable for greedy search:
self.greedy_predictions = self.create_lstm_prediction(greedy=True)
# create gradient training functions:
self.create_cost_fun()#create 2 cost func(lstm final)
self.lstm_lr = 0.01
self.turing_lr = 0.01
self.all_lr = 0.01
self.create_training_function()#create 3 functions(lstm turing all)
self.create_predict_function()#create 2 predictions(lstm final)
# create ppl
self.lstm_ppl = self.create_lstm_ppl()
self.final_ppl = self.create_final_ppl()
self.create_ppl_function()
def save(self, save_file, vocab):
pickle.dump(self.model, open(save_file, "wb")) # pickle is for lambda function, cPickle cannot
pickle.dump(vocab, open(save_file+'.vocab', "wb")) # pickle is for lambda function, cPickle cannot
def save_turing(self, save_file):
self.turing_params.save(save_file + '.turing')
def load(self, load_file, lr):
self.model = pickle.load(open(load_file, "rb"))
if os.path.isfile(load_file + '.turing') :
self.turing_params.load(load_file + '.turing')
else :
print "no turing model!!!! pretrain with lstm param"
self.turing_params['W_input_hidden'] = self.model.layers[-1].params[0].get_value().T #not sure
self.turing_params['W_read_hidden'] = self.model.layers[-1].params[0].get_value().T
self.turing_params['b_hidden_0'] = self.model.layers[-1].params[1].get_value()
temp = self.model.layers[1].initial_hidden_state.get_value()[self.hidden_size:]
self.turing_params['memory_init'] = temp.reshape((1,)+temp.shape)
# need to compile again for calculating predictions after loading lstm
self.srng = T.shared_randomstreams.RandomStreams(np.random.randint(0, 1024))
self.lstm_predictions = self.create_lstm_prediction()
self.final_predictions = self.create_final_prediction()
self.greedy_predictions = self.create_lstm_prediction(greedy=True)#can change to final
self.create_cost_fun()#create 2 cost func(lstm final)
self.lstm_lr = lr
self.turing_lr = lr#change this
self.all_lr = lr
self.create_training_function()#create 3 functions(lstm turing all)
self.create_predict_function()#create 2 predictions(lstm final)
self.lstm_ppl = self.create_lstm_ppl()
self.final_ppl = self.create_final_ppl()
self.create_ppl_function()
# print "done compile"
def stop_on(self, idx):
self._stop_word.set_value(idx)
@property
def params(self):
return self.model.params
def create_lstm_prediction(self, greedy=False):
def step(idx, *states):
# new hiddens are the states we need to pass to LSTMs
# from past. Because the StackedCells also include
# the embeddings, and those have no state, we pass
# a "None" instead:
new_hiddens = [None] + list(states)
new_states = self.model.forward(idx, prev_hiddens = new_hiddens)
if greedy:
new_idxes = new_states[-1]
new_idx = new_idxes.argmax()
# provide a stopping condition for greedy search:
return ([new_idx.astype(self.priming_word.dtype)] + new_states[1:-1]), theano.scan_module.until(T.eq(new_idx,self._stop_word))
else:
return new_states[1:]
# in sequence forecasting scenario we take everything
# up to the before last step, and predict subsequent
# steps ergo, 0 ... n - 1, hence:
inputs = self.input_mat[:, 0:-1]
num_examples = inputs.shape[0]
# pass this to Theano's recurrence relation function:
# choose what gets outputted at each timestep:
if greedy:
outputs_info = [dict(initial=self.priming_word, taps=[-1])] + [initial_state_with_taps(layer) for layer in self.model.layers[1:-1]]
result, _ = theano.scan(fn=step,
n_steps=200,
outputs_info=outputs_info)
else:
outputs_info = [initial_state_with_taps(layer, num_examples) for layer in self.model.layers[1:]]
result, _ = theano.scan(fn=step,
sequences=[inputs.T],
outputs_info=outputs_info)
if greedy:
return result[0]
# softmaxes are the last layer of our network,
# and are at the end of our results list:
return result[-1].transpose((2,0,1))
# we reorder the predictions to be:
# 1. what row / example
# 2. what timestep
# 3. softmax dimension
def create_final_prediction(self, greedy=False):
def step(idx, *states):
# new hiddens are the states we need to pass to LSTMs
# from past. Because the StackedCells also include
# the embeddings, and those have no state, we pass
# a "None" instead:
new_hiddens = [None] + list(states)
new_states = self.model.forward(idx, prev_hiddens = new_hiddens)
if greedy:
new_idxes = new_states[-1]
new_idx = new_idxes.argmax()
# provide a stopping condition for greedy search:
return ([new_idx.astype(self.priming_word.dtype)] + new_states[1:-1]), theano.scan_module.until(T.eq(new_idx,self._stop_word))
else:
return new_states[1:]
# in sequence forecasting scenario we take everything
# up to the before last step, and predict subsequent
# steps ergo, 0 ... n - 1, hence:
inputs = self.input_mat[:, 0:-1]
num_examples = inputs.shape[0]
# pass this to Theano's recurrence relation function:
# choose what gets outputted at each timestep:
if greedy:
outputs_info = [dict(initial=self.priming_word, taps=[-1])] + [initial_state_with_taps(layer) for layer in self.model.layers[1:-1]]
result, _ = theano.scan(fn=step,
n_steps=200,
outputs_info=outputs_info)
else:
outputs_info = [initial_state_with_taps(layer, num_examples) for layer in self.model.layers[1:]]
result, _ = theano.scan(fn=step,
sequences=[inputs.T],
outputs_info=outputs_info)
if greedy:
return result[0]
# softmaxes are the last layer of our network,
# and are at the end of our results list:
hidden_size = result[-2].shape[2]/2
turing_result = self.turing_predict(result[-2][:,:,hidden_size:])
#the last layer do transpose before compute
return turing_result.transpose((1,0,2))
# we reorder the predictions to be:
# 1. what row / example
# 2. what timestep
# 3. softmax dimension
def create_cost_fun (self):
# create a cost function that
# takes each prediction at every timestep
# and guesses next timestep's value:
what_to_predict = self.input_mat[:, 1:]
# because some sentences are shorter, we
# place masks where the sentences end:
# (for how long is zero indexed, e.g. an example going from `[2,3)`)
# has this value set 0 (here we substract by 1):
for_how_long = self.for_how_long - 1
# all sentences start at T=0:
starting_when = T.zeros_like(self.for_how_long)
self.lstm_cost = masked_loss(self.lstm_predictions,
what_to_predict,
for_how_long,
starting_when).sum()
self.final_cost = masked_loss(self.final_predictions,
what_to_predict,
for_how_long,
starting_when).sum()
def create_predict_function(self):
self.lstm_pred_fun = theano.function(
inputs=[self.input_mat],
outputs=self.lstm_predictions,
allow_input_downcast=True
)
self.final_pred_fun = theano.function(
inputs=[self.input_mat],
outputs=self.final_predictions,
allow_input_downcast=True
)
self.greedy_fun = theano.function(
inputs=[self.priming_word],
outputs=T.concatenate([T.shape_padleft(self.priming_word), self.greedy_predictions]),
allow_input_downcast=True
)
def create_training_function(self):
updates, _, _, _, _ = create_optimization_updates(self.lstm_cost, self.params, method="SGD", lr=self.lstm_lr)
# updates, _, _, _, _ = create_optimization_updates(self.cost, self.params, method="adadelta", lr=self.lr)
self.lstm_update_fun = theano.function(
inputs=[self.input_mat, self.for_how_long],
outputs=self.lstm_cost,
updates=updates,
allow_input_downcast=True)
updates_turing = self.turing_updates(self.final_cost , lr=self.turing_lr)
# updates, _, _, _, _ = create_optimization_updates(self.cost, self.params, method="adadelta", lr=self.lr)
self.turing_update_fun = theano.function(
inputs=[self.input_mat, self.for_how_long],
outputs=self.final_cost,
updates=updates_turing,
mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True),
allow_input_downcast=True)
all_updates_lstm, _, _, _, _ = create_optimization_updates(self.final_cost, self.params, method="SGD", lr=self.all_lr,part=True)
all_updates_turing_temp = self.turing_updates(self.final_cost , lr=self.all_lr)
updates_all = all_updates_lstm
for pair in all_updates_turing_temp :
updates_all[pair[0]] = pair[1]
self.all_update_fun = theano.function(
inputs=[self.input_mat, self.for_how_long],
outputs=self.final_cost,
updates=updates_all,
allow_input_downcast=True)
def create_lstm_ppl(self):
def timestep(predictions, label, len_example, total_len_example):
label_binary = T.gt(label[0:len_example-1], 0)
oov_count = T.shape(label_binary)[0] - T.sum(label_binary)
a = total_len_example
return T.sum(T.log( 1./ predictions[T.arange(len_example-1), label[0:len_example-1]]) * label_binary ), oov_count
result, _ = theano.scan(fn=timestep,
sequences=[ self.lstm_predictions, self.input_mat[:, 1:], self.for_how_long ],
non_sequences=T.sum(self.for_how_long))
oov_count_total = T.sum(result[1])
return T.exp(T.sum(result[0]).astype(theano.config.floatX)/(T.sum(self.for_how_long) - oov_count_total).astype(theano.config.floatX)).astype(theano.config.floatX)
def create_final_ppl(self):
def timestep(predictions, label, len_example, total_len_example):
label_binary = T.gt(label[0:len_example-1], 0)
oov_count = T.shape(label_binary)[0] - T.sum(label_binary)
a = total_len_example
return T.sum(T.log( 1./ predictions[T.arange(len_example-1), label[0:len_example-1]]) * label_binary ), oov_count
result, _ = theano.scan(fn=timestep,
sequences=[ self.final_predictions, self.input_mat[:, 1:], self.for_how_long ],
non_sequences=T.sum(self.for_how_long))
oov_count_total = T.sum(result[1])
return T.exp(T.sum(result[0]).astype(theano.config.floatX)/(T.sum(self.for_how_long) - oov_count_total).astype(theano.config.floatX)).astype(theano.config.floatX)
def create_ppl_function(self):
self.lstm_ppl_fun = theano.function(
inputs=[self.input_mat, self.for_how_long],
outputs=self.lstm_ppl,
allow_input_downcast=True)
self.final_ppl_fun = theano.function(
inputs=[self.input_mat, self.for_how_long],
outputs=self.final_ppl,
allow_input_downcast=True)
def __call__(self, x):
return self.pred_fun(x)#any problem??
def get_minibatch(full_data, full_lengths, minibatch_size, minibatch_idx):
    """Slice out one minibatch of padded rows together with the true lengths."""
    start = minibatch_size * minibatch_idx
    lengths = list(full_lengths[start:start + minibatch_size])
    # full_data is already padded to a common width, so the rows can be
    # sliced directly; no separate allocation is needed
    minibatch_data = full_data[start:start + minibatch_size, :]
    return minibatch_data, lengths
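# Illustrative (not in the original source): with minibatch_size = 100 and
# minibatch_idx = 2, rows 200..299 of the padded matrix are returned together
# with their true (unpadded) sentence lengths.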
def training(args, vocab, train_data, train_lengths, valid_data, valid_lengths):
# training information
print 'training information'
print '-------------------------------------------------------'
print 'method: %s' % args.train_method
print 'vocab size: %d' % len(vocab)
print 'sentences in training file: %d' % len(train_lengths)
print 'max length in training file: %d' % max(train_lengths)
print 'train file: %s' % args.train[0]
print 'valid file: %s' % args.valid[0]
print 'type: %s' % args.celltype
print 'project: %d' % args.n_projection
print 'hidden: %d' % args.n_hidden
print 'stack: %d' % args.n_stack
print 'learning rate: %f' % args.lr
print 'minibatch size: %d' % args.minibatch_size
print 'max epoch: %d' % args.max_epoch
print 'improvement rate: %f' % args.improvement_rate
print 'save file: %s' % args.save_net
print 'load_model: %s' % args.load_net
print 'early-stop: %r' % args.early_stop
print '-------------------------------------------------------'
if args.celltype == 'LSTM':
celltype = LSTM
elif args.celltype == 'RNN':
celltype = RNN
print 'start initializing model'
# construct model & theano functions:
model = Model(
input_size=args.n_projection,
hidden_size=args.n_hidden,
vocab_size=len(vocab),
stack_size=args.n_stack, # make this bigger, but makes compilation slow
celltype=celltype # use RNN or LSTM
)
if args.lr :
model.lstm_lr = args.lr
model.turing_lr = args.lr
model.all_lr = args.lr
model.stop_on(vocab.word2index["."])
if args.load_net :
if args.lr :
model.load(args.load_net, args.lr)# 0 is useless
else :
model.load(args.load_net, 0)
# train:
#select correct train and prediction method according to train_method(LSTM/TURING/ALL)
if args.train_method == 'LSTM' :
update_fun = model.lstm_update_fun
ppl_fun = model.lstm_ppl_fun
lr = model.lstm_lr
print 'update lstm learning rate : %f' % model.lstm_lr
elif args.train_method == 'TURING' :
update_fun = model.turing_update_fun
ppl_fun = model.final_ppl_fun
lr = model.turing_lr
print 'update turing learning rate : %f' % model.turing_lr
else :
update_fun = model.all_update_fun
ppl_fun = model.final_ppl_fun
lr = model.all_lr
print 'update all learning rate : %f' % model.all_lr
stop_count = 0 # for stop training
change_count = 0 # for change learning rate
print 'start training'
min_valid_ppl = float('inf')
for epoch in range(args.max_epoch):
print "\nepoch %d" % epoch
# minibatch part
minibatch_size = args.minibatch_size # how many examples in a minibatch
n_train_batches = len(train_lengths)/minibatch_size
train_ppl = 0
for minibatch_idx in range(n_train_batches):
minibatch_train_data, lengths = get_minibatch(train_data, train_lengths, minibatch_size, minibatch_idx)
error = update_fun(minibatch_train_data , list(lengths) )
minibatch_train_ppl = ppl_fun(minibatch_train_data, list(lengths))
train_ppl = train_ppl + minibatch_train_ppl * sum(lengths)
sys.stdout.write( '\n%d minibatch idx / %d total minibatch, ppl: %f '% (minibatch_idx+1, n_train_batches, minibatch_train_ppl) )
sys.stdout.flush() # important
        # handle the remaining partial minibatch, if any
        # (slice directly: get_minibatch would mis-index when given a smaller batch size)
        if (minibatch_idx + 1) * minibatch_size != len(train_lengths):
            start = (minibatch_idx + 1) * minibatch_size
            lengths = train_lengths[start:]
            minibatch_train_data = train_data[start:, :]
            error = update_fun(minibatch_train_data, list(lengths))
            minibatch_train_ppl = ppl_fun(minibatch_train_data, list(lengths))
            train_ppl = train_ppl + minibatch_train_ppl * sum(lengths)
train_ppl = train_ppl / sum(train_lengths)
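        # note: per-minibatch perplexities are weighted by their token counts
        # (sum of lengths), so train_ppl is an average over words, not over minibatches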
# print 'done training'
# valid ppl
minibatch_size = min(20, len(valid_lengths))
valid_ppl = 0
n_valid_batches = len(valid_lengths)/minibatch_size
for minibatch_idx in range(n_valid_batches):
minibatch_valid_data, lengths = get_minibatch(valid_data, valid_lengths, minibatch_size, minibatch_idx)
minibatch_valid_ppl = ppl_fun(minibatch_valid_data, list(lengths))
valid_ppl = valid_ppl + minibatch_valid_ppl * sum(lengths)
        # handle the remaining partial validation minibatch, if any
        if (minibatch_idx + 1) * minibatch_size != len(valid_lengths):
            start = (minibatch_idx + 1) * minibatch_size
            lengths = valid_lengths[start:]
            minibatch_valid_data = valid_data[start:, :]
            minibatch_valid_ppl = ppl_fun(minibatch_valid_data, list(lengths))
            valid_ppl = valid_ppl + minibatch_valid_ppl * sum(lengths)
valid_ppl = valid_ppl / sum(valid_lengths)
print "\ntrain ppl: %f, valid ppl: %f" % (train_ppl, valid_ppl)
if valid_ppl < min_valid_ppl:
min_valid_ppl = valid_ppl
model.save(args.save_net, vocab)
if args.train_method != 'LSTM' :
model.save_turing(args.save_net)
stop_count = 0
change_count = 0
print "save best model"
continue
if args.early_stop:
if (valid_ppl - min_valid_ppl) / min_valid_ppl > args.improvement_rate:
if stop_count > 2 or lr < 1e-6:
print 'stop training'
break
stop_count = stop_count + 1
elif (valid_ppl - min_valid_ppl) / min_valid_ppl > args.improvement_rate * 0.5:
# if change_count > 2:
print 'change learning rate from %f to %f' % (lr, lr/2)
model.lstm_lr = model.lstm_lr / 2.
model.turing_lr = model.turing_lr / 2.
model.all_lr = model.all_lr / 2.
if args.train_method == 'LSTM' :
lr = model.lstm_lr
elif args.train_method == 'TURING' :
lr = model.turing_lr
else :
lr = model.all_lr
# change_count = change_count + 1
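# Early-stopping policy implemented above (sketch): the best model is saved
# whenever validation perplexity improves; a degradation beyond improvement_rate
# relative to the best stops training after a few strikes (or once lr < 1e-6),
# while a milder degradation halves all learning rates instead.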
def testing(args, vocab, test_data, test_lengths):
model_load = Model(
input_size=1,
hidden_size=1,
vocab_size=1,
stack_size=1, # make this bigger, but makes compilation slow
celltype=RNN # use RNN or LSTM
)
model_load.stop_on(vocab.word2index["."])
if args.train_method != 'LSTM' :
if not os.path.isfile(args.load_net + '.turing') :
print "there is no trained turing file so we can't test by turing model!!"
sys.exit()
model_load.load(args.load_net, 0)
# test ppl
    # select the perplexity function according to train_method (LSTM/TURING/ALL)
if args.train_method == 'LSTM' :
ppl_fun = model_load.lstm_ppl_fun
else :
ppl_fun = model_load.final_ppl_fun
minibatch_size = 1
n_test_batches = len(test_lengths)
for minibatch_idx in range(n_test_batches):
minibatch_test_data, lengths = get_minibatch(test_data, test_lengths, minibatch_size, minibatch_idx)
minibatch_test_ppl = ppl_fun(minibatch_test_data, list(lengths))
print minibatch_test_ppl
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=DESCRIPTION)
args = parse_args(parser)
# if no args are passed
if len(sys.argv) == 1:
parser.print_help()
sys.exit()
if args.train:
vocab = build_vocab(args.train[0])
train_data, train_lengths = load_data(args.train[0], vocab, 'train')
valid_data, valid_lengths = load_data(args.valid[0], vocab, 'valid')
training(args, vocab, train_data, train_lengths, valid_data, valid_lengths)
elif args.test:
vocab = pickle.load(open(args.load_net+'.vocab', "rb"))
test_data, test_lengths = load_data(args.test[0], vocab, 'test')
        testing(args, vocab, test_data, test_lengths)
|
darongliu/Lstm_Turing_LM
|
lstm-neural-turing-machines-lm/analysis/v1-one-weight-same/lm_v4.py
|
Python
|
mit
| 25,147
|
[
"NEURON"
] |
8cc7ce15b6264b048d005d70364b8d053652abbadccb5dea6704b0065ca223b0
|
from apts.catalogs import Catalogs
def test_messier_catalog():
c = Catalogs.MESSIER
# Messier catalog contains 110 objects
assert len(c) == 110
# M13 is Hercules Globular Cluster - NGC 6205 (index starts from 0)
assert c.iloc[12]["NGC"] == "NGC 6205"
# M45 is Pleiades (index starts from 0)
assert c.iloc[44]["Messier"] == "M45"
# M82 is Cigar Galaxy - NGC 3034 (index starts from 0)
assert c.iloc[81]["NGC"] == "NGC 3034"
    # The Andromeda Galaxy has the largest angular width of any object in the catalog
assert c.sort_values(['Width'], ascending=[0]).iloc[0]["Name"] == "Andromeda Galaxy"
|
pozar87/apts
|
tests/catalogs_test.py
|
Python
|
apache-2.0
| 566
|
[
"Galaxy"
] |
d1b9eebbf8c35eaa70ad24e9fa6323c11f77c81dffeb586fa359da620b228574
|
import copy
import time,pdb
import ephem
import pandas as pd
import numpy as np
from astropy.io import ascii
from itertools import product
from .pdf import * # part of isoclassify package (TODO: make these imports explicit)
from .priors import * # part of isoclassify package (TODO: make these imports explicit)
from .plot import * # part of isoclassify package (TODO: make these imports explicit)
class obsdata():
def __init__(self):
self.plx = -99.0
self.plxe = -99.0
self.teff = -99.0
self.teffe = -99.0
self.logg = -99.0
self.logge = -99.0
self.feh = -99.0
self.fehe = -99.0
self.lum = -99.0
self.lume = -99.0
self.bmag = -99.0
self.bmage = -99.0
self.vmag = -99.0
self.vmage = -99.0
self.btmag = -99.0
self.btmage = -99.0
self.vtmag = -99.0
self.vtmage = -99.0
self.dmag = -99.0
self.dmage = -99.0
self.gmag = -99.0
self.gmage = -99.0
self.rmag = -99.0
self.rmage = -99.0
self.imag = -99.0
self.image = -99.0
self.zmag = -99.0
self.zmage = -99.0
self.jmag = -99.0
self.jmage = -99.0
self.hmag = -99.0
self.hmage = -99.0
self.kmag = -99.0
self.kmage = -99.0
self.gamag = -99.0
self.gamage = -99.0
self.bpmag = -99.0
self.bpmage = -99.0
self.rpmag = -99.0
self.rpmage = -99.0
self.numax = -99.0
self.numaxe = -99.0
self.dnu = -99.0
self.dnue = -99.0
def addspec(self,value,sigma):
self.teff = value[0]
self.teffe = sigma[0]
self.logg = value[1]
self.logge = sigma[1]
self.feh = value[2]
self.fehe = sigma[2]
def addlum(self,value,sigma):
self.lum = value[0]
self.lume = sigma[0]
def addbv(self,value,sigma):
self.bmag = value[0]
self.bmage = sigma[0]
self.vmag = value[1]
self.vmage = sigma[1]
def addbvt(self,value,sigma):
self.btmag = value[0]
self.btmage = sigma[0]
self.vtmag = value[1]
self.vtmage = sigma[1]
def addgriz(self,value,sigma):
self.gmag = value[0]
self.gmage = sigma[0]
self.rmag = value[1]
self.rmage = sigma[1]
self.imag = value[2]
self.image = sigma[2]
self.zmag = value[3]
self.zmage = sigma[3]
def addjhk(self,value,sigma):
self.jmag = value[0]
self.jmage = sigma[0]
self.hmag = value[1]
self.hmage = sigma[1]
self.kmag = value[2]
self.kmage = sigma[2]
def addgaia(self,value,sigma):
self.gamag = value[0]
self.gamage = sigma[0]
self.bpmag = value[1]
self.bpmage = sigma[1]
self.rpmag = value[2]
self.rpmage = sigma[2]
def addplx(self,value,sigma):
self.plx = value
self.plxe = sigma
def adddmag(self,value,sigma):
self.dmag = value
self.dmage = sigma
def addseismo(self,value,sigma):
self.numax = value[0]
self.numaxe = sigma[0]
self.dnu = value[1]
self.dnue = sigma[1]
def addcoords(self,value1,value2):
self.ra = value1
self.dec = value2
class resdata():
def __init__(self):
self.teff = 0.0
self.teffep = 0.0
self.teffem = 0.0
self.teffpx = 0.0
self.teffpy = 0.0
self.logg = 0.0
self.loggep = 0.0
self.loggem = 0.0
self.loggpx = 0.0
self.loggpy = 0.0
self.feh = 0.0
self.fehep = 0.0
self.fehem = 0.0
self.fehpx = 0.0
self.fehpy = 0.0
self.rad = 0.0
self.radep = 0.0
self.radem = 0.0
self.radpx = 0.0
self.radpy = 0.0
self.mass = 0.0
self.massep = 0.0
self.massem = 0.0
self.masspx = 0.0
self.masspy = 0.0
self.rho = 0.0
self.rhoep = 0.0
self.rhoem = 0.0
self.rhopx = 0.0
self.rhopy = 0.0
self.lum = 0.0
self.lumep = 0.0
self.lumem = 0.0
self.lumpx = 0.0
self.lumpy = 0.0
self.age = 0.0
self.ageep = 0.0
self.ageem = 0.0
self.agepx = 0.0
self.agepy = 0.0
self.avs = 0.0
self.avsep = 0.0
self.avsem = 0.0
self.avspx = 0.0
self.avspy = 0.0
self.dis = 0.0
self.disep = 0.0
self.disem = 0.0
self.dispx = 0.0
self.dispy = 0.0
self.teffsec = 0.0
self.teffsecep = 0.0
self.teffsecem = 0.0
self.teffsecpx = 0.0
self.teffsecpy = 0.0
self.radsec = 0.0
self.radsecep = 0.0
self.radsecem = 0.0
self.radsecpx = 0.0
self.radsecpy = 0.0
self.loggsec = 0.0
self.loggsecep = 0.0
self.loggsecem = 0.0
self.loggsecpx = 0.0
self.loggsecpy = 0.0
self.rhosec = 0.0
self.rhosecep = 0.0
self.rhosecem = 0.0
self.rhosecpx = 0.0
self.rhosecpy = 0.0
self.masssec = 0.0
self.masssecep = 0.0
self.masssecem = 0.0
self.masssecpx = 0.0
self.masssecpy = 0.0
class extinction():
def __init__(self):
self.ab = 1.3454449
self.av = 1.00
self.abt = 1.3986523
self.avt = 1.0602271
self.ag = 1.2348743
self.ar = 0.88343449
self.ai = 0.68095687
self.az = 0.48308430
self.aj = 0.28814896
self.ah = 0.18152716
self.ak = 0.11505195
self.aga=1.2348743
def classify(input, model, dustmodel=0, plot=1, useav=-99.0, ext=-99.0, band=''):
"""
Run grid based classifier
Args:
input (object): input object
model (dict): dictionary of arrays
dustmodel (Optional[DataFrame]): extinction model
useav (float):
ext (float):
"""
## constants
gsun = 27420.010
numaxsun = 3090.0
dnusun = 135.1
teffsun = 5772.0
# bolometric correction error; kinda needs to be motivated better ...
bcerr = 0.03
## extinction coefficients
extfactors = ext
## class containing output results
result = resdata()
# calculate colors + errors:
bvcol = input.bmag - input.vmag
bvtcol = input.btmag - input.vtmag
grcol = input.gmag - input.rmag
ricol = input.rmag - input.imag
izcol = input.imag - input.zmag
gicol = input.gmag - input.imag
rzcol = input.rmag - input.zmag
gzcol = input.gmag - input.zmag
jhcol = input.jmag - input.hmag
hkcol = input.hmag - input.kmag
jkcol = input.jmag - input.kmag
bpgacol = input.bpmag - input.gamag
garpcol = input.gamag - input.rpmag
bprpcol = input.bpmag - input.rpmag
vjcol = input.vmag - input.jmag
vtjcol = input.vtmag - input.jmag
gjcol = input.gmag - input.jmag
rjcol = input.rmag - input.jmag
vkcol = input.vmag - input.kmag
vtkcol = input.vtmag - input.kmag
gkcol = input.gmag - input.kmag
rkcol = input.rmag - input.kmag
gajcol = input.gamag - input.jmag
gakcol = input.gamag - input.kmag
bvcole = np.sqrt(input.bmage**2 + input.vmage**2)
bvtcole = np.sqrt(input.btmage**2 + input.vtmage**2)
grcole = np.sqrt(input.gmage**2 + input.rmage**2)
ricole = np.sqrt(input.rmage**2 + input.image**2)
izcole = np.sqrt(input.image**2 + input.zmage**2)
gicole = np.sqrt(input.gmage**2 + input.image**2)
rzcole = np.sqrt(input.rmage**2 + input.zmage**2)
gzcole = np.sqrt(input.gmage**2 + input.zmage**2)
jhcole = np.sqrt(input.jmage**2 + input.hmage**2)
hkcole = np.sqrt(input.hmage**2 + input.kmage**2)
jkcole = np.sqrt(input.jmage**2 + input.kmage**2)
bpgacole = np.sqrt(input.bpmage**2 + input.gamage**2)
garpcole = np.sqrt(input.gamage**2 + input.rpmage**2)
bprpcole = np.sqrt(input.bpmage**2 + input.rpmage**2)
vjcole = np.sqrt(input.vmage**2 + input.jmage**2)
vtjcole = np.sqrt(input.vtmage**2 + input.jmage**2)
gjcole = np.sqrt(input.gmage**2 + input.jmage**2)
rjcole = np.sqrt(input.rmage**2 + input.jmage**2)
vkcole = np.sqrt(input.vmage**2 + input.kmage**2)
vtkcole = np.sqrt(input.vtmage**2 + input.kmage**2)
gkcole = np.sqrt(input.gmage**2 + input.kmage**2)
rkcole = np.sqrt(input.rmage**2 + input.kmage**2)
gajcole = np.sqrt(input.gamage**2 + input.jmage**2)
gakcole = np.sqrt(input.gamage**2 + input.kmage**2)
# Compute extra color error term based on underestimation of stellar teff errors with nominal 2% error floor:
if ((input.gmag > -99.0) & (input.kmag > -99.0)):
gkexcole = compute_extra_gk_color_error(gkcol)
# Determine which gK error term is greater and use that one:
print("g - K error from photometry: ",gkcole)
print("g - K error from best-fit polynomial: ",gkexcole)
gkcole = max(gkcole,gkexcole)
print("Using g - K error: ",gkcole)
# apparent mag to use for distance estimation. set by "band" input
redmap = -99.0
if (getattr(input,band) > -99.):
redmap = getattr(input,band)
redmape = getattr(input,band+'e')
model_mabs = model[band]
# correct for companion
if (input.dmag != -99.):
dx=-0.4*input.dmag
dxe=-0.4*input.dmage
cor=2.5*np.log10(1.+10**dx)
redmap = redmap+cor
redmape = np.sqrt( redmape**2 + (dxe*2.5*10**dx/(1.+10**dx))**2)
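    # the correction above removes the companion's flux from the blended magnitude:
    # with dx = -0.4*dmag the secondary-to-primary flux ratio is 10**dx, so adding
    # cor = 2.5*log10(1 + 10**dx) makes the magnitude fainter by the companion's
    # contribution; the error term propagates dmage through the same expression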
# absolute magnitude
if (input.plx > -99.0):
mabs = -5.0 * np.log10(1.0 / input.plx) + redmap + 5.0
mabse = np.sqrt(
(-5.0 / (input.plx * np.log(10)))**2 * input.plxe**2
+ redmape**2 + bcerr**2)
# Also compute extra error term for M-dwarfs with K band mags only:
if (mabs > 4.0) and (input.kmag > -99.0):
print("M-dwarf with K band magnitude detected!")
mabseex = compute_extra_MK_error(mabs)
print("M_K from photometry: ",mabse)
print("M_K error from best-fit polynomial: ",mabseex)
mabse = np.sqrt(mabse**2 + mabseex**2)
print("After adding in quadrature, using M_K error: ",mabse)
else:
mabs = -99.0
mabse = -99.0
# pre-select model grid; first only using reddening-independent quantities
sig = 4.0
um = np.arange(0,len(model['teff']),1)
if (input.teff > -99.0):
ut=np.where((model['teff'] > input.teff-sig*input.teffe) & \
(model['teff'] < input.teff+sig*input.teffe))[0]
um=np.intersect1d(um,ut)
print('teff',len(um))
if (input.lum > -99.0):
ut=np.where((model['lum'] > input.lum-sig*input.lume) & \
(model['lum'] < input.lum+sig*input.lume))[0]
um=np.intersect1d(um,ut)
print('lum',len(um))
if (input.dnu > 0.0):
model_dnu = dnusun*model['fdnu']*np.sqrt(10**model['rho'])
ut = np.where(
(model_dnu > input.dnu - sig*input.dnue)
& (model_dnu < input.dnu + sig*input.dnue)
)
ut = ut[0]
um = np.intersect1d(um, ut)
print('dnu', len(um))
if (input.numax > 0.0):
model_numax = (numaxsun
* (10**model['logg']/gsun)
* (model['teff']/teffsun)**(-0.5))
ut = np.where(
(model_numax > input.numax - sig*input.numaxe)
& (model_numax < input.numax + sig*input.numaxe)
)
ut = ut[0]
um = np.intersect1d(um, ut)
print('numax', len(um))
if (input.logg > -99.0):
ut = np.where(
(model['logg'] > input.logg - sig*input.logge)
& (model['logg'] < input.logg + sig*input.logge)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if (input.feh > -99.0):
ut = np.where(
(model['feh'] > input.feh - sig*input.fehe)
& (model['feh'] < input.feh + sig*input.fehe)
)
ut = ut[0]
um = np.intersect1d(um, ut)
print('feh', len(um))
print('number of models used within non-phot obsconstraints:', len(um))
# bail if there are not enough good models
if (len(um) < 10):
return result
# add reddening
if (redmap > -99.0):
# if no reddening map is provided, add Av as a new variable
# and fit for it
        if not isinstance(dustmodel, pd.DataFrame):
avs = np.arange(-0.3,1.0,0.01)
# user-specified reddening
#if (useav > -99.0):
# avs = np.zeros(1) + useav
mod = reddening(model, um, avs, extfactors)
# otherwise, just redden each model according to the provided map
else:
mod = reddening_map(
model, model_mabs, redmap, dustmodel, um, input, extfactors, band
)
# photometry to use for distance
mod_mabs = mod[band]
um = np.arange(0,len(mod['teff']),1)
mod['dis'] = 10**((redmap - mod_mabs + 5.0)/5.0)
print('number of models incl reddening:',len(um))
else:
mod = model
# next, another model down-select based on reddening-dependent quantities
# only do this if no spec constraints are available
if (mabs > -99.0):
ut = np.where(
(mod_mabs > mabs - sig*mabse)
& (mod_mabs < mabs + sig*mabse)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if (input.teff == -99.0):
if ((input.bmag > -99.0) & (input.vmag > -99.0)):
ut=np.where(
(mod['bmag'] - mod['vmag'] > bvcol - sig*bvcole)
& (mod['bmag'] - mod['vmag'] < bvcol + sig*bvcole))
ut = ut[0]
um = np.intersect1d(um,ut)
if ((input.btmag > -99.0) & (input.vtmag > -99.0)):
ut=np.where(
(mod['btmag'] - mod['vtmag'] > bvtcol - sig*bvtcole)
& (mod['btmag'] - mod['vtmag'] < bvtcol + sig*bvtcole))
ut = ut[0]
um = np.intersect1d(um,ut)
if ((input.gmag > -99.0) & (input.rmag > -99.0)):
ut = np.where(
(mod['gmag'] - mod['rmag'] > grcol-sig*grcole)
& (mod['gmag'] - mod['rmag'] < grcol+sig*grcole))
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.rmag > -99.0) & (input.imag > -99.0)):
ut = np.where(
(mod['rmag'] - mod['imag'] > ricol - sig*ricole)
& (mod['rmag'] - mod['imag'] < ricol + sig*ricole)
)
ut = ut[0]
um = np.intersect1d(um,ut)
if ((input.imag > -99.0) & (input.zmag > -99.0)):
ut = np.where(
(mod['imag'] - mod['zmag'] > izcol - sig*izcole)
& (mod['imag'] - mod['zmag'] < izcol + sig*izcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.gmag > -99.0) & (input.imag > -99.0)):
ut = np.where(
(mod['gmag'] - mod['imag'] > gicol-sig*gicole)
& (mod['gmag'] - mod['imag'] < gicol+sig*gicole))
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.rmag > -99.0) & (input.zmag > -99.0)):
ut = np.where(
(mod['rmag'] - mod['zmag'] > rzcol-sig*rzcole)
& (mod['rmag'] - mod['zmag'] < rzcol+sig*rzcole))
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.gmag > -99.0) & (input.zmag > -99.0)):
ut = np.where(
(mod['gmag'] - mod['zmag'] > gzcol-sig*gzcole)
& (mod['gmag'] - mod['zmag'] < gzcol+sig*gzcole))
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.jmag > -99.0) & (input.hmag > -99.0)):
ut = np.where(
(mod['jmag'] - mod['hmag'] > jhcol - sig*jhcole)
& (mod['jmag'] - mod['hmag'] < jhcol + sig*jhcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.hmag > -99.0) & (input.kmag > -99.0)):
ut = np.where(
(mod['hmag'] - mod['kmag'] > hkcol - sig*hkcole)
& (mod['hmag'] - mod['kmag'] < hkcol + sig*hkcole))
ut = ut[0]
um = np.intersect1d(um,ut)
if ((input.jmag > -99.0) & (input.kmag > -99.0)):
ut = np.where(
(mod['jmag'] - mod['kmag'] > jkcol - sig*jkcole)
& (mod['jmag'] - mod['kmag'] < jkcol + sig*jkcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.bpmag > -99.0) & (input.gamag > -99.0)):
ut = np.where(
(mod['bpmag'] - mod['gamag'] > bpgacol - sig*bpgacole)
& (mod['bpmag'] - mod['gamag'] < bpgacol + sig*bpgacole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.gamag > -99.0) & (input.rpmag > -99.0)):
ut = np.where(
(mod['gamag'] - mod['rpmag'] > garpcol - sig*garpcole)
& (mod['gamag'] - mod['rpmag'] < garpcol + sig*garpcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.bpmag > -99.0) & (input.rpmag > -99.0)):
ut = np.where(
(mod['bpmag'] - mod['rpmag'] > bprpcol - sig*bprpcole)
& (mod['bpmag'] - mod['rpmag'] < bprpcol + sig*bprpcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.vmag > -99.0) & (input.jmag > -99.0)):
ut = np.where(
(mod['vmag'] - mod['jmag'] > vjcol - sig*vjcole)
& (mod['vmag'] - mod['jmag'] < vjcol + sig*vjcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.vtmag > -99.0) & (input.jmag > -99.0)):
ut = np.where(
(mod['vtmag'] - mod['jmag'] > vtjcol - sig*vtjcole)
& (mod['vtmag'] - mod['jmag'] < vtjcol + sig*vtjcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.gmag > -99.0) & (input.jmag > -99.0)):
ut = np.where(
(mod['gmag'] - mod['jmag'] > gjcol - sig*gjcole)
& (mod['gmag'] - mod['jmag'] < gjcol + sig*gjcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.rmag > -99.0) & (input.jmag > -99.0)):
ut = np.where(
(mod['rmag'] - mod['jmag'] > rjcol - sig*rjcole)
& (mod['rmag'] - mod['jmag'] < rjcol + sig*rjcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.vmag > -99.0) & (input.kmag > -99.0)):
ut = np.where(
(mod['vmag'] - mod['kmag'] > vkcol - sig*vkcole)
& (mod['vmag'] - mod['kmag'] < vkcol + sig*vkcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.vtmag > -99.0) & (input.kmag > -99.0)):
ut = np.where(
(mod['vtmag'] - mod['kmag'] > vtkcol - sig*vtkcole)
& (mod['vtmag'] - mod['kmag'] < vtkcol + sig*vtkcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.gmag > -99.0) & (input.kmag > -99.0)):
ut = np.where(
(mod['gmag'] - mod['kmag'] > gkcol - sig*gkcole)
& (mod['gmag'] - mod['kmag'] < gkcol + sig*gkcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.rmag > -99.0) & (input.kmag > -99.0)):
ut = np.where(
(mod['rmag'] - mod['kmag'] > rkcol - sig*rkcole)
& (mod['rmag'] - mod['kmag'] < rkcol + sig*rkcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.gamag > -99.0) & (input.jmag > -99.0)):
ut = np.where(
(mod['gamag'] - mod['jmag'] > gajcol - sig*gajcole)
& (mod['gamag'] - mod['jmag'] < gajcol + sig*gajcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.gamag > -99.0) & (input.kmag > -99.0)):
ut = np.where(
(mod['gamag'] - mod['kmag'] > gakcol - sig*gakcole)
& (mod['gamag'] - mod['kmag'] < gakcol + sig*gakcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
print('number of models after phot constraints:',len(um))
print('----')
# bail if there are not enough good models
if (len(um) < 10):
return result
def gaussian(x, mu, sig):
return np.exp(-(x-mu)**2./(2.*sig**2.))
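    # note: gaussian() is deliberately unnormalized; the 1/sqrt(2*pi*sig^2)
    # prefactor is constant within each likelihood term and cancels when the
    # posterior is normalized below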
# likelihoods
if ((input.gmag > -99.0) & (input.rmag > -99.0)):
lh_gr = gaussian(grcol, mod['gmag'][um]-mod['rmag'][um], grcole)
else:
lh_gr = np.ones(len(um))
if ((input.rmag > -99.0) & (input.imag > -99.0)):
lh_ri = gaussian(ricol, mod['rmag'][um]-mod['imag'][um], ricole)
else:
lh_ri = np.ones(len(um))
if ((input.imag > -99.0) & (input.zmag > -99.0)):
lh_iz = gaussian(izcol, mod['imag'][um]-mod['zmag'][um], izcole)
else:
lh_iz = np.ones(len(um))
if ((input.gmag > -99.0) & (input.imag > -99.0)):
lh_gi = gaussian(gicol, mod['gmag'][um]-mod['imag'][um], gicole)
else:
lh_gi = np.ones(len(um))
if ((input.rmag > -99.0) & (input.zmag > -99.0)):
lh_rz = gaussian(rzcol, mod['rmag'][um]-mod['zmag'][um], rzcole)
else:
lh_rz = np.ones(len(um))
if ((input.gmag > -99.0) & (input.zmag > -99.0)):
lh_gz = gaussian(gzcol, mod['gmag'][um]-mod['zmag'][um], gzcole)
else:
lh_gz = np.ones(len(um))
if ((input.jmag > -99.0) & (input.hmag > -99.0)):
lh_jh = gaussian(jhcol, mod['jmag'][um]-mod['hmag'][um], jhcole)
else:
lh_jh = np.ones(len(um))
if ((input.hmag > -99.0) & (input.kmag > -99.0)):
lh_hk = gaussian(hkcol, mod['hmag'][um]-mod['kmag'][um], hkcole)
else:
lh_hk = np.ones(len(um))
if ((input.jmag > -99.0) & (input.kmag > -99.0)):
lh_jk = gaussian(jkcol, mod['jmag'][um]-mod['kmag'][um], jkcole)
else:
lh_jk = np.ones(len(um))
if ((input.bpmag > -99.0) & (input.gamag > -99.0)):
lh_bpga = gaussian(bpgacol, mod['bpmag'][um]-mod['gamag'][um], bpgacole)
else:
lh_bpga = np.ones(len(um))
if ((input.gamag > -99.0) & (input.rpmag > -99.0)):
lh_garp = gaussian(garpcol, mod['gamag'][um]-mod['rpmag'][um], garpcole)
else:
lh_garp = np.ones(len(um))
if ((input.bpmag > -99.0) & (input.rpmag > -99.0)):
lh_bprp = gaussian(bprpcol, mod['bpmag'][um]-mod['rpmag'][um], bprpcole)
else:
lh_bprp = np.ones(len(um))
if ((input.bmag > -99.0) & (input.vmag > -99.0)):
lh_bv = gaussian(bvcol, mod['bmag'][um]-mod['vmag'][um], bvcole)
else:
lh_bv = np.ones(len(um))
if ((input.btmag > -99.0) & (input.vtmag > -99.0)):
lh_bvt = gaussian(bvtcol, mod['btmag'][um]-mod['vtmag'][um], bvtcole)
else:
lh_bvt = np.ones(len(um))
if ((input.vmag > -99.0) & (input.jmag > -99.0)):
lh_vj = gaussian(vjcol, mod['vmag'][um]-mod['jmag'][um], vjcole)
else:
lh_vj = np.ones(len(um))
if ((input.vtmag > -99.0) & (input.jmag > -99.0)):
lh_vtj = gaussian(vtjcol, mod['vtmag'][um]-mod['jmag'][um], vtjcole)
else:
lh_vtj = np.ones(len(um))
if ((input.gmag > -99.0) & (input.jmag > -99.0)):
lh_gj = gaussian(gjcol, mod['gmag'][um]-mod['jmag'][um], gjcole)
else:
lh_gj = np.ones(len(um))
if ((input.rmag > -99.0) & (input.jmag > -99.0)):
lh_rj = gaussian(rjcol, mod['rmag'][um]-mod['jmag'][um], rjcole)
else:
lh_rj = np.ones(len(um))
if ((input.vmag > -99.0) & (input.kmag > -99.0)):
lh_vk = gaussian(vkcol, mod['vmag'][um]-mod['kmag'][um], vkcole)
else:
lh_vk = np.ones(len(um))
if ((input.vtmag > -99.0) & (input.kmag > -99.0)):
lh_vtk = gaussian(vtkcol, mod['vtmag'][um]-mod['kmag'][um], vtkcole)
else:
lh_vtk = np.ones(len(um))
if ((input.gmag > -99.0) & (input.kmag > -99.0)):
lh_gk = gaussian(gkcol, mod['gmag'][um]-mod['kmag'][um], gkcole)
else:
lh_gk = np.ones(len(um))
if ((input.rmag > -99.0) & (input.kmag > -99.0)):
lh_rk = gaussian(rkcol, mod['rmag'][um]-mod['kmag'][um], rkcole)
else:
lh_rk = np.ones(len(um))
if ((input.gamag > -99.0) & (input.jmag > -99.0)):
lh_gaj = gaussian(gajcol, mod['gamag'][um]-mod['jmag'][um], gajcole)
else:
lh_gaj = np.ones(len(um))
if ((input.gamag > -99.0) & (input.kmag > -99.0)):
lh_gak = gaussian(gakcol, mod['gamag'][um]-mod['kmag'][um], gakcole)
else:
lh_gak = np.ones(len(um))
if (input.teff > -99):
lh_teff = gaussian(input.teff, mod['teff'][um], input.teffe)
else:
lh_teff = np.ones(len(um))
if (input.lum > -99):
lh_lum = gaussian(input.lum, mod['lum'][um], input.lume)
else:
lh_lum = np.ones(len(um))
if (input.logg > -99.0):
lh_logg = gaussian(input.logg, mod['logg'][um], input.logge)
else:
lh_logg = np.ones(len(um))
if (input.feh > -99.0):
lh_feh = gaussian(input.feh, mod['feh'][um], input.fehe)
else:
lh_feh = np.ones(len(um))
if (input.plx > -99.0):
# Compute most likely value of absolute magnitude:
mabsIndex = np.argmax(np.exp( (-1./(2.*input.plxe**2))*(input.plx-1./mod['dis'][um])**2))
# Only use downselected models based on input parameters:
downSelMagArr = mod_mabs[um]
# Compute the likelihood of the maximum magnitude given computed errors:
lh_mabs = gaussian(downSelMagArr[mabsIndex],mod_mabs[um],mabse)
else:
lh_mabs = np.ones(len(um))
if (input.dnu > 0.):
mod_dnu = dnusun*mod['fdnu']*np.sqrt(10**mod['rho'])
lh_dnu = np.exp( -(input.dnu-mod_dnu[um])**2.0 / (2.0*input.dnue**2.0))
else:
lh_dnu = np.ones(len(um))
if (input.numax > 0.):
mod_numax = (numaxsun
* (10**mod['logg']/gsun)
* (mod['teff']/teffsun)**(-0.5))
lh_numax = gaussian(input.numax,mod_numax[um],input.numaxe)
else:
lh_numax = np.ones(len(um))
tlh = (lh_gr*lh_ri*lh_iz*lh_gi*lh_rz*lh_gz*lh_jh*lh_hk*lh_jk*lh_bv*lh_bvt*lh_bpga*lh_garp*lh_bprp*
lh_vj*lh_vtj*lh_gj*lh_rj*lh_vk*lh_vtk*lh_gk*lh_rk*lh_gaj*lh_gak*
lh_teff*lh_logg*lh_feh*lh_mabs*lh_dnu*lh_numax*lh_lum)
# metallicity prior (only if no FeH input is given)
if (input.feh > -99.0):
fprior = np.ones(len(um))
else:
fprior = fehprior(mod['feh'][um])
# distance prior
if (input.plx > -99.0):
lscale = 1350.
dprior = ((mod['dis'][um]**2/(2.0*lscale**3.))
*np.exp(-mod['dis'][um]/lscale))
else:
dprior = np.ones(len(um))
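    # the distance prior above is an exponentially decreasing volume density
    # prior with a single scale length (lscale = 1350, presumably in parsec),
    # similar in spirit to the Bailer-Jones (2015) prior for parallax inversion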
# isochrone prior (weights)
tprior = mod['dage'][um]*mod['dmass'][um]*mod['dfeh'][um]
# posterior
prob = fprior*dprior*tprior*tlh
prob = prob/np.sum(prob)
    if not isinstance(dustmodel, pd.DataFrame):
        names = ['teff', 'logg', 'feh', 'rad', 'mass', 'rho', 'lum', 'age']
        steps = [0.001, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01]
        fixes = [0, 1, 1, 0, 0, 1, 1, 0]
if (redmap > -99.0):
names = [
'teff', 'logg', 'feh', 'rad', 'mass', 'rho', 'lum', 'age',
'avs'
]
steps = [0.001, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01]
fixes=[0, 1, 1, 0, 0, 1, 1, 0, 1]
if ((input.plx == -99.0) & (redmap > -99)):
names=[
'teff', 'logg', 'feh', 'rad', 'mass', 'rho', 'lum', 'age',
'avs', 'dis'
]
steps=[0.001, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01]
fixes=[0, 1, 1, 0, 0, 1, 1, 0, 1, 0]
#if ((input.plx == -99.0) & (map > -99) & (useav > -99.0)):
# names=['teff','logg','feh','rad','mass','rho','lum','age','dis']
# steps=[0.001,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01]
# fixes=[0,1,1,0,0,1,1,0,0]
else:
#names=['teff','logg','feh','rad','mass','rho','lum','age']
#steps=[0.001,0.01,0.01,0.01,0.01,0.01,0.01,0.01]
#fixes=[0,1,1,0,0,1,1,0,1]
#if (input.plx == -99.0):
avstep=((np.max(mod['avs'][um])-np.min(mod['avs'][um]))/10.)
#pdb.set_trace()
names = [
'teff', 'logg', 'feh', 'rad', 'mass', 'rho', 'lum', 'age', 'avs',
'dis'
]
steps=[0.001, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, avstep, 0.01]
fixes=[0, 1, 1, 0, 0, 1, 1, 0, 1, 0]
# Provision figure
if plot:
plotinit()
ix = 1
iy = 2
npar = len(names)
for j in range(0,npar):
if fnmatch.fnmatch(names[j],'*lum*'):
lum=np.log10((mod['rad'][um]**2. * (mod['teff'][um]/5772.)**4.))
x, y, res, err1, err2 = getpdf(
lum, prob, name=names[j], step=steps[j], fixed=fixes[j],
dustmodel=dustmodel)
else:
if (len(np.unique(mod[names[j]][um])) > 1):
x, y, res, err1, err2 = getpdf(
mod[names[j]][um], prob, name=names[j], step=steps[j],
fixed=fixes[j],dustmodel=dustmodel
)
elif ((len(np.unique(mod[names[j]][um])) == 1) and (names[j] == 'avs')):
res = mod[names[j]][um[0]]
err1 = 0.0
err2 = 0.0
x = res
y = 1.0
else:
res = 0.0
err1 = 0.0
err2 = 0.0
print(names[j], res, err1, err2)
setattr(result, names[j], res)
setattr(result, names[j]+'ep', err1)
setattr(result, names[j]+'em', err2)
setattr(result, names[j]+'px', x)
setattr(result, names[j]+'py', y)
# Plot individual posteriors
if plot:
plotposterior(x, y, res, err1, err2, names, j, ix, iy)
ix += 2
iy += 2
# calculate posteriors for a secondary with a given delta_mag, assuming it has the same
# distance, age, and metallicity. to do this we'll interpolate the physical properties
# of the secondary given a delta_mag, and assign it the same posterior probabilities
# same procedure as used in Kraus+ 16
if (input.dmag > -99.):
print(' ')
print('calculating properties for secondary ...')
delta_k=input.dmag
delta_k_err=input.dmage
print('using dmag=',delta_k,'+/-',delta_k_err,' in ',band)
# interpolate across constant age and metallicity
feh_un=np.unique(mod['feh_init'][um])
age_un=np.unique(mod['age'][um])
#adding in the contrast error without sampling is tricky, because that uncertainty
# is not present in the primary posterior; instead, calculate the secondary
# posteriors 3 times for +/- contrast errors, and then add those in quadrature
# *explicitly assumes that the contrast errors are gaussian*
mds=[delta_k+delta_k_err,delta_k,delta_k-delta_k_err]
# the new model quantities for the secondary
mod_sec=np.zeros((5,3,len(prob)))
# Now reduce model to only those that match metallicity, age, and mass (must be less than max primary mass) conditions:
ufeh = np.in1d(model['feh_init'],feh_un) # Must match all potential primary initial metallicities
uage = np.in1d(model['age'],age_un) # Must match all potential primary ages
umass = np.where(model['mass'] < np.max(mod['mass'][um]))[0] # Must be less than max primary mass
        ufa = np.where(ufeh & uage)[0] # Find intersection of age and feh
ufam = np.intersect1d(umass,ufa) # Find intersection of mass and ufa
modelMin = dict((k, model[k][ufam]) for k in model.keys()) # Define minimal model grid
# insanely inefficient triple loop follows
for s in range(0,len(mds)):
for r in range(0,len(feh_un)):
for k in range (0,len(age_un)):
# NB the next line uses model instead of mod, since the interpolation needs
# the full model grid rather than the pre-selected models returned by the
# reddening routine (which excludes secondary solutions). This may screw
# things up when trying to constrain reddening (i.e. dust="none")
ux=np.where((modelMin['feh_init'] == feh_un[r]) & (modelMin['age'] == age_un[k]))[0]
ux2=np.where((mod['feh_init'][um] == feh_un[r]) & (mod['age'][um] == age_un[k]))[0]
sr=np.argsort(modelMin[band][ux])
if ((len(ux) == 0) | (len(ux2) == 0)):
continue
mod_sec[0,s,ux2]=np.interp(mod[band][um[ux2]]+mds[s],modelMin[band][ux[sr]],modelMin['teff'][ux[sr]])
mod_sec[1,s,ux2]=np.interp(mod[band][um[ux2]]+mds[s],modelMin[band][ux[sr]],modelMin['logg'][ux[sr]])
mod_sec[2,s,ux2]=np.interp(mod[band][um[ux2]]+mds[s],modelMin[band][ux[sr]],modelMin['rad'][ux[sr]])
mod_sec[3,s,ux2]=np.interp(mod[band][um[ux2]]+mds[s],modelMin[band][ux[sr]],modelMin['mass'][ux[sr]])
mod_sec[4,s,ux2]=np.interp(mod[band][um[ux2]]+mds[s],modelMin[band][ux[sr]],modelMin['rho'][ux[sr]])
# now get PDFs across all delta mags, add errors in quadrature
names = ['teff', 'logg', 'rad', 'mass', 'rho']
steps=[0.001, 0.01, 0.01, 0.01, 0.01]
fixes=[0, 1, 0, 0, 1]
ix = 1
iy = 2
npar = len(names)
for j in range(0,5):
x, y, res_1, err1_1, err2_1 = getpdf(mod_sec[j,0,:], prob, name=names[j], step=steps[j], fixed=fixes[j],dustmodel=dustmodel)
xo, yo, res_2, err1_2, err2_2 = getpdf(mod_sec[j,1,:], prob, name=names[j], step=steps[j], fixed=fixes[j],dustmodel=dustmodel)
x, y, res_3, err1_3, err2_3 = getpdf(mod_sec[j,2,:], prob, name=names[j], step=steps[j], fixed=fixes[j],dustmodel=dustmodel)
finerr1=np.sqrt(err1_2**2 + (np.abs(res_2-res_1))**2)
finerr2=np.sqrt(err2_2**2 + (np.abs(res_2-res_3))**2)
print(names[j], res_2, finerr1, finerr2)
setattr(result, names[j]+'sec', res_2)
setattr(result, names[j]+'sec'+'ep', finerr1)
setattr(result, names[j]+'sec'+'em', finerr2)
setattr(result, names[j]+'sec'+'px', x)
setattr(result, names[j]+'sec'+'py', y)
# Plot individual posteriors
if plot:
plotposterior_sec(xo,yo, res_2, finerr1, finerr2, names, j, ix, iy)
ix += 2
iy += 2
# Plot HR diagrams
if plot:
plothrd(model,mod,um,input,mabs,mabse,ix,iy)
return result
# add extinction as a model parameter
def reddening(model,um,avs,extfactors):
model2=dict((k, model[k][um]) for k in model.keys())
nmodels=len(model2['teff'])*len(avs)
keys = [
'dage', 'dmass', 'dfeh', 'teff', 'logg', 'feh', 'rad', 'mass',
'rho', 'age', 'gmag', 'rmag', 'imag', 'zmag', 'jmag', 'hmag',
'bmag', 'vmag', 'btmag','vtmag', 'bpmag', 'gamag', 'rpmag',
'dis', 'kmag', 'avs', 'fdnu', 'feh_init'
]
dtype = [(key, float) for key in keys]
model3 = np.zeros(nmodels,dtype=dtype)
start=0
end=len(um)
#print start,end
for i in range(0,len(avs)):
ix = np.arange(start,end,1)
# NB: in reality, the model mags should also be Av-dependent;
# hopefully a small effect!
for c in 'b v g r i z j h k bt vt bp ga rp'.split():
cmag = c + 'mag'
ac = 'a' + c
av = extfactors['av']
model3[cmag][ix] = model2[cmag] + avs[i]*extfactors[ac]/av
keys = 'teff logg feh rad mass rho age feh_init dfeh dmass dage fdnu'.split()
for key in keys:
model3[key][ix]=model2[key]
model3['avs'][ix] = avs[i]
start = start + len(um)
end = end + len(um)
print(i)
return model3
# redden model given a reddening map
def reddening_map(model, model_mabs, redmap, dustmodel, um, input, extfactors,
band):
if (len(band) == 4):
bd = band[0:1]
else:
bd = band[0:2]
equ = ephem.Equatorial(
input.ra*np.pi/180.0, input.dec*np.pi/180.0, epoch=ephem.J2000
)
gal = ephem.Galactic(equ)
lon_deg = gal.lon*180./np.pi
lat_deg = gal.lat*180./np.pi
# zero-reddening distance
dis = 10**((redmap-model_mabs[um]+5)/5.)
    # iterate distance and reddening map (a single pass here; widen the range to iterate)
for i in range(0,1):
xp = np.concatenate(
([0.0],np.array(dustmodel.columns[2:].str[3:],dtype='float'))
)
fp = np.concatenate(([0.0],np.array(dustmodel.iloc[0][2:])))
ebvs = np.interp(x=dis, xp=xp, fp = fp)
ext_band = extfactors['a'+bd]*ebvs
dis=10**((redmap-ext_band-model_mabs[um]+5)/5.)
# if no models have been pre-selected (i.e. input is
# photometry+parallax only), redden all models
if (len(um) == len(model['teff'])):
model3 = copy.deepcopy(model)
for c in 'b v g r i z j h k bt vt bp ga rp'.split():
cmag = c + 'mag'
ac = 'a' + c
av = extfactors['av']
model3[cmag] = model[cmag] + extfactors[ac] * ebvs
model3['dis'] = dis
model3['avs'] = extfactors['av']*ebvs
#pdb.set_trace()
# if models have been pre-selected, extract and only redden those
else:
model2 = dict((k, model[k][um]) for k in model.keys())
nmodels = len(model2['teff'])
keys = [
'dage', 'dmass', 'dfeh', 'teff', 'logg', 'feh', 'rad', 'mass',
'rho', 'age', 'gmag', 'rmag', 'imag', 'zmag', 'jmag', 'hmag',
'bmag', 'vmag', 'btmag','vtmag', 'bpmag', 'gamag', 'rpmag',
'dis', 'kmag', 'avs', 'fdnu', 'feh_init'
]
dtype = [(key, float) for key in keys]
model3 = np.zeros(nmodels,dtype=dtype)
for c in 'b v g r i z j h k bt vt bp ga rp'.split():
cmag = c + 'mag'
ac = 'a' + c
av = extfactors['av']
model3[cmag] = model2[cmag] + extfactors[ac] * ebvs
model3['dis']=dis
model3['avs']=extfactors['av']*ebvs
keys = 'teff logg feh rad mass rho age feh_init dfeh dmass dage fdnu'.split()
for key in keys:
model3[key] = model2[key]
return model3
########################### M-dwarf error computation and gK to 2% teff uncertainty computation:
def compute_extra_MK_error(abskmag):
massPoly = np.array([-1.218087354981032275e-04,3.202749540513295540e-03,
-2.649332720970200630e-02,5.491458806424324990e-02,6.102330369026183476e-02,
6.122397810371335014e-01])
massPolyDeriv = np.array([-6.090436774905161376e-04,1.281099816205318216e-02,
-7.947998162910602238e-02,1.098291761284864998e-01,6.102330369026183476e-02])
kmagExtraErr = abs(0.021*np.polyval(massPoly,abskmag)/np.polyval(massPolyDeriv,abskmag))
return kmagExtraErr
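# The factor 0.021 above appears to encode a ~2% fractional mass error floor,
# propagated into a K-band magnitude error via the mass(M_K) polynomial:
# mass = P(M_K) implies dM_K = dmass / P'(M_K), with dmass = 0.021 * P(M_K).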
def compute_extra_gk_color_error(gk):
teffPoly = np.array([5.838899127633915245e-06,-4.579640759410575821e-04,
1.591988911769273360e-02,-3.229622768514631148e-01,4.234782988549875782e+00,
-3.752421323678526477e+01,2.279521336429464498e+02,-9.419602441779162518e+02,
2.570487048729761227e+03,-4.396474893847861495e+03,4.553858427460818348e+03,
-4.123317864249115701e+03,9.028586421378711748e+03])
teffPolyDeriv = np.array([7.006678953160697955e-05,-5.037604835351633566e-03,
1.591988911769273429e-01,-2.906660491663167978e+00,3.387826390839900625e+01,
-2.626694926574968463e+02,1.367712801857678642e+03,-4.709801220889581600e+03,
1.028194819491904491e+04,-1.318942468154358357e+04,9.107716854921636696e+03,
-4.123317864249115701e+03])
gkExtraColorErr = abs(0.02*np.polyval(teffPoly,gk)/np.polyval(teffPolyDeriv,gk))
return gkExtraColorErr
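# Analogously, the factor 0.02 here maps a 2% Teff error floor into a g-K
# color error through the Teff(g-K) polynomial and its derivative.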
######################################### misc stuff
# calculate parallax for each model
def redden(redmap, mabs, gl, gb, dust):
logd = (redmap-mabs+5.)/5.
newd = logd
for i in range(0,1):
cur = 10**newd
ebv = dust(gl,gb,cur/1000.)
av = ebv*3.1
aj = av*1.2348743
newd = (redmap-mabs-aj+5.)/5.
s_newd = np.sqrt( (0.2*0.01)**2 + (0.2*0.03)**2 + (0.2*0.02)**2 )
plx=1./(10**newd)
s_plx=10**(-newd)*np.log(10)*s_newd
    # pdb.set_trace()  # leftover debugging breakpoint, disabled so the function can return
return 1./(10**newd)
def readinput(inputfile='input.txt'):
    input = ascii.read(inputfile)
ra = input['col1'][0]
dec = input['col2'][0]
bmag = input['col1'][1]
bmage = input['col2'][1]
vmag = input['col1'][2]
vmage = input['col2'][2]
gmag = input['col1'][3]
gmage = input['col2'][3]
rmag = input['col1'][4]
rmage = input['col2'][4]
imag = input['col1'][5]
image = input['col2'][5]
zmag = input['col1'][6]
zmage = input['col2'][6]
jmag = input['col1'][7]
jmage = input['col2'][7]
hmag = input['col1'][8]
hmage = input['col2'][8]
kmag = input['col1'][9]
kmage = input['col2'][9]
plx = input['col1'][10]
plxe = input['col2'][10]
teff = input['col1'][11]
teffe = input['col2'][11]
logg = input['col1'][12]
logge = input['col2'][12]
feh = input['col1'][13]
fehe = input['col2'][13]
out = (
ra, dec, bmag, bmage, vmag, vmage, gmag, gmage, rmag, rmage,
imag, image, zmag, zmage, jmag, jmage, hmag, hmage, kmag, kmage,
plx, plxe, teff, teffe, logg, logge, feh, fehe
)
return out
|
danxhuber/isoclassify
|
isoclassify/grid/classify.py
|
Python
|
mit
| 42,118
|
[
"Gaussian"
] |
82db6faa505856bb8623bfd211d001b249af4c0b668c0b486346c99e12f72674
|
#!/usr/local/epd/bin/python
#------------------------------------------------------------------------------------------------------
# Dirac propagator based on:
# Fillion-Gourdeau, Francois, Lorin, Emmanuel, Bandrauk, Andre D.
# Numerical Solution of the Time-Dependent Dirac Equation in Coordinate Space without Fermion-Doubling
#------------------------------------------------------------------------------------------------------
import numpy as np
import scipy.fftpack as fftpack
import h5py
import time
import sys
from scipy.special import laguerre
from scipy.special import genlaguerre
from scipy.special import legendre
#from pyfft.cuda import Plan
import pycuda.gpuarray as gpuarray
import pycuda.driver as cuda
import pycuda.autoinit
from pycuda.compiler import SourceModule
import cufft_wrapper as cuda_fft
#-------------------------------------------------------------------------------
_TakabayashiAngle_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
__global__ void Kernel( double *out,
pycuda::complex<double> *Psi1, pycuda::complex<double> *Psi1_conj,
pycuda::complex<double> *Psi2, pycuda::complex<double> *Psi2_conj,
pycuda::complex<double> *Psi3, pycuda::complex<double> *Psi3_conj,
pycuda::complex<double> *Psi4, pycuda::complex<double> *Psi4_conj )
{
const int DIM_X = blockDim.x;
const int DIM_Y = gridDim.x;
const int indexTotal = threadIdx.x + DIM_X * blockIdx.x;
pycuda::complex<double> _out;
/*_out = Psi1[indexTotal] * Psi1_conj[indexTotal] ;
_out += Psi3[indexTotal] * Psi1_conj[indexTotal] ;
_out += Psi2[indexTotal] * Psi2_conj[indexTotal] ;
_out += Psi4[indexTotal] * Psi2_conj[indexTotal] ;
_out -= Psi1[indexTotal] * Psi3_conj[indexTotal];
_out -= Psi3[indexTotal] * Psi3_conj[indexTotal];
_out -= Psi2[indexTotal] * Psi4_conj[indexTotal];
_out -= Psi4[indexTotal] * Psi4_conj[indexTotal]; */
_out = Psi1[indexTotal] * pycuda::conj<double>( Psi1[indexTotal] );
_out += Psi3[indexTotal] * pycuda::conj<double>( Psi1[indexTotal] );
_out += Psi2[indexTotal] * pycuda::conj<double>( Psi2[indexTotal] );
_out += Psi4[indexTotal] * pycuda::conj<double>( Psi2[indexTotal] );
_out -= Psi1[indexTotal] * pycuda::conj<double>( Psi3[indexTotal] );
_out -= Psi3[indexTotal] * pycuda::conj<double>( Psi3[indexTotal] );
_out -= Psi2[indexTotal] * pycuda::conj<double>( Psi4[indexTotal] );
_out -= Psi4[indexTotal] * pycuda::conj<double>( Psi4[indexTotal] );
double density;
/*density = pycuda::real<double>( Psi1[indexTotal]*Psi1_conj[indexTotal] );
density += pycuda::real<double>( Psi2[indexTotal]*Psi2_conj[indexTotal] );
density += pycuda::real<double>( Psi3[indexTotal]*Psi3_conj[indexTotal] );
density += pycuda::real<double>( Psi4[indexTotal]*Psi4_conj[indexTotal] );*/
density = pycuda::real<double>( Psi1[indexTotal]*pycuda::conj( Psi1[indexTotal] ) );
density += pycuda::real<double>( Psi2[indexTotal]*pycuda::conj( Psi2[indexTotal] ) );
density += pycuda::real<double>( Psi3[indexTotal]*pycuda::conj( Psi3[indexTotal] ) );
density += pycuda::real<double>( Psi4[indexTotal]*pycuda::conj( Psi4[indexTotal] ) );
double takabayasi;
if( density > 1.0e-8 )
takabayasi = atan2( pycuda::imag( _out ) , pycuda::real( _out ) );
else
takabayasi = 0.;
out[indexTotal] = takabayasi;
}
"""
TakabayashiAngle_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
__global__ void Kernel( double *out,
pycuda::complex<double> *Psi1,
pycuda::complex<double> *Psi2,
pycuda::complex<double> *Psi3,
pycuda::complex<double> *Psi4 )
{
const int DIM_X = blockDim.x;
const int DIM_Y = gridDim.x;
const int indexTotal = threadIdx.x + DIM_X * blockIdx.x;
pycuda::complex<double> _out;
_out = Psi1[indexTotal] * pycuda::conj<double>( Psi1[indexTotal] );
_out += Psi3[indexTotal] * pycuda::conj<double>( Psi1[indexTotal] );
_out += Psi2[indexTotal] * pycuda::conj<double>( Psi2[indexTotal] );
_out += Psi4[indexTotal] * pycuda::conj<double>( Psi2[indexTotal] );
_out -= Psi1[indexTotal] * pycuda::conj<double>( Psi3[indexTotal] );
_out -= Psi3[indexTotal] * pycuda::conj<double>( Psi3[indexTotal] );
_out -= Psi2[indexTotal] * pycuda::conj<double>( Psi4[indexTotal] );
_out -= Psi4[indexTotal] * pycuda::conj<double>( Psi4[indexTotal] );
double density;
density = pycuda::real<double>( Psi1[indexTotal]*pycuda::conj( Psi1[indexTotal] ) );
density += pycuda::real<double>( Psi2[indexTotal]*pycuda::conj( Psi2[indexTotal] ) );
density += pycuda::real<double>( Psi3[indexTotal]*pycuda::conj( Psi3[indexTotal] ) );
density += pycuda::real<double>( Psi4[indexTotal]*pycuda::conj( Psi4[indexTotal] ) );
double takabayasi;
if( density > 1.0e-8 )
takabayasi = atan2( pycuda::imag( _out ) , pycuda::real( _out ) );
else
takabayasi = 0.;
out[indexTotal] = takabayasi;
}
"""
#--------------------------------------------------------------------------------
Propagator_Takabayashi_K_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
{CUDA_constants}
__global__ void Kernel(
double *TakabayashiAngle,
pycuda::complex<double> *Psi1, pycuda::complex<double> *Psi2, pycuda::complex<double> *Psi3, pycuda::complex<double> *Psi4 )
{{
const int DIM_X = blockDim.x;
const int DIM_Y = gridDim.x;
const int indexTotal = threadIdx.x + DIM_X * blockIdx.x;
int j = (threadIdx.x + DIM_X/2)%DIM_X;
int i = (blockIdx.x + DIM_Y/2)%DIM_Y;
double px = dPx*( j - DIM_X/2 );
    double py = dPy*( i - DIM_Y/2 );
double Energy = c*sqrt( pow( mass*c , 2 ) + pow( px , 2) +pow( py , 2) );
double TA = TakabayashiAngle[indexTotal];
double sTakab = sin( TA );
double cTakab = cos( TA );
pycuda::complex<double> p_plus = pycuda::complex<double>( px , py);
pycuda::complex<double> ip_plus = pycuda::complex<double>(-py , px);
pycuda::complex<double> p_minus = pycuda::complex<double>( px , -py);
pycuda::complex<double> ip_minus = pycuda::complex<double>( py , px);
pycuda::complex<double> U11 = pycuda::complex<double>( cos(dt*Energy), - mass*c*c*sin(dt*Energy)/Energy*cTakab );
pycuda::complex<double> U22 = U11;
pycuda::complex<double> U44 = pycuda::complex<double>( cos(dt*Energy), mass*c*c*sin(dt*Energy)/Energy*cTakab );
pycuda::complex<double> U33 = U44;
pycuda::complex<double> U13 = pycuda::complex<double>( - mass*c*c*sTakab*sin(Energy*dt)/Energy ,0.);
pycuda::complex<double> U14 = - ip_minus*c*sin( dt*Energy )/Energy;
pycuda::complex<double> U23 = - ip_plus*c*sin( dt*Energy )/Energy;
pycuda::complex<double> U24 = pycuda::complex<double>( - mass*c*c*sTakab*sin(Energy*dt)/Energy , 0.);
pycuda::complex<double> U31 = pycuda::complex<double>( mass*c*c*sTakab*sin(Energy*dt)/Energy , 0.);
pycuda::complex<double> U32 = - ip_minus *c*sin( dt*Energy )/Energy;
pycuda::complex<double> U41 = - ip_plus *c*sin( dt*Energy )/Energy;
pycuda::complex<double> U42 = pycuda::complex<double>( mass*c*c*sTakab*sin(Energy*dt)/Energy , 0.);
pycuda::complex<double> _Psi1, _Psi2, _Psi3, _Psi4;
_Psi1= U11*Psi1[indexTotal] +U13*Psi3[indexTotal] +U14*Psi4[indexTotal];
_Psi2= U22*Psi2[indexTotal] +U23*Psi3[indexTotal] +U24*Psi4[indexTotal];
_Psi3= U31*Psi1[indexTotal] +U32*Psi2[indexTotal] +U33*Psi3[indexTotal] ;
_Psi4= U41*Psi1[indexTotal] +U42*Psi2[indexTotal] +U44*Psi4[indexTotal];
Psi1[indexTotal] = _Psi1;
Psi2[indexTotal] = _Psi2;
Psi3[indexTotal] = _Psi3;
Psi4[indexTotal] = _Psi4;
}}
"""
Propagator_Takabayashi_A_source = """
//
// source code for the Dirac propagator with scalar-vector potential interaction
// and smooth time dependence
//
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
{CUDA_constants}
// The vector potential must be supplied with UP indices and: eA
__device__ double A0(double t, double x, double y)
{{
return {A0} ;
}}
__device__ double A1(double t, double x, double y)
{{
return {A1} ;
}}
__device__ double A2(double t, double x, double y)
{{
return {A2} ;
}}
__device__ double A3(double t, double x, double y)
{{
return {A3} ;
}}
__device__ double VectorPotentialSquareSum(double t, double x, double y)
{{
return pow( A1(t,x,y), 2.) + pow( A2(t,x,y), 2.) + pow( A3(t,x,y), 2.);
}}
//-------------------------------------------------------------------------------------------------------------
__global__ void DiracPropagatorA_Kernel( double *TakabayashiAngle,
pycuda::complex<double> *Psi1, pycuda::complex<double> *Psi2, pycuda::complex<double> *Psi3, pycuda::complex<double> *Psi4, double t )
{{
const int DIM_X = blockDim.x;
const int DIM_Y = gridDim.x;
int j = (threadIdx.x + DIM_X/2)%DIM_X;
int i = (blockIdx.x + DIM_Y/2)%DIM_Y;
const int indexTotal = threadIdx.x + DIM_X * blockIdx.x ;
double x = dX*(j - DIM_X/2);
double y = dY*(i - DIM_Y/2);
pycuda::complex<double> _Psi1, _Psi2, _Psi3, _Psi4;
double F;
double mass_ = 0.0001;
F = sqrt( pow(mass_*c*c,2.) + c*VectorPotentialSquareSum(t,x,y) );
//pycuda::complex<double> expV0 = exp( -I*dt*A0(t,x,y) );
pycuda::complex<double> expV0 = exp( pycuda::complex<double>(0., -dt*A0(t,x,y)) );
pycuda::complex<double> U11 = pycuda::complex<double>( cos(dt*F) , -mass_*c*c*sin(dt*F)/F );
pycuda::complex<double> U22 = U11;
pycuda::complex<double> U33 = pycuda::complex<double>( cos(dt*F) , mass_*c*c*sin(dt*F)/F );
pycuda::complex<double> U44 = U33;
pycuda::complex<double> U31 = pycuda::complex<double>( 0., A3(t,x,y)*sin(dt*F)/F );
pycuda::complex<double> U41 = pycuda::complex<double>( -A2(t,x,y)*sin(dt*F)/F , A1(t,x,y)*sin(dt*F)/F );
pycuda::complex<double> U32 = pycuda::complex<double>( A2(t,x,y)*sin(dt*F)/F , A1(t,x,y)*sin(dt*F)/F );
pycuda::complex<double> U42 = pycuda::complex<double>( 0., -A3(t,x,y)*sin(dt*F)/F );
pycuda::complex<double> U13 = U31;
pycuda::complex<double> U14 = U32;
pycuda::complex<double> U23 = U41;
pycuda::complex<double> U24 = U42;
_Psi1 = expV0*( U11*Psi1[indexTotal] + U13*Psi3[indexTotal] + U14*Psi4[indexTotal] );
_Psi2 = expV0*( U22*Psi2[indexTotal] + U23*Psi3[indexTotal] + U24*Psi4[indexTotal] );
_Psi3 = expV0*( U31*Psi1[indexTotal] + U32*Psi2[indexTotal] + U33*Psi3[indexTotal] );
_Psi4 = expV0*( U41*Psi1[indexTotal] + U42*Psi2[indexTotal] + U44*Psi4[indexTotal] );
Psi1[indexTotal] = _Psi1;
Psi2[indexTotal] = _Psi2;
Psi3[indexTotal] = _Psi3;
Psi4[indexTotal] = _Psi4;
}}
"""
#--------------------------------------------------------------------------------
BaseCUDAsource_K = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s
__global__ void Kernel(
pycuda::complex<double> *Psi1, pycuda::complex<double> *Psi2, pycuda::complex<double> *Psi3, pycuda::complex<double> *Psi4 )
{
const int DIM_X = blockDim.x;
const int DIM_Y = gridDim.x;
int j = (threadIdx.x + DIM_X/2)%%DIM_X;
int i = (blockIdx.x + DIM_Y/2)%%DIM_Y;
pycuda::complex<double> _Psi1, _Psi2, _Psi3, _Psi4;
const int indexTotal = threadIdx.x + DIM_X * blockIdx.x;
double px = dPx*(j - DIM_X/2);
double py = dPy*(i - DIM_Y/2);
    double pp = sqrt( px*px + py*py + pow(10.,-12) ); // tiny offset regularizes the p = 0 limit
    double sdt = sin( c*dt*pp )/pp;
    double cosdt = cos( c*dt*pp );
    pycuda::complex<double> p_plus = pycuda::complex<double>(py, px);
    pycuda::complex<double> p_minus = pycuda::complex<double>(py,-px);
    _Psi1 = cosdt*Psi1[indexTotal] - p_plus*sdt*Psi4[indexTotal];
    _Psi2 = cosdt*Psi2[indexTotal] + p_minus*sdt*Psi3[indexTotal];
    _Psi3 = - p_plus*sdt*Psi2[indexTotal] + cosdt*Psi3[indexTotal];
    _Psi4 = p_minus*sdt*Psi1[indexTotal] + cosdt*Psi4[indexTotal];
Psi1[indexTotal] = _Psi1;
Psi2[indexTotal] = _Psi2;
Psi3[indexTotal] = _Psi3;
Psi4[indexTotal] = _Psi4;
}
"""
DiracPropagatorA_source = """
//
// source code for the Dirac propagator with scalar-vector potential interaction
// and smooth time dependence
//
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s // Define the essential constants {mass, c, dt, min_x, min_y, ... } . Variables _a_ and _b_ are reserved
// The vector potential must be supplied with UP indices and: eA
__device__ double A0(double t, double x, double y)
{
return %s ;
}
__device__ double A1(double t, double x, double y)
{
return %s ;
}
__device__ double A2(double t, double x, double y)
{
return %s ;
}
__device__ double A3(double t, double x, double y)
{
return %s ;
}
__device__ double VectorPotentialSquareSum(double t, double x, double y)
{
return pow( A1(t,x,y), 2.) + pow( A2(t,x,y), 2.) + pow( A3(t,x,y), 2.);
}
//-------------------------------------------------------------------------------------------------------------
__global__ void DiracPropagatorA_Kernel(
pycuda::complex<double> *Psi1, pycuda::complex<double> *Psi2, pycuda::complex<double> *Psi3, pycuda::complex<double> *Psi4, double t )
{
//pycuda::complex<double> I = pycuda::complex<double>(0.,1.);
const int DIM_X = blockDim.x;
const int DIM_Y = gridDim.x;
int j = (threadIdx.x + DIM_X/2)%%DIM_X;
int i = (blockIdx.x + DIM_Y/2)%%DIM_Y;
const int indexTotal = threadIdx.x + DIM_X * blockIdx.x ;
double x = dX*(j - DIM_X/2);
double y = dY*(i - DIM_Y/2);
pycuda::complex<double> _Psi1, _Psi2, _Psi3, _Psi4;
double F;
F = sqrt( pow(mass*c*c,2.) + c*VectorPotentialSquareSum(t,x,y) );
//pycuda::complex<double> expV0 = exp( -I*dt*A0(t,x,y) );
pycuda::complex<double> expV0 = exp( pycuda::complex<double>(0., -dt*A0(t,x,y)) );
pycuda::complex<double> U11 = pycuda::complex<double>( cos(dt*F) , -mass*c*c*sin(dt*F)/F );
pycuda::complex<double> U22 = U11;
pycuda::complex<double> U33 = pycuda::complex<double>( cos(dt*F) , mass*c*c*sin(dt*F)/F );
pycuda::complex<double> U44 = U33;
pycuda::complex<double> U31 = pycuda::complex<double>( 0., A3(t,x,y)*sin(dt*F)/F );
pycuda::complex<double> U41 = pycuda::complex<double>( -A2(t,x,y)*sin(dt*F)/F , A1(t,x,y)*sin(dt*F)/F );
pycuda::complex<double> U32 = pycuda::complex<double>( A2(t,x,y)*sin(dt*F)/F , A1(t,x,y)*sin(dt*F)/F );
pycuda::complex<double> U42 = pycuda::complex<double>( 0., -A3(t,x,y)*sin(dt*F)/F );
pycuda::complex<double> U13 = U31;
pycuda::complex<double> U14 = U32;
pycuda::complex<double> U23 = U41;
pycuda::complex<double> U24 = U42;
_Psi1 = expV0*( U11*Psi1[indexTotal] + U13*Psi3[indexTotal] + U14*Psi4[indexTotal] );
_Psi2 = expV0*( U22*Psi2[indexTotal] + U23*Psi3[indexTotal] + U24*Psi4[indexTotal] );
_Psi3 = expV0*( U31*Psi1[indexTotal] + U32*Psi2[indexTotal] + U33*Psi3[indexTotal] );
_Psi4 = expV0*( U41*Psi1[indexTotal] + U42*Psi2[indexTotal] + U44*Psi4[indexTotal] );
Psi1[indexTotal] = _Psi1;
Psi2[indexTotal] = _Psi2;
Psi3[indexTotal] = _Psi3;
Psi4[indexTotal] = _Psi4;
}
"""
BaseCUDAsource_AbsorbBoundary_xy = """
//............................................................................................
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
__global__ void AbsorbBoundary_Kernel(
pycuda::complex<double> *Psi1, pycuda::complex<double> *Psi2,
pycuda::complex<double> *Psi3 , pycuda::complex<double> *Psi4 )
{
const int DIM_X = blockDim.x;
const int DIM_Y = gridDim.x;
const int j = (threadIdx.x + DIM_X/2)%DIM_X;
const int i = (blockIdx.x + DIM_Y/2)%DIM_Y;
const int indexTotal = threadIdx.x + DIM_X*blockIdx.x + DIM_X*DIM_Y*blockIdx.y;
double wx = pow(3.*double(DIM_X)/100.,2);
double wy = pow(3.*double(DIM_Y)/100.,2);
//--------------------------- boundary in x --------------------------------------
double expB = 1. - exp( -double(j*j)/wx );
Psi1[indexTotal] *= expB;
Psi2[indexTotal] *= expB;
Psi3[indexTotal] *= expB;
Psi4[indexTotal] *= expB;
expB = 1.- exp( -(j - DIM_X+1. )*(j - DIM_X+1.)/wx );
Psi1[indexTotal] *= expB;
Psi2[indexTotal] *= expB;
Psi3[indexTotal] *= expB;
Psi4[indexTotal] *= expB;
//-------------- boundary in y
expB = 1.- exp( -double(i*i)/wy );
Psi1[indexTotal] *= expB;
Psi2[indexTotal] *= expB;
Psi3[indexTotal] *= expB;
Psi4[indexTotal] *= expB;
expB = 1. - exp( -double( (i - DIM_Y + 1)*(i - DIM_Y + 1) )/wy );
Psi1[indexTotal] *= expB;
Psi2[indexTotal] *= expB;
Psi3[indexTotal] *= expB;
Psi4[indexTotal] *= expB;
}
"""
#..........................................................................................................
Potential_0_Average_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s; // Constants
__device__ double Potential0(double t, double x)
{
return %s ;
}
//............................................................................................................
__global__ void Kernel( pycuda::complex<double>* preExpectationValue,
pycuda::complex<double>* Psi1, pycuda::complex<double>* Psi2, pycuda::complex<double>* Psi3, pycuda::complex<double>* Psi4,
double t)
{
const int DIM_X = blockDim.x;
const int DIM_Y = gridDim.x;
int j = (threadIdx.x + DIM_X/2)%%DIM_X;
int i = (blockIdx.x + DIM_Y/2)%%DIM_Y;
double x = dX*(j - DIM_X/2);
double y = dY*(i - DIM_Y/2);
const int indexTotal = threadIdx.x + DIM_X * blockIdx.x ;
double out;
out = Potential0( t, x)* pow( abs( Psi1[indexTotal] ) , 2 );
out += Potential0( t, x)* pow( abs( Psi2[indexTotal] ) , 2 );
out += Potential0( t, x)* pow( abs( Psi3[indexTotal] ) , 2 );
out += Potential0( t, x)* pow( abs( Psi4[indexTotal] ) , 2 );
preExpectationValue[indexTotal] = out;
}
"""
#-----------------------------------------------------------------------------------------------
class GPU_DiracDaviau2D:
"""
Propagator 2D for the Dirac equation
Parameters:
gridDIM_X
gridDIM_Y
min_X
min_Y
timeSteps
skipFrames: Number of frames to be saved
frameSaveMode = 'Density' saves only the density
frameSaveMode = 'Spinor' saves the whole spinor
"""
def __init__(self, gridDIM, amplitude, dt, timeSteps, skipFrames = 1,frameSaveMode='Density'):
X_amplitude,Y_amplitude = amplitude
X_gridDIM, Y_gridDIM = gridDIM
self.dX = 2.*X_amplitude/np.float(X_gridDIM)
self.dY = 2.*Y_amplitude/np.float(Y_gridDIM)
self.X_amplitude = X_amplitude
self.Y_amplitude = Y_amplitude
self.X_gridDIM = X_gridDIM
self.Y_gridDIM = Y_gridDIM
self.min_X = -X_amplitude
self.min_Y = -Y_amplitude
self.timeSteps = timeSteps
self.skipFrames = skipFrames
self.frameSaveMode = frameSaveMode
rangeX = np.linspace(-X_amplitude, X_amplitude - self.dX, X_gridDIM )
rangeY = np.linspace(-Y_amplitude, Y_amplitude - self.dY, Y_gridDIM )
self.X = fftpack.fftshift(rangeX)[np.newaxis, : ]
self.Y = fftpack.fftshift(rangeY)[:, np.newaxis ]
self.X_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.X + 0.*self.Y, dtype = np.complex128) )
self.Y_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.Y + 0.*self.X, dtype = np.complex128) )
self.Px_amplitude = np.pi/self.dX
self.dPx = 2*self.Px_amplitude/float(self.X_gridDIM)
self.Px_range = np.linspace( -self.Px_amplitude, self.Px_amplitude - self.dPx, self.X_gridDIM )
self.Py_amplitude = np.pi/self.dY
self.dPy = 2*self.Py_amplitude/float(self.Y_gridDIM)
self.Py_range = np.linspace( -self.Py_amplitude, self.Py_amplitude - self.dPy, self.Y_gridDIM )
self.Px = fftpack.fftshift(self.Px_range)[np.newaxis,:]
self.Py = fftpack.fftshift(self.Py_range)[:,np.newaxis]
self.Px_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.Px + 0.*self.Py, dtype = np.complex128) )
self.Py_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.Py + 0.*self.Px, dtype = np.complex128) )
self.dt = dt
		#................ Strings: mass, c, dt must be defined in the child class ....................
self.CUDA_constants_essential = '__constant__ double mass=%f; '%self.mass
self.CUDA_constants_essential += '__constant__ double c=%f; '%self.c
self.CUDA_constants_essential += '__constant__ double dt=%f; '%self.dt
self.CUDA_constants_essential += '__constant__ double dX=%f; '%self.dX
self.CUDA_constants_essential += '__constant__ double dY=%f; '%self.dY
self.CUDA_constants_essential += '__constant__ double dPx=%f; '%self.dPx
self.CUDA_constants_essential += '__constant__ double dPy=%f; '%self.dPy
self.CUDA_constants = self.CUDA_constants_essential #+ self.CUDA_constants_additional
#................ CUDA Kernels ...........................................................
self.DiracPropagatorK = SourceModule(BaseCUDAsource_K%self.CUDA_constants,arch="sm_20").get_function( "Kernel" )
self.DiracPropagatorA = SourceModule(
DiracPropagatorA_source%(
self.CUDA_constants,
self.Potential_0_String,
self.Potential_1_String,
self.Potential_2_String,
self.Potential_3_String),arch="sm_20").get_function( "DiracPropagatorA_Kernel" )
#
self.Propagator_Takabayashi_K = SourceModule(
Propagator_Takabayashi_K_source.format(CUDA_constants=self.CUDA_constants)
).get_function( "Kernel" )
self.Propagator_Takabayashi_A = SourceModule(
Propagator_Takabayashi_A_source.format(
CUDA_constants = self.CUDA_constants,
A0 = self.Potential_0_String,
A1 = self.Potential_1_String,
A2 = self.Potential_2_String,
A3 = self.Potential_3_String) ).get_function( "DiracPropagatorA_Kernel" )
self.DiracAbsorbBoundary_xy = \
SourceModule(BaseCUDAsource_AbsorbBoundary_xy,arch="sm_20").get_function( "AbsorbBoundary_Kernel" )
self.Potential_0_Average_Function = \
SourceModule( Potential_0_Average_source%(
self.CUDA_constants,self.Potential_0_String) ).get_function("Kernel" )
self.TakabayashiAngle_Function = SourceModule(TakabayashiAngle_source).get_function( "Kernel" )
#................ FFT PLAN ...........................
self.plan_Z2Z_2D = cuda_fft.Plan_Z2Z( (self.X_gridDIM,self.Y_gridDIM) )
""" def Fourier_X_To_P_GPU(self,W_out_GPU):
cuda_fft.fft_Z2Z( W_out_GPU, W_out_GPU , self.plan_Z2Z_2D )
W_out_GPU *= 1./np.sqrt( float(self.X_gridDIM*self.Y_gridDIM) )/np.pi
def Fourier_P_To_X_GPU(self,W_out_GPU):
cuda_fft.ifft_Z2Z( W_out_GPU, W_out_GPU , self.plan_Z2Z_2D )
W_out_GPU *= np.pi*np.sqrt( float(self.X_gridDIM*self.Y_gridDIM) )"""
def Fourier_X_To_P_GPU(self,W_out_GPU):
cuda_fft.fft_Z2Z( W_out_GPU, W_out_GPU , self.plan_Z2Z_2D )
W_out_GPU *= 1./np.sqrt( float(self.X_gridDIM*self.Y_gridDIM) )
def Fourier_P_To_X_GPU(self,W_out_GPU):
cuda_fft.ifft_Z2Z( W_out_GPU, W_out_GPU , self.plan_Z2Z_2D )
W_out_GPU *= np.sqrt( float(self.X_gridDIM*self.Y_gridDIM) )
def Fourier_4_X_To_P_GPU(self, Psi1, Psi2, Psi3, Psi4):
self.Fourier_X_To_P_GPU(Psi1)
self.Fourier_X_To_P_GPU(Psi2)
self.Fourier_X_To_P_GPU(Psi3)
self.Fourier_X_To_P_GPU(Psi4)
def Fourier_4_P_To_X_GPU(self, Psi1, Psi2, Psi3, Psi4):
self.Fourier_P_To_X_GPU(Psi1)
self.Fourier_P_To_X_GPU(Psi2)
self.Fourier_P_To_X_GPU(Psi3)
self.Fourier_P_To_X_GPU(Psi4)
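	# The pair above uses the symmetric 1/sqrt(Nx*Ny) FFT scaling, so the plain
	# sums sum(|Psi|^2) are preserved by the transform; the physical norms still
	# differ because Norm_GPU weights by dX*dY while Norm_P_GPU weights by
	# dPx*dPy, which is why Run() renormalizes in each representation separately.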
#-------------------------------------------------------------------------------------------------------------------
# Gaussian PARTICLE spinors
def Spinor_Particle_SpinUp(self, p_init, modulation_Function ):
"""
"""
px, py = p_init
rho = np.exp(1j*self.X*px + 1j*self.Y*py )*modulation_Function( self.X , self.Y )
p0 = np.sqrt( px*px + py*py + (self.mass*self.c)**2 )
Psi1 = rho*( p0 + self.mass*self.c )
Psi2 = rho*0.
Psi3 = rho*0.
Psi4 = rho*( px + 1j*py )
return np.array([Psi1, Psi2, Psi3, Psi4 ])
def Spinor_Particle_SpinDown(self, p_init, modulation_Function ):
"""
"""
px, py = p_init
rho = np.exp(1j*self.X*px + 1j*self.Y*py )*modulation_Function( self.X , self.Y )
p0 = np.sqrt( px*px + py*py + (self.mass*self.c)**2 )
Psi1 = rho*0.
Psi2 = rho*( p0 + self.mass*self.c )
Psi3 = rho*( px - 1j*py )
Psi4 = rho*0.
return np.array([Psi1, Psi2, Psi3, Psi4 ])
def Spinor_AntiParticle_SpinDown(self, p_init, modulation_Function ):
"""
"""
px, py = p_init
rho = np.exp(1j*self.X*px + 1j*self.Y*py )*modulation_Function( self.X , self.Y )
p0 = -np.sqrt( px*px + py*py + (self.mass*self.c)**2 )
Psi1 = rho*0.
Psi2 = rho*( px + 1j*py )
Psi3 = rho*( p0 - self.mass*self.c )
Psi4 = rho*0.
return -1j*np.array([Psi1, Psi2, Psi3, Psi4 ])
def Spinor_AntiParticle_SpinUp(self, p_init, modulation_Function ):
"""
"""
px, py = p_init
rho = np.exp(1j*self.X*px + 1j*self.Y*py )*modulation_Function( self.X , self.Y )
p0 = -np.sqrt( px*px + py*py + (self.mass*self.c)**2 )
Psi1 = rho*( px - 1j*py )
Psi2 = rho*0.
Psi3 = rho*0.
Psi4 = rho*( p0 - self.mass*self.c )
return -1j*np.array([Psi1, Psi2, Psi3, Psi4 ])
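	# Sketch: building a normalized Gaussian wavepacket from one of the free
	# spinors above (px, py are the desired initial momenta; the width 0.5 is
	# an arbitrary illustration):
	#
	#	def gaussian(X, Y):
	#		return np.exp( -(X**2 + Y**2)/(2.*0.5**2) )
	#	Psi = self.Spinor_Particle_SpinUp( (px, py), gaussian )
	#	Psi /= self.Norm(Psi)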
#.......................................................................
	def Boost(self, p1,p2):
		# Boost matrix in the Dirac gamma-matrix representation, normalized by
		# K = sqrt( 2 m c (m c + p0) ) so the transformation has unit determinant.
		p0 = np.sqrt( (self.mass*self.c)**2 + p1**2 + p2**2 )
		K  = np.sqrt( 2*self.mass*self.c*(self.mass*self.c + p0) )
		B00 = self.mass*self.c + p0
		p_Plus  = p1 + 1j*p2
		p_Minus = p1 - 1j*p2
		return np.array([ [B00, 0., 0., p_Minus], [0., B00, p_Plus,0.], [0.,p_Minus,B00,0.], [p_Plus,0.,0.,B00] ])/K
def LandauLevelSpinor(self, B , n , x , y ,type=1):
# Symmetric Gauge
def energy(n):
return np.sqrt( (self.mass*self.c**2)**2 + 2*B*self.c*self.hBar*n )
K = B*( (self.X-x)**2 + (self.Y-y)**2)/( 4.*self.c*self.hBar )
psi1 = np.exp(-K)*( energy(n) + self.mass*self.c**2 )*laguerre(n)( 2*K )
psi3 = np.exp(-K)*( energy(n) - self.mass*self.c**2 )*laguerre(n)( 2*K )
if n>0:
psi2 = 1j*np.exp(-K)*( self.X-x + 1j*(self.Y-y) )*genlaguerre(n-1,1)( 2*K )
else:
psi2 = 0.*K
psi4 = psi2
if type==1:
spinor = np.array([ psi1 , 0*psi2 , 0*psi3 , psi4 ])
elif type ==2:
spinor = np.array([ 0*psi1 , psi2 , psi3 , 0*psi4 ])
else :
			raise ValueError('spinor type must be 1 or 2')
norm = self.Norm(spinor)
spinor /= norm
return spinor
def LandauLevelSpinor_Boosted(self, B , n , x , y , py ):
K = B*( (self.X-x)**2 + (self.Y-y)**2)/( 4.*self.c*self.hBar )
p0 = np.sqrt( (self.mass*self.c)**2 + py**2 )
psi1 = 0j*K
psi2 = 1j*self.c* np.exp(-K) * py *( p0 - self.mass*self.c )
psi3 = self.c* np.exp(-K) * py*py*( p0 - self.mass*self.c ) + 0j
psi4 = 0j*K
spinor = np.array([ psi1 , psi2 , psi3 , psi4 ])
norm = self.Norm(spinor)
spinor /= norm
return spinor
def LandaoLevelSpinor_GaugeX(self, B , n , Py ):
def energy(n):
return np.sqrt( (self.mass*self.c**2)**2 + 2*B*self.c*self.hBar*n )
K = B*(self.X - self.c*Py/B)**2/( 2.*self.c*self.hBar )
psi1 = np.exp(-K)*( self.mass*self.c**2 + energy(n) )* legendre(n)( K/np.sqrt(B*self.c*self.hBar) )
psi3 = np.exp(-K)*( self.mass*self.c**2 + energy(n) )* legendre(n)( K/np.sqrt(B*self.c*self.hBar) )
		if n>0:
			psi2  = np.exp(-K)*( self.mass*self.c**2 + energy(n) )* legendre(n-1)( K/np.sqrt(B*self.c*self.hBar) )
			psi2 *= 2*1j*n*np.sqrt(B*self.c*self.hBar)
			psi4  = -psi2
		else:
			psi2 = 0.*K
			psi4 = 0.*K
		spinor = np.array([psi1 , psi2 , psi3 , psi4 ])
norm = self.Norm(spinor)
spinor /= norm
return spinor
#.............................................................................................
def FilterElectrons(self,sign, Psi):
'''
Routine that uses the Fourier transform to filter positrons/electrons
Options:
sign=1 Leaves electrons
sign=-1 Leaves positrons
'''
print ' '
print ' Filter Electron routine '
print ' '
px = self.c*self.Px
py = self.c*self.Py
m = self.mass
c= self.c
energy = np.sqrt( (m*c**2)**2 + px**2 + py**2 )
EP_11 = 1. + sign*m*c**2/energy
EP_12 = 0.
EP_13 = 0.
EP_14 = sign*(px - 1j*py)/energy
EP_21 = 0.
EP_22 = 1. + sign*m*c**2/energy
EP_23 = sign*(px + 1j*py)/energy
EP_24 = 0.
EP_31 = 0.
EP_32 = sign*(px - 1j*py)/energy
EP_33 = 1. - sign*m*c**2/energy
EP_34 = 0.
EP_41 = sign*(px + 1j*py)/energy
EP_42 = 0.
EP_43 = 0.
EP_44 = 1. - sign*m*c**2/energy
#Psi1, Psi2, Psi3, Psi4 = Psi
psi1_fft = fftpack.fft2( Psi[0] )
psi2_fft = fftpack.fft2( Psi[1] )
psi3_fft = fftpack.fft2( Psi[2] )
psi4_fft = fftpack.fft2( Psi[3] )
psi1_fft_electron = EP_11*psi1_fft + EP_12*psi2_fft + EP_13*psi3_fft + EP_14*psi4_fft
psi2_fft_electron = EP_21*psi1_fft + EP_22*psi2_fft + EP_23*psi3_fft + EP_24*psi4_fft
psi3_fft_electron = EP_31*psi1_fft + EP_32*psi2_fft + EP_33*psi3_fft + EP_34*psi4_fft
psi4_fft_electron = EP_41*psi1_fft + EP_42*psi2_fft + EP_43*psi3_fft + EP_44*psi4_fft
return np.array([ fftpack.ifft2( psi1_fft_electron ),
fftpack.ifft2( psi2_fft_electron ),
fftpack.ifft2( psi3_fft_electron ),
fftpack.ifft2( psi4_fft_electron ) ])
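	# e.g., to keep only the positive-energy (electron) component of a spinor
	# and renormalize (a sketch):
	#
	#	Psi_e = self.FilterElectrons(1, Psi)
	#	Psi_e /= self.Norm(Psi_e)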
def save_Spinor(self,f1, t, Psi1_GPU,Psi2_GPU,Psi3_GPU,Psi4_GPU):
print ' progress ', 100*t/(self.timeSteps+1), '%'
PsiTemp = Psi1_GPU.get()
f1['1/'+str(t)] = PsiTemp
PsiTemp = Psi2_GPU.get()
f1['2/'+str(t)] = PsiTemp
PsiTemp = Psi3_GPU.get()
f1['3/'+str(t)] = PsiTemp
PsiTemp = Psi4_GPU.get()
f1['4/'+str(t)] = PsiTemp
def save_Density(self,f1,t,Psi1_GPU,Psi2_GPU,Psi3_GPU,Psi4_GPU):
print ' progress ', 100*t/(self.timeSteps+1), '%'
PsiTemp1 = Psi1_GPU.get()
PsiTemp2 = Psi2_GPU.get()
PsiTemp3 = Psi3_GPU.get()
PsiTemp4 = Psi4_GPU.get()
rho = np.abs(PsiTemp1)**2
rho += np.abs(PsiTemp2)**2
rho += np.abs(PsiTemp3)**2
rho += np.abs(PsiTemp4)**2
#print ' Save norm = ', np.sum(rho)*self.dX*self.dY
f1[str(t)] = np.ascontiguousarray(fftpack.fftshift(rho).astype(np.float32))
	def load_Density(self, n, fileName=None ):
		if fileName is None:
			FILE = h5py.File(self.fileName, 'r')
		else :
			FILE = h5py.File(fileName, 'r')
		probability = FILE['/'+str(n)][...]
		FILE.close()
		return probability
	def load_Spinor(self, n, fileName=None ):
		if fileName is None:
			FILE = h5py.File(self.fileName, 'r')
		else :
			FILE = h5py.File(fileName, 'r')
		psi1 = FILE['1/'+str(n)][...]
		psi2 = FILE['2/'+str(n)][...]
		psi3 = FILE['3/'+str(n)][...]
		psi4 = FILE['4/'+str(n)][...]
		FILE.close()
		return np.array([ psi1, psi2, psi3, psi4 ])
def Density_From_Spinor(self,Psi):
rho = np.abs(Psi[0])**2
rho += np.abs(Psi[1])**2
rho += np.abs(Psi[2])**2
rho += np.abs(Psi[3])**2
return rho
def Norm( self, Psi):
norm = np.sum(np.abs(Psi[0])**2)
norm += np.sum(np.abs(Psi[1])**2)
norm += np.sum(np.abs(Psi[2])**2)
norm += np.sum(np.abs(Psi[3])**2)
norm *= self.dX*self.dY
norm = np.sqrt(norm)
return norm
def Norm_P( self, Psi):
norm = np.sum(np.abs(Psi[0])**2)
norm += np.sum(np.abs(Psi[1])**2)
norm += np.sum(np.abs(Psi[2])**2)
norm += np.sum(np.abs(Psi[3])**2)
norm *= self.dPx*self.dPy
norm = np.sqrt(norm)
return norm
def Norm_GPU( self, Psi1, Psi2, Psi3, Psi4):
norm = gpuarray.sum( Psi1.__abs__()**2 ).get()
norm += gpuarray.sum( Psi2.__abs__()**2 ).get()
norm += gpuarray.sum( Psi3.__abs__()**2 ).get()
norm += gpuarray.sum( Psi4.__abs__()**2 ).get()
norm = np.sqrt(norm*self.dX * self.dY )
return norm
def Norm_P_GPU( self, Psi1, Psi2, Psi3, Psi4):
norm = gpuarray.sum( Psi1.__abs__()**2 ).get()
norm += gpuarray.sum( Psi2.__abs__()**2 ).get()
norm += gpuarray.sum( Psi3.__abs__()**2 ).get()
norm += gpuarray.sum( Psi4.__abs__()**2 ).get()
norm = np.sqrt(norm*self.dPx * self.dPy )
return norm
def Normalize_GPU( self, Psi1, Psi2, Psi3, Psi4):
norm = self.Norm_GPU(Psi1, Psi2, Psi3, Psi4)
Psi1 /= norm
Psi2 /= norm
Psi3 /= norm
Psi4 /= norm
def Normalize_P_GPU( self, Psi1, Psi2, Psi3, Psi4):
norm = self.Norm_P_GPU(Psi1, Psi2, Psi3, Psi4)
Psi1 /= norm
Psi2 /= norm
Psi3 /= norm
Psi4 /= norm
#------------------------------------------------------------------------
def Potential_0_Average(self, temp_GPU, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU ,t):
self.Potential_0_Average_Function( temp_GPU,
Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU, t , block=self.blockCUDA, grid=self.gridCUDA )
return self.dX*self.dY * gpuarray.sum(temp_GPU).get()
#........................................................................
def Average_X( self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
average = gpuarray.dot(Psi1_GPU.__abs__()**2,self.X_GPU).get()
average += gpuarray.dot(Psi2_GPU.__abs__()**2,self.X_GPU).get()
average += gpuarray.dot(Psi3_GPU.__abs__()**2,self.X_GPU).get()
average += gpuarray.dot(Psi4_GPU.__abs__()**2,self.X_GPU).get()
average *= self.dX*self.dY
return average
def Average_Y( self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
average = gpuarray.dot(Psi1_GPU.__abs__()**2,self.Y_GPU).get()
average += gpuarray.dot(Psi2_GPU.__abs__()**2,self.Y_GPU).get()
average += gpuarray.dot(Psi3_GPU.__abs__()**2,self.Y_GPU).get()
average += gpuarray.dot(Psi4_GPU.__abs__()**2,self.Y_GPU).get()
average *= self.dX*self.dY
return average
def Average_Px( self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
average = gpuarray.dot( Psi1_GPU.__abs__()**2,self.Px_GPU ).get()
average += gpuarray.dot( Psi2_GPU.__abs__()**2,self.Px_GPU ).get()
average += gpuarray.dot( Psi3_GPU.__abs__()**2,self.Px_GPU ).get()
average += gpuarray.dot( Psi4_GPU.__abs__()**2,self.Px_GPU ).get()
average *= self.dPx*self.dPy
return average
def Average_Py( self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
average = gpuarray.dot(Psi1_GPU.__abs__()**2,self.Py_GPU).get()
average += gpuarray.dot(Psi2_GPU.__abs__()**2,self.Py_GPU).get()
average += gpuarray.dot(Psi3_GPU.__abs__()**2,self.Py_GPU).get()
average += gpuarray.dot(Psi4_GPU.__abs__()**2,self.Py_GPU).get()
average *= self.dPx*self.dPy
return average
def Average_Px_CPU( self, Psi):
average = np.sum( np.abs( Psi[0] )**2* self.Px )
average += np.sum( np.abs( Psi[1] )**2* self.Px )
average += np.sum( np.abs( Psi[2] )**2* self.Px )
average += np.sum( np.abs( Psi[3] )**2* self.Px )
return average*self.dPx*self.dPy
	def Average_Py_CPU( self, Psi):
average = np.sum( np.abs( Psi[0] )**2* self.Py )
average += np.sum( np.abs( Psi[1] )**2* self.Py )
average += np.sum( np.abs( Psi[2] )**2* self.Py )
average += np.sum( np.abs( Psi[3] )**2* self.Py )
return average*self.dPx*self.dPy
#........................................................................
def Average_Alpha1( self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
average = gpuarray.dot( Psi2_GPU, Psi3_GPU.conj() ).get().real
average += gpuarray.dot( Psi1_GPU, Psi4_GPU.conj() ).get().real
average *= 2.*self.dX*self.dY
return average
def Average_Alpha2( self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
average = gpuarray.dot( Psi3_GPU, Psi2_GPU.conj() ).get().imag
average += gpuarray.dot( Psi1_GPU, Psi4_GPU.conj() ).get().imag
average *= -2.*self.dX*self.dY
return average
def Average_Beta( self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
average = gpuarray.dot( Psi1_GPU, Psi1_GPU.conj() ).get()
average += gpuarray.dot( Psi2_GPU, Psi2_GPU.conj() ).get()
average -= gpuarray.dot( Psi3_GPU, Psi3_GPU.conj() ).get()
average -= gpuarray.dot( Psi4_GPU, Psi4_GPU.conj() ).get()
average *= self.dX*self.dY
return average
def Average_TotalProbabilityX( self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
temp = gpuarray.sum( Psi1_GPU*Psi1_GPU.conj() ).get()
temp += gpuarray.sum( Psi2_GPU*Psi2_GPU.conj() ).get()
temp += gpuarray.sum( Psi3_GPU*Psi3_GPU.conj() ).get()
temp += gpuarray.sum( Psi4_GPU*Psi4_GPU.conj() ).get()
return temp * self.dX*self.dY
def Average_TotalProbabilityP( self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
temp = gpuarray.sum( Psi1_GPU*Psi1_GPU.conj() ).get()
temp += gpuarray.sum( Psi2_GPU*Psi2_GPU.conj() ).get()
temp += gpuarray.sum( Psi3_GPU*Psi3_GPU.conj() ).get()
temp += gpuarray.sum( Psi4_GPU*Psi4_GPU.conj() ).get()
return temp * self.dPx*self.dPy
def Average_KEnergy( self, temp_GPU, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
energy = gpuarray.sum( Psi1_GPU*Psi1_GPU.conj() ).get()
energy += gpuarray.sum( Psi2_GPU*Psi2_GPU.conj() ).get()
energy -= gpuarray.sum( Psi3_GPU*Psi3_GPU.conj() ).get()
energy -= gpuarray.sum( Psi4_GPU*Psi4_GPU.conj() ).get()
energy *= self.mass*self.c*self.c*self.dPx*self.dPy
#
temp_GPU *= 0.
temp_GPU += Psi4_GPU * Psi1_GPU.conj()
temp_GPU += Psi1_GPU * Psi4_GPU.conj()
temp_GPU += Psi3_GPU * Psi2_GPU.conj()
temp_GPU += Psi2_GPU * Psi3_GPU.conj()
temp_GPU *= self.Px_GPU
#temp_GPU *= self.c
energy += gpuarray.sum( temp_GPU ).get()*self.dPx*self.dPy*self.c
#
temp_GPU *= 0.
temp_GPU += Psi4_GPU * Psi1_GPU.conj()
temp_GPU -= Psi1_GPU * Psi4_GPU.conj()
temp_GPU -= Psi3_GPU * Psi2_GPU.conj()
temp_GPU += Psi2_GPU * Psi3_GPU.conj()
temp_GPU *= self.Py_GPU
#temp_GPU *= -1j
energy += gpuarray.sum( temp_GPU ).get()*self.dPx*self.dPy*self.c*(-1j)
return energy
def TakabayashiAngle_CPU(self, Psi ):
expTA = Psi[0] * Psi[0].conj()
expTA += Psi[2] * Psi[0].conj()
expTA += Psi[1] * Psi[1].conj()
expTA += Psi[3] * Psi[1].conj()
expTA -= Psi[0] * Psi[2].conj()
expTA -= Psi[2] * Psi[2].conj()
expTA -= Psi[1] * Psi[3].conj()
expTA -= Psi[3] * Psi[3].conj()
rho = Psi[0]*Psi[0].conj() + Psi[1]*Psi[1].conj() + Psi[2]*Psi[2].conj() + Psi[3]*Psi[3].conj()
rho = rho.real
rho[ rho < 0.000001 ] = 0.
rho = np.ceil(rho)
return np.arctan2( expTA.imag , expTA.real )*rho
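	# Note: expTA assembles S + i*P componentwise, where S = psibar*psi is the
	# scalar and P = i*psibar*gamma5*psi the pseudoscalar bilinear in the Dirac
	# representation, so arctan2(Im, Re) recovers the Takabayashi angle theta.
	# The ceil() of the thresholded density acts as a 0/1 mask that zeroes the
	# angle wherever the probability density is below 1e-6.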
#.....................................................................
def Run(self):
try :
import os
os.remove (self.fileName)
except OSError:
pass
		f1 = h5py.File(self.fileName, 'w')
print '--------------------------------------------'
print ' Dirac Propagator 2D '
print '--------------------------------------------'
print ' save Mode = ', self.frameSaveMode
f1['x_gridDIM'] = self.X_gridDIM
f1['y_gridDIM'] = self.Y_gridDIM
#f1['x_min'] = self.min_X
#f1['y_min'] = self.min_Y
f1['x_amplitude'] = self.X_amplitude
f1['y_amplitude'] = self.Y_amplitude
# Redundant information on dx dy dz
f1['dx'] = self.dX
f1['dy'] = self.dY
f1['Potential_0_String'] = self.Potential_0_String
f1['Potential_1_String'] = self.Potential_1_String
f1['Potential_2_String'] = self.Potential_2_String
f1['Potential_3_String'] = self.Potential_3_String
self.Psi1_init, self.Psi2_init, self.Psi3_init, self.Psi4_init = self.Psi_init
Psi1_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.Psi1_init, dtype=np.complex128) )
Psi2_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.Psi2_init, dtype=np.complex128) )
Psi3_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.Psi3_init, dtype=np.complex128) )
Psi4_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.Psi4_init, dtype=np.complex128) )
_Psi1_GPU = gpuarray.zeros_like(Psi1_GPU)
TakabayashiAngle_GPU = gpuarray.empty( Psi1_GPU.shape, dtype = np.float64 )
#TakabayashiAngle_GPU = gpuarray.empty( Psi1_GPU.shape, dtype = np.complex128 )
#_Psi2_GPU = gpuarray.zeros_like(Psi1_GPU)
#_Psi3_GPU = gpuarray.zeros_like(Psi1_GPU)
#_Psi4_GPU = gpuarray.zeros_like(Psi1_GPU)
#
print ' '
print 'number of steps = ', self.timeSteps, ' dt = ',self.dt
print 'dX = ', self.dX, 'dY = ', self.dY
print 'dPx = ', self.dPx, 'dPy = ', self.dPy
print ' '
print ' '
if self.frameSaveMode=='Spinor':
self.save_Spinor(f1, 0 , Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU)
if self.frameSaveMode=='Density':
self.save_Density(f1, 0, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU)
# ............................... Main LOOP .....................................
self.blockCUDA = (self.X_gridDIM,1,1)
self.gridCUDA = (self.Y_gridDIM,1)
timeRange = range(1, self.timeSteps+1)
initial_time = time.time()
X_average = []
Y_average = []
Alpha1_average = []
Alpha2_average = []
Beta_average = []
KEnergy_average = []
Potential_0_average = []
Px_average = []
Py_average = []
TotalProbabilityX_average = []
TotalProbabilityP_average = []
for t_index in timeRange:
t_GPU = np.float64(self.dt * t_index )
X_average.append( self.Average_X( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU) )
Y_average.append( self.Average_Y( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU) )
Alpha1_average.append( self.Average_Alpha1( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU) )
Alpha2_average.append( self.Average_Alpha2( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU) )
Beta_average.append( self.Average_Beta( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU) )
TotalProbabilityX_average.append(
self.Average_TotalProbabilityX( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU) )
Potential_0_average.append(
self.Potential_0_Average( _Psi1_GPU, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU ,t_GPU) )
self.Fourier_4_X_To_P_GPU( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU )
#......................................................................
# Kinetic
#......................................................................
self.Normalize_P_GPU( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU)
KEnergy_average.append(
self.Average_KEnergy( _Psi1_GPU, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU) )
Px_average.append( self.Average_Px(Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU) )
Py_average.append( self.Average_Py(Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU) )
TotalProbabilityP_average.append(
self.Average_TotalProbabilityP( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU) )
"""self.DiracPropagatorK( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU,
block=self.blockCUDA, grid=self.gridCUDA )"""
"""self.TakabayashiAngle_Function( TakabayashiAngle_GPU,
Psi1_GPU,Psi1_GPU.conj(), Psi2_GPU,Psi2_GPU.conj(),
Psi3_GPU,Psi3_GPU.conj(), Psi4_GPU,Psi4_GPU.conj(),
block=self.blockCUDA, grid=self.gridCUDA )"""
self.TakabayashiAngle_Function( TakabayashiAngle_GPU,
Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
self.Propagator_Takabayashi_K( TakabayashiAngle_GPU,
Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
self.Fourier_4_P_To_X_GPU( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU)
#...........................................................................
# Mass potential
#...........................................................................
self.TakabayashiAngle_Function( TakabayashiAngle_GPU,
Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
self.Propagator_Takabayashi_A( TakabayashiAngle_GPU,
Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
"""self.DiracPropagatorA( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU,
t_GPU, block=self.blockCUDA, grid=self.gridCUDA )"""
# Absorbing boundary
self.DiracAbsorbBoundary_xy(
Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
#
# Normalization
#
self.Normalize_GPU( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU)
# Saving files
if t_index % self.skipFrames == 0:
if self.frameSaveMode=='Spinor':
self.save_Spinor( f1,t_index,Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU)
if self.frameSaveMode=='Density':
self.save_Density(f1,t_index,Psi1_GPU,Psi2_GPU,Psi3_GPU,Psi4_GPU)
final_time = time.time()
print ' computational time = ', final_time - initial_time
f1.close()
self.Psi_end = np.array( [ Psi1_GPU.get(), Psi2_GPU.get(), Psi3_GPU.get(), Psi4_GPU.get() ] )
self.Psi_init = np.array( [ self.Psi1_init, self.Psi2_init, self.Psi3_init, self.Psi4_init ] )
self.TakabayashiAngle_end = TakabayashiAngle_GPU.get()
self.timeRange = np.array(timeRange)
self.X_average = np.array(X_average).real
self.Y_average = np.array(Y_average).real
self.Alpha1_average = np.array(Alpha1_average).real
self.Alpha2_average = np.array(Alpha2_average).real
self.Beta_average = np.array(Beta_average).real
self.KEnergy_average = np.array( KEnergy_average ).real
self.Potential_0_average = np.array( Potential_0_average ).real
self.Px_average = np.array( Px_average ).real
self.Py_average = np.array( Py_average ).real
self.TotalProbabilityX_average = np.array(TotalProbabilityX_average).real
self.TotalProbabilityP_average = np.array(TotalProbabilityP_average).real
return 0
|
cabrer7/PyWignerCUDA
|
GPU_DiracDaviau2D.py
|
Python
|
mit
| 45,985
|
[
"DIRAC",
"Gaussian",
"Psi4"
] |
ad6852dfd879727904846e03c415e80ff9081b5d0a152963b5066d72b8a7d93d
|
"""
Container page in Studio
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import Promise, EmptyPromise
from common.test.acceptance.pages.studio import BASE_URL
from common.test.acceptance.pages.studio.utils import HelpMixin
from common.test.acceptance.pages.common.utils import click_css, confirm_prompt
from common.test.acceptance.pages.studio.utils import type_in_codemirror
class ContainerPage(PageObject, HelpMixin):
"""
Container page in Studio
"""
NAME_SELECTOR = '.page-header-title'
NAME_INPUT_SELECTOR = '.page-header .xblock-field-input'
NAME_FIELD_WRAPPER_SELECTOR = '.page-header .wrapper-xblock-field'
ADD_MISSING_GROUPS_SELECTOR = '.notification-action-button[data-notification-action="add-missing-groups"]'
def __init__(self, browser, locator):
super(ContainerPage, self).__init__(browser)
self.locator = locator
@property
def url(self):
"""URL to the container page for an xblock."""
return "{}/container/{}".format(BASE_URL, self.locator)
@property
def name(self):
titles = self.q(css=self.NAME_SELECTOR).text
if titles:
return titles[0]
else:
return None
def is_browser_on_page(self):
def _xblock_count(class_name, request_token):
return len(self.q(css='{body_selector} .xblock.{class_name}[data-request-token="{request_token}"]'.format(
body_selector=XBlockWrapper.BODY_SELECTOR, class_name=class_name, request_token=request_token
)).results)
def _is_finished_loading():
is_done = False
# Get the request token of the first xblock rendered on the page and assume it is correct.
data_request_elements = self.q(css='[data-request-token]')
if len(data_request_elements) > 0:
request_token = data_request_elements.first.attrs('data-request-token')[0]
# Then find the number of Studio xblock wrappers on the page with that request token.
num_wrappers = len(self.q(css='{} [data-request-token="{}"]'.format(XBlockWrapper.BODY_SELECTOR, request_token)).results)
# Wait until all components have been loaded and marked as either initialized or failed.
# See:
# - common/static/js/xblock/core.js which adds the class "xblock-initialized"
# at the end of initializeBlock.
# - common/static/js/views/xblock.js which adds the class "xblock-initialization-failed"
# if the xblock threw an error while initializing.
num_initialized_xblocks = _xblock_count('xblock-initialized', request_token)
num_failed_xblocks = _xblock_count('xblock-initialization-failed', request_token)
is_done = num_wrappers == (num_initialized_xblocks + num_failed_xblocks)
return (is_done, is_done)
def _loading_spinner_hidden():
""" promise function to check loading spinner state """
is_spinner_hidden = self.q(css='div.ui-loading.is-hidden').present
return is_spinner_hidden, is_spinner_hidden
# First make sure that an element with the view-container class is present on the page,
# and then wait for the loading spinner to go away and all the xblocks to be initialized.
return (
self.q(css='body.view-container').present and
Promise(_loading_spinner_hidden, 'loading spinner is hidden.').fulfill() and
Promise(_is_finished_loading, 'Finished rendering the xblock wrappers.').fulfill()
)
def wait_for_component_menu(self):
"""
Waits until the menu bar of components is present on the page.
"""
EmptyPromise(
lambda: self.q(css='div.add-xblock-component').present,
'Wait for the menu of components to be present'
).fulfill()
@property
def xblocks(self):
"""
Return a list of xblocks loaded on the container page.
"""
return self._get_xblocks()
@property
def inactive_xblocks(self):
"""
Return a list of inactive xblocks loaded on the container page.
"""
return self._get_xblocks(".is-inactive ")
@property
def active_xblocks(self):
"""
Return a list of active xblocks loaded on the container page.
"""
return self._get_xblocks(".is-active ")
@property
def displayed_children(self):
"""
Return a list of displayed xblocks loaded on the container page.
"""
return self._get_xblocks()[0].children
@property
def publish_title(self):
"""
Returns the title as displayed on the publishing sidebar component.
"""
return self.q(css='.pub-status').first.text[0]
@property
def release_title(self):
"""
Returns the title before the release date in the publishing sidebar component.
"""
return self.q(css='.wrapper-release .title').first.text[0]
@property
def release_date(self):
"""
Returns the release date of the unit (with ancestor inherited from), as displayed
in the publishing sidebar component.
"""
return self.q(css='.wrapper-release .copy').first.text[0]
@property
def last_saved_text(self):
"""
Returns the last saved message as displayed in the publishing sidebar component.
"""
return self.q(css='.wrapper-last-draft').first.text[0]
@property
def last_published_text(self):
"""
Returns the last published message as displayed in the sidebar.
"""
return self.q(css='.wrapper-last-publish').first.text[0]
@property
def currently_visible_to_students(self):
"""
Returns True if the unit is marked as currently visible to students
(meaning that a warning is being displayed).
"""
warnings = self.q(css='.container-message .warning')
if not warnings.is_present():
return False
warning_text = warnings.first.text[0]
return warning_text == "Caution: The last published version of this unit is live. By publishing changes you will change the student experience."
def shows_inherited_staff_lock(self, parent_type=None, parent_name=None):
"""
Returns True if the unit inherits staff lock from a section or subsection.
"""
return self.q(css='.bit-publishing .wrapper-visibility .copy .inherited-from').visible
@property
def sidebar_visibility_message(self):
"""
Returns the text within the sidebar visibility section.
"""
return self.q(css='.bit-publishing .wrapper-visibility').first.text[0]
@property
def publish_action(self):
"""
Returns the link for publishing a unit.
"""
return self.q(css='.action-publish').first
def publish(self):
"""
Publishes the container.
"""
self.publish_action.click()
self.wait_for_ajax()
def discard_changes(self):
"""
Discards draft changes (which will then re-render the page).
"""
click_css(self, 'a.action-discard', 0, require_notification=False)
confirm_prompt(self)
self.wait_for_ajax()
@property
def is_staff_locked(self):
""" Returns True if staff lock is currently enabled, False otherwise """
for attr in self.q(css='a.action-staff-lock>.fa').attrs('class'):
if 'fa-check-square-o' in attr:
return True
return False
def toggle_staff_lock(self, inherits_staff_lock=False):
"""
Toggles "hide from students" which enables or disables a staff-only lock.
Returns True if the lock is now enabled, else False.
"""
was_locked_initially = self.is_staff_locked
if not was_locked_initially:
self.q(css='a.action-staff-lock').first.click()
else:
click_css(self, 'a.action-staff-lock', 0, require_notification=False)
if not inherits_staff_lock:
confirm_prompt(self)
self.wait_for_ajax()
return not was_locked_initially
def view_published_version(self):
"""
Clicks "View Live Version", which will open the published version of the unit page in the LMS.
Switches the browser to the newly opened LMS window.
"""
self.q(css='.button-view').first.click()
self._switch_to_lms()
def verify_publish_title(self, expected_title):
"""
Waits for the publish title to change to the expected value.
"""
def wait_for_title_change():
"""
Promise function to check publish title.
"""
return (self.publish_title == expected_title, self.publish_title)
Promise(wait_for_title_change, "Publish title incorrect. Found '" + self.publish_title + "'").fulfill()
def preview(self):
"""
Clicks "Preview", which will open the draft version of the unit page in the LMS.
Switches the browser to the newly opened LMS window.
"""
self.q(css='.button-preview').first.click()
self._switch_to_lms()
def _switch_to_lms(self):
"""
Assumes LMS has opened-- switches to that window.
"""
browser_window_handles = self.browser.window_handles
# Switch to browser window that shows HTML Unit in LMS
        # The last handle refers to the most recently opened window
self.browser.switch_to_window(browser_window_handles[-1])
def _get_xblocks(self, prefix=""):
return self.q(css=prefix + XBlockWrapper.BODY_SELECTOR).map(
lambda el: XBlockWrapper(self.browser, el.get_attribute('data-locator'))).results
def duplicate(self, source_index):
"""
Duplicate the item with index source_index (based on vertical placement in page).
"""
click_css(self, '.duplicate-button', source_index)
def delete(self, source_index):
"""
Delete the item with index source_index (based on vertical placement in page).
Only visible items are counted in the source_index.
The index of the first item is 0.
"""
# Click the delete button
click_css(self, '.delete-button', source_index, require_notification=False)
# Click the confirmation dialog button
confirm_prompt(self)
def edit(self):
"""
Clicks the "edit" button for the first component on the page.
"""
return _click_edit(self, '.edit-button', '.xblock-studio_view')
def verify_confirmation_message(self, message, verify_hidden=False):
"""
        Verify that the confirmation message is present or hidden.
"""
def _verify_message():
""" promise function to check confirmation message state """
text = self.q(css='#page-alert .alert.confirmation #alert-confirmation-title').text
return text and message not in text[0] if verify_hidden else text and message in text[0]
self.wait_for(_verify_message, description='confirmation message {status}'.format(
status='hidden' if verify_hidden else 'present'
))
def click_undo_move_link(self):
"""
Click undo move link.
"""
click_css(self, '#page-alert .alert.confirmation .nav-actions .action-primary')
def click_take_me_there_link(self):
"""
Click take me there link.
"""
click_css(self, '#page-alert .alert.confirmation .nav-actions .action-secondary', require_notification=False)
def add_missing_groups(self):
"""
Click the "add missing groups" link.
Note that this does an ajax call.
"""
self.q(css=self.ADD_MISSING_GROUPS_SELECTOR).first.click()
self.wait_for_ajax()
# Wait until all xblocks rendered.
self.wait_for_page()
def missing_groups_button_present(self):
"""
Returns True if the "add missing groups" button is present.
"""
return self.q(css=self.ADD_MISSING_GROUPS_SELECTOR).present
def get_xblock_information_message(self):
"""
Returns an information message for the container page.
"""
return self.q(css=".xblock-message.information").first.text[0]
def is_inline_editing_display_name(self):
"""
Return whether this container's display name is in its editable form.
"""
return "is-editing" in self.q(css=self.NAME_FIELD_WRAPPER_SELECTOR).first.attrs("class")[0]
def get_category_tab_names(self, category_type):
"""
Returns list of tab name in a category.
Arguments:
category_type (str): category type
Returns:
list
"""
self.q(css='.add-xblock-component-button[data-type={}]'.format(category_type)).first.click()
return self.q(css='.{}-type-tabs>li>a'.format(category_type)).text
def get_category_tab_components(self, category_type, tab_index):
"""
Return list of component names in a tab in a category.
Arguments:
category_type (str): category type
tab_index (int): tab index in a category
Returns:
list
"""
css = '#tab{tab_index} button[data-category={category_type}] span'.format(
tab_index=tab_index,
category_type=category_type
)
return self.q(css=css).html
class XBlockWrapper(PageObject):
"""
A PageObject representing a wrapper around an XBlock child shown on the Studio container page.
"""
url = None
BODY_SELECTOR = '.studio-xblock-wrapper'
NAME_SELECTOR = '.xblock-display-name'
VALIDATION_SELECTOR = '.xblock-message.validation'
COMPONENT_BUTTONS = {
'basic_tab': '.editor-tabs li.inner_tab_wrap:nth-child(1) > a',
'advanced_tab': '.editor-tabs li.inner_tab_wrap:nth-child(2) > a',
'settings_tab': '.editor-modes .settings-button',
'save_settings': '.action-save',
}
def __init__(self, browser, locator):
super(XBlockWrapper, self).__init__(browser)
self.locator = locator
def is_browser_on_page(self):
return self.q(css='{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)).present
def _bounded_selector(self, selector):
"""
Return `selector`, but limited to this particular `CourseOutlineChild` context
"""
return '{}[data-locator="{}"] {}'.format(
self.BODY_SELECTOR,
self.locator,
selector
)
@property
def student_content(self):
"""
Returns the text content of the xblock as displayed on the container page.
"""
return self.q(css=self._bounded_selector('.xblock-student_view'))[0].text
@property
def author_content(self):
"""
Returns the text content of the xblock as displayed on the container page.
(For blocks which implement a distinct author_view).
"""
return self.q(css=self._bounded_selector('.xblock-author_view'))[0].text
@property
def name(self):
titles = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).text
if titles:
return titles[0]
else:
return None
@property
def children(self):
"""
Will return any first-generation descendant xblocks of this xblock.
"""
descendants = self.q(css=self._bounded_selector(self.BODY_SELECTOR)).filter(lambda el: el.is_displayed()).map(
lambda el: XBlockWrapper(self.browser, el.get_attribute('data-locator'))).results
# Now remove any non-direct descendants.
grandkids = []
for descendant in descendants:
grandkids.extend(descendant.children)
grand_locators = [grandkid.locator for grandkid in grandkids]
return [descendant for descendant in descendants if descendant.locator not in grand_locators]
@property
def has_validation_message(self):
""" Is a validation warning/error/message shown? """
return self.q(css=self._bounded_selector(self.VALIDATION_SELECTOR)).present
def _validation_paragraph(self, css_class):
""" Helper method to return the <p> element of a validation warning """
return self.q(css=self._bounded_selector('{} p.{}'.format(self.VALIDATION_SELECTOR, css_class)))
@property
def has_validation_warning(self):
""" Is a validation warning shown? """
return self._validation_paragraph('warning').present
@property
def has_validation_error(self):
""" Is a validation error shown? """
return self._validation_paragraph('error').present
@property
# pylint: disable=invalid-name
def has_validation_not_configured_warning(self):
""" Is a validation "not configured" message shown? """
return self._validation_paragraph('not-configured').present
@property
def validation_warning_text(self):
""" Get the text of the validation warning. """
return self._validation_paragraph('warning').text[0]
@property
def validation_error_text(self):
""" Get the text of the validation error. """
return self._validation_paragraph('error').text[0]
@property
def validation_error_messages(self):
return self.q(css=self._bounded_selector('{} .xblock-message-item.error'.format(self.VALIDATION_SELECTOR))).text
@property
# pylint: disable=invalid-name
def validation_not_configured_warning_text(self):
""" Get the text of the validation "not configured" message. """
return self._validation_paragraph('not-configured').text[0]
@property
def preview_selector(self):
return self._bounded_selector('.xblock-student_view,.xblock-author_view')
@property
def has_group_visibility_set(self):
return self.q(css=self._bounded_selector('.wrapper-xblock.has-group-visibility-set')).is_present()
@property
def has_duplicate_button(self):
"""
Returns true if this xblock has a 'duplicate' button
"""
        return self.q(css=self._bounded_selector('.duplicate-button')).is_present()
@property
def has_delete_button(self):
"""
Returns true if this xblock has a 'delete' button
"""
        return self.q(css=self._bounded_selector('.delete-button')).is_present()
@property
def has_edit_visibility_button(self):
"""
Returns true if this xblock has an 'edit visibility' button
"""
return self.q(css=self._bounded_selector('.visibility-button')).is_present()
@property
def has_move_modal_button(self):
"""
Returns True if this xblock has move modal button else False
"""
return self.q(css=self._bounded_selector('.move-button')).is_present()
def go_to_container(self):
"""
Open the container page linked to by this xblock, and return
an initialized :class:`.ContainerPage` for that xblock.
"""
return ContainerPage(self.browser, self.locator).visit()
def edit(self):
"""
Clicks the "edit" button for this xblock.
"""
return _click_edit(self, '.edit-button', '.xblock-studio_view', self._bounded_selector)
def edit_visibility(self):
"""
Clicks the edit visibility button for this xblock.
"""
return _click_edit(self, '.visibility-button', '.xblock-visibility_view', self._bounded_selector)
def open_advanced_tab(self):
"""
Click on Advanced Tab.
"""
self._click_button('advanced_tab')
def open_basic_tab(self):
"""
Click on Basic Tab.
"""
self._click_button('basic_tab')
def open_settings_tab(self):
"""
If editing, click on the "Settings" tab
"""
self._click_button('settings_tab')
def open_move_modal(self):
"""
Opens the move modal.
"""
click_css(self, '.move-button', require_notification=False)
self.wait_for(
lambda: self.q(css='.modal-window.move-modal').visible, description='move modal is visible'
)
def set_field_val(self, field_display_name, field_value):
"""
If editing, set the value of a field.
"""
selector = '{} li.field label:contains("{}") + input'.format(self.editor_selector, field_display_name)
script = "$(arguments[0]).val(arguments[1]).change();"
self.browser.execute_script(script, selector, field_value)
def reset_field_val(self, field_display_name):
"""
If editing, reset the value of a field to its default.
"""
scope = '{} li.field label:contains("{}")'.format(self.editor_selector, field_display_name)
script = "$(arguments[0]).siblings('.setting-clear').click();"
self.browser.execute_script(script, scope)
def set_codemirror_text(self, text, index=0):
"""
Set the text of a CodeMirror editor that is part of this xblock's settings.
"""
type_in_codemirror(self, index, text, find_prefix='$("{}").find'.format(self.editor_selector))
def set_license(self, license_type):
"""
Uses the UI to set the course's license to the given license_type (str)
"""
css_selector = (
"ul.license-types li[data-license={license_type}] button"
).format(license_type=license_type)
self.wait_for_element_presence(
css_selector,
"{license_type} button is present".format(license_type=license_type)
)
self.q(css=css_selector).click()
def save_settings(self):
"""
Click on settings Save button.
"""
self._click_button('save_settings')
@property
def editor_selector(self):
return '.xblock-studio_view'
def _click_button(self, button_name):
"""
Click on a button as specified by `button_name`
Arguments:
button_name (str): button name
"""
self.q(css=self.COMPONENT_BUTTONS[button_name]).first.click()
self.wait_for_ajax()
def go_to_group_configuration_page(self):
"""
Go to the Group Configuration used by the component.
"""
self.q(css=self._bounded_selector('span.message-text a')).first.click()
def is_placeholder(self):
"""
Checks to see if the XBlock is rendered as a placeholder without a preview.
"""
return not self.q(css=self._bounded_selector('.wrapper-xblock article')).present
@property
def group_configuration_link_name(self):
"""
Get Group Configuration name from link.
"""
return self.q(css=self._bounded_selector('span.message-text a')).first.text[0]
def _click_edit(page_object, button_css, view_css, bounded_selector=lambda x: x):
"""
Click on the first editing button found and wait for the Studio editor to be present.
"""
page_object.q(css=bounded_selector(button_css)).first.click()
EmptyPromise(
lambda: page_object.q(css=view_css).present,
'Wait for the Studio editor to be present'
).fulfill()
return page_object
|
romain-li/edx-platform
|
common/test/acceptance/pages/studio/container.py
|
Python
|
agpl-3.0
| 23,626
|
[
"VisIt"
] |
e2822eb0b94415a6b503fc2cb465377bea59f00d9c45494960581bc069aafc66
|
# -*- coding: utf-8 -*-
"""Tests for interchange with JGIF."""
import json
import logging
import sys
import unittest
from pybel import from_cbn_jgif, to_jgif
from pybel.constants import (
ACTIVITY,
ANNOTATIONS,
CITATION,
CITATION_TYPE_OTHER,
CITATION_TYPE_PUBMED,
DECREASES,
DIRECTLY_INCREASES,
EFFECT,
EVIDENCE,
IDENTIFIER,
MODIFIER,
NAMESPACE,
RELATION,
TARGET_MODIFIER,
)
from pybel.dsl import (
Abundance,
BiologicalProcess,
ComplexAbundance,
NamedComplexAbundance,
Pathology,
Protein,
ProteinModification,
)
from pybel.language import activity_mapping
from pybel.testing.constants import test_jgif_path
from tests.constants import TestGraphMixin
logging.getLogger("pybel.parser").setLevel(20)
calcium = Abundance("SCHEM", "Calcium")
calcineurin_complex = NamedComplexAbundance("SCOMP", "Calcineurin Complex")
foxo3 = Protein("HGNC", "FOXO3")
tcell_proliferation = BiologicalProcess("GO", "CD8-positive, alpha-beta T cell proliferation")
il15 = Protein("HGNC", "IL15")
il2rg = Protein("MGI", "Il2rg")
jgif_expected_nodes = {
calcium,
calcineurin_complex,
foxo3,
tcell_proliferation,
il15,
il2rg,
Protein("HGNC", "CXCR6"),
Protein("HGNC", "IL15RA"),
BiologicalProcess("GO", "lymphocyte chemotaxis"),
Protein("HGNC", "IL2RG"),
Protein("HGNC", "ZAP70"),
NamedComplexAbundance("SCOMP", "T Cell Receptor Complex"),
BiologicalProcess("GO", "T cell activation"),
Protein("HGNC", "CCL3"),
Protein("HGNC", "PLCG1"),
Protein("HGNC", "FASLG"),
Protein("HGNC", "IDO1"),
Protein("HGNC", "IL2"),
Protein("HGNC", "CD8A"),
Protein("HGNC", "CD8B"),
Protein("HGNC", "PLCG1"),
Protein("HGNC", "BCL2"),
Protein("HGNC", "CCR3"),
Protein("HGNC", "IL2RB"),
Protein("HGNC", "CD28"),
Pathology("SDIS", "Cytotoxic T-cell activation"),
Protein("HGNC", "FYN"),
Protein("HGNC", "CXCL16"),
Protein("HGNC", "CCR5"),
Protein("HGNC", "LCK"),
Protein("SFAM", "Chemokine Receptor Family"),
Protein("HGNC", "CXCL9"),
Pathology("SDIS", "T-cell migration"),
Protein("HGNC", "CXCR3"),
Abundance("CHEBI", "acrolein"),
Protein("HGNC", "IDO2"),
Pathology("MESHD", "Pulmonary Disease, Chronic Obstructive"),
Protein("HGNC", "IFNG"),
Protein("HGNC", "TNFRSF4"),
Protein("HGNC", "CTLA4"),
Protein("HGNC", "GZMA"),
Protein("HGNC", "PRF1"),
Protein("HGNC", "TNF"),
Protein("SFAM", "Chemokine Receptor Family"),
ComplexAbundance([Protein("HGNC", "CD8A"), Protein("HGNC", "CD8B")]),
ComplexAbundance([Protein("HGNC", "CD8A"), Protein("HGNC", "CD8B")]),
Protein("HGNC", "PLCG1", variants=ProteinModification("Ph", "Tyr")),
Protein("EGID", "21577"),
}
jgif_expected_edges = [
(
calcium,
calcineurin_complex,
{
RELATION: DIRECTLY_INCREASES,
EVIDENCE: "NMDA-mediated influx of calcium led to activated of the calcium-dependent phosphatase calcineurin and the subsequent dephosphorylation and activation of the protein-tyrosine phosphatase STEP",
CITATION: {NAMESPACE: CITATION_TYPE_PUBMED, IDENTIFIER: "12483215"},
TARGET_MODIFIER: {MODIFIER: ACTIVITY, EFFECT: activity_mapping["phos"]},
ANNOTATIONS: {"Species": {"10116": True}, "Cell": {"neuron": True}},
},
),
(
foxo3,
tcell_proliferation,
{
RELATION: DECREASES,
EVIDENCE: '"These data suggested that FOXO3 downregulates the accumulation of CD8 T cells in tissue specific fashion during an acute LCMV [lymphocytic choriomeningitis virus] infection." (p. 3)',
CITATION: {NAMESPACE: CITATION_TYPE_OTHER, IDENTIFIER: "22359505"},
ANNOTATIONS: {
"Species": {"10090": True},
"Disease": {"Viral infection": True},
},
},
),
(
il15,
il2rg,
{
RELATION: DIRECTLY_INCREASES,
EVIDENCE: "IL-15 utilizes ... the common cytokine receptor γ-chain (CD132) for signal transduction in lymphocytes",
CITATION: {NAMESPACE: CITATION_TYPE_OTHER, IDENTIFIER: "20335267"},
TARGET_MODIFIER: {MODIFIER: ACTIVITY, EFFECT: activity_mapping["cat"]},
ANNOTATIONS: {
"Tissue": {"lung": True},
"Species": {"9606": True},
},
},
),
]
class TestJgif(TestGraphMixin):
"""Tests data interchange of JGIF."""
@unittest.skipIf(sys.platform.startswith("win"), "does not work on Windows")
def test_jgif_interchange(self):
"""Tests data from CBN"""
with open(test_jgif_path) as f:
graph_jgif_dict = json.load(f)
graph = from_cbn_jgif(graph_jgif_dict)
self.assertEqual(jgif_expected_nodes, set(graph))
for u, v, d in jgif_expected_edges:
self.assert_has_edge(graph, u, v, permissive=False, **d)
# TODO test more thoroughly?
export_jgif = to_jgif(graph)
self.assertIsInstance(export_jgif, dict)
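# Allow running this module directly (an assumption; the upstream suite is
# normally driven by a dedicated test runner).
if __name__ == "__main__":
    unittest.main()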
|
pybel/pybel
|
tests/test_io/test_jgif.py
|
Python
|
mit
| 5,119
|
[
"NEURON",
"Pybel"
] |
730818c2e5310c92a750582ffc4e15542120722e6f419a389d747535cdde9e5d
|
###############################################################################
# Copyright 2017-2021 - Climate Research Division
# Environment and Climate Change Canada
#
# This file is part of the "fstd2nc" package.
#
# "fstd2nc" is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# "fstd2nc" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with "fstd2nc". If not, see <http://www.gnu.org/licenses/>.
###############################################################################
from fstd2nc.stdout import _, info, warn, error
from fstd2nc.mixins import BufferBase
#################################################
# Mixin for adding a compatibility layer to the netCDF output file, so it can
# also function as a valid FSTD file.
class FSTD_Compat (BufferBase):
@classmethod
def _cmdline_args (cls, parser):
import argparse
super(FSTD_Compat,cls)._cmdline_args(parser)
parser.add_argument('--fstd-compat', action='store_true', help=_('Adds a compatibility layer to the netCDF output file, so it can also function as a valid FSTD file. EXPERIMENTAL.'))
def __init__ (self, *args, **kwargs):
"""
fstd_compat : bool, optional
Adds a compatibility layer to the netCDF output file, so it can
also function as a valid FSTD file. EXPERIMENTAL.
"""
# Check if compatibility interface should be activated.
self._fstd_compat = kwargs.pop('fstd_compat', False)
if self._fstd_compat:
self.to_netcdf = self._to_netcdf_compat
super(FSTD_Compat,self).__init__(*args,**kwargs)
# Override fstluk to keep track of which FST records were used in the
# conversion. The header information for these records will be added to the
# output file.
def _fstluk (self, rec_id, dtype=None, rank=None, dataArray=None):
import numpy as np
if not self._fstd_compat:
return super(FSTD_Compat,self)._fstluk(rec_id, dtype, rank, dataArray)
if not hasattr(self,'_used_rec_ids'):
self._used_rec_ids = []
# If rec_id is a dict from rpnpy, then convert it to an index.
if isinstance(rec_id,dict):
key = rec_id['key']>>10
rec_id = int(np.where(self._headers['key']==key)[0])
self._used_rec_ids.append(rec_id)
return super(FSTD_Compat,self)._fstluk (rec_id, dtype=dtype, rank=rank, dataArray=dataArray)
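  # Usage sketch (illustrative; `Buffer` is the public fstd2nc entry point
  # that mixes in this class, and the file names are placeholders):
  #
  #   import fstd2nc
  #   buf = fstd2nc.Buffer('input.fst', fstd_compat=True)
  #   buf.to_netcdf('hybrid.nc')  # readable by both netCDF4 and librmn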
def _to_netcdf_compat (self, filename, nc_format='NETCDF4', global_metadata=None, zlib=False, compression=4, progress=False):
"""
Write the records to a netCDF file.
Requires the netCDF4 package.
"""
from fstd2nc.mixins import _var_type, _ProgressBar, _FakeBar
from netCDF4 import Dataset
import numpy as np
import os
import rpnpy.librmn.all as rmn
# This only works with an uncompressed netCDF4 file.
nc_format = 'NETCDF4'
zlib = False
# Get a minimal netCDF4 header, to be used later.
Dataset(filename, "w", format='NETCDF4').close() # Get a netCDF4 header.
with open(filename,'rb') as f:
nc_header = np.fromfile(f,'B')
os.remove(filename) # Got the netCDF4, now start over.
# Start the file with an FSTD file header followed by a netCDF4 header.
# The FSTD header must be at the very beginning of the file in order for
# librmn to recognize the file, but the netCDF4 header is more flexible.
iun = rmn.fstopenall(filename, rmn.FST_RW)
rmn.fstcloseall(iun) # Write FSTD header.
# Write the netCDF4 header in a very particular location so it can be
# detected.
# The netCDF4 library will check the beginning of the file, but also check
# byte positions at powers of two starting at 512, 1024, 2048, etc.
# Here we put it at offset 32768 (hex 0x8000), which is just after the FSTD
# header and the first FSTD directory "page".
with open(filename,'r+b') as f:
f.seek(0x8000,0)
nc_header.tofile(f)
# Now, open the file as netCDF4 format.
f = Dataset(filename, "r+")
# Apply global metadata (from config files and global_metadata argument).
if 'global' in getattr(self,'_metadata',{}):
f.setncatts(self._metadata['global'])
if global_metadata is not None:
f.setncatts(global_metadata)
# Collect all the records that will be read/written.
# List of (key,recshape,ncvar,ncind).
# Note: derived variables (with values stored in memory) will be written
# immediately, bypassing this list.
io = []
self._makevars()
# Define the dimensions.
for axis in self._iter_axes():
# Special case: make the time dimension unlimited.
if axis.name == 'time' and self._time_unlimited:
f.createDimension(axis.name, None)
else:
f.createDimension(axis.name, len(axis))
# When writing data to netCDF4, need to make sure it's aligned properly.
# Also need to get the file offset when the data is written.
def write_data (v, ind, array, npad=[0]):
from os.path import getsize
import rpnpy.librmn.all as rmn
# Align the data on 8-byte boundaries, which is what FSTD expects.
while True:
f.sync()
alignment = getsize(filename) % 8
if alignment == 0: break
# Add dummy global attributes as a way to pad out the file.
# The chosen size for the attributes is from trial-and-error. It seems
# to get the right padding after a few iterations.
# Ideally it would be nice to close the file, add a few bytes of
# padding, update the netCDF4 header and re-open with the required
# alignment, but there's a bug when re-opening a netCDF4 file with
# big-endian variables (https://github.com/Unidata/netcdf-c/issues/1802).
# The current approach of writing attributes wastes more space (a few
# dozen kilobytes?) but it seems to do the job.
f.setncattr('_pad%05d'%npad[0], np.zeros(4096+(8-alignment),dtype='B'))
npad[0] += 1
# Next, write the data
address = getsize(filename)
v[ind] = array
f.sync()
# Finally, locate *where* the data was written to file.
array = array.flatten()
with open(filename,'rb') as f2:
f2.seek(address,0)
test = np.fromfile(f2,v.dtype,array.size)
if not np.all(test==array):
address = getsize(filename) - array.size*array.dtype.itemsize
f2.seek(address,0)
test = np.fromfile(f2,v.dtype,array.size)
if not np.all(test==array):
return None
# Determine which FST datyp would decode this data.
datyp = rmn.dtype_numpy2fst(v.dtype.newbyteorder('='), compress=False, missing=False)
# Floating-point must be IEEE, which is what netCDF4 would have written.
if datyp == 1:
datyp = 5
nbits = v.dtype.itemsize*8
# <Address of chunk>, <length>, <RPN datyp>, <nbits>
return address, array.size*v.dtype.itemsize, datyp, nbits
# Addresses of arrays not associated with FSTD records.
# Keep track of these in case they *can* be associated with records later.
# E.g., lat/lon coordinate arrays might exactly match data from ^^ or >>.
direct_addresses = {}
# Generate the variable structures.
for var in self._iter_objects():
# Write the variable.
# Easy case: already have the data.
if hasattr(var,'array'):
# Tell netCDF4 to use big-endian encoding of data, where applicable.
# FSTD requires big-endian encoding (and netCDF4 can work with either).
if var.array.dtype.itemsize > 1:
v = f.createVariable(var.name, datatype=var.array.dtype.newbyteorder('>'), endian='big', dimensions=var.dims, zlib=zlib, complevel=compression)
else:
v = f.createVariable(var.name, datatype=var.array.dtype, dimensions=var.dims, zlib=zlib, complevel=compression)
# Write the metadata.
v.setncatts(var.atts)
direct_addresses[var.array.tobytes()] = write_data (v, (), var.array)
continue
# Hard case: only have the record indices, need to loop over the records.
# Get the shape of a single record for the variable.
if hasattr(var,'record_id'):
record_shape = var.shape[var.record_id.ndim:]
elif hasattr(var,'chunksize'):
record_shape = var.chunksize
else:
continue
# Use this as the "chunk size" for the netCDF file, to improve I/O
# performance.
chunksizes = (1,)*(len(var.axes)-len(record_shape)) + record_shape
if hasattr(self,'_fill_value') and var.dtype.name.startswith('float32'):
fill_value = self._fill_value
else:
fill_value = None
# netCDF3 can't handle unsigned ints, so cast to signed.
dtype = var.dtype
if dtype.name.startswith('uint') and nc_format.startswith('NETCDF3'):
warn (_("netCDF3 does not support unsigned ints. Converting %s to signed int.")%var.name)
dtype = np.dtype(dtype.name[1:])
# Tell netCDF4 to use big-endian encoding of data, where applicable.
# FSTD requires big-endian encoding (and netCDF4 can work with either).
v = f.createVariable(var.name, datatype=dtype.newbyteorder('>'), endian='big', dimensions=var.dims, zlib=zlib, complevel=compression, chunksizes=chunksizes, fill_value=fill_value)
# Turn off auto scaling of variables - want to encode the values as-is.
# 'scale_factor' and 'add_offset' will only be applied when *reading* the
# the file after it's created.
v.set_auto_scale(False)
# Write the metadata.
v.setncatts(var.atts)
# Write the data.
if hasattr(var,'record_id'):
indices = list(np.ndindex(var.record_id.shape))
keys = map(int,var.record_id.flatten())
else:
indices = list(var.keys())
keys = list(var.chunks.values())
record_shape = None # Reshaping with chunked data not supported.
for r, ind in zip(keys,indices):
if r >= 0:
io.append((r,record_shape,v,ind))
# Check if no data records exist and no coordinates were converted.
if len(io) == 0 and len(f.variables) == 0:
warn(_("No relevant FST records were found."))
# Now, do the actual transcribing of the data.
# Read/write the data in the same order of records in the RPN file(s) to
# improve performance.
Bar = _ProgressBar if (progress is True and len(io) > 0) else _FakeBar
bar = Bar(_("Saving netCDF file"), suffix="%(percent)d%% [%(myeta)s]")
chunk_addresses = {}
chunk_sizes = {}
for r,shape,v,ind in bar.iter(sorted(io)):
try:
data = self._fstluk(r,dtype=v.dtype.newbyteorder('='))['d'].transpose().reshape(shape)
addr = write_data (v, ind, data)
if addr is not None:
chunk_addresses[r] = addr
else:
warn(_("Problem writing compatible record for %s:%s. Writing separate netCDF / FSTD versions instead.")%(v.name,ind))
except (IndexError,ValueError):
warn(_("Internal problem with the script - unable to get data for '%s'")%v.name)
continue
# Clean up attributes used for padding.
for att in list(f.ncattrs()):
if att.startswith('_pad'):
f.delncattr(att)
f.close()
# Get list of all records that were relevant to the conversion.
used_rec_ids = set(self._used_rec_ids)
# Include all metadata records. Difficult to know for certain which ones
# were used, since librmn may have read them internally in some routines
# like horizontal / vertical grid extraction.
ismeta = self._headers['ismeta']
used_rec_ids.update(np.where(ismeta==1)[0])
# Prepare the file for writing FSTD structures.
with open(filename,'r+b') as f:
# Disable "aux keys" in FSTD header.
# Normally, FSTD will expect two 32-bit integers of zeros preceding the
# actual data (in two unused "aux keys"). If those zeros aren't there,
# then librmn will abort if you try to read the data.
# We can't control the bytes just before the data in this scenario,
# since the file layout is being dictated by netCDF. As a workaround,
# we use a non-standard (but still valid) FSTD header that sets the
# number of "aux keys" to zero, effectively disabling that check.
f.seek(0x2f,0)
f.write(b'\0')
# Pad the end of file to align with 8-byte boundary.
f.seek(0,2)
while (f.tell()%8!=0):
f.write(b'0')
# Update file size info.
filesize = f.tell()
f.seek(0x10,0)
np.array([filesize//8],'>i4').tofile(f)
# Find the FST records that weren't directly written to netCDF variables.
# Examples include coordinates (>>,^^,!!), and mask (typvar=@@) records.
unwritten_rec_ids = used_rec_ids - set(chunk_addresses.keys())
# Write anything which isn't already in the file.
iun = rmn.fstopenall(filename,rmn.FST_RW)
for rec_id in unwritten_rec_ids:
d = self._fstluk(rec_id)
# This data might have already been written as a netCDF coordinate array.
    # E.g., could have obtained the "lat" coordinate from "^^", but the extraction
# would have been opaque to us (handled by gdll routine in librmn).
try:
chunk_addresses[rec_id] = direct_addresses[d['d'].transpose().tobytes()]
continue
except KeyError:
pass
# If not, then write it using the original FSTD parameters.
# This data wasn't used for the netCDF4 interface, so don't have to worry
# about making it netCDF-compatible.
rmn.fstecr(iun,d)
# Keep track of where the data was written.
prm = rmn.fstprm(rmn.fstinl(iun)[-1])
chunk_addresses[rec_id] = (prm['swa']-1)*8+72, (prm['lng']*4)-96, d['datyp'], d['nbits']
rmn.fstcloseall(iun)
# By this point, all data is in the file. This includes netCDF-only data,
# FSTD-only data, and shared netCDF/FSTD data arrays.
# The next step is to construct the FSTD record headers to annotate the
# data so it's accessible from the FSTD interface.
# First, generate the record header data in the low-level format that will
# be used in the file.
rec_ids, addresses = zip(*sorted(chunk_addresses.items()))
addresses, sizes, datyps, nbits = zip(*addresses)
# Transform addresses to 64-bit units, rewound for "header", origin at 1.
addresses = np.asarray(addresses) // 8 - 9 + 1
# Transform sizes to 64-bit units, using some padding.
sizes = np.asarray(sizes) // 8 + 12
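  # Worked example (illustrative numbers): a payload starting at byte 8264
  # with 4096 bytes of data gets address 8264//8 - 9 + 1 = 1025 (a 1-origin
  # count of 64-bit words, rewound past the 72-byte record header) and size
  # 4096//8 + 12 = 524 64-bit words, including padding.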
datyps = np.asarray(datyps)
nbits = np.asarray(nbits)
# Encode the record headers.
from fstd2nc.extra import structured_array
headers = structured_array(self._headers)
headers = headers[list(rec_ids)]
nrecs = len(headers)
buf = np.zeros((nrecs,18),'>i4')
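  # Each record header below is 18 big-endian 32-bit words (72 bytes), with
  # several fields bit-packed per word; e.g. word 2 carries 'deet' in its
  # upper 24 bits and 'nbits' in its low byte, matching the shifts used here.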
# deleted, select, size
buf[:,0] = 0x01000000 + sizes
# address
buf[:,1] = addresses
# deet, nbits
buf[:,2] = (headers['deet']<<8) + np.asarray(nbits,'b').view('B')
# ni, grtyp
buf[:,3] = (headers['ni']<<8) + np.asarray(headers['grtyp'],'|S1').view('B')
# nj, datyp
buf[:,4] = (headers['nj']<<8) + datyps
# nk, ubc
buf[:,5] = (headers['nk']<<12) + headers['ubc']
# npas, pad7
buf[:,6] = (headers['npas']<<6)
# ig4, ig2a
buf[:,7] = (headers['ig4']<<8) + (headers['ig2']>>16)
# ig1, ig2b
buf[:,8] = (headers['ig1']<<8) + ((headers['ig2']>>8)%256)
# ig3, ig2c
buf[:,9] = (headers['ig3']<<8) + (headers['ig2']%256)
# etik15, pad1
etiket = np.asarray(np.array(headers['etiket']).reshape(-1,1).view('B'),'int32')
etiket -= 32
buf[:,10] = (etiket[:,0]<<26) + (etiket[:,1]<<20) + (etiket[:,2]<<14) + (etiket[:,3]<<8) + (etiket[:,4]<<2)
# etik6a, pad2
buf[:,11] = (etiket[:,5]<<26) + (etiket[:,6]<<20) + (etiket[:,7]<<14) + (etiket[:,8]<<8) + (etiket[:,9]<<2)
# etikbc, typvar, pad3
typvar = np.asarray(np.array(headers['typvar']).reshape(-1,1).view('B'),'int32')
typvar -= 32
buf[:,12] = (etiket[:,10]<<26) + (etiket[:,11]<<20) + (typvar[:,0]<<14) + (typvar[:,1]<<8)
# nomvar, pad4
nomvar = np.asarray(np.array(headers['nomvar']).reshape(-1,1).view('B'),'int32')
nomvar -= 32
buf[:,13] = (nomvar[:,0]<<26) + (nomvar[:,1]<<20) + (nomvar[:,2]<<14) + (nomvar[:,3]<<8)
# ip1, levtyp
buf[:,14] = (headers['ip1']<<4)
# ip2, pad5
buf[:,15] = (headers['ip2']<<4)
# ip3, pad6
buf[:,16] = (headers['ip3']<<4)
# date_stamp
buf[:,17] = ((headers['datev']//10)<<3) + (headers['datev']%10)
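  # Example (illustrative): datev = 123456789 is stored as (12345678 << 3) + 9,
  # i.e. the truncated stamp in the upper bits with the final decimal digit
  # kept in the low 3 bits.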
# Next, write these raw record headers to the file, dividing them into
# FSTD directory pages.
with open(filename,'r+b') as f:
f.seek(0x18,0)
np.array([nrecs],'>i4').tofile(f) # number of extensions (?)
f.seek(0x1c,0)
np.array([1],'>i4').tofile(f) # Reset number of pages to one to start
f.seek(0x24,0)
np.array(np.max(sizes),'>i4').tofile(f) # maximum data length
f.seek(0x34,0)
np.array([nrecs],'>i4').tofile(f) # total num records
for rec0 in range(0,nrecs+1,256):
nrecs = len(addresses[rec0:rec0+256])
# Move to page location.
if rec0 == 0:
f.seek(0xd0)
else:
f.seek(0,2)
while (f.tell()%8!=0):
f.write(b'0')
page = f.tell()//8+1
# Write page header.
np.array([2308],'>i4').tofile(f) # idtyp, header length
np.array([page],'>i4').tofile(f) # address of this page
np.array([0,0],'>i4').tofile(f) # reserved
np.array([0,nrecs],'>i4').tofile(f) # next page, num records in page
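      # Each directory page carries a simple integrity check: an XOR of the
      # in-page record count with every 32-bit word of the page's entries.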
checksum = nrecs ^ 0
for b in buf[rec0:rec0+256].flatten():
checksum ^= b
np.array([checksum,0],'>i4').tofile(f) # checksum, reserved
buf[rec0:rec0+256].tofile(f)
# Pad last page to 256 entries.
nrecs_written = buf[rec0:rec0+256].shape[0]
if nrecs_written < 256:
np.zeros((256-nrecs_written,18),'>i4').tofile(f)
if rec0 != 0:
# Update total number of pages.
f.seek(0x1c,0)
npages = np.fromfile(f,'>i4',1)[0]
npages = npages + 1
f.seek(-4,1)
np.array([npages],'>i4').tofile(f)
# Update pointer to last page.
f.seek(0x20,0)
prev_page = np.fromfile(f,'>i4',1)[0]
f.seek(-4,1)
np.array([page],'>i4').tofile(f)
# Link this page to the next.
f.seek((prev_page-1)*8+16,0)
np.array([page],'>i4').tofile(f)
# Update checksum.
f.seek(4,1)
checksum = np.fromfile(f,'>i4',1)[0]
checksum ^= page
f.seek(-4,1)
np.array([checksum],'>i4').tofile(f)
# Update file size info (FST header)
f.seek(0,2)
filesize = f.tell()
f.seek(0x10,0)
np.array([filesize//8],'>i4').tofile(f)
# Update file size info (netCDF header)
f.seek(0x8028,0)
np.array([filesize],'<i4').tofile(f)
|
neishm/fstd2nc
|
fstd2nc/mixins/compat.py
|
Python
|
lgpl-3.0
| 19,391
|
[
"NetCDF"
] |
5b51267ac30da29c9b857ce4a36ec12f57d4a508a482e36505e9199ed5de159f
|
#!/usr/bin/env pythonw
import tkinter as Tk
from tkinter import ttk
import matplotlib
import numpy as np
import numpy.ma as ma
import new_cmaps
import matplotlib.colors as mcolors
import matplotlib.gridspec as gridspec
import matplotlib.patheffects as PathEffects
class EnergyPanel:
# A dictionary of all of the parameters for this plot with the default parameters
plot_param_dict = {'twoD' : 1,
'masked': 1,
'cnorm_type': 'Log',
'prtl_type': 0,
'show_cbar': True,
'weighted': False,
'show_shock': False,
'show_int_region': True,
'set_color_limits': False,
'xbins' : 200,
'ebins' : 200,
'v_min': -2.0,
'v_max' : 0,
'set_v_min': False,
'set_v_max': False,
'set_y_min' : False,
'y_min': 1.0,
'set_y_max': False,
'y_max': 200.0,
'spatial_x': True,
'spatial_y': False,
'interpolation': 'nearest',
'face_color': 'gainsboro'}
prtl_opts = ['proton', 'electron']
    gradient = np.linspace(0, 1, 256)  # A way to make the colorbar display better
gradient = np.vstack((gradient, gradient))
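    # (imshow needs a 2-D array, so the 1-D ramp is stacked into two identical
    # rows and rendered as an image to stand in for a colorbar.)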
def __init__(self, parent, figwrapper):
self.settings_window = None
self.FigWrap = figwrapper
self.parent = parent
self.ChartTypes = self.FigWrap.PlotTypeDict.keys()
self.chartType = self.FigWrap.chartType
self.figure = self.FigWrap.figure
self.InterpolationMethods = ['none','nearest', 'bilinear', 'bicubic', 'spline16',
'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric',
'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos']
# A variable that controls whether the energy integration region
# is shown
self.IntRegVar = Tk.IntVar()
self.IntRegVar.set(self.GetPlotParam('show_int_region'))
self.IntRegVar.trace('w', self.IntVarHandler)
        # Figure out the energy color of the integration region
        if self.GetPlotParam('prtl_type') == 1: #electrons
self.energy_color = self.parent.electron_color
else:
self.energy_color = self.parent.ion_color
        # A list that will hold any lines for the integration region
        self.IntRegionLines = []
def IntVarHandler(self, *args):
        # This should only be called by user interaction, when all the plots
        # already exist, so we can take some shortcuts and assume a lot of
        # things are already created.
self.SetPlotParam('show_int_region', self.IntRegVar.get(), update_plot = False)
if self.IntRegVar.get() == True:
# We need to show the integration region.
# Look for all the spectra plots and plot the lines.
for i in range(self.parent.MainParamDict['NumOfRows']):
for j in range(self.parent.MainParamDict['NumOfCols']):
if self.parent.SubPlotList[i][j].chartType == 'SpectraPlot':
k = min(self.parent.SubPlotList[i][j].graph.spect_num, len(self.parent.dashes_options)-1)
                    # figure out if we are an ion phase diagram or an electron one
if self.GetPlotParam('prtl_type') == 0:
# Append the left line to the list
self.IntRegionLines.append(self.axes.axvline(
max(self.parent.SubPlotList[i][j].graph.i_left_loc, self.xmin+1),
linewidth = 1.5, linestyle = '-', color = self.energy_color))
# Choose the right dashes pattern
self.IntRegionLines[-1].set_dashes(self.parent.dashes_options[k])
                        # Append the right line to the list
self.IntRegionLines.append(self.axes.axvline(
min(self.parent.SubPlotList[i][j].graph.i_right_loc, self.xmax-1),
linewidth = 1.5, linestyle = '-', color = self.energy_color))
# Choose the right dashes pattern
self.IntRegionLines[-1].set_dashes(self.parent.dashes_options[k])
else:
# Append the left line to the list
self.IntRegionLines.append(self.axes.axvline(
max(self.parent.SubPlotList[i][j].graph.e_left_loc, self.xmin+1),
linewidth = 1.5, linestyle = '-', color = self.energy_color))
# Choose the right dashes pattern
self.IntRegionLines[-1].set_dashes(self.parent.dashes_options[k])
                        # Append the right line to the list
self.IntRegionLines.append(self.axes.axvline(
min(self.parent.SubPlotList[i][j].graph.e_right_loc, self.xmax-1),
linewidth = 1.5, linestyle = '-', color = self.energy_color))
# Choose the right dashes pattern
self.IntRegionLines[-1].set_dashes(self.parent.dashes_options[k])
# CLOSES IF. NOW IF WE TURN OFF THE INTEGRATION REGIONS, we have to delete all the lines.
else:
            for i in range(len(self.IntRegionLines)):
self.IntRegionLines.pop(0).remove()
# Update the canvas
self.parent.canvas.draw()
self.parent.canvas.get_tk_widget().update_idletasks()
def ChangePlotType(self, str_arg):
self.FigWrap.ChangeGraph(str_arg)
def norm(self, vmin=None,vmax=None):
if self.GetPlotParam('cnorm_type') == 'Log':
return mcolors.LogNorm(vmin, vmax)
else:
return mcolors.Normalize(vmin, vmax)
def set_plot_keys(self):
        '''A helper function that will ensure that each hdf5 file will only be
opened once per time step'''
self.arrs_needed = ['c_omp', 'bx', 'istep', 'me', 'mi']
# First see if we will need to know the energy of the particle
        # (required for Lorentz boosts and setting e_min and e_max)
if self.GetPlotParam('prtl_type') == 0:
self.arrs_needed.append('xi')
if self.GetPlotParam('weighted'):
self.arrs_needed.append('chi')
self.arrs_needed.append('ui')
self.arrs_needed.append('vi')
self.arrs_needed.append('wi')
if self.GetPlotParam('prtl_type') == 1:
self.arrs_needed.append('xe')
if self.GetPlotParam('weighted'):
self.arrs_needed.append('che')
self.arrs_needed.append('ue')
self.arrs_needed.append('ve')
self.arrs_needed.append('we')
return self.arrs_needed
def LoadData(self):
''' A helper function that checks if the histogram has
already been calculated and if it hasn't, it calculates
it then stores it.'''
self.key_name = 'Energy_'
if self.GetPlotParam('masked'):
self.key_name += 'masked_'
if self.GetPlotParam('weighted'):
self.key_name += 'weighted_'
self.key_name += self.prtl_opts[self.GetPlotParam('prtl_type')]
if self.key_name in self.parent.DataDict.keys():
self.hist2d = self.parent.DataDict[self.key_name]
else:
# Generate the X-axis values
self.c_omp = self.FigWrap.LoadKey('c_omp')[0]
self.istep = self.FigWrap.LoadKey('istep')[0]
self.weights = None
self.x_values = None
self.y_values = None
# Choose the particle type and px, py, or pz
if self.GetPlotParam('prtl_type') == 0: #protons
self.energy_color = self.parent.ion_color
self.x_values = self.FigWrap.LoadKey('xi')/self.c_omp
if self.GetPlotParam('weighted'):
self.weights = self.FigWrap.LoadKey('chi')
u = self.FigWrap.LoadKey('ui')
v = self.FigWrap.LoadKey('vi')
w = self.FigWrap.LoadKey('wi')
            if self.GetPlotParam('prtl_type') == 1: #electrons
self.energy_color = self.parent.electron_color
self.x_values = self.FigWrap.LoadKey('xe')/self.c_omp
if self.GetPlotParam('weighted'):
self.weights = self.FigWrap.LoadKey('che')
u = self.FigWrap.LoadKey('ue')
v = self.FigWrap.LoadKey('ve')
w = self.FigWrap.LoadKey('we')
self.y_values = np.sqrt(u**2+v**2+w**2+1)-1
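            # For 4-velocity components u, v, w (in units of c), the Lorentz
            # factor is gamma = sqrt(1 + u**2 + v**2 + w**2), so this is the
            # kinetic energy (gamma - 1) in units of the particle's rest-mass
            # energy (rescaled to m_i c^2 for electrons just below).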
if self.GetPlotParam('prtl_type') == 1:
self.y_values *= self.FigWrap.LoadKey('me')[0]/self.FigWrap.LoadKey('mi')[0]
self.Ymin = min(self.y_values)
self.Ymax = max(self.y_values)
self.Ymax = self.Ymax if ( self.Ymin != self.Ymax ) else self.Ymin+1
self.xmin = 0
self.xmax = self.FigWrap.LoadKey('bx').shape[2]/self.c_omp*self.istep
self.xmax = self.xmax if ( self.xmin != self.xmax ) else self.xmin+1
self.hist2d = np.histogram2d(self.y_values, self.x_values,
bins = [self.GetPlotParam('ebins'), self.GetPlotParam('xbins')],
range = [[self.Ymin,self.Ymax],[0,self.xmax]],
weights = self.weights)
if self.GetPlotParam('masked'):
zval = ma.masked_array(self.hist2d[0])
zval[zval == 0] = ma.masked
zval *= float(zval.max())**(-1)
tmplist = [zval[~zval.mask].min(), zval.max()]
else:
zval = np.copy(self.hist2d[0])
zval[zval==0] = 0.5
zval *= float(zval.max())**(-1)
tmplist = [zval.min(), zval.max()]
self.hist2d = zval, self.hist2d[1], self.hist2d[2], tmplist
self.parent.DataDict[self.key_name] = self.hist2d
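            # Cached tuple layout: (normalized 2-D histogram, energy-bin edges,
            # x-bin edges, [vmin, vmax]); draw() and refresh() rely on this order.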
def UpdateLabelsandColors(self):
self.x_label = r'$x\ [c/\omega_{\rm pe}]$'
if self.GetPlotParam('prtl_type') == 0: #protons
self.energy_color = self.parent.ion_color
self.y_label = r'$E_p\ [m_i c^2]$'
for line in self.IntRegionLines:
line.set_color(self.energy_color)
        if self.GetPlotParam('prtl_type') == 1: #electrons
self.energy_color = self.parent.electron_color
self.y_label = r'$E_{e}\ [m_i c^2]$'
def draw(self):
# In order to speed up the plotting, we only recalculate everything
# if necessary.
# Figure out the color and ylabel
# Choose the particle type and px, py, or pz
self.IntRegionLines = []
self.UpdateLabelsandColors()
self.xmin = self.hist2d[2][0]
self.xmax = self.hist2d[2][-1]
self.ymin = self.hist2d[1][0]
self.ymax = self.hist2d[1][-1]
if self.GetPlotParam('masked'):
self.tick_color = 'k'
else:
self.tick_color = 'white'
self.clim = list(self.hist2d[3])
if self.GetPlotParam('set_v_min'):
self.clim[0] = 10**self.GetPlotParam('v_min')
if self.GetPlotParam('set_v_max'):
self.clim[1] = 10**self.GetPlotParam('v_max')
self.gs = gridspec.GridSpecFromSubplotSpec(100,100, subplot_spec = self.parent.gs0[self.FigWrap.pos])#, bottom=0.2,left=0.1,right=0.95, top = 0.95)
if self.parent.MainParamDict['LinkSpatial'] == 1:
if self.FigWrap.pos == self.parent.first_x:
self.axes = self.figure.add_subplot(self.gs[self.parent.axes_extent[0]:self.parent.axes_extent[1], self.parent.axes_extent[2]:self.parent.axes_extent[3]])
else:
self.axes = self.figure.add_subplot(self.gs[self.parent.axes_extent[0]:self.parent.axes_extent[1], self.parent.axes_extent[2]:self.parent.axes_extent[3]], sharex = self.parent.SubPlotList[self.parent.first_x[0]][self.parent.first_x[1]].graph.axes)
else:
self.axes = self.figure.add_subplot(self.gs[self.parent.axes_extent[0]:self.parent.axes_extent[1], self.parent.axes_extent[2]:self.parent.axes_extent[3]])
self.cax = self.axes.imshow(self.hist2d[0],
cmap = new_cmaps.cmaps[self.parent.MainParamDict['ColorMap']],
norm = self.norm(), origin = 'lower',
aspect = 'auto',
interpolation=self.GetPlotParam('interpolation'))
self.cax.set_extent([self.xmin, self.xmax, self.ymin, self.ymax])
self.cax.set_clim(self.clim)
self.shock_line = self.axes.axvline(self.parent.shock_loc, linewidth = 1.5, linestyle = '--', color = self.parent.shock_color, path_effects=[PathEffects.Stroke(linewidth=2, foreground='k'),
PathEffects.Normal()])
if not self.GetPlotParam('show_shock'):
self.shock_line.set_visible(False)
self.axC = self.figure.add_subplot(self.gs[self.parent.cbar_extent[0]:self.parent.cbar_extent[1], self.parent.cbar_extent[2]:self.parent.cbar_extent[3]])
self.parent.cbarList.append(self.axC)
# Technically I should use the colorbar class here,
        # but I found some of its limitations annoying.
if self.parent.MainParamDict['HorizontalCbars']:
self.cbar = self.axC.imshow(self.gradient, aspect='auto',
cmap=new_cmaps.cmaps[self.parent.MainParamDict['ColorMap']])
            # Make the colorbar axis more like the real colorbar
            self.axC.tick_params(axis='x',
                            which = 'both', # both major and minor ticks
                            top = False, # turn off top ticks
                            labelsize=self.parent.MainParamDict['NumFontSize'])
            self.axC.tick_params(axis='y',          # changes apply to the y-axis
                            which='both',      # both major and minor ticks are affected
                            left=False,      # ticks along the left edge are off
                            right=False,         # ticks along the right edge are off
labelleft=False)
else:
self.cbar = self.axC.imshow(np.transpose(self.gradient)[::-1], aspect='auto',
cmap=new_cmaps.cmaps[self.parent.MainParamDict['ColorMap']])
            # Make the colorbar axis more like the real colorbar
            self.cbar = self.axC.imshow(np.transpose(self.gradient)[::-1], aspect='auto',
                                        cmap=new_cmaps.cmaps[self.parent.MainParamDict['ColorMap']])
            self.axC.tick_params(axis='x',
                            which = 'both', # both major and minor ticks
top = False, # turn off top ticks
bottom = False,
labelbottom = False,
labelsize=self.parent.MainParamDict['NumFontSize'])
self.axC.tick_params(axis='y', # changes apply to the y-axis
which='both', # both major and minor ticks are affected
                            left= False, # ticks along the left edge are off
                            right= True, # ticks along the right edge are on
labelleft= False,
labelright=True,
labelsize=self.parent.MainParamDict['NumFontSize'])
if not self.GetPlotParam('show_cbar'):
self.axC.set_visible(False)
if int(matplotlib.__version__[0]) < 2:
self.axes.set_axis_bgcolor(self.GetPlotParam('face_color'))
else:
self.axes.set_facecolor(self.GetPlotParam('face_color'))
self.axes.tick_params(labelsize = self.parent.MainParamDict['NumFontSize'], color=self.tick_color)
self.axes.set_xlabel(self.x_label, labelpad = self.parent.MainParamDict['xLabelPad'], color = 'black', size = self.parent.MainParamDict['AxLabelSize'])
self.axes.set_ylabel(self.y_label, labelpad = self.parent.MainParamDict['yLabelPad'], color = 'black', size = self.parent.MainParamDict['AxLabelSize'])
self.refresh()
def refresh(self):
'''This is a function that will be called only if self.axes already
holds a density type plot. We only update things that have shown. If
hasn't changed, or isn't viewed, don't touch it. The difference between this and last
time, is that we won't actually do any drawing in the plot. The plot
will be redrawn after all subplots data is changed. '''
# Main goal, only change what is showing..
self.xmin = self.hist2d[2][0]
self.xmax = self.hist2d[2][-1]
self.ymin = self.hist2d[1][0]
self.ymax = self.hist2d[1][-1]
self.clim = list(self.hist2d[3])
self.cax.set_data(self.hist2d[0])
self.cax.set_extent([self.xmin,self.xmax, self.ymin, self.ymax])
if self.GetPlotParam('set_v_min'):
self.clim[0] = 10**self.GetPlotParam('v_min')
if self.GetPlotParam('set_v_max'):
self.clim[1] = 10**self.GetPlotParam('v_max')
self.cax.set_clim(self.clim)
if self.GetPlotParam('show_cbar'):
self.CbarTickFormatter()
if self.GetPlotParam('show_shock'):
self.shock_line.set_xdata([self.parent.shock_loc,self.parent.shock_loc])
self.UpdateLabelsandColors()
self.axes.set_xlabel(self.x_label, labelpad = self.parent.MainParamDict['xLabelPad'], color = 'black', size = self.parent.MainParamDict['AxLabelSize'])
self.axes.set_ylabel(self.y_label, labelpad = self.parent.MainParamDict['yLabelPad'], color = 'black', size = self.parent.MainParamDict['AxLabelSize'])
if self.GetPlotParam('set_y_min'):
self.ymin = self.GetPlotParam('y_min')
if self.GetPlotParam('set_y_max'):
self.ymax = self.GetPlotParam('y_max')
self.axes.set_ylim(self.ymin, self.ymax)
if self.parent.MainParamDict['SetxLim'] and self.parent.MainParamDict['LinkSpatial'] == 1:
if self.parent.MainParamDict['xLimsRelative']:
self.axes.set_xlim(self.parent.MainParamDict['xLeft'] + self.parent.shock_loc,
self.parent.MainParamDict['xRight'] + self.parent.shock_loc)
else:
self.axes.set_xlim(self.parent.MainParamDict['xLeft'], self.parent.MainParamDict['xRight'])
else:
self.axes.set_xlim(self.xmin,self.xmax)
def CbarTickFormatter(self):
''' A helper function that sets the cbar ticks & labels. This used to be
        easier, but because I am no longer using the colorbar class I have to do
stuff manually.'''
clim = np.copy(self.cax.get_clim())
if self.GetPlotParam('show_cbar'):
if self.GetPlotParam('cnorm_type') == "Log":
if self.parent.MainParamDict['HorizontalCbars']:
self.cbar.set_extent([np.log10(clim[0]),np.log10(clim[1]),0,1])
self.axC.set_xlim(np.log10(clim[0]),np.log10(clim[1]))
self.axC.xaxis.set_label_position("top")
if self.GetPlotParam('prtl_type') ==0:
self.axC.set_xlabel(r'$\log{\ \ f_i(p)}$', size = self.parent.MainParamDict['AxLabelSize'])
else:
self.axC.set_xlabel(r'$\log{\ \ f_e(p)}$', size = self.parent.MainParamDict['AxLabelSize'])
else:
self.cbar.set_extent([0,1,np.log10(clim[0]),np.log10(clim[1])])
self.axC.set_ylim(np.log10(clim[0]),np.log10(clim[1]))
self.axC.locator_params(axis='y',nbins=6)
self.axC.yaxis.set_label_position("right")
if self.GetPlotParam('prtl_type') ==0:
self.axC.set_ylabel(r'$\log{\ \ f_i(p)}$', labelpad =self.parent.MainParamDict['cbarLabelPad'], rotation = -90, size = self.parent.MainParamDict['AxLabelSize'])
else:
self.axC.set_ylabel(r'$\log{\ \ f_e(p)}$', labelpad =self.parent.MainParamDict['cbarLabelPad'], rotation = -90, size = self.parent.MainParamDict['AxLabelSize'])
else:# self.GetPlotParam('cnorm_type') == "Linear":
if self.parent.MainParamDict['HorizontalCbars']:
self.cbar.set_extent([clim[0], clim[1], 0, 1])
self.axC.set_xlim(clim[0], clim[1])
self.axC.xaxis.set_label_position("top")
if self.GetPlotParam('prtl_type') ==0:
self.axC.set_xlabel(r'$f_i(p)$', size = self.parent.MainParamDict['AxLabelSize'])
else:
self.axC.set_xlabel(r'$f_e(p)$', size = self.parent.MainParamDict['AxLabelSize'])
else:
self.cbar.set_extent([0, 1, clim[0], clim[1]])
self.axC.set_ylim(clim[0], clim[1])
self.axC.locator_params(axis='y', nbins=6)
self.axC.yaxis.set_label_position("right")
if self.GetPlotParam('prtl_type') ==0:
self.axC.set_ylabel(r'$f_i(p)$', labelpad =self.parent.MainParamDict['cbarLabelPad'], rotation = -90, size = self.parent.MainParamDict['AxLabelSize'])
else:
self.axC.set_ylabel(r'$f_e(p)$', labelpad =self.parent.MainParamDict['cbarLabelPad'], rotation = -90, size = self.parent.MainParamDict['AxLabelSize'])
def GetPlotParam(self, keyname):
return self.FigWrap.GetPlotParam(keyname)
def SetPlotParam(self, keyname, value, update_plot = True):
self.FigWrap.SetPlotParam(keyname, value, update_plot = update_plot)
def OpenSettings(self):
if self.settings_window is None:
self.settings_window = EnergySettings(self)
else:
self.settings_window.destroy()
self.settings_window = EnergySettings(self)
class EnergySettings(Tk.Toplevel):
def __init__(self, parent):
self.parent = parent
Tk.Toplevel.__init__(self)
        self.wm_title('Energy Plot (%d,%d) Settings' % self.parent.FigWrap.pos)
frm = ttk.Frame(self)
frm.pack(fill=Tk.BOTH, expand=True)
self.protocol('WM_DELETE_WINDOW', self.OnClosing)
self.bind('<Return>', self.TxtEnter)
# Create the OptionMenu to chooses the Chart Type:
self.InterpolVar = Tk.StringVar(self)
self.InterpolVar.set(self.parent.GetPlotParam('interpolation')) # default value
self.InterpolVar.trace('w', self.InterpolChanged)
ttk.Label(frm, text="Interpolation Method:").grid(row=0, column = 2)
InterplChooser = ttk.OptionMenu(frm, self.InterpolVar, self.parent.GetPlotParam('interpolation'), *tuple(self.parent.InterpolationMethods))
InterplChooser.grid(row =0, column = 3, sticky = Tk.W + Tk.E)
# Create the OptionMenu to chooses the Chart Type:
self.ctypevar = Tk.StringVar(self)
self.ctypevar.set(self.parent.chartType) # default value
self.ctypevar.trace('w', self.ctypeChanged)
ttk.Label(frm, text="Choose Chart Type:").grid(row=0, column = 0)
cmapChooser = ttk.OptionMenu(frm, self.ctypevar, self.parent.chartType, *tuple(self.parent.ChartTypes))
cmapChooser.grid(row =0, column = 1, sticky = Tk.W + Tk.E)
# the Radiobox Control to choose the particle
self.prtlList = ['ion', 'electron']
self.pvar = Tk.IntVar()
self.pvar.set(self.parent.GetPlotParam('prtl_type'))
ttk.Label(frm, text='Particle:').grid(row = 1, sticky = Tk.W)
for i in range(len(self.prtlList)):
ttk.Radiobutton(frm,
text=self.prtlList[i],
variable=self.pvar,
command = self.RadioPrtl,
value=i).grid(row = 2+i, sticky =Tk.W)
# Control whether or not Cbar is shown
self.CbarVar = Tk.IntVar()
self.CbarVar.set(self.parent.GetPlotParam('show_cbar'))
cb = ttk.Checkbutton(frm, text = "Show Color bar",
variable = self.CbarVar,
command = self.CbarHandler)
cb.grid(row = 6, sticky = Tk.W)
# show shock
self.ShockVar = Tk.IntVar()
self.ShockVar.set(self.parent.GetPlotParam('show_shock'))
cb = ttk.Checkbutton(frm, text = "Show Shock",
variable = self.ShockVar,
command = self.ShockVarHandler)
cb.grid(row = 6, column = 1, sticky = Tk.W)
# Control if the plot is weighted
self.WeightVar = Tk.IntVar()
self.WeightVar.set(self.parent.GetPlotParam('weighted'))
cb = ttk.Checkbutton(frm, text = "Weight by charge",
variable = self.WeightVar,
command = lambda:
self.parent.SetPlotParam('weighted', self.WeightVar.get()))
cb.grid(row = 7, sticky = Tk.W)
# Show energy integration region
cb = ttk.Checkbutton(frm, text = "Show Energy Region",
variable = self.parent.IntRegVar)
cb.grid(row = 7, column = 1, sticky = Tk.W)
# control mask
self.MaskVar = Tk.IntVar()
self.MaskVar.set(self.parent.GetPlotParam('masked'))
cb = ttk.Checkbutton(frm, text = "Mask Zeros",
variable = self.MaskVar,
command = lambda:
self.parent.SetPlotParam('masked', self.MaskVar.get()))
cb.grid(row = 8, sticky = Tk.W)
# ttk.Label(frm, text = 'If the zero values are not masked they are set to z_min/2').grid(row =9, columnspan =2)
# Define functions for the events
# Now the field lim
self.setVminVar = Tk.IntVar()
self.setVminVar.set(self.parent.GetPlotParam('set_v_min'))
self.setVminVar.trace('w', self.setVminChanged)
self.setVmaxVar = Tk.IntVar()
self.setVmaxVar.set(self.parent.GetPlotParam('set_v_max'))
self.setVmaxVar.trace('w', self.setVmaxChanged)
self.Vmin = Tk.StringVar()
self.Vmin.set(str(self.parent.GetPlotParam('v_min')))
self.Vmax = Tk.StringVar()
self.Vmax.set(str(self.parent.GetPlotParam('v_max')))
cb = ttk.Checkbutton(frm, text ='Set log(f) min',
variable = self.setVminVar)
cb.grid(row = 3, column = 2, sticky = Tk.W)
self.VminEnter = ttk.Entry(frm, textvariable=self.Vmin, width=7)
self.VminEnter.grid(row = 3, column = 3)
cb = ttk.Checkbutton(frm, text ='Set log(f) max',
variable = self.setVmaxVar)
cb.grid(row = 4, column = 2, sticky = Tk.W)
self.VmaxEnter = ttk.Entry(frm, textvariable=self.Vmax, width=7)
self.VmaxEnter.grid(row = 4, column = 3)
# Now the y lim
self.setYminVar = Tk.IntVar()
self.setYminVar.set(self.parent.GetPlotParam('set_y_min'))
self.setYminVar.trace('w', self.setYminChanged)
self.setYmaxVar = Tk.IntVar()
self.setYmaxVar.set(self.parent.GetPlotParam('set_y_max'))
self.setYmaxVar.trace('w', self.setYmaxChanged)
self.Ymin = Tk.StringVar()
self.Ymin.set(str(self.parent.GetPlotParam('y_min')))
self.Ymax = Tk.StringVar()
self.Ymax.set(str(self.parent.GetPlotParam('y_max')))
cb = ttk.Checkbutton(frm, text ='Set y_axis min',
variable = self.setYminVar)
cb.grid(row = 5, column = 2, sticky = Tk.W)
self.YminEnter = ttk.Entry(frm, textvariable=self.Ymin, width=7)
self.YminEnter.grid(row = 5, column = 3)
cb = ttk.Checkbutton(frm, text ='Set y_axis max',
variable = self.setYmaxVar)
cb.grid(row = 6, column = 2, sticky = Tk.W)
self.YmaxEnter = ttk.Entry(frm, textvariable=self.Ymax, width=7)
self.YmaxEnter.grid(row = 6, column = 3)
def ShockVarHandler(self, *args):
if self.parent.GetPlotParam('show_shock')== self.ShockVar.get():
pass
else:
self.parent.shock_line.set_visible(self.ShockVar.get())
self.parent.SetPlotParam('show_shock', self.ShockVar.get())
def CbarHandler(self, *args):
if self.parent.GetPlotParam('show_cbar')== self.CbarVar.get():
pass
else:
self.parent.axC.set_visible(self.CbarVar.get())
self.parent.SetPlotParam('show_cbar', self.CbarVar.get(), update_plot =self.parent.GetPlotParam('twoD'))
def ctypeChanged(self, *args):
if self.ctypevar.get() == self.parent.chartType:
pass
else:
self.parent.ChangePlotType(self.ctypevar.get())
self.destroy()
def InterpolChanged(self, *args):
if self.InterpolVar.get() == self.parent.GetPlotParam('interpolation'):
pass
else:
self.parent.cax.set_interpolation(self.InterpolVar.get())
self.parent.SetPlotParam('interpolation', self.InterpolVar.get())
def RadioPrtl(self):
if self.pvar.get() == self.parent.GetPlotParam('prtl_type'):
pass
else:
self.parent.SetPlotParam('prtl_type', self.pvar.get(), update_plot = False)
self.parent.UpdateLabelsandColors()
self.parent.axes.set_ylabel(self.parent.y_label, labelpad = self.parent.parent.MainParamDict['yLabelPad'], color = 'black', size = self.parent.parent.MainParamDict['AxLabelSize'])
self.parent.SetPlotParam('prtl_type', self.pvar.get())
def setVminChanged(self, *args):
if self.setVminVar.get() == self.parent.GetPlotParam('set_v_min'):
pass
else:
self.parent.SetPlotParam('set_v_min', self.setVminVar.get())
def setVmaxChanged(self, *args):
if self.setVmaxVar.get() == self.parent.GetPlotParam('set_v_max'):
pass
else:
self.parent.SetPlotParam('set_v_max', self.setVmaxVar.get())
def setYminChanged(self, *args):
if self.setYminVar.get() == self.parent.GetPlotParam('set_y_min'):
pass
else:
self.parent.SetPlotParam('set_y_min', self.setYminVar.get())
def setYmaxChanged(self, *args):
if self.setYmaxVar.get() == self.parent.GetPlotParam('set_y_max'):
pass
else:
self.parent.SetPlotParam('set_y_max', self.setYmaxVar.get())
def TxtEnter(self, e):
self.FieldsCallback()
def FieldsCallback(self):
tkvarLimList = [self.Vmin, self.Vmax, self.Ymin, self.Ymax]
plot_param_List = ['v_min', 'v_max', 'y_min', 'y_max']
tkvarSetList = [self.setVminVar, self.setVmaxVar, self.setYminVar, self.setYmaxVar]
to_reload = False
for j in range(len(tkvarLimList)):
try:
#make sure the user types in a float
if np.abs(float(tkvarLimList[j].get()) - self.parent.GetPlotParam(plot_param_List[j])) > 1E-4:
self.parent.SetPlotParam(plot_param_List[j], float(tkvarLimList[j].get()), update_plot = False)
                    to_reload = to_reload or bool(tkvarSetList[j].get())
except ValueError:
                    #if they type in random stuff, just set it to the param value
tkvarLimList[j].set(str(self.parent.GetPlotParam(plot_param_List[j])))
if to_reload:
self.parent.SetPlotParam('v_min', self.parent.GetPlotParam('v_min'))
def OnClosing(self):
self.parent.settings_window = None
self.destroy()
|
pcrumley/Iseult
|
src/energy_plots.py
|
Python
|
gpl-3.0
| 32,212
|
[
"Gaussian"
] |
91e3dc5eefe8742c5d0fd2837da784fcd322e4472d706247b97212065515d610
|
from distutils.core import setup, Extension
from glob import glob
module1 = Extension('hashpumpy',
sources = glob('*.cpp'),
libraries = ['crypto'])
setup (name = 'hashpumpy',
version = '1.2',
author = 'Zach Riggle (Python binding), Brian Wallace (HashPump), Yen Chi Hsuan (Python3 support)',
description = 'Python bindings for HashPump',
ext_modules = [module1],
license = 'MIT',
url = 'https://github.com/bwall/HashPump')
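# A minimal usage sketch once the extension is built (API assumed from the
# HashPump project's documentation; verify against the installed module):
#   >>> import hashpumpy
#   >>> new_digest, new_message = hashpumpy.hashpump(known_hexdigest,
#   ...                                              original_data,
#   ...                                              data_to_append,
#   ...                                              key_length)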
|
christophetd/HashPump
|
setup.py
|
Python
|
mit
| 524
|
[
"Brian"
] |
99eb4c245b6c7b796d4d5e235e5d788a3de8a37994ad1d2847e0bc676aa6aeaf
|
""" This simply invokes DIRAC APIs for creating 2 jobDescription.xml files,
one with an application that will end with status 0, and a second with status != 0
"""
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC.tests.Utilities.utils import find_all
from DIRAC.Interfaces.API.Job import Job
# With a script that returns 0
j = Job()
scriptSHLocation = find_all( 'script-OK.sh', '..', '/DIRAC/WorkloadManagementSystem/JobWrapper' )[0]
j.setExecutable('sh %s' %scriptSHLocation)
jobXMLFile = 'jobDescription-OK.xml'
with open( jobXMLFile, 'w+' ) as fd:
fd.write( j._toXML() )
# With a script that returns 111
j = Job()
scriptSHLocation = find_all( 'script.sh', '..', '/DIRAC/WorkloadManagementSystem/JobWrapper' )[0]
j.setExecutable('sh %s' %scriptSHLocation)
jobXMLFile = 'jobDescription-FAIL.xml'
with open( jobXMLFile, 'w+' ) as fd:
fd.write( j._toXML() )
# With a script that returns 1502
j = Job()
scriptSHLocation = find_all( 'script-RESC.sh', '..', '/DIRAC/WorkloadManagementSystem/JobWrapper' )[0]
j.setExecutable('sh %s' %scriptSHLocation)
jobXMLFile = 'jobDescription-FAIL1502.xml'
with open( jobXMLFile, 'w+' ) as fd:
fd.write( j._toXML() )
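# The three blocks above follow one pattern; a loop-based sketch of the same
# logic (all names taken from this script, nothing new assumed):
#
#   for script, xmlFile in [('script-OK.sh', 'jobDescription-OK.xml'),
#                           ('script.sh', 'jobDescription-FAIL.xml'),
#                           ('script-RESC.sh', 'jobDescription-FAIL1502.xml')]:
#       j = Job()
#       location = find_all(script, '..', '/DIRAC/WorkloadManagementSystem/JobWrapper')[0]
#       j.setExecutable('sh %s' % location)
#       with open(xmlFile, 'w+') as fd:
#           fd.write(j._toXML())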
|
Andrew-McNab-UK/DIRAC
|
tests/Integration/WorkloadManagementSystem/createJobXMLDescriptions.py
|
Python
|
gpl-3.0
| 1,201
|
[
"DIRAC"
] |
ff294a0e2ca21fb06c8ffe817ae5433f25954f2a3cef0f40c64685e120679f6c
|
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2008, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
################################
# These variables are overwritten by Zenoss when the ZenPack is exported
# or saved. Do not modify them directly here.
NAME = 'ZenPacks.AndreaConsadori.Funkwerk'
VERSION = '3.0'
AUTHOR = 'Andrea Consadori'
LICENSE = ''
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.AndreaConsadori']
PACKAGES = ['ZenPacks', 'ZenPacks.AndreaConsadori', 'ZenPacks.AndreaConsadori.Funkwerk']
INSTALL_REQUIRES = []
COMPAT_ZENOSS_VERS = '>=3.0'
PREV_ZENPACK_NAME = ''
# STOP_REPLACEMENTS
################################
# Zenoss will not overwrite any changes you make below here.
from setuptools import setup, find_packages
setup(
# This ZenPack metadata should usually be edited with the Zenoss
# ZenPack edit page. Whenever the edit page is submitted it will
# overwrite the values below (the ones it knows about) with new values.
name = NAME,
version = VERSION,
author = AUTHOR,
license = LICENSE,
# This is the version spec which indicates what versions of Zenoss
# this ZenPack is compatible with
compatZenossVers = COMPAT_ZENOSS_VERS,
# previousZenPackName is a facility for telling Zenoss that the name
    # of this ZenPack has changed. If no ZenPack with the current name is
    # installed, then a ZenPack with the previous name, if installed, will be
    # upgraded.
prevZenPackName = PREV_ZENPACK_NAME,
# Indicate to setuptools which namespace packages the zenpack
# participates in
namespace_packages = NAMESPACE_PACKAGES,
# Tell setuptools what packages this zenpack provides.
packages = find_packages(),
# Tell setuptools to figure out for itself which files to include
# in the binary egg when it is built.
include_package_data = True,
# Tell setuptools what non-python files should also be included
# with the binary egg.
package_data = {
        '': ['*.txt', '../COPYRIGHT.txt', '../LICENSE.txt'],
NAME: ['objects/*','skins/*/*','services/*', 'reports/*/*',
'modeler/*/*', 'daemons/*', 'lib/*', 'libexec/*'],
},
# Indicate dependencies on other python modules or ZenPacks. This line
# is modified by zenoss when the ZenPack edit page is submitted. Zenoss
# tries to put add/delete the names it manages at the beginning of this
# list, so any manual additions should be added to the end. Things will
    # go poorly if this line is broken into multiple lines or modified too
    # dramatically.
install_requires = INSTALL_REQUIRES,
# Every ZenPack egg must define exactly one zenoss.zenpacks entry point
# of this form.
entry_points = {
'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
},
# All ZenPack eggs must be installed in unzipped form.
zip_safe = False,
)
|
anksp21/Community-Zenpacks
|
ZenPacks.AndreaConsadori.Funkwerk/setup.py
|
Python
|
gpl-2.0
| 3,300
|
[
"VisIt"
] |
91f7f5a2645792e1688e1f317cc48a489d66217f60265eaf23674a884d9d5e31
|
"""
Bok choy acceptance and a11y tests for problem types in the LMS
See also lettuce tests in lms/djangoapps/courseware/features/problems.feature
"""
import random
import textwrap
from nose import SkipTest
from abc import ABCMeta, abstractmethod
from nose.plugins.attrib import attr
from selenium.webdriver import ActionChains
from capa.tests.response_xml_factory import (
AnnotationResponseXMLFactory,
ChoiceResponseXMLFactory,
ChoiceTextResponseXMLFactory,
CodeResponseXMLFactory,
CustomResponseXMLFactory,
FormulaResponseXMLFactory,
ImageResponseXMLFactory,
MultipleChoiceResponseXMLFactory,
NumericalResponseXMLFactory,
OptionResponseXMLFactory,
StringResponseXMLFactory,
SymbolicResponseXMLFactory,
)
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.lms.problem import ProblemPage
from common.test.acceptance.tests.helpers import select_option_by_text
from common.test.acceptance.tests.lms.test_lms_problems import ProblemsTest
from common.test.acceptance.tests.helpers import EventsTestMixin
class ProblemTypeTestBaseMeta(ABCMeta):
"""
MetaClass for ProblemTypeTestBase to ensure that the required attributes
are defined in the inheriting classes.
"""
def __call__(cls, *args, **kwargs):
obj = type.__call__(cls, *args, **kwargs)
required_attrs = [
'problem_name',
'problem_type',
'factory',
'factory_kwargs',
'status_indicators',
]
for required_attr in required_attrs:
msg = ('{} is a required attribute for {}').format(
required_attr, str(cls)
)
try:
if obj.__getattribute__(required_attr) is None:
raise NotImplementedError(msg)
except AttributeError:
raise NotImplementedError(msg)
return obj
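# Illustrative note: a subclass that omits one of the required attributes
# (say, 'factory') raises NotImplementedError the moment it is instantiated,
# because the metaclass above checks the attribute list in __call__.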
class ProblemTypeTestBase(ProblemsTest, EventsTestMixin):
"""
    Base class for testing assessment problem types in bok choy.
This inherits from ProblemsTest, which has capabilities for testing problem
features that are not problem type specific (checking, hinting, etc.).
The following attributes must be explicitly defined when inheriting from
this class:
problem_name (str)
problem_type (str)
factory (ResponseXMLFactory subclass instance)
Additionally, the default values for factory_kwargs and status_indicators
may need to be overridden for some problem types.
"""
__metaclass__ = ProblemTypeTestBaseMeta
problem_name = None
problem_type = None
factory = None
factory_kwargs = {}
status_indicators = {
'correct': ['span.correct'],
'incorrect': ['span.incorrect'],
'unanswered': ['span.unanswered'],
}
def setUp(self):
"""
Visits courseware_page and defines self.problem_page.
"""
super(ProblemTypeTestBase, self).setUp()
self.courseware_page.visit()
self.problem_page = ProblemPage(self.browser)
def get_problem(self):
"""
Creates a {problem_type} problem
"""
# Generate the problem XML using capa.tests.response_xml_factory
return XBlockFixtureDesc(
'problem',
self.problem_name,
data=self.factory.build_xml(**self.factory_kwargs),
metadata={'rerandomize': 'always'}
)
def wait_for_status(self, status):
"""
Waits for the expected status indicator.
Args:
            status: one of ("correct", "incorrect", "unanswered")
"""
msg = "Wait for status to be {}".format(status)
selector = ', '.join(self.status_indicators[status])
self.problem_page.wait_for_element_visibility(selector, msg)
@abstractmethod
def answer_problem(self, correct):
"""
Args:
`correct` (bool): Inputs correct answer if True, else inputs
incorrect answer.
"""
raise NotImplementedError()
class ProblemTypeTestMixin(object):
"""
Test cases shared amongst problem types.
"""
can_submit_blank = False
@attr(shard=7)
def test_answer_correctly(self):
"""
Scenario: I can answer a problem correctly
Given External graders respond "correct"
And I am viewing a "<ProblemType>" problem
When I answer a "<ProblemType>" problem "correctly"
Then my "<ProblemType>" answer is marked "correct"
And The "<ProblemType>" problem displays a "correct" answer
And a "problem_check" server event is emitted
And a "problem_check" browser event is emitted
"""
# Make sure we're looking at the right problem
self.assertEqual(self.problem_page.problem_name, self.problem_name)
# Answer the problem correctly
self.answer_problem(correct=True)
self.problem_page.click_check()
self.wait_for_status('correct')
# Check for corresponding tracking event
expected_events = [
{
'event_source': 'server',
'event_type': 'problem_check',
'username': self.username,
}, {
'event_source': 'browser',
'event_type': 'problem_check',
'username': self.username,
},
]
for event in expected_events:
self.wait_for_events(event_filter=event, number_of_matches=1)
@attr(shard=7)
def test_answer_incorrectly(self):
"""
Scenario: I can answer a problem incorrectly
Given External graders respond "incorrect"
And I am viewing a "<ProblemType>" problem
When I answer a "<ProblemType>" problem "incorrectly"
Then my "<ProblemType>" answer is marked "incorrect"
        And The "<ProblemType>" problem displays an "incorrect" answer
"""
self.problem_page.wait_for(
lambda: self.problem_page.problem_name == self.problem_name,
"Make sure the correct problem is on the page"
)
# Answer the problem incorrectly
self.answer_problem(correct=False)
self.problem_page.click_check()
self.wait_for_status('incorrect')
@attr(shard=7)
def test_submit_blank_answer(self):
"""
Scenario: I can submit a blank answer
Given I am viewing a "<ProblemType>" problem
When I check a problem
Then my "<ProblemType>" answer is marked "incorrect"
And The "<ProblemType>" problem displays a "blank" answer
"""
if not self.can_submit_blank:
raise SkipTest("Test incompatible with the current problem type")
self.problem_page.wait_for(
lambda: self.problem_page.problem_name == self.problem_name,
"Make sure the correct problem is on the page"
)
# Leave the problem unchanged and click check.
self.assertNotIn('is-disabled', self.problem_page.q(css='div.problem button.check').attrs('class')[0])
self.problem_page.click_check()
self.wait_for_status('incorrect')
@attr(shard=7)
def test_cant_submit_blank_answer(self):
"""
Scenario: I can't submit a blank answer
When I try to submit blank answer
Then I can't check a problem
"""
if self.can_submit_blank:
raise SkipTest("Test incompatible with the current problem type")
self.problem_page.wait_for(
lambda: self.problem_page.problem_name == self.problem_name,
"Make sure the correct problem is on the page"
)
self.assertIn('is-disabled', self.problem_page.q(css='div.problem button.check').attrs('class')[0])
@attr('a11y')
def test_problem_type_a11y(self):
"""
Run accessibility audit for the problem type.
"""
self.problem_page.wait_for(
lambda: self.problem_page.problem_name == self.problem_name,
"Make sure the correct problem is on the page"
)
# Set the scope to the problem container
self.problem_page.a11y_audit.config.set_scope(
include=['div#seq_content'])
self.problem_page.a11y_audit.config.set_rules({
"ignore": [
'aria-allowed-attr', # TODO: AC-491
'aria-valid-attr', # TODO: AC-491
'aria-roles', # TODO: AC-491
'checkboxgroup', # TODO: AC-491
'radiogroup', # TODO: AC-491
'section', # TODO: AC-491
'label', # TODO: AC-491
]
})
# Run the accessibility audit.
self.problem_page.a11y_audit.check_for_accessibility_errors()
class AnnotationProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
"""
TestCase Class for Annotation Problem Type
"""
problem_name = 'ANNOTATION TEST PROBLEM'
problem_type = 'annotationresponse'
factory = AnnotationResponseXMLFactory()
can_submit_blank = True
factory_kwargs = {
'title': 'Annotation Problem',
'text': 'The text being annotated',
        'comment': 'What do you think about this text?',
'comment_prompt': 'Type your answer below.',
'tag_prompt': 'Which of these items most applies to the text?',
'options': [
('dog', 'correct'),
('cat', 'incorrect'),
('fish', 'partially-correct'),
]
}
status_indicators = {
'correct': ['span.correct'],
'incorrect': ['span.incorrect'],
'partially-correct': ['span.partially-correct'],
'unanswered': ['span.unanswered'],
}
def setUp(self, *args, **kwargs):
"""
Additional setup for AnnotationProblemTypeTest
"""
super(AnnotationProblemTypeTest, self).setUp(*args, **kwargs)
def answer_problem(self, correct):
"""
Answer annotation problem.
"""
choice = 0 if correct else 1
answer = 'Student comment'
self.problem_page.q(css='div.problem textarea.comment').fill(answer)
        self.problem_page.q(
            css='div.problem span.tag'
        ).nth(choice).click()
class CheckboxProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
"""
TestCase Class for Checkbox Problem Type
"""
problem_name = 'CHECKBOX TEST PROBLEM'
problem_type = 'checkbox'
factory = ChoiceResponseXMLFactory()
factory_kwargs = {
'question_text': 'The correct answer is Choice 0 and Choice 2',
'choice_type': 'checkbox',
'choices': [True, False, True, False],
'choice_names': ['Choice 0', 'Choice 1', 'Choice 2', 'Choice 3'],
'explanation_text': 'This is explanation text'
}
def setUp(self, *args, **kwargs):
"""
Additional setup for CheckboxProblemTypeTest
"""
super(CheckboxProblemTypeTest, self).setUp(*args, **kwargs)
def answer_problem(self, correct):
"""
Answer checkbox problem.
"""
if correct:
self.problem_page.click_choice("choice_0")
self.problem_page.click_choice("choice_2")
else:
self.problem_page.click_choice("choice_1")
    @attr(shard=7)
def test_can_show_hide_answer(self):
"""
Scenario: Verifies that show/hide answer button is working as expected.
Given that I am on courseware page
And I can see a CAPA problem with show answer button
When I click "Show Answer" button
Then I should see "Hide Answer" text on button
And I should see question's solution
And I should see correct choices highlighted
When I click "Hide Answer" button
Then I should see "Show Answer" text on button
And I should not see question's solution
And I should not see correct choices highlighted
"""
self.problem_page.click_show_hide_button()
self.assertTrue(self.problem_page.is_solution_tag_present())
self.assertTrue(self.problem_page.is_correct_choice_highlighted(correct_choices=[1, 3]))
self.problem_page.click_show_hide_button()
self.assertFalse(self.problem_page.is_solution_tag_present())
self.assertFalse(self.problem_page.is_correct_choice_highlighted(correct_choices=[1, 3]))
class MultipleChoiceProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
"""
TestCase Class for Multiple Choice Problem Type
"""
problem_name = 'MULTIPLE CHOICE TEST PROBLEM'
problem_type = 'multiple choice'
factory = MultipleChoiceResponseXMLFactory()
factory_kwargs = {
'question_text': 'The correct answer is Choice 2',
'choices': [False, False, True, False],
'choice_names': ['choice_0', 'choice_1', 'choice_2', 'choice_3'],
}
status_indicators = {
'correct': ['label.choicegroup_correct'],
'incorrect': ['label.choicegroup_incorrect', 'span.incorrect'],
'unanswered': ['span.unanswered'],
}
def setUp(self, *args, **kwargs):
"""
Additional setup for MultipleChoiceProblemTypeTest
"""
super(MultipleChoiceProblemTypeTest, self).setUp(*args, **kwargs)
def answer_problem(self, correct):
"""
Answer multiple choice problem.
"""
if correct:
self.problem_page.click_choice("choice_choice_2")
else:
self.problem_page.click_choice("choice_choice_1")
class RadioProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
"""
TestCase Class for Radio Problem Type
"""
problem_name = 'RADIO TEST PROBLEM'
problem_type = 'radio'
factory = ChoiceResponseXMLFactory()
factory_kwargs = {
'question_text': 'The correct answer is Choice 2',
'choice_type': 'radio',
'choices': [False, False, True, False],
'choice_names': ['Choice 0', 'Choice 1', 'Choice 2', 'Choice 3'],
}
status_indicators = {
'correct': ['label.choicegroup_correct'],
'incorrect': ['label.choicegroup_incorrect', 'span.incorrect'],
'unanswered': ['span.unanswered'],
}
def setUp(self, *args, **kwargs):
"""
Additional setup for RadioProblemTypeTest
"""
super(RadioProblemTypeTest, self).setUp(*args, **kwargs)
def answer_problem(self, correct):
"""
Answer radio problem.
"""
if correct:
self.problem_page.click_choice("choice_2")
else:
self.problem_page.click_choice("choice_1")
class DropDownProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
"""
TestCase Class for Drop Down Problem Type
"""
problem_name = 'DROP DOWN TEST PROBLEM'
problem_type = 'drop down'
factory = OptionResponseXMLFactory()
factory_kwargs = {
'question_text': 'The correct answer is Option 2',
'options': ['Option 1', 'Option 2', 'Option 3', 'Option 4'],
'correct_option': 'Option 2'
}
def setUp(self, *args, **kwargs):
"""
Additional setup for DropDownProblemTypeTest
"""
super(DropDownProblemTypeTest, self).setUp(*args, **kwargs)
def answer_problem(self, correct):
"""
Answer drop down problem.
"""
answer = 'Option 2' if correct else 'Option 3'
selector_element = self.problem_page.q(
css='.problem .option-input select')
select_option_by_text(selector_element, answer)
class StringProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
"""
TestCase Class for String Problem Type
"""
problem_name = 'STRING TEST PROBLEM'
problem_type = 'string'
factory = StringResponseXMLFactory()
factory_kwargs = {
'question_text': 'The answer is "correct string"',
'case_sensitive': False,
'answer': 'correct string',
}
status_indicators = {
'correct': ['div.correct'],
'incorrect': ['div.incorrect'],
'unanswered': ['div.unanswered', 'div.unsubmitted'],
}
def setUp(self, *args, **kwargs):
"""
Additional setup for StringProblemTypeTest
"""
super(StringProblemTypeTest, self).setUp(*args, **kwargs)
def answer_problem(self, correct):
"""
Answer string problem.
"""
textvalue = 'correct string' if correct else 'incorrect string'
self.problem_page.fill_answer(textvalue)
class NumericalProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
"""
TestCase Class for Numerical Problem Type
"""
problem_name = 'NUMERICAL TEST PROBLEM'
problem_type = 'numerical'
factory = NumericalResponseXMLFactory()
factory_kwargs = {
'question_text': 'The answer is pi + 1',
'answer': '4.14159',
'tolerance': '0.00001',
'math_display': True,
}
status_indicators = {
'correct': ['div.correct'],
'incorrect': ['div.incorrect'],
'unanswered': ['div.unanswered', 'div.unsubmitted'],
}
def setUp(self, *args, **kwargs):
"""
Additional setup for NumericalProblemTypeTest
"""
super(NumericalProblemTypeTest, self).setUp(*args, **kwargs)
def answer_problem(self, correct):
"""
Answer numerical problem.
"""
textvalue = "pi + 1" if correct else str(random.randint(-2, 2))
self.problem_page.fill_answer(textvalue)
class FormulaProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
"""
TestCase Class for Formula Problem Type
"""
problem_name = 'FORMULA TEST PROBLEM'
problem_type = 'formula'
factory = FormulaResponseXMLFactory()
factory_kwargs = {
'question_text': 'The solution is [mathjax]x^2+2x+y[/mathjax]',
'sample_dict': {'x': (-100, 100), 'y': (-100, 100)},
'num_samples': 10,
'tolerance': 0.00001,
'math_display': True,
'answer': 'x^2+2*x+y',
}
status_indicators = {
'correct': ['div.correct'],
'incorrect': ['div.incorrect'],
'unanswered': ['div.unanswered', 'div.unsubmitted'],
}
def setUp(self, *args, **kwargs):
"""
Additional setup for FormulaProblemTypeTest
"""
super(FormulaProblemTypeTest, self).setUp(*args, **kwargs)
def answer_problem(self, correct):
"""
Answer formula problem.
"""
textvalue = "x^2+2*x+y" if correct else 'x^2'
self.problem_page.fill_answer(textvalue)
class ScriptProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
"""
TestCase Class for Script Problem Type
"""
problem_name = 'SCRIPT TEST PROBLEM'
problem_type = 'script'
factory = CustomResponseXMLFactory()
factory_kwargs = {
'cfn': 'test_add_to_ten',
'expect': '10',
'num_inputs': 2,
'group_label': 'Enter two integers that sum to 10.',
'script': textwrap.dedent("""
def test_add_to_ten(expect,ans):
try:
a1=int(ans[0])
a2=int(ans[1])
except ValueError:
a1=0
a2=0
return (a1+a2)==int(expect)
"""),
}
status_indicators = {
'correct': ['div.correct'],
'incorrect': ['div.incorrect'],
'unanswered': ['div.unanswered', 'div.unsubmitted'],
}
def setUp(self, *args, **kwargs):
"""
Additional setup for ScriptProblemTypeTest
"""
super(ScriptProblemTypeTest, self).setUp(*args, **kwargs)
def answer_problem(self, correct):
"""
Answer script problem.
"""
# Correct answer is any two integers that sum to 10
first_addend = random.randint(-100, 100)
second_addend = 10 - first_addend
# If we want an incorrect answer, then change
# the second addend so they no longer sum to 10
if not correct:
second_addend += random.randint(1, 10)
self.problem_page.fill_answer(first_addend, input_num=0)
self.problem_page.fill_answer(second_addend, input_num=1)
class CodeProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
"""
TestCase Class for Code Problem Type
"""
problem_name = 'CODE TEST PROBLEM'
problem_type = 'code'
factory = CodeResponseXMLFactory()
factory_kwargs = {
'question_text': 'Submit code to an external grader',
'initial_display': 'print "Hello world!"',
'grader_payload': '{"grader": "ps1/Spring2013/test_grader.py"}',
}
status_indicators = {
'correct': ['.grader-status .correct ~ .debug'],
'incorrect': ['.grader-status .incorrect ~ .debug'],
'unanswered': ['.grader-status .unanswered ~ .debug'],
}
def setUp(self, *args, **kwargs):
"""
Additional setup for CodeProblemTypeTest
"""
super(CodeProblemTypeTest, self).setUp(*args, **kwargs)
self.problem_page.a11y_audit.config.set_rules({
'ignore': [
'section', # TODO: AC-491
'label', # TODO: AC-286
]
})
def answer_problem(self, correct):
"""
Answer code problem.
"""
# The fake xqueue server is configured to respond
# correct / incorrect no matter what we submit.
# Furthermore, since the inline code response uses
# JavaScript to make the code display nicely, it's difficult
        # to programmatically input text
        # (there's no <textarea> we can just fill text into).
# For this reason, we submit the initial code in the response
# (configured in the problem XML above)
pass
def test_answer_incorrectly(self):
"""
        Overridden for the code test because the testing grader always responds
with "correct"
"""
pass
def test_submit_blank_answer(self):
"""
        Overridden for the code test because the testing grader always responds
with "correct"
"""
pass
def test_cant_submit_blank_answer(self):
"""
        Overridden for the code test because the testing grader always responds
with "correct"
"""
pass
class ChoiceTextProblemTypeTestBase(ProblemTypeTestBase):
"""
Base class for "Choice + Text" Problem Types.
(e.g. RadioText, CheckboxText)
"""
choice_type = None
def _select_choice(self, input_num):
"""
Selects the nth (where n == input_num) choice of the problem.
"""
self.problem_page.q(
css='div.problem input.ctinput[type="{}"]'.format(self.choice_type)
).nth(input_num).click()
def _fill_input_text(self, value, input_num):
"""
Fills the nth (where n == input_num) text input field of the problem
with value.
"""
self.problem_page.q(
css='div.problem input.ctinput[type="text"]'
).nth(input_num).fill(value)
def answer_problem(self, correct):
"""
Answer radio text problem.
"""
choice = 0 if correct else 1
input_value = "8" if correct else "5"
self._select_choice(choice)
self._fill_input_text(input_value, choice)
class RadioTextProblemTypeTest(ChoiceTextProblemTypeTestBase, ProblemTypeTestMixin):
"""
TestCase Class for Radio Text Problem Type
"""
problem_name = 'RADIO TEXT TEST PROBLEM'
problem_type = 'radio_text'
choice_type = 'radio'
factory = ChoiceTextResponseXMLFactory()
factory_kwargs = {
'question_text': 'The correct answer is Choice 0 and input 8',
'type': 'radiotextgroup',
'choices': [
("true", {"answer": "8", "tolerance": "1"}),
("false", {"answer": "8", "tolerance": "1"}),
],
}
status_indicators = {
'correct': ['section.choicetextgroup_correct'],
'incorrect': ['section.choicetextgroup_incorrect', 'span.incorrect'],
'unanswered': ['span.unanswered'],
}
def setUp(self, *args, **kwargs):
"""
Additional setup for RadioTextProblemTypeTest
"""
super(RadioTextProblemTypeTest, self).setUp(*args, **kwargs)
class CheckboxTextProblemTypeTest(ChoiceTextProblemTypeTestBase, ProblemTypeTestMixin):
"""
TestCase Class for Checkbox Text Problem Type
"""
problem_name = 'CHECKBOX TEXT TEST PROBLEM'
problem_type = 'checkbox_text'
choice_type = 'checkbox'
factory = ChoiceTextResponseXMLFactory()
factory_kwargs = {
'question_text': 'The correct answer is Choice 0 and input 8',
'type': 'checkboxtextgroup',
'choices': [
("true", {"answer": "8", "tolerance": "1"}),
("false", {"answer": "8", "tolerance": "1"}),
],
}
def setUp(self, *args, **kwargs):
"""
Additional setup for CheckboxTextProblemTypeTest
"""
super(CheckboxTextProblemTypeTest, self).setUp(*args, **kwargs)
class ImageProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
"""
TestCase Class for Image Problem Type
"""
problem_name = 'IMAGE TEST PROBLEM'
problem_type = 'image'
factory = ImageResponseXMLFactory()
can_submit_blank = True
factory_kwargs = {
'src': '/static/images/placeholder-image.png',
'rectangle': '(0,0)-(50,50)',
}
def answer_problem(self, correct):
"""
Answer image problem.
"""
offset = 25 if correct else -25
input_selector = ".imageinput [id^='imageinput_'] img"
input_element = self.problem_page.q(css=input_selector)[0]
chain = ActionChains(self.browser)
chain.move_to_element(input_element)
chain.move_by_offset(offset, offset)
chain.click()
chain.perform()
class SymbolicProblemTypeTest(ProblemTypeTestBase, ProblemTypeTestMixin):
"""
TestCase Class for Symbolic Problem Type
"""
problem_name = 'SYMBOLIC TEST PROBLEM'
problem_type = 'symbolicresponse'
factory = SymbolicResponseXMLFactory()
factory_kwargs = {
'expect': '2*x+3*y',
}
status_indicators = {
'correct': ['div.capa_inputtype div.correct'],
'incorrect': ['div.capa_inputtype div.incorrect'],
'unanswered': ['div.capa_inputtype div.unanswered'],
}
def setUp(self, *args, **kwargs):
"""
Additional setup for SymbolicProblemTypeTest
"""
super(SymbolicProblemTypeTest, self).setUp(*args, **kwargs)
self.problem_page.a11y_audit.config.set_rules({
'ignore': [
'section', # TODO: AC-491
'label', # TODO: AC-294
]
})
def answer_problem(self, correct):
"""
Answer symbolic problem.
"""
choice = "2*x+3*y" if correct else "3*a+4*b"
self.problem_page.fill_answer(choice)
|
jjmiranda/edx-platform
|
common/test/acceptance/tests/lms/test_problem_types.py
|
Python
|
agpl-3.0
| 27,271
|
[
"VisIt"
] |
847f33e36bbebe93b78aad156e36e995f2fc9ccebe9f6d5bb14e18bce6a4b199
|
from contextlib import contextmanager
from taichi._lib import core as _ti_core
from taichi.lang import impl
from taichi.profiler.kernel_metrics import default_cupti_metrics
class StatisticalResult:
"""Statistical result of records.
Profiling records with the same kernel name will be counted in a ``StatisticalResult`` instance via function ``insert_record(time)``.
Currently, only the kernel elapsed time is counted; other statistics related to the kernel will be added in the future.
"""
def __init__(self, name):
self.name = name
self.counter = 0
self.min_time = 0.0
self.max_time = 0.0
self.total_time = 0.0
def __lt__(self, other):
# For sorted()
return self.total_time < other.total_time
def insert_record(self, time):
"""Insert records with the same kernel name.
Currently, only the kernel elapsed time is counted.
"""
if self.counter == 0:
self.min_time = time
self.max_time = time
self.counter += 1
self.total_time += time
self.min_time = min(self.min_time, time)
self.max_time = max(self.max_time, time)
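# A minimal usage sketch (the timings below are hypothetical, in milliseconds):
#
#     result = StatisticalResult('fill_kernel')
#     for t in (0.5, 1.2, 0.8):
#         result.insert_record(t)
#     # result.counter == 3, result.total_time == 2.5
#     # result.min_time == 0.5, result.max_time == 1.2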
class KernelProfiler:
"""Kernel profiler of Taichi.
Kernel profiler acquires kernel profiling records from backend, counts records in Python scope,
and prints the results to the console by :func:`~taichi.profiler.kernel_profiler.KernelProfiler.print_info`.
``KernelProfiler`` now supports detailed low-level performance metrics (such as memory bandwidth consumption) in its advanced mode.
This mode is only available for the CUDA backend with CUPTI toolkit, i.e. you need ``ti.init(kernel_profiler=True, arch=ti.cuda)``.
Note:
For details about using CUPTI in Taichi, please visit https://docs.taichi.graphics/docs/lang/articles/misc/profiler#advanced-mode.
"""
def __init__(self):
self._profiling_mode = False
self._profiling_toolkit = 'default'
self._metric_list = [default_cupti_metrics]
self._total_time_ms = 0.0
self._traced_records = []
self._statistical_results = {}
# public methods
def set_kernel_profiler_mode(self, mode=False):
"""Turn on or off :class:`~taichi.profiler.kernel_profiler.KernelProfiler`."""
if type(mode) is bool:
self._profiling_mode = mode
else:
raise TypeError(
f'Arg `mode` must be of type boolean. Type {type(mode)} is not supported.'
)
def get_kernel_profiler_mode(self):
"""Get status of :class:`~taichi.profiler.kernel_profiler.KernelProfiler`."""
return self._profiling_mode
def set_toolkit(self, toolkit_name='default'):
if self._check_not_turned_on_with_warning_message():
return False
status = impl.get_runtime().prog.set_kernel_profiler_toolkit(
toolkit_name)
if status is True:
self._profiling_toolkit = toolkit_name
else:
_ti_core.warn(
f'Failed to set kernel profiler toolkit ({toolkit_name}), keeping ({self._profiling_toolkit}).'
)
return status
def get_total_time(self):
"""Get elapsed time of all kernels recorded in KernelProfiler.
Returns:
time (float): total time in seconds.
"""
if self._check_not_turned_on_with_warning_message():
return 0.0
self._update_records() # kernel records
self._count_statistics() # _total_time_ms is counted here
return self._total_time_ms / 1000 # ms to s
def clear_info(self):
"""Clear all records both in front-end :class:`~taichi.profiler.kernel_profiler.KernelProfiler` and back-end instance ``KernelProfilerBase``.
Note:
The values of ``self._profiling_mode`` and ``self._metric_list`` will not be cleared.
"""
if self._check_not_turned_on_with_warning_message():
return None
#sync first
impl.get_runtime().prog.sync_kernel_profiler()
#then clear backend & frontend info
impl.get_runtime().prog.clear_kernel_profile_info()
self._clear_frontend()
return None
def query_info(self, name):
"""For docstring of this function, see :func:`~taichi.profiler.query_kernel_profiler_info`."""
if self._check_not_turned_on_with_warning_message():
return None
self._update_records() # kernel records
self._count_statistics() # statistics results
# TODO : query self.StatisticalResult in python scope
return impl.get_runtime().prog.query_kernel_profile_info(name)
def set_metrics(self, metric_list=default_cupti_metrics):
"""For docstring of this function, see :func:`~taichi.profiler.set_kernel_profiler_metrics`."""
if self._check_not_turned_on_with_warning_message():
return None
self._metric_list = metric_list
metric_name_list = [metric.name for metric in metric_list]
self.clear_info()
impl.get_runtime().prog.reinit_kernel_profiler_with_metrics(
metric_name_list)
return None
@contextmanager
def collect_metrics_in_context(self, metric_list=default_cupti_metrics):
"""This function is not exposed to user now.
For usage of this function, see :func:`~taichi.profiler.collect_kernel_profiler_metrics`.
"""
if self._check_not_turned_on_with_warning_message():
# Yield anyway so the @contextmanager protocol is satisfied when profiling is off.
yield self
return
self.set_metrics(metric_list)
yield self
self.set_metrics() #back to default metric list
return None
# mode of print_info
COUNT = 'count'  # print the statistical results (min, max, avg time) of Taichi kernels.
TRACE = 'trace'  # print the records of launched Taichi kernels with specific profiling metrics (time, memory load/store, core utilization, etc.)
def print_info(self, mode=COUNT):
"""Print the profiling results of Taichi kernels.
For usage of this function, see :func:`~taichi.profiler.print_kernel_profiler_info`.
Args:
mode (str): the way to print profiling results.
"""
if self._check_not_turned_on_with_warning_message():
return None
self._update_records() # kernel records
self._count_statistics() # statistics results
#COUNT mode (default) : print statistics of all kernel
if mode == self.COUNT:
self._print_statistics_info()
#TRACE mode : print records of launched kernel
elif mode == self.TRACE:
self._print_kernel_info()
else:
raise ValueError(
'Arg `mode` must be of type \'str\', and has the value \'count\' or \'trace\'.'
)
return None
# private methods
def _check_not_turned_on_with_warning_message(self):
if self._profiling_mode is False:
_ti_core.warn(
'use \'ti.init(kernel_profiler = True)\' to turn on KernelProfiler.'
)
return True
return False
def _clear_frontend(self):
"""Clear member variables in :class:`~taichi.profiler.kernel_profiler.KernelProfiler`.
Note:
The values of ``self._profiling_mode`` and ``self._metric_list`` will not be cleared.
"""
self._total_time_ms = 0.0
self._traced_records.clear()
self._statistical_results.clear()
def _update_records(self):
"""Acquires kernel records from a backend."""
impl.get_runtime().prog.sync_kernel_profiler()
self._clear_frontend()
self._traced_records = impl.get_runtime(
).prog.get_kernel_profiler_records()
def _count_statistics(self):
"""Counts the statistics of launched kernels during the profiling period.
The profiling records with the same kernel name are counted as a profiling result.
"""
for record in self._traced_records:
if self._statistical_results.get(record.name) is None:
self._statistical_results[record.name] = StatisticalResult(
record.name)
self._statistical_results[record.name].insert_record(
record.kernel_time)
self._total_time_ms += record.kernel_time
self._statistical_results = {
k: v
for k, v in sorted(self._statistical_results.items(),
key=lambda item: item[1],
reverse=True)
}
def _make_table_header(self, mode):
header_str = f'Kernel Profiler({mode}, {self._profiling_toolkit})'
arch_name = f' @ {_ti_core.arch_name(impl.current_cfg().arch).upper()}'
device_name = impl.get_runtime().prog.get_kernel_profiler_device_name()
if len(device_name) > 1: # default device_name = ' '
device_name = ' on ' + device_name
return header_str + arch_name + device_name
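# Sketch of a resulting header string (the device name is illustrative):
#   'Kernel Profiler(count, default) @ CUDA on NVIDIA GeForce RTX 3080'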
def _print_statistics_info(self):
"""Print statistics of launched kernels during the profiling period."""
# headers
table_header = self._make_table_header('count')
column_header = '[ % total count | min avg max ] Kernel name'
# partition line
line_length = max(len(column_header), len(table_header))
outer_partition_line = '=' * line_length
inner_partition_line = '-' * line_length
#message in one line
string_list = []
values_list = []
for key in self._statistical_results:
result = self._statistical_results[key]
fraction = result.total_time / self._total_time_ms * 100.0
string_list.append(
'[{:6.2f}% {:7.3f} s {:6d}x |{:9.3f} {:9.3f} {:9.3f} ms] {}')
values_list.append([
fraction,
result.total_time / 1000.0,
result.counter,
result.min_time,
result.total_time / result.counter, # avg_time
result.max_time,
result.name
])
# summary
summary_line = '[100.00%] Total execution time: '
summary_line += f'{self._total_time_ms/1000:7.3f} s '
summary_line += f'number of results: {len(self._statistical_results)}'
# print
print(outer_partition_line)
print(table_header)
print(outer_partition_line)
print(column_header)
print(inner_partition_line)
result_num = len(self._statistical_results)
for idx in range(result_num):
print(string_list[idx].format(*values_list[idx]))
print(inner_partition_line)
print(summary_line)
print(outer_partition_line)
def _print_kernel_info(self):
"""Print a list of launched kernels during the profiling period."""
metric_list = self._metric_list
values_num = len(self._traced_records[0].metric_values)
# We currently get kernel attributes through the CUDA Driver API;
# there is no corresponding implementation in other backends yet.
# The profiler does not print invalid kernel attribute info for now.
kernel_attribute_state = self._traced_records[0].register_per_thread > 0
# headers
table_header = self._make_table_header('trace')
column_header = ('[ start.time | kernel.time |') #default
if kernel_attribute_state:
column_header += (
' regs | shared mem | grid size | block size | occupancy |'
) #kernel_attributes
for idx in range(values_num):
column_header += metric_list[idx].header + '|'
column_header = (column_header + '] Kernel name').replace("|]", "]")
# partition line
line_length = max(len(column_header), len(table_header))
outer_partition_line = '=' * line_length
inner_partition_line = '-' * line_length
# message in one line: formatted_str.format(*values)
fake_timestamp = 0.0
string_list = []
values_list = []
for record in self._traced_records:
formatted_str = '[{:9.3f} ms |{:9.3f} ms |' #default
values = [fake_timestamp, record.kernel_time] #default
if kernel_attribute_state:
formatted_str += ' {:4d} | {:6d} bytes | {:6d} | {:6d} | {:2d} blocks |'
values += [
record.register_per_thread, record.shared_mem_per_block,
record.grid_size, record.block_size,
record.active_blocks_per_multiprocessor
]
for idx in range(values_num):
formatted_str += metric_list[idx].format + '|'
values += [record.metric_values[idx] * metric_list[idx].scale]
formatted_str = (formatted_str + '] ' + record.name)
string_list.append(formatted_str.replace("|]", "]"))
values_list.append(values)
fake_timestamp += record.kernel_time
# print
print(outer_partition_line)
print(table_header)
print(outer_partition_line)
print(column_header)
print(inner_partition_line)
record_num = len(self._traced_records)
for idx in range(record_num):
print(string_list[idx].format(*values_list[idx]))
print(inner_partition_line)
print(f"Number of records: {len(self._traced_records)}")
print(outer_partition_line)
_ti_kernel_profiler = KernelProfiler()
def get_default_kernel_profiler():
"""We have only one :class:`~taichi.profiler.kernelprofiler.KernelProfiler` instance(i.e. ``_ti_kernel_profiler``) now.
For ``KernelProfiler`` using ``CuptiToolkit``, GPU devices can only work in a certain configuration.
Profiling mode and metrics are configured by the host (CPU) via CUPTI APIs, and the device (GPU) will use
its counter registers to collect specific metrics.
So if there were multiple instances of ``KernelProfiler``, the device would work with the latest configuration,
and the profiling configuration of the other instances would be changed as a result.
For data retention purposes, multiple instances will be considered in the future.
"""
return _ti_kernel_profiler
def print_kernel_profiler_info(mode='count'):
"""Print the profiling results of Taichi kernels.
To enable this profiler, set ``kernel_profiler=True`` in ``ti.init()``.
``'count'`` mode: print the statistics (min, max, avg time) of launched kernels;
``'trace'`` mode: print the records of launched kernels with specific profiling metrics (time, memory load/store, core utilization, etc.).
The mode defaults to ``'count'``.
Args:
mode (str): the way to print profiling results.
Example::
>>> import taichi as ti
>>> ti.init(ti.cpu, kernel_profiler=True)
>>> var = ti.field(ti.f32, shape=1)
>>> @ti.kernel
>>> def compute():
>>> var[0] = 1.0
>>> compute()
>>> ti.profiler.print_kernel_profiler_info()
>>> # equivalent calls :
>>> # ti.profiler.print_kernel_profiler_info('count')
>>> ti.profiler.print_kernel_profiler_info('trace')
Note:
Currently the result of `KernelProfiler` could be incorrect on the OpenGL
backend due to its lack of support for `ti.sync()`.
For advanced mode of `KernelProfiler`, please visit https://docs.taichi.graphics/docs/lang/articles/misc/profiler#advanced-mode.
"""
get_default_kernel_profiler().print_info(mode)
def query_kernel_profiler_info(name):
"""Query kernel elapsed time(min,avg,max) on devices using the kernel name.
To enable this profiler, set `kernel_profiler=True` in `ti.init`.
Args:
name (str): kernel name.
Returns:
KernelProfilerQueryResult (class): with member variables (counter, min, max, avg)
Example::
>>> import taichi as ti
>>> ti.init(ti.cpu, kernel_profiler=True)
>>> n = 1024*1024
>>> var = ti.field(ti.f32, shape=n)
>>> @ti.kernel
>>> def fill():
>>> for i in range(n):
>>> var[i] = 0.1
>>> fill()
>>> ti.profiler.clear_kernel_profiler_info() #[1]
>>> for i in range(100):
>>> fill()
>>> query_result = ti.profiler.query_kernel_profiler_info(fill.__name__) #[2]
>>> print("kernel excuted times =",query_result.counter)
>>> print("kernel elapsed time(min_in_ms) =",query_result.min)
>>> print("kernel elapsed time(max_in_ms) =",query_result.max)
>>> print("kernel elapsed time(avg_in_ms) =",query_result.avg)
Note:
[1] To get the correct result, query_kernel_profiler_info() must be used in conjunction with
clear_kernel_profiler_info().
[2] Currently the result of `KernelProfiler` could be incorrect on the OpenGL
backend due to its lack of support for `ti.sync()`.
"""
return get_default_kernel_profiler().query_info(name)
def clear_kernel_profiler_info():
"""Clear all KernelProfiler records."""
get_default_kernel_profiler().clear_info()
def get_kernel_profiler_total_time():
"""Get elapsed time of all kernels recorded in KernelProfiler.
Returns:
time (float): total time in seconds.
"""
return get_default_kernel_profiler().get_total_time()
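# A minimal usage sketch (assumes ``ti.init(kernel_profiler=True)`` was called
# and some kernels have already run):
#
#     total_s = ti.profiler.get_kernel_profiler_total_time()
#     print(f'kernels ran for {total_s:.3f} s in total')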
def set_kernel_profiler_toolkit(toolkit_name='default'):
"""Set the toolkit used by KernelProfiler.
Currently, we only support toolkits: ``'default'`` and ``'cupti'``.
Args:
toolkit_name (str): string of toolkit name.
Returns:
status (bool): whether the setting is successful or not.
Example::
>>> import taichi as ti
>>> ti.init(arch=ti.cuda, kernel_profiler=True)
>>> x = ti.field(ti.f32, shape=1024*1024)
>>> @ti.kernel
>>> def fill():
>>> for i in x:
>>> x[i] = i
>>> ti.profiler.set_kernel_profiler_toolkit('cupti')
>>> for i in range(100):
>>> fill()
>>> ti.profiler.print_kernel_profiler_info()
>>> ti.profiler.set_kernel_profiler_toolkit('default')
>>> for i in range(100):
>>> fill()
>>> ti.profiler.print_kernel_profiler_info()
"""
return get_default_kernel_profiler().set_toolkit(toolkit_name)
def set_kernel_profiler_metrics(metric_list=default_cupti_metrics):
"""Set metrics that will be collected by the CUPTI toolkit.
Args:
metric_list (list): a list of :class:`~taichi.profiler.CuptiMetric()` instances, default value: :data:`~taichi.profiler.kernel_metrics.default_cupti_metrics`.
Example::
>>> import taichi as ti
>>> ti.init(kernel_profiler=True, arch=ti.cuda)
>>> ti.profiler.set_kernel_profiler_toolkit('cupti')
>>> num_elements = 128*1024*1024
>>> x = ti.field(ti.f32, shape=num_elements)
>>> y = ti.field(ti.f32, shape=())
>>> y[None] = 0
>>> @ti.kernel
>>> def reduction():
>>> for i in x:
>>> y[None] += x[i]
>>> # With no parameter, Taichi will print its pre-defined metrics list
>>> ti.profiler.get_predefined_cupti_metrics()
>>> # get Taichi pre-defined metrics
>>> profiling_metrics = ti.profiler.get_predefined_cupti_metrics('shared_access')
>>> global_op_atom = ti.profiler.CuptiMetric(
>>> name='l1tex__t_set_accesses_pipe_lsu_mem_global_op_atom.sum',
>>> header=' global.atom ',
>>> format=' {:8.0f} ')
>>> # add user defined metrics
>>> profiling_metrics += [global_op_atom]
>>> # metrics setting will be retained until the next configuration
>>> ti.profiler.set_kernel_profiler_metrics(profiling_metrics)
>>> for i in range(16):
>>> reduction()
>>> ti.profiler.print_kernel_profiler_info('trace')
Note:
Metrics setting will be retained until the next configuration.
"""
get_default_kernel_profiler().set_metrics(metric_list)
@contextmanager
def collect_kernel_profiler_metrics(metric_list=default_cupti_metrics):
"""Set temporary metrics that will be collected by the CUPTI toolkit within this context.
Args:
metric_list (list): a list of :class:`~taichi.profiler.CuptiMetric()` instances, default value: :data:`~taichi.profiler.kernel_metrics.default_cupti_metrics`.
Example::
>>> import taichi as ti
>>> ti.init(kernel_profiler=True, arch=ti.cuda)
>>> ti.profiler.set_kernel_profiler_toolkit('cupti')
>>> num_elements = 128*1024*1024
>>> x = ti.field(ti.f32, shape=num_elements)
>>> y = ti.field(ti.f32, shape=())
>>> y[None] = 0
>>> @ti.kernel
>>> def reduction():
>>> for i in x:
>>> y[None] += x[i]
>>> # With no parameter, Taichi will print its pre-defined metrics list
>>> ti.profiler.get_predefined_cupti_metrics()
>>> # get Taichi pre-defined metrics
>>> profiling_metrics = ti.profiler.get_predefined_cupti_metrics('device_utilization')
>>> global_op_atom = ti.profiler.CuptiMetric(
>>> name='l1tex__t_set_accesses_pipe_lsu_mem_global_op_atom.sum',
>>> header=' global.atom ',
>>> format=' {:8.0f} ')
>>> # add user defined metrics
>>> profiling_metrics += [global_op_atom]
>>> # the metrics setting is temporary, and will be cleared when exiting this context.
>>> with ti.profiler.collect_kernel_profiler_metrics(profiling_metrics):
>>> for i in range(16):
>>> reduction()
>>> ti.profiler.print_kernel_profiler_info('trace')
Note:
The configuration of the ``metric_list`` will be cleared when exiting this context.
"""
get_default_kernel_profiler().set_metrics(metric_list)
yield get_default_kernel_profiler()
get_default_kernel_profiler().set_metrics()
__all__ = [
'clear_kernel_profiler_info', 'collect_kernel_profiler_metrics',
'get_kernel_profiler_total_time', 'print_kernel_profiler_info',
'query_kernel_profiler_info', 'set_kernel_profiler_metrics',
'set_kernel_profiler_toolkit'
]
|
yuanming-hu/taichi
|
python/taichi/profiler/kernel_profiler.py
|
Python
|
mit
| 22,549
|
[
"VisIt"
] |
be5e0782a63b43b42ab69e2aa8bd3329a2cd0ac50a6ba220428bd37107f936ef
|
#!/usr/bin/env python
'''
GAEUnit: Google App Engine Unit Test Framework
Usage:
1. Put gaeunit.py into your application directory. Modify 'app.yaml' by
adding the following mapping below the 'handlers:' section:
- url: /test.*
script: gaeunit.py
2. Write your own test cases by extending unittest.TestCase.
3. Launch the development web server. To run all tests, point your browser to:
http://localhost:8080/test (Modify the port if necessary.)
For plain text output add '?format=plain' to the above URL.
See README.TXT for information on how to run specific tests.
4. The results are displayed as the tests are run.
Visit http://code.google.com/p/gaeunit for more information and updates.
------------------------------------------------------------------------------
Copyright (c) 2008-2009, George Lei and Steven R. Farley. All rights reserved.
Distributed under the following BSD license:
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
------------------------------------------------------------------------------
'''
__author__ = "George Lei and Steven R. Farley"
__email__ = "George.Z.Lei@Gmail.com"
__version__ = "#Revision: 1.2.8 $"[11:-2]
__copyright__= "Copyright (c) 2008-2009, George Lei and Steven R. Farley"
__license__ = "BSD"
__url__ = "http://code.google.com/p/gaeunit"
import sys
import os
import unittest
import time
import logging
import cgi
import re
import django.utils.simplejson
from xml.sax.saxutils import unescape
from google.appengine.ext import webapp
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_file_stub
from google.appengine.ext.webapp.util import run_wsgi_app
_LOCAL_TEST_DIR = 'test' # location of files
_WEB_TEST_DIR = '/test' # how you want to refer to tests on your web server
_LOCAL_DJANGO_TEST_DIR = '../../gaeunit/test'
# or:
# _WEB_TEST_DIR = '/u/test'
# then in app.yaml:
# - url: /u/test.*
# script: gaeunit.py
##################################################
## Django support
def django_test_runner(request):
unknown_args = [arg for (arg, v) in request.REQUEST.items()
if arg not in ("format", "package", "name")]
if len(unknown_args) > 0:
errors = []
for arg in unknown_args:
errors.append(_log_error("The request parameter '%s' is not valid." % arg))
from django.http import HttpResponseNotFound
return HttpResponseNotFound(" ".join(errors))
format = request.REQUEST.get("format", "html")
package_name = request.REQUEST.get("package")
test_name = request.REQUEST.get("name")
if format == "html":
return _render_html(package_name, test_name)
elif format == "plain":
return _render_plain(package_name, test_name)
else:
error = _log_error("The format '%s' is not valid." % cgi.escape(format))
from django.http import HttpResponseServerError
return HttpResponseServerError(error)
def _render_html(package_name, test_name):
suite, error = _create_suite(package_name, test_name, _LOCAL_DJANGO_TEST_DIR)
if not error:
content = _MAIN_PAGE_CONTENT % (_test_suite_to_json(suite), _WEB_TEST_DIR, __version__)
from django.http import HttpResponse
return HttpResponse(content)
else:
from django.http import HttpResponseServerError
return HttpResponseServerError(error)
def _render_plain(package_name, test_name):
suite, error = _create_suite(package_name, test_name, _LOCAL_DJANGO_TEST_DIR)
if not error:
from django.http import HttpResponse
response = HttpResponse()
response["Content-Type"] = "text/plain"
runner = unittest.TextTestRunner(response)
response.write("====================\n" \
"GAEUnit Test Results\n" \
"====================\n\n")
_run_test_suite(runner, suite)
return response
else:
from django.http import HttpResponseServerError
return HttpResponseServerError(error)
def django_json_test_runner(request):
from django.http import HttpResponse
response = HttpResponse()
response["Content-Type"] = "text/javascript"
test_name = request.REQUEST.get("name")
_load_default_test_modules(_LOCAL_DJANGO_TEST_DIR)
suite = unittest.defaultTestLoader.loadTestsFromName(test_name)
runner = JsonTestRunner()
_run_test_suite(runner, suite)
runner.result.render_to(response)
return response
########################################################
class GAETestCase(unittest.TestCase):
"""TestCase parent class that provides the following assert functions
* assertHtmlEqual - compare two HTML strings, ignoring out-of-element
whitespace and other differences acknowledged in the standard.
"""
def assertHtmlEqual(self, html1, html2):
if html1 is None or html2 is None:
raise self.failureException, "argument is None"
html1 = self._formalize(html1)
html2 = self._formalize(html2)
if not html1 == html2:
error_msg = self._findHtmlDifference(html1, html2)
error_msg = "HTML contents are not equal" + error_msg
raise self.failureException(error_msg)
def _formalize(self, html):
html = html.replace("\r\n", " ").replace("\n", " ")
html = re.sub(r"[ \t]+", " ", html)
html = re.sub(r"[ ]*>[ ]*", ">", html)
html = re.sub(r"[ ]*<[ ]*", "<", html)
return unescape(html)
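# Sketch of the normalization performed above, e.g.:
#   self._formalize('<p>\n  Hello   world </p>')  ->  '<p>Hello world</p>'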
def _findHtmlDifference(self, html1, html2):
display_window_width = 41
html1_len = len(html1)
html2_len = len(html2)
for i in range(html1_len):
if i >= html2_len or html1[i] != html2[i]:
break
if html1_len < html2_len:
html1 += " " * (html2_len - html1_len)
length = html2_len
else:
html2 += " " * (html1_len - html2_len)
length = html1_len
if length <= display_window_width:
return "\n%s\n%s\n%s^" % (html1, html2, "_" * i)
start = i - display_window_width / 2
end = i + 1 + display_window_width / 2
if start < 0:
adjust = -start
start += adjust
end += adjust
pointer_pos = i
leading_dots = ""
ending_dots = "..."
elif end > length:
adjust = end - length
start -= adjust
end -= adjust
pointer_pos = i - start + 3
leading_dots = "..."
ending_dots = ""
else:
pointer_pos = i - start + 3
leading_dots = "..."
ending_dots = "..."
return '\n%s%s%s\n%s\n%s^' % (leading_dots, html1[start:end], ending_dots, leading_dots+html2[start:end]+ending_dots, "_" * (i - start + len(leading_dots)))
assertHtmlEquals = assertHtmlEqual
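# Sketch: for short inputs, e.g. comparing '<p>a</p>' with '<p>b</p>',
# _findHtmlDifference returns a three-line pointer diff:
#   <p>a</p>
#   <p>b</p>
#   ___^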
##############################################################################
# Main request handler
##############################################################################
class MainTestPageHandler(webapp.RequestHandler):
def get(self):
unknown_args = [arg for arg in self.request.arguments()
if arg not in ("format", "package", "name")]
if len(unknown_args) > 0:
errors = []
for arg in unknown_args:
errors.append(_log_error("The request parameter '%s' is not valid." % arg))
self.error(404)
self.response.out.write(" ".join(errors))
return
format = self.request.get("format", "html")
package_name = self.request.get("package")
test_name = self.request.get("name")
if format == "html":
self._render_html(package_name, test_name)
elif format == "plain":
self._render_plain(package_name, test_name)
else:
error = _log_error("The format '%s' is not valid." % cgi.escape(format))
self.error(404)
self.response.out.write(error)
def _render_html(self, package_name, test_name):
suite, error = _create_suite(package_name, test_name, _LOCAL_TEST_DIR)
if not error:
self.response.out.write(_MAIN_PAGE_CONTENT % (_test_suite_to_json(suite), _WEB_TEST_DIR, __version__))
else:
self.error(404)
self.response.out.write(error)
def _render_plain(self, package_name, test_name):
self.response.headers["Content-Type"] = "text/plain"
runner = unittest.TextTestRunner(self.response.out)
suite, error = _create_suite(package_name, test_name, _LOCAL_TEST_DIR)
if not error:
self.response.out.write("====================\n" \
"GAEUnit Test Results\n" \
"====================\n\n")
_run_test_suite(runner, suite)
else:
self.error(404)
self.response.out.write(error)
##############################################################################
# JSON test classes
##############################################################################
class JsonTestResult(unittest.TestResult):
def __init__(self):
unittest.TestResult.__init__(self)
self.testNumber = 0
def render_to(self, stream):
result = {
'runs': self.testsRun,
'total': self.testNumber,
'errors': self._list(self.errors),
'failures': self._list(self.failures),
}
stream.write(django.utils.simplejson.dumps(result).replace('},', '},\n'))
def _list(self, list):
dict = []
for test, err in list:
d = {
'desc': test.shortDescription() or str(test),
'detail': cgi.escape(err),
}
dict.append(d)
return dict
class JsonTestRunner:
def run(self, test):
self.result = JsonTestResult()
self.result.testNumber = test.countTestCases()
startTime = time.time()
test(self.result)
stopTime = time.time()
timeTaken = stopTime - startTime
return self.result
class JsonTestRunHandler(webapp.RequestHandler):
def get(self):
self.response.headers["Content-Type"] = "text/javascript"
test_name = self.request.get("name")
_load_default_test_modules(_LOCAL_TEST_DIR)
suite = unittest.defaultTestLoader.loadTestsFromName(test_name)
runner = JsonTestRunner()
_run_test_suite(runner, suite)
runner.result.render_to(self.response.out)
# This is not used by the HTML page, but it may be useful for other client test runners.
class JsonTestListHandler(webapp.RequestHandler):
def get(self):
self.response.headers["Content-Type"] = "text/javascript"
suite, error = _create_suite(self.request) #TODO
if not error:
self.response.out.write(_test_suite_to_json(suite))
else:
self.error(404)
self.response.out.write(error)
##############################################################################
# Module helper functions
##############################################################################
def _create_suite(package_name, test_name, test_dir):
loader = unittest.defaultTestLoader
suite = unittest.TestSuite()
error = None
try:
if not package_name and not test_name:
modules = _load_default_test_modules(test_dir)
for module in modules:
suite.addTest(loader.loadTestsFromModule(module))
elif test_name:
_load_default_test_modules(test_dir)
suite.addTest(loader.loadTestsFromName(test_name))
elif package_name:
package = reload(__import__(package_name))
module_names = package.__all__
for module_name in module_names:
suite.addTest(loader.loadTestsFromName('%s.%s' % (package_name, module_name)))
if suite.countTestCases() == 0:
raise Exception("'%s' is not found or does not contain any tests." % \
(test_name or package_name or 'local directory: \"%s\"' % _LOCAL_TEST_DIR))
except Exception as e:
print e
error = str(e)
_log_error(error)
return (suite, error)
def _load_default_test_modules(test_dir):
if not test_dir in sys.path:
sys.path.append(test_dir)
module_names = [mf[0:-3] for mf in os.listdir(test_dir) if mf.endswith(".py")]
return [reload(__import__(name)) for name in module_names]
def _get_tests_from_suite(suite, tests):
for test in suite:
if isinstance(test, unittest.TestSuite):
_get_tests_from_suite(test, tests)
else:
tests.append(test)
def _test_suite_to_json(suite):
tests = []
_get_tests_from_suite(suite, tests)
test_tuples = [(type(test).__module__, type(test).__name__, test._testMethodName) \
for test in tests]
test_dict = {}
for test_tuple in test_tuples:
module_name, class_name, method_name = test_tuple
if module_name not in test_dict:
mod_dict = {}
method_list = []
method_list.append(method_name)
mod_dict[class_name] = method_list
test_dict[module_name] = mod_dict
else:
mod_dict = test_dict[module_name]
if class_name not in mod_dict:
method_list = []
method_list.append(method_name)
mod_dict[class_name] = method_list
else:
method_list = mod_dict[class_name]
method_list.append(method_name)
return django.utils.simplejson.dumps(test_dict)
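# Sketch of the resulting JSON, mapping module -> class -> test methods
# (the names below are hypothetical):
#   {"test_models": {"ModelTest": ["test_create", "test_delete"]}}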
def _run_test_suite(runner, suite):
"""Run the test suite.
Preserve the current development apiproxy, create a new apiproxy and
replace the datastore with a temporary one that will be used for this
test suite, run the test suite, and restore the development apiproxy.
This isolates the test datastore from the development datastore.
"""
original_apiproxy = apiproxy_stub_map.apiproxy
try:
apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
temp_stub = datastore_file_stub.DatastoreFileStub('GAEUnitDataStore', None, None, trusted=True)
apiproxy_stub_map.apiproxy.RegisterStub('datastore', temp_stub)
# Allow the other services to be used as-is for tests.
for name in ['user', 'urlfetch', 'mail', 'memcache', 'images', 'blobstore']:
apiproxy_stub_map.apiproxy.RegisterStub(name, original_apiproxy.GetStub(name))
runner.run(suite)
finally:
apiproxy_stub_map.apiproxy = original_apiproxy
def _log_error(s):
logging.warn(s)
return s
################################################
# Browser HTML, CSS, and Javascript
################################################
# This string uses Python string formatting, so be sure to escape percents as %%.
_MAIN_PAGE_CONTENT = """
<html>
<head>
<style>
body {font-family:arial,sans-serif; text-align:center}
#title {font-family:"Times New Roman","Times Roman",TimesNR,times,serif; font-size:28px; font-weight:bold; text-align:center}
#version {font-size:87%%; text-align:center;}
#weblink {font-style:italic; text-align:center; padding-top:7px; padding-bottom:7px}
#results {padding-top:20px; margin:0pt auto; text-align:center; font-weight:bold}
#testindicator {width:750px; height:16px; border-style:solid; border-width:2px 1px 1px 2px; background-color:#f8f8f8;}
#footerarea {text-align:center; font-size:83%%; padding-top:25px}
#errorarea {padding-top:25px}
.error {border-color: #c3d9ff; border-style: solid; border-width: 2px 1px 2px 1px; width:750px; padding:1px; margin:0pt auto; text-align:left}
.errtitle {background-color:#c3d9ff; font-weight:bold}
</style>
<script language="javascript" type="text/javascript">
var testsToRun = %s;
var totalRuns = 0;
var totalErrors = 0;
var totalFailures = 0;
function newXmlHttp() {
try { return new XMLHttpRequest(); } catch(e) {}
try { return new ActiveXObject("Msxml2.XMLHTTP"); } catch (e) {}
try { return new ActiveXObject("Microsoft.XMLHTTP"); } catch (e) {}
alert("XMLHttpRequest not supported");
return null;
}
function requestTestRun(moduleName, className, methodName) {
var methodSuffix = "";
if (methodName) {
methodSuffix = "." + methodName;
}
var xmlHttp = newXmlHttp();
xmlHttp.open("GET", "%s/run?name=" + moduleName + "." + className + methodSuffix, true);
xmlHttp.onreadystatechange = function() {
if (xmlHttp.readyState != 4) {
return;
}
if (xmlHttp.status == 200) {
var result = eval("(" + xmlHttp.responseText + ")");
totalRuns += parseInt(result.runs);
totalErrors += result.errors.length;
totalFailures += result.failures.length;
document.getElementById("testran").innerHTML = totalRuns;
document.getElementById("testerror").innerHTML = totalErrors;
document.getElementById("testfailure").innerHTML = totalFailures;
if (totalErrors == 0 && totalFailures == 0) {
testSucceed();
} else {
testFailed();
}
var errors = result.errors;
var failures = result.failures;
var details = "";
for(var i=0; i<errors.length; i++) {
details += '<p><div class="error"><div class="errtitle">ERROR ' +
errors[i].desc +
'</div><div class="errdetail"><pre>'+errors[i].detail +
'</pre></div></div></p>';
}
for(var i=0; i<failures.length; i++) {
details += '<p><div class="error"><div class="errtitle">FAILURE ' +
failures[i].desc +
'</div><div class="errdetail"><pre>' +
failures[i].detail +
'</pre></div></div></p>';
}
var errorArea = document.getElementById("errorarea");
errorArea.innerHTML += details;
} else {
document.getElementById("errorarea").innerHTML = xmlHttp.responseText;
testFailed();
}
};
xmlHttp.send(null);
}
function testFailed() {
document.getElementById("testindicator").style.backgroundColor="red";
}
function testSucceed() {
document.getElementById("testindicator").style.backgroundColor="green";
}
function runTests() {
// Run each test asynchronously (concurrently).
var totalTests = 0;
for (var moduleName in testsToRun) {
var classes = testsToRun[moduleName];
for (var className in classes) {
// TODO: Optimize for the case where tests are run by class so we don't
// have to always execute each method separately. This should be
// possible when we have a UI that allows the user to select tests
// by module, class, and method.
//requestTestRun(moduleName, className);
methods = classes[className];
for (var i = 0; i < methods.length; i++) {
totalTests += 1;
var methodName = methods[i];
requestTestRun(moduleName, className, methodName);
}
}
}
document.getElementById("testtotal").innerHTML = totalTests;
}
</script>
<title>GAEUnit: Google App Engine Unit Test Framework</title>
</head>
<body onload="runTests()">
<div id="headerarea">
<div id="title">GAEUnit: Google App Engine Unit Test Framework</div>
<div id="version">Version %s</div>
</div>
<div id="resultarea">
<table id="results"><tbody>
<tr><td colspan="3"><div id="testindicator"> </div></td</tr>
<tr>
<td>Runs: <span id="testran">0</span>/<span id="testtotal">0</span></td>
<td>Errors: <span id="testerror">0</span></td>
<td>Failures: <span id="testfailure">0</span></td>
</tr>
</tbody></table>
</div>
<div id="errorarea"></div>
<div id="footerarea">
<div id="weblink">
<p>
Please visit the <a href="http://code.google.com/p/gaeunit">project home page</a>
for the latest version or to report problems.
</p>
<p>
Copyright 2008-2009 <a href="mailto:George.Z.Lei@Gmail.com">George Lei</a>
and <a href="mailto:srfarley@gmail.com">Steven R. Farley</a>
</p>
</div>
</div>
</body>
</html>
"""
##############################################################################
# Script setup and execution
##############################################################################
application = webapp.WSGIApplication([('%s' % _WEB_TEST_DIR, MainTestPageHandler),
('%s/run' % _WEB_TEST_DIR, JsonTestRunHandler),
('%s/list' % _WEB_TEST_DIR, JsonTestListHandler)],
debug=True)
def main():
run_wsgi_app(application)
if __name__ == '__main__':
main()
|
toomoresuch/pysonengine
|
parts/gaeunit/gaeunit.py
|
Python
|
mit
| 23,642
|
[
"VisIt"
] |
0fc87ff942c4bb4a0adc452ab1b0541c8cd2e0a0ed5fdacceb93153464c7e736
|
"""
Support for managing apps (as created with "0install add").
@since: 1.9
"""
# Copyright (C) 2012, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _, SafeException, logger
from zeroinstall.support import basedir, portable_rename
from zeroinstall.injector import namespaces, selections, qdom, model
import re, os, time, tempfile
# Avoid characters that are likely to cause problems (reject : and ; everywhere
# so that apps can be portable between POSIX and Windows).
valid_name = re.compile(r'''^[^./\\:=;'"][^/\\:=;'"]*$''')
def validate_name(name):
if valid_name.match(name): return
raise SafeException("Invalid application name '{name}'".format(name = name))
def _export(name, value):
"""Try to guess the command to set an environment variable."""
shell = os.environ.get('SHELL', '?')
if 'csh' in shell:
return "setenv %s %s" % (name, value)
return "export %s=%s" % (name, value)
def find_bin_dir(paths = None):
"""Find the first writable path in the list (default $PATH),
skipping /bin, /sbin and everything under /usr except /usr/local/bin"""
if paths is None:
paths = os.environ['PATH'].split(os.pathsep)
for path in paths:
if path.startswith('/usr/') and not path.startswith('/usr/local/bin'):
# (/usr/local/bin is OK if we're running as root)
pass
elif path.startswith('/bin') or path.startswith('/sbin'):
pass
elif os.path.realpath(path).startswith(basedir.xdg_cache_home):
pass # print "Skipping cache", first_path
elif not os.access(path, os.W_OK):
pass # print "No access", first_path
else:
break
else:
path = os.path.expanduser('~/bin/')
logger.warn('%s is not in $PATH. Add it with:\n%s' % (path, _export('PATH', path + ':$PATH')))
if not os.path.isdir(path):
os.makedirs(path)
return path
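# Sketch: with PATH=/usr/bin:/usr/local/bin:/home/user/bin, /usr/bin is
# skipped and the first remaining writable entry (/usr/local/bin, otherwise
# /home/user/bin) is returned.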
_command_template = """#!/bin/sh
exec 0install run {app} "$@"
"""
class AppScriptInfo:
"""@since: 1.12"""
name = None
command = None
def parse_script_header(stream):
"""If stream is a shell script for an application, return the app details.
@param stream: the executable file's stream (will seek)
@type stream: file-like object
@return: the app details, if any
@rtype: L{AppScriptInfo} | None
@since: 1.12"""
try:
stream.seek(0)
template_header = _command_template[:_command_template.index("{app}")]
actual_header = stream.read(len(template_header))
stream.seek(0)
if template_header == actual_header:
# If it's a launcher script, it should be quite short!
rest = stream.read()
line = rest.split('\n')[1]
else:
return None
except UnicodeDecodeError as ex:
logger.info("Not an app script '%s': %s", stream, ex)
return None
info = AppScriptInfo()
info.name = line.split()[3]
return info
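# Sketch: for a launcher generated from _command_template, e.g.
#   #!/bin/sh
#   exec 0install run myapp "$@"
# parse_script_header returns an AppScriptInfo whose name is 'myapp'
# ('myapp' being a hypothetical application name).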
class App:
def __init__(self, config, path):
self.config = config
self.path = path
def set_selections(self, sels, set_last_checked = True):
"""Store a new set of selections. We include today's date in the filename
so that we keep a history of previous selections (max one per day), in case
we want to roll back later."""
date = time.strftime('%Y-%m-%d')
sels_file = os.path.join(self.path, 'selections-{date}.xml'.format(date = date))
dom = sels.toDOM()
tmp = tempfile.NamedTemporaryFile(prefix = 'selections.xml-', dir = self.path, delete = False, mode = 'wt')
try:
dom.writexml(tmp, addindent=" ", newl="\n", encoding = 'utf-8')
except:
tmp.close()
os.unlink(tmp.name)
raise
tmp.close()
portable_rename(tmp.name, sels_file)
sels_latest = os.path.join(self.path, 'selections.xml')
if os.path.exists(sels_latest):
os.unlink(sels_latest)
os.symlink(os.path.basename(sels_file), sels_latest)
if set_last_checked:
self.set_last_checked()
def get_selections(self, snapshot_date = None, may_update = False):
"""Load the selections.
@param may_update: whether to check for updates
@type may_update: bool
@param snapshot_date: get a historical snapshot
@type snapshot_date: (as returned by L{get_history}) | None
@return: the selections
@rtype: L{selections.Selections}"""
if snapshot_date:
sels_file = os.path.join(self.path, 'selections-' + snapshot_date + '.xml')
else:
sels_file = os.path.join(self.path, 'selections.xml')
with open(sels_file, 'rb') as stream:
sels = selections.Selections(qdom.parse(stream))
if may_update:
sels = self._check_for_updates(sels)
return sels
def get_history(self):
"""Get the dates of the available snapshots, starting with the most recent.
@rtype: [str]"""
date_re = re.compile(r'selections-(\d\d\d\d-\d\d-\d\d)\.xml')
snapshots = []
for f in os.listdir(self.path):
match = date_re.match(f)
if match:
snapshots.append(match.group(1))
snapshots.sort(reverse = True)
return snapshots
def download_selections(self, sels):
"""Download any missing implementations.
@return: a blocker which resolves when all needed implementations are available
@rtype: L{tasks.Blocker} | None"""
return sels.download_missing(self.config) # TODO: package impls
def _check_for_updates(self, sels):
"""Check whether the selections need to be updated.
If any input feeds have changed, we re-run the solver. If the
new selections require a download, we schedule one in the
background and return the old selections. Otherwise, we return the
new selections. If we can select better versions without downloading,
we update the app's selections and return the new selections.
We also schedule a background update from time to time anyway.
@return: the selections to use
@rtype: L{selections.Selections}"""
need_solve = False # Rerun solver (cached feeds have changed)
need_update = False # Update over the network
utime = self._get_mtime('last-checked', warn_if_missing = True)
last_solve = max(self._get_mtime('last-solve', warn_if_missing = False), utime)
# Ideally, this would return all the files which were inputs into the solver's
# decision. Currently, we approximate with:
# - the previously selected feed files (local or cached)
# - configuration files for the selected interfaces
# - the global configuration
# We currently ignore feeds and interfaces which were
# considered but not selected.
# Can yield None (ignored), paths or (path, mtime) tuples.
# If this throws an exception, we will log it and resolve anyway.
def get_inputs():
for sel in sels.selections.values():
logger.info("Checking %s", sel.feed)
feed = iface_cache.get_feed(sel.feed)
if not feed:
raise IOError("Input %s missing; update" % sel.feed)
else:
if feed.local_path:
yield feed.local_path
else:
yield (feed.url, feed.last_modified)
# Per-feed configuration
yield basedir.load_first_config(namespaces.config_site, namespaces.config_prog,
'interfaces', model._pretty_escape(sel.interface))
# Global configuration
yield basedir.load_first_config(namespaces.config_site, namespaces.config_prog, 'global')
# If any of the feeds we used have been updated since the last check, do a quick re-solve
iface_cache = self.config.iface_cache
try:
for item in get_inputs():
if not item: continue
if isinstance(item, tuple):
path, mtime = item
else:
path = item
mtime = os.stat(path).st_mtime
if mtime and mtime > last_solve:
logger.info("Triggering update to %s because %s has changed", self, path)
need_solve = True
break
except Exception as ex:
logger.info("Error checking modification times: %s", ex)
need_solve = True
need_update = True
# Is it time for a background update anyway?
if not need_update:
staleness = time.time() - utime
logger.info("Staleness of app %s is %d hours", self, staleness / (60 * 60))
freshness_threshold = self.config.freshness
if freshness_threshold > 0 and staleness >= freshness_threshold:
need_update = True
if need_solve:
from zeroinstall.injector.driver import Driver
driver = Driver(config = self.config, requirements = self.get_requirements())
if driver.need_download():
# Continue with the current (hopefully cached) selections while we download
need_update = True
else:
old_sels = sels
sels = driver.solver.selections
from zeroinstall.support import xmltools
if not xmltools.nodes_equal(sels.toDOM(), old_sels.toDOM()):
self.set_selections(sels, set_last_checked = False)
self._touch('last-solve')
# If we tried to check within the last hour, don't try again.
if need_update:
last_check_attempt = self._get_mtime('last-check-attempt', warn_if_missing = False)
if last_check_attempt and last_check_attempt + 60 * 60 > time.time():
logger.info("Tried to check within last hour; not trying again now")
need_update = False
if need_update:
self.set_last_check_attempt()
from zeroinstall.injector import background
r = self.get_requirements()
background.spawn_background_update2(r, False, self)
return sels
def set_requirements(self, requirements):
import json
tmp = tempfile.NamedTemporaryFile(prefix = 'tmp-requirements-', dir = self.path, delete = False, mode = 'wt')
try:
json.dump(dict((key, getattr(requirements, key)) for key in requirements.__slots__), tmp)
except:
tmp.close()
os.unlink(tmp.name)
raise
tmp.close()
reqs_file = os.path.join(self.path, 'requirements.json')
portable_rename(tmp.name, reqs_file)
def get_requirements(self):
import json
from zeroinstall.injector import requirements
r = requirements.Requirements(None)
reqs_file = os.path.join(self.path, 'requirements.json')
with open(reqs_file, 'rt') as stream:
values = json.load(stream)
# Update old before/not-before values
before = values.pop('before', None)
not_before = values.pop('not_before', None)
if before or not_before:
assert not values.get('extra_restrictions')  # values is a plain dict here
expr = (not_before or '') + '..'
if before:
expr += '!' + before
values['extra_restrictions'] = {values['interface_uri']: expr}
for k, v in values.items():
setattr(r, k, v)
return r
def set_last_check_attempt(self):
self._touch('last-check-attempt')
def set_last_checked(self):
self._touch('last-checked')
def _touch(self, name):
timestamp_path = os.path.join(self.path, name)
fd = os.open(timestamp_path, os.O_WRONLY | os.O_CREAT, 0o644)
os.close(fd)
os.utime(timestamp_path, None) # In case file already exists
def _get_mtime(self, name, warn_if_missing = True):
timestamp_path = os.path.join(self.path, name)
try:
return os.stat(timestamp_path).st_mtime
except Exception as ex:
if warn_if_missing:
logger.warn("Failed to get time-stamp of %s: %s", timestamp_path, ex)
return 0
def get_last_checked(self):
"""Get the time of the last successful check for updates.
@return: the timestamp (or None on error)
@rtype: float | None"""
return self._get_mtime('last-checked', warn_if_missing = True)
def get_last_check_attempt(self):
"""Get the time of the last attempted check.
@return: the timestamp, or None if we updated successfully.
@rtype: float | None"""
last_check_attempt = self._get_mtime('last-check-attempt', warn_if_missing = False)
if last_check_attempt:
last_checked = self.get_last_checked()
if last_checked < last_check_attempt:
return last_check_attempt
return None
def destroy(self):
# Check for shell command
# TODO: remember which commands we own instead of guessing
name = self.get_name()
bin_dir = find_bin_dir()
launcher = os.path.join(bin_dir, name)
expanded_template = _command_template.format(app = name)
if os.path.exists(launcher) and os.path.getsize(launcher) == len(expanded_template):
with open(launcher, 'r') as stream:
contents = stream.read()
if contents == expanded_template:
#print "rm", launcher
os.unlink(launcher)
# Remove the app itself
import shutil
shutil.rmtree(self.path)
def integrate_shell(self, name):
# TODO: remember which commands we create
if not valid_name.match(name):
raise SafeException("Invalid shell command name '{name}'".format(name = name))
bin_dir = find_bin_dir()
launcher = os.path.join(bin_dir, name)
if os.path.exists(launcher):
raise SafeException("Command already exists: {path}".format(path = launcher))
with open(launcher, 'w') as stream:
stream.write(_command_template.format(app = self.get_name()))
# Make new script executable
os.chmod(launcher, 0o111 | os.fstat(stream.fileno()).st_mode)
def get_name(self):
return os.path.basename(self.path)
def __str__(self):
return '<app ' + self.get_name() + '>'
class AppManager:
def __init__(self, config):
self.config = config
def create_app(self, name, requirements):
validate_name(name)
apps_dir = basedir.save_config_path(namespaces.config_site, "apps")
app_dir = os.path.join(apps_dir, name)
if os.path.isdir(app_dir):
raise SafeException(_("Application '{name}' already exists: {path}").format(name = name, path = app_dir))
os.mkdir(app_dir)
app = App(self.config, app_dir)
app.set_requirements(requirements)
app.set_last_checked()
return app
def lookup_app(self, name, missing_ok = False):
"""Get the App for name.
Returns None if name is not an application (doesn't exist or is not a valid name).
Since / and : are not valid name characters, it is generally safe to try this
before calling L{injector.model.canonical_iface_uri}."""
if not valid_name.match(name):
if missing_ok:
return None
else:
raise SafeException("Invalid application name '{name}'".format(name = name))
app_dir = basedir.load_first_config(namespaces.config_site, "apps", name)
if app_dir:
return App(self.config, app_dir)
if missing_ok:
return None
else:
raise SafeException("No such application '{name}'".format(name = name))
|
michel-slm/0install
|
zeroinstall/apps.py
|
Python
|
lgpl-2.1
| 13,853
|
[
"VisIt"
] |
28d2093566e5ddf94065a250c7c659d36019b1d190e3e0d58e63868f93444f60
|
"""
Statistics.
"""
from functools import partial
import math
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as spsp
import scipy.stats as spst
import scipy.optimize as spop
"""
Plot style settings.
"""
_figwidth = 10
plt.rc('figure', figsize=[_figwidth,_figwidth/1.618], facecolor='1.0')
plt.rc('font', family='serif')
plt.rc('axes', color_cycle=['#33b5e5','#99cc00','#ff4444','#aa66cc','#ffbb33'])
plt.rc('lines', linewidth=1.5)
plt.rc('patch', linewidth=1.5)
def rms(x,y=None):
"""
Calculate the root mean square of a data set.
Arguments
---------
x -- array-like, required
y -- array-like, optional
If y is omitted, the RMS of x is calculated. If both x and y are
specified, x is interpreted as bin locations and y as the corresponding bin
values.
Returns
-------
rms -- float
"""
x = np.asarray(x)
if y is None:
return np.sqrt(np.mean(np.square(x)))
else:
y = np.asarray(y)
return np.sqrt( np.sum(y*np.square(x)) / np.sum(y) )
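# Usage sketch:
#   rms([3.0, 4.0])                  # unbinned: sqrt((9 + 16) / 2) ~= 3.536
#   rms([1.0, 2.0], y=[3.0, 1.0])    # binned: x are bin locations, y bin values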
def validate_dist(dist):
""" Convert a string to a scipy.stats distribution object. """
try:
dist = getattr(spst,dist)
except AttributeError:
raise ValueError('invalid distribution: ' + dist)
return dist
"""
Starting parameters for fitting the generalized gamma distribution.
Flow distributions seem to consistently have rms ~ scale.
order: a, c, loc, scale -- where a,c are shape params.
"""
spst.gengamma._fitstart = lambda *args: (1., 2., 0., rms(*args))
# fix loc = 0 when fitting gengamma
spst.gengamma.fit = partial(spst.gengamma.fit, floc=0)
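# Sketch: with loc fixed at 0, a fit now looks like
#   a, c, loc, scale = spst.gengamma.fit(samples)  # loc == 0 by construction
# where ``samples`` is a hypothetical 1-D array of flow values.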
class rice_gen(spst.rv_continuous):
"""
The Rice / Bessel-Gaussian distribution with standard parameterization.
Overrides scipy.stats.rice.
Parameters are named for flow distributions:
vrp -- v_n reaction-plane
dv -- delta v_n
The PDF is
f(v;vrp,dv) = v/dv^2 * exp(-(v^2+vrp^2)/(2*dv^2)) * I[0](v*vrp/dv^2)
for v, vrp, dv > 0.
The scipy location and scale parameters should be left fixed at defaults.
The fit method is set to do this automatically and only returns vrp,dv.
This is a bit of a hack but should be transparent to the user.
"""
def _pdf(self, v, vrp, dv, exp=np.exp, i0=spsp.i0):
dv2 = dv*dv
return v / dv2 * exp(-0.5*(v*v+vrp*vrp)/dv2) * i0(v*vrp/dv2)
def _logpdf(self, v, vrp, dv, log=np.log, i0=spsp.i0):
dv2 = dv*dv
return log(v/dv2) - 0.5*(v*v + vrp*vrp)/dv2 + log(i0(v*vrp/dv2))
def _argcheck(self,*args):
vrp,dv = args
return (vrp >= 0) & (dv > 0)
def _fitstart(self,data):
""" Rough starting fit parameters, based on ATLAS results. """
mean = data.mean()
std = data.std()
        # guard against std > mean, which would give a NaN starting value
        return np.sqrt(max(mean**2 - std**2, 0.)), std, 0, 1
def fit(self, data, *args, **kwargs):
""" Fit with fixed location and scale. """
return super().fit(data,floc=0,fscale=1)[:2]
spst.rice = rice_gen(a=0.0, name="rice", shapes="vrp,dv")
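# Illustrative sketch (editor's example; parameter values are hypothetical):
# draw a small sample from the Rice distribution defined above (scipy's
# generic sampler, so this is slow) and recover (vrp, dv) with the
# fixed-loc/scale fit.
#   sample = spst.rice.rvs(0.05, 0.02, size=500)
#   vrp, dv = spst.rice.fit(sample)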
class RawData:
"""
Store raw (unbinned) data and provide related methods.
Arguments
---------
data -- array-like, will be flattened
dist -- name of scipy distribution which is expected to describe the data
maxstd -- maximum allowed standard deviations from the mean;
points further away are removed
"""
def __init__(self,data,dist='rice',maxstd=10):
self.dist = validate_dist(dist)
# flatten
data = np.ravel(data)
# remove outliers and store
self.data = data[np.abs(data - data.mean()) < maxstd*data.std()]
@classmethod
def from_table(cls,data,dist='rice',**kwargs):
"""
Create several RawData instances from the columns of tabular data. Each
column is expected to correspond to a separate data set.
Arguments
---------
data -- array-like, file object, filename, or generator containing
tabular data
dist -- same as for __init__
kwargs -- passed to np.loadtxt if reading a file
Returns
-------
iterable of RawData instances for each column
"""
if any(isinstance(data,t) for t in (np.ndarray,list,tuple)):
data = np.asarray(data).T
else:
kwargs.update(unpack=True)
data = np.loadtxt(data,**kwargs)
data = np.atleast_2d(data)
return (cls(col,dist=dist) for col in data)
def describe(self):
""" Calculate mean and standard deviation. """
return self.data.mean(), self.data.std()
def ks(self,*args,**kwargs):
"""
Perform the Kolmogorov-Smirnov test for goodness of fit.
Arguments
---------
*args -- dist. parameters to test against
**kwargs -- for scipy.stats.kstest
Returns
-------
D,p -- KS test statistic, corresponding p-value
"""
return spst.kstest(self.data,self.dist.name,args=args,**kwargs)
def fit(self):
"""
Calculate MLE distribution parameters.
Returns
-------
*shapes, loc, scale -- as produced by scipy.stats.rv_continuous.fit
"""
if self.dist is spst.norm:
return self.describe()
else:
return self.dist.fit(self.data)
def plot(self):
""" Fit and plot the data. """
x = np.linspace(self.data.min(),self.data.max(),100)
params = self.fit()
plt.plot(x, self.dist.pdf(x, *params))
plt.hist(self.data, bins=int(2*self.data.size**.33),
histtype='step', normed=True)
plt.show()
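# Illustrative usage of RawData (editor's sketch; the sample is synthetic):
#   raw = RawData(spst.rice.rvs(0.05, 0.02, size=500), dist='rice')
#   mean, std = raw.describe()
#   vrp, dv = raw.fit()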
class BinnedData:
"""
    Store binned data and provide related methods.
Arguments
---------
data -- array-like with 2-5 columns, see below
dist -- name of scipy distribution which is expected to describe the data
The first two columns should contain x and y values; the remaining column[s]
should contain errors:
1 error column -- symmetrical errors
2 error columns -- high and low errors
3 error columns -- stat, syshigh, syslow errors [to be added in quadrature]
"""
def __init__(self,data,dist='rice'):
self.dist = validate_dist(dist)
data = np.asarray(data).T
try:
self.x = data[0]
self.y = data[1]
except IndexError:
raise ValueError('data must have 2-5 columns')
ncol = data.shape[0]
if ncol == 2:
self.errhigh = self.errlow = None
elif ncol == 3:
self.errhigh = self.errlow = data[2]
elif ncol == 4:
self.errhigh, self.errlow = data[2:]
elif ncol == 5:
stat, *sys = data[2:]
self.errhigh, self.errlow = (np.sqrt(stat*stat + s*s) for s in sys)
else:
raise ValueError('data must have 2-5 columns')
@classmethod
def from_file(cls,data,dist='rice',**kwargs):
"""
Create an instance from a tabular data file. Columns follow the same
format as in __init__.
Arguments
---------
data -- file object, filename, or generator containing tabular data
dist -- same as for __init__
kwargs -- passed to np.loadtxt
"""
kwargs.update(unpack=False)
data = np.loadtxt(data,**kwargs)
return cls(data,dist=dist)
def describe(self):
""" Calculate mean and standard deviation. """
w = np.sum(self.y)
mu = np.sum(self.x*self.y) / w
sigma = np.sqrt( np.sum( np.square(self.x - mu) * self.y) / w )
return mu, sigma
def fit(self):
"""
Calculate least-squares distribution parameters.
Returns
-------
*shapes, loc, scale -- as produced by scipy.optimize.curve_fit
"""
if self.dist is spst.norm:
return self.describe()
if self.dist is spst.gengamma:
# fix location parameter to zero
def f(x,*p):
return self.dist.pdf(x,p[0],p[1],0,p[-1])
p0 = self.dist._fitstart(self.x,self.y)
p0 = p0[:2] + p0[3:]
elif self.dist is spst.rice:
f = self.dist.pdf
mean, std = self.describe()
            # guard against std > mean, which would give a NaN starting value
            p0 = np.sqrt(max(mean**2 - std**2, 0.)), std
else:
f = self.dist.pdf
p0 = None
try:
sigma = np.maximum(self.errhigh,self.errlow)
except TypeError:
sigma = None
popt, pcov = spop.curve_fit(f, self.x, self.y, p0=p0, sigma=sigma)
popt = popt.tolist()
if self.dist is spst.gengamma:
popt.insert(2,0)
return tuple(popt)
def plot(self):
""" Fit and plot the data. """
X = np.linspace(0,self.x.max(),100)
params = self.fit()
yerr = self.errlow if self.errlow is self.errhigh \
else (self.errlow, self.errhigh)
plt.errorbar(self.x, self.y, yerr=yerr, fmt='o')
plt.plot(X, self.dist.pdf(X, *params))
plt.show()
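# Illustrative usage of BinnedData (editor's sketch; 'flow_hist.dat' is a
# hypothetical 3-column file of x, y, and symmetric errors):
#   binned = BinnedData.from_file('flow_hist.dat', dist='rice')
#   vrp, dv = binned.fit()
#   binned.plot()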
def unfold(dv=None,M=None):
"""
Unfold flow distributions by reducing width according to multiplicity.
Arguments
---------
dv -- array-like of Rice distribution widths
M -- array-like of multiplicities
Returns
-------
unfolded dv
"""
dv = np.asarray(dv)
M = np.asarray(M)
return np.sqrt(np.maximum(dv*dv - 0.5/M,1e-8))
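# Editor's note: the subtraction above implements the statistical-width
# correction dv_unfolded = sqrt(dv^2 - 1/(2M)), floored at 1e-8 so the
# square root stays real.  Illustrative call (hypothetical numbers):
#   unfold(dv=[0.05, 0.04], M=[300, 500])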
|
jbernhard/ebe-analysis
|
lib/stats.py
|
Python
|
mit
| 9,576
|
[
"Gaussian"
] |
65c951679b94fdd7d0ced9f5d57d1eb9aa7b27791f163250ec26727f588e7b9e
|
from Bio import SeqIO
from Bio.Alphabet import IUPAC
from Bio.Seq import Seq
from Bio import motifs
import gzip
import numpy as np
import random
#import multiprocessing
import itertools
import os
import sys
"""
A collection of utility functions that serve as useful bioinformatics
tools. There are functions here for random sequence generation and for
converting sequences into one-hot binary vectors for machine learning
applications.
"""
def seq_permutation(nuc_len):
nucs = 'ATGC'*nuc_len
all_perm = itertools.permutations(nucs,nuc_len)
for i in all_perm:
print ''.join(i)
def get_str_from_coords(SeqObj,start,end):
#print SeqObj
return str(SeqObj[start:end])
vectorDict = {'T':[ 1, 0, 0, 0 ],
't':[ 1, 0, 0, 0 ],
'U':[ 1, 0, 0, 0 ],
'u':[ 1, 0, 0, 0 ],
'C':[ 0, 1, 0, 0 ],
'c':[ 0, 1, 0, 0 ],
'A':[ 0, 0, 1, 0 ],
'a':[ 0, 0, 1, 0 ],
'G':[ 0, 0, 0, 1 ],
'g':[ 0, 0, 0, 1 ],
'N':[ 0.25, 0.25, 0.25, 0.25],
'n':[ 0.25, 0.25, 0.25, 0.25]
}
def seq_to_onehot(seqObj):
"""
Converts a Seq object into a set of four boolean one-hot vectors
vectorDict order is T C A G. Remember the proper command
for transposing a numpy array is myarray.transpose()
returns a nx4 onehot representation of the nucleotide sequences in question
"""
seq_str = str(seqObj)
onehot = np.zeros((len(seq_str),4),dtype='float32')
for i,letter in enumerate(seq_str):
onehot[i,:] = np.array(vectorDict[letter])
return onehot
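# Illustrative usage (editor's sketch):
#   seq_to_onehot(Seq("AT")) returns, row by row,
#   [[0, 0, 1, 0],   # A
#    [1, 0, 0, 0]]   # T
# following the vectorDict column order T C A G.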
###Methods for generating random nucleotide sequences, and random nucleotide
### sequences seeded with user specified motifs. These will be used to generate
### simulated training sets for validating machine learning efficacy
def seq_to_flat_onehot(seqObj):
#Does the same thing as seq_to_onehot, but flattens the output
seq = seq_to_onehot(seqObj)
return seq.ravel()
def fasta_to_flat_onehot(fname):
# Takes in fasta file and converts to numpy array
# > First output is a large numpy array with each
# row representing a flattened one-hot vector.
# Nucleotides are in the order TCAG
# Usage:
# fasta_to_flat_onehot('myfasta.fa')
seq_parser = SeqIO.parse(fname,"fasta")
#Convert generator fasta objects to large list of
#1xn (n=4*SEQLEN) onehot vectors
seq = seqio_to_flat_onehot(seq_parser)
#Close iterator handles
seq_parser.close()
return seq
def seqio_to_flat_onehot(bio_seqrecord_gener):
    # Takes a BioPython SeqIO generator and converts every element into an
    # nx4 one-hot array (columns ordered T, C, A, G, matching vectorDict),
    # then flattens/reshapes it into a 1x(n*4) numpy array vector.
#The second return value is the length of each nucleotide string
#The flat_onehot return value is a numpy array with each row being a different
#flattened one-hot vector representing a single fasta record.
#Examine first element to determine sequence length
rec0seq = bio_seqrecord_gener.next().seq
nuc_len = len(rec0seq)
vec_len = nuc_len*4
#Initialize width of output
flat_onehot = np.zeros((1,vec_len), dtype=np.bool)
#Set first row
flat_onehot[0,:] =seq_to_flat_onehot(rec0seq)
#Set the remaining records in the generator
for rec in bio_seqrecord_gener:
#Convert to one-hot
cur_rec = seq_to_flat_onehot(rec.seq)
#Flatten and append as row to
flat_onehot = np.vstack((flat_onehot,cur_rec))
return flat_onehot
def rand_dna_nuc(nuc_len, gc_fraction=0.5):
# Generates a random nucleotide sequence of fixed length with
# specified nucleotide percentage
# requires numpy
s_perc= (gc_fraction*.5)
w_perc = (.5*(1-gc_fraction))
nucs = ['T','C', 'A' , 'G']
    probs = [w_perc,s_perc,w_perc,s_perc]
return ''.join([np.random.choice(nucs,p=probs)
for _ in xrange(nuc_len)])
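# Illustrative usage (editor's sketch): a 10-mer with an expected 60% GC
# content, i.e. C and G each drawn with probability 0.3.
#   rand_dna_nuc(10, gc_fraction=0.6)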
def shuffle_batch_pull(batch_size,*args):
# Takes in a user specified batch_size number,
# and one or more numpy arrays that have the same number of rows
# Randomly pulls batch_size number of rows from input numpy_arrays
# args* here can be a variable number of numpy arrays
# This function will return random rows (with the same index)
# from each input numpy array.
# If you want to pull data from a different axis, you can specify that axis
#This function is used for pulling random batches for stochastic gradient
#descent
#Check if there are the same number of rows in each input numpy array
AXIS = 0
num_rows = args[0].shape[AXIS]
random_rows = np.random.choice(num_rows,batch_size, replace=False)
return_tuple = []
for i,np_arr in enumerate(args):
if np_arr.shape[0]!=num_rows:
print 'Numpy array row mismatch error. Exiting!'
return None
else:
return_tuple.append( np.take(np_arr, random_rows,axis=AXIS))
return tuple(return_tuple)
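# Illustrative usage (editor's sketch; arrays are synthetic):
#   data = np.arange(20).reshape(10, 2)
#   labels = np.arange(10)
#   batch_x, batch_y = shuffle_batch_pull(4, data, labels)
# batch_x and batch_y contain the same 4 randomly chosen row indices.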
def rand_motif_instance(motif):
#Takes a motif object, and pulls an instance randomly from it
#Note: A motif object can be created by
#mymotif = motifs.create(instances)
#where instances = [Seq("ATA"),Seq("AGA")]
return random.choice(motif.instances)
def rand_seed_motif(nuc_seq, motif, dist_from_tss=[-50,50],
orientation= 0,wobble=0):
#Places a specified motif within a specified nucleotide sequence
#Optional parameters are:
# >dist_from_tss - specifies how far away from center of nuc_seq
# the motif should be placed. Can be a list of positive and negative
# values
# >orientation = -1 to place reverse orientation, 1 for forward, 0 for
# random orientation
# >wobble = number of nucleotides the motif can be out of register
# with respect to dist_from_tss
#Note: The 5' end of the motif is always inserted at dist_from_tss
#Regardless of orientation
#motif needs to be a Biopython motif object
nuc_len = len(nuc_seq)
motif_len = len(motif)
tss_center = np.floor(nuc_len*.5)
#Set motif orientation
if orientation ==0:
orientation = int(random.choice([-1,1]) )
if orientation == -1:
motif = motif.reverse_complement()
elif orientation == 1:
pass
elif (orientation >1 or orientation < -1):
print ("Orientation out of bounds. Needs to be -1,0,or 1")
return None
insertion_points = [tss_center + each_dist for each_dist in dist_from_tss]
#Apply wobble to insertion points
insertion_points = [int(each_point) +random.randint(0,wobble)for each_point in insertion_points]
#Check bounds, exit if dist from tss too far
for each_point in insertion_points:
if each_point < 0 or each_point+motif_len>nuc_len:
print("Motif insertion point out of bounds. Exiting...")
return None
    #Pick a random instance from the motif and insert it at each insertion
    #point, replacing the original sequence at that position
    for each_point in insertion_points:
        instance = str(rand_motif_instance(motif))
        nuc_seq = ( nuc_seq[:int(each_point)]+
            instance+nuc_seq[int(each_point+motif_len):] )
    return nuc_seq
#Important python note: my_list[random.randint(1,len(my_list))-1] for
# randomly selecting an item in a list.
#Or just use random.choice(my_list)
def motif_to_convfilter(motifObj,filter_width):
#Return a python list of numpy convolution filters
#This method will go into all the instances in the bioPython
    # motif, and make a corresponding [1,filter_width,4,1] convolutional
    # filter where dim_2 = 4 represents the 4 nucleotide
    # letters (equivalent to colors in image processing)
# I am using this method strictly for testing tensorflow's
# convolution functions on nucleotide sequences
filter_list = []
for instance in motifObj.instances:
filter_list.append(seq_to_convfilter(instance,filter_width))
return filter_list
def seq_to_convfilter(seqObj,filter_width=9):
    onehot = seq_to_onehot(seqObj)  # shape [seq_len,4]
#We want to convert this one_hot of shape [filter_width,4]
# to a [1,filter_width,4,1] filter (remember the last dim,
#dim_3 is used by tensorflow in the case you have multiple filters).
#Use numpy indexing to put this one_hot representation in the correct
#dims of the 4d matrix
seq_len = len(seqObj)
if seq_len>filter_width:
print "Motif is longer than specified filter width"
print "quitting"
return
elif seq_len == filter_width:
pass
elif seq_len<filter_width:
#If the filter_width>seq_len
#pad the onehot array with zeros on the top and bottom
#So that the number of rows == filter_width
diff = filter_width-seq_len
#Calculate row index for centering sequence on filter
        centering_ind = int(np.floor(filter_width/2)-np.floor(seq_len/2))
temp = np.zeros(shape=(filter_width,4))
#This confusing line of code essentially drops the one_hot motif
#representation into the middle of the filter
temp[centering_ind:(onehot.shape[0]+centering_ind),:] = onehot
onehot=temp
#Initialize empty ndarray
convfilter = np.empty(shape=(1,filter_width,4,1))
convfilter[0,:,:,0]=onehot
return convfilter #shape [1,filter_width,4,1]
def seq_to_4d_onehot(seqObj):
# Convert Seq object (BioPython format) to a numpy ndarray
# with the shape [batch_size=1,height=1,width=seq_len,num_channels=4]
    onehot = seq_to_onehot(seqObj) #shape is [seq_len,4]
return onehot[np.newaxis,np.newaxis,:,:]
def fasta_to_4d_onehot(fname):
'''
Converts a fasta file into an ndarray with the shape
[num_fasta_entries,height=1,seq_len,num_channels =4]
dim3 (num_channels) is for each nucleotide letter, with the
ordering TCAG
'''
seq_parser = SeqIO.parse(fname,"fasta")
seq_dict =SeqIO.index(fname,"fasta")
num_records = len(seq_dict)
#Convert generator fasta objects to large list of
#1xn (n=4*SEQLEN) onehot vectors
seq = seqio_to_4d_onehot(seq_parser,num_records)
#Close iterator handles
seq_parser.close()
seq_dict.close()
return seq
def seqio_to_4d_onehot(bio_seqrecord_gener,num_records):
'''
Converts a fasta file into an ndarray with the shape
[num_fasta_entries,height=1,seq_len,num_channels =4]
dim3 (num_channels) is for each nucleotide letter, with the
ordering TCAG
num_records parameter is necessary for numpy array preallocation
'''
#Use BioPython indexing to determine the number of records in
#The generator object
#Examine first element to determine sequence length and
#use that information to initialize empty numpy ndarray
first_rec = bio_seqrecord_gener.next().seq
seq_len = len(first_rec)
#Preallocate array (this makes things memory efficient)
onehot_4d = np.zeros((num_records,1,seq_len,4),dtype='float32')
onehot_4d[0,:,:,:] = seq_to_4d_onehot(first_rec)
#Set the remaining records in the generator
for i,rec in enumerate(bio_seqrecord_gener):
#Look at first entry to determine nucleotide sequence length
#Convert each sequence to one-hot
onehot_4d[i+1,:,:,:]=seq_to_4d_onehot(rec.seq)
return onehot_4d
def onehot_to_nuc(onehot):
'''Converts a onehot 4xn array to a nucleotide sequence'''
seq_len = onehot.shape[1]
nuc_string = np.repeat('',seq_len)
#onehot = (onehot >= 1.) #Convert to bool mask
T_mask = onehot[0,:] == 1
C_mask = onehot[1,:] == 1
A_mask = onehot[2,:] == 1
G_mask = onehot[3,:] == 1
N_mask = ( (onehot[0,:] == 0.25)+
(onehot[1,:] == 0.25)+
(onehot[2,:] == 0.25)+
(onehot[3,:] == 0.25) )
nuc_string[T_mask] = 'T'
nuc_string[C_mask] = 'C'
nuc_string[A_mask] = 'A'
nuc_string[G_mask] = 'G'
nuc_string[N_mask] = 'N'
return nuc_string.tostring()
def onehot_4d_to_nuc(onehot_4d,output_file=None,include_fasta_header=False):
#Converts ndarray with shape
#[num_fasta_entries,height=1,seq_len,num_channels =4]
#into a fasta sequence.
stdout_old = sys.stdout
if (output_file !=None):
#Output to stdout
sys.stdout = open(output_file,'w')
onehot_4d = np.squeeze(onehot_4d,axis=1)
#[num_fasta_entries,seq_len,num_channels =4]
num_entries = onehot_4d.shape[0]
all_nucs = []
for i in range(num_entries):
nuc_string = np.repeat('',onehot_4d.shape[1])
T_mask = onehot_4d[i,:,0] == 1
C_mask = onehot_4d[i,:,1] == 1
A_mask = onehot_4d[i,:,2] == 1
G_mask = onehot_4d[i,:,3] == 1
N_mask = ((onehot_4d[i,:,0] == .25)+
(onehot_4d[i,:,1] == .25)+
(onehot_4d[i,:,2] == .25)+
(onehot_4d[i,:,3] == .25))
nuc_string[T_mask]='T'
nuc_string[C_mask]='C'
nuc_string[A_mask]='A'
nuc_string[G_mask]='G'
nuc_string[N_mask]='N'
if (include_fasta_header == True):
print '>seq',i
nucleotides = nuc_string.tostring()
print nucleotides
all_nucs.append(nucleotides)
sys.stdout = stdout_old
return all_nucs
def extract_n_classes_fasta(fname_list):
'''
Convert list of fasta files to one-hot vectors,
and creates one-hot labels for each class.
Labels are ordered based on file inputs.
Each fasta file gets a unique label.
'''
if type(fname_list) is not list:
fname_list = [fname_list]
rec_onehot = []
labels_list = []
num_classes= len(fname_list)
for i,fname in enumerate(fname_list):
# fasta_to_4d_onehot Converts each fasta file into an
# ndarray with the shape:
# [num_fasta_entries,height=1,seq_len,num_channels =4]
rec_onehot.append(fasta_to_4d_onehot(fname))
label_block = np.zeros((rec_onehot[i].shape[0],num_classes),dtype=np.bool_)
#Convert column corresponding to current index to all ones
label_block[:,i] = np.ones(label_block.shape[0],dtype=np.bool_)
labels_list.append(label_block)
#Vertical stack data
#Reshape python one-hot list into numpy matrix by vertical stacking of entries.
data = np.concatenate(rec_onehot,axis=0)
labels = np.concatenate(labels_list,axis=0)
return(data,labels,num_classes)
def divide_fasta(fname_list, fractions = [.6,.2,.2], suffix_list=['train','test','validation']):
'''
    Divide each fasta file into multiple fasta files according to the
    fractions list, labeling each output file with the matching suffix.
'''
    if len(fractions)!= len(suffix_list):
        print('Size mismatch between fraction division list and suffix list!')
        return None
    num_outputs = len(fractions)
if sum(fractions)> 1:
print 'Error! Values in \'fractions\' must add up to <= 1'
return None
#Validate list
if type(fname_list) is not list:
print "Converting to list"
fname_list = [fname_list]
for i,file in enumerate(fname_list):
print 'Opening file', file
handle = open(file, "rU")
records = list(SeqIO.parse(handle,"fasta"))
handle.close()
random.shuffle(records)
num_records = len(records)
print ('File ',file,' contains ', len(records), ' records.')
prev_record_index = 0
for k in range(num_outputs):
#Divide the original data set into the values specified by
#the fractions list
            num_cur_records = int(np.floor(num_records*fractions[k]))
lower_range = prev_record_index
upper_range = prev_record_index+num_cur_records
prev_record_index = upper_range
            _write_fasta_file(file,suffix_list[k],
                records[lower_range:upper_range])
def retrieve_files_on_dir (dir,ext):
'''
    Retrieve all files on dir with the given extension
'''
dir_files = os.listdir(dir)
return [(dir+os.sep+f) for f in dir_files if f.endswith(ext)]
def _write_fasta_file(input_fname,name_extension,seqio_records_list):
fname, file_extension = os.path.splitext(input_fname)
output_fname = fname+'_'+name_extension+file_extension
output_handle = open(output_fname,"w")
SeqIO.write(seqio_records_list,output_handle,"fasta")
output_handle.close()
def count_lines_file(filename):
'''Counts the number of lines in a file'''
#From http://stackoverflow.com/questions/845058/how-to-get-line-count-cheaply-in-python
file_ext = os.path.splitext(filename)[1]
if file_ext == '.gz' or file_ext=='.gzip':
f = gzip.open(filename,'r')
else:
f = open(filename,'r')
lines = 0
buf_size = 1024 * 1024
read_f = f.read # loop optimization
buf = read_f(buf_size)
while buf:
lines += buf.count('\n')
buf = read_f(buf_size)
f.close()
return lines
def extract_seq_by_coords(genome_fasta, chr_key,coord_tuple):
""""
Extract sequence from fasta file. User must provide header key
(typically the chromosome index). This method opens and loads the file on
each call, so use it sparingly!
"""
    genome_dict = SeqIO.index(genome_fasta,"fasta")
    seq = genome_dict[str(chr_key)].seq[coord_tuple[0]:coord_tuple[1]]
    genome_dict.close()
    return seq
def split_fasta(fasta_file):
'''Splits a single fasta file into multiple fasta files
with each output file representing one fasta entry.
Useful for splitting genomic data into constituent chromosomes'''
for entry in SeqIO.parse(fasta_file,'fasta'):
output_fname = entry.id+'.fa'
with open(output_fname,'w') as out_file:
out_file.write('>'+entry.id+'\n')
out_file.write(str(entry.seq))
def contig_sizes_to_dict(sizes_fname):
contig_size_dict = {}
with open(sizes_fname,'r') as f:
for line in f:
contig,contig_len = line.strip().split()
contig_size_dict[contig]=int(contig_len)
return contig_size_dict
def numeric_labels_to_onehot(labels):
"""
Given a list of labels like [0,1,3,2]
Convert to onehot numpy array where each row
corresponds to label
There must not be gaps in the sequence of numbers
(ie: [0,1,2, 4,5] is not allowed)
"""
    labels = np.asarray(labels)
    num_labels = labels.max()+1
    #A gap-free label set contains exactly the values 0..max
    if not np.array_equal(np.unique(labels),np.arange(num_labels)):
        print "Error. Gap in label sequence", labels
        return
    labels_onehot = np.zeros((len(labels),num_labels))
    labels_onehot[np.arange(len(labels)),labels] = 1.
    return labels_onehot
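# Illustrative usage (editor's sketch):
#   numeric_labels_to_onehot([0, 1, 2]) returns
#   [[1., 0., 0.],
#    [0., 1., 0.],
#    [0., 0., 1.]]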
def numeric_labels_to_onehot_list(labels):
'''
Convert numeric labels to list of np.array onehot arrays
'''
labels_onehot = numeric_labels_to_onehot(labels)
num_rows = labels_onehot.shape[0]
out_list = []
for row in range(num_rows):
out_list.append(labels_onehot[row,:])
return out_list
def get_chrom_list(chrom_sizes_file):
chrom_list=[]
with open( chrom_sizes_file,'r') as csf:
for line in csf:
if line.startswith('#') or line.startswith('>'):
pass
else:
                pl = line.rstrip().split('\t')
                chrom_list.append(pl[0])
return chrom_list
def chr_sizes_dict(chrom_sizes_file):
'''
Create a dict from a chrom sizes file
where key is chrom, and value is size
'''
chrom_size_dict = dict()
with open( chrom_sizes_file,'r') as csf:
for line in csf:
if line.startswith('#') or line.startswith('>'):
pass
else:
pl = line.rstrip().split('\t')
chrom_size_dict[pl[0]]=int(pl[1])
return chrom_size_dict
def check_bed_bounds(bed_file,
chr_sizes_dict,
up_pad=0,
down_pad=0,
min_bound=0,
skip_first_line=True):
"""
Check the bounds of every coordinate within a bed file to make sure
no values exceed the chromosome size
Args:
bed_file: A '.bed' file
chr_sizes_dict: A python dict with key 'chr*' and value=chromosome_size
up_pad: Amount of padding to add to start of each coord (default=0)
        down_pad: Amount of padding to add to end of each coord (default=0)
        min_bound: Value to take as minimum coord for each chromosome
Return:
num_valid_entries: The number of valid entries
"""
with open(bed_file,'r') as bf:
num_valid_entries = 0
if skip_first_line:
bf.readline()
for line in bf:
if line.startswith('#') or line.startswith('>'):
pass
else:
bl = line.rstrip().split('\t')
contig = bl[0]
start = int(bl[1])+int(up_pad)
end = int(bl[2])+int(down_pad)
max_bound = chr_sizes_dict[contig]
if start<min_bound or end>max_bound:
print 'Invalid entry detected',(contig,start,end)
else:
num_valid_entries += 1
return num_valid_entries
|
LarsDu/DeepNuc
|
deepnuc/dubiotools.py
|
Python
|
gpl-3.0
| 21,845
|
[
"Biopython"
] |
d7da1324bfd1860a53e03fedabfb280c2056bf8a7296294852c9f414a5d055ee
|
#import matplotlib.pyplot as plt
import numpy as np
from collections import deque
import numbers
"""
Created on Jun 29, 2016
@author: hans-werner
"""
def convert_to_array(x, dim=None, return_is_singleton=False):
"""
Convert point or list of points to a numpy array.
Inputs:
x: (list of) point(s) to be converted to an array. Allowable inputs are
1. a list of Vertices,
2. a list of tuples,
3. a list of numbers or (2,) arrays
            4. a numpy array of the appropriate size
dim: int, (1 or 2) optional number used to adjudicate ambiguous cases.
return_is_singleton: bool, if True, return whether the input x is a
singleton.
Outputs:
x: double, numpy array containing the points in x.
If x is one-dimensional (i.e. a list of 1d Vertices, 1-tuples, or
a 1d vector), convert to an (n,1) array.
        If x is two-dimensional (i.e. a list of 2d Vertices, 2-tuples, or
a 2d array), return an (n,2) array.
"""
is_singleton = False
if type(x) is list:
#
# Points in list
#
if all(isinstance(xi, Vertex) for xi in x):
#
# All points are of type vertex
#
x = [xi.coordinates() for xi in x]
x = np.array(x)
elif all(type(xi) is tuple for xi in x):
#
# All points are tuples
#
x = np.array(x)
        elif all(isinstance(xi, numbers.Real) for xi in x):
#
# List of real numbers -> turn into (n,1) array
#
x = np.array(x)
x = x[:,np.newaxis]
elif all(type(xi) is np.ndarray for xi in x):
#
# list of (2,) arrays
#
x = np.array(x)
else:
            raise Exception('For x, use arrays or lists '+\
                'of tuples or vertices.')
elif isinstance(x, Vertex):
#
# A single vertex
#
x = np.array([x.coordinates()])
is_singleton = True
elif isinstance(x, numbers.Real):
if dim is not None:
assert dim==1, 'Dimension should be 1.'
x = np.array([[x]])
is_singleton = True
elif type(x) is tuple:
#
# A tuple
#
if len(x)==1:
#
            # A 1-tuple
#
x, = x
x = np.array([[x]])
is_singleton = True
elif len(x)==2:
#
# A tuple
#
x,y = x
x = np.array([[x,y]])
is_singleton = True
elif type(x) is np.ndarray:
#
# Points in numpy array
#
if len(x.shape)==1:
#
# x is a one-dimensional vector
if len(x)==1:
#
# x is a vector with one entry
#
if dim is not None:
assert dim==1, 'Incompatible dimensions'
x = x[:,np.newaxis]
            elif len(x) == 2:
#
# x is a vector 2 entries: ambiguous
#
if dim == 2:
#
# Turn 2-vector into a (1,2) array
#
x = x[np.newaxis,:]
else:
#
# Turn vector into (2,1) array
#
x = x[:,np.newaxis]
else:
#
# Turn vector into (n,1) array
#
x = x[:,np.newaxis]
elif len(x.shape)==2:
assert x.shape[1]<=2,\
'Dimension of array should be at most 2'
else:
raise Exception('Only 1- or 2 dimensional arrays allowed.')
if return_is_singleton:
# Specify whether x is a singleton
return x, is_singleton
else:
return x
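# Illustrative usage of convert_to_array (editor's sketch):
#   convert_to_array([(0, 0), (1, 1)])        -> (2,2) array of 2d points
#   convert_to_array(np.array([1., 2., 3.])) -> (3,1) column of 1d points
#   convert_to_array(0.5, dim=1)              -> (1,1) array (singleton)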
class Markable(object):
"""
Description: Any object that can be assigned a flag
"""
def __init__(self):
"""
Constructor
"""
self.__flag = None
    def mark(self, flag):
        """
        Assign the given flag to the object
        """
        pass
    def unmark(self, flag):
        """
        Remove flag
        """
        pass
    def is_marked(self, flag):
        """
        Determine whether the object is marked with the given flag
        """
        pass
class Tree(object):
"""
Description: Tree object for storing and manipulating adaptively
refined quadtree meshes.
Attributes:
node_type: str, specifying node's relation to parents and/or children
'ROOT' (no parent node),
'BRANCH' (parent & children), or
'LEAF' (parent but no children)
address: int, list allowing access to node's location within the tree
General form [k0, k1, ..., kd], d=depth, ki in [0,...,n_children_i]
address = [] if ROOT node.
depth: int, depth within the tree (ROOT nodes are at depth 0).
parent: Tree/Mesh whose child this is
children: list of child nodes.
flag: set, of str/int/bool allowing tree nodes to be marked.
"""
def __init__(self, n_children=None, regular=True, flag=None,
parent=None, position=None, forest=None):
"""
Constructor
"""
#
# Set some attributes
#
self._is_regular = regular
self._parent = parent
self._forest = None
self._in_forest = False
self._node_position = position
#
# Set flags
#
self._flags = set()
if flag is not None:
if type(flag) is set:
# Add all flags in set
for f in flag:
self.mark(f)
else:
# Add single flag
self.mark(flag)
if parent is None:
#
# ROOT Tree
#
self._node_type = 'ROOT'
self._node_depth = 0
self._node_address = []
if self.is_regular():
# Ensure that the number of ROOT children is specified
assert n_children is not None, \
'ROOT node: Specify number of children.'
else:
# Not a regular tree: number of children 0 initially
n_children = 0
if forest is not None:
#
# Tree contained in a Forest
#
assert isinstance(forest, Forest), \
                    'Input forest must be an instance of the Forest class.'
#
# Add tree to forest
#
forest.add_tree(self)
self._in_forest = True
self._forest = forest
self._node_address = [self.get_node_position()]
else:
#
# Free standing ROOT cell
#
assert self.get_node_position() is None, \
'Unattached ROOT cell has no position.'
#
# Assign space for children
#
self._children = [None]*n_children
self._n_children = n_children
else:
#
# LEAF Node
#
position_missing = 'Position within parent cell must be specified.'
assert self.get_node_position() is not None, position_missing
self._node_type = 'LEAF'
# Determine cell's depth and address
self._node_depth = parent.get_depth() + 1
self._node_address = parent.get_node_address() + [position]
if regular:
#
# Regular tree -> same number of children in every generation
#
if n_children is not None:
assert n_children == self.get_parent().n_children(),\
                    'Regular tree: number of children must ' + \
                    "match the parent's."
else:
n_children = self.get_parent().n_children()
else:
n_children = 0
#
# Assign space for children
#
self._children = [None]*n_children
self._n_children = n_children
# Change parent type (from LEAF)
if parent.get_node_type() == 'LEAF':
parent.set_node_type('BRANCH')
def info(self):
"""
Display essential information about Tree
"""
print('')
print('-'*50)
print('Tree Info')
print('-'*50)
print('{0:10}: {1}'.format('Address', self._node_address))
print('{0:10}: {1}'.format('Type', self._node_type))
if self._node_type != 'ROOT':
print('{0:10}: {1}'.format('Parent', \
self.get_parent().get_node_address()))
print('{0:10}: {1}'.format('Position', self._node_position))
print('{0:10}: {1}'.format('Flags', self._flags))
if self.has_children():
child_string = ''
for i in range(len(self._children)):
child = self.get_child(i)
if child is not None:
child_string += str(i) + ': 1, '
else:
child_string += str(i) + ': 0, '
print('{0:10}: {1}'.format('Children',child_string))
else:
child_string = 'None'
print('{0:10}: {1}'.format('Children',child_string))
print('')
def get_node_type(self):
"""
Returns whether node is a ROOT, a BRANCH, or a LEAF
"""
return self._node_type
def get_node_position(self):
"""
Returns position of current node within parent/forest
"""
return self._node_position
def set_node_type(self, node_type):
"""
Sets a node's type
"""
assert node_type in ['ROOT', 'BRANCH', 'LEAF'], \
'Input "node_type" should be "ROOT", "BRANCH", or "LEAF".'
if node_type == 'ROOT':
assert not self.has_parent(), \
'ROOT nodes should not have a parent.'
elif node_type == 'LEAF':
assert not self.has_children(), \
'LEAF nodes should not have children.'
elif node_type == 'BRANCH':
assert self.has_parent(),\
'BRANCH nodes should have a parent.'
self._node_type = node_type
def get_node_address(self):
"""
Return address of the node
"""
return self._node_address
def get_depth(self):
"""
Return depth of current node
"""
return self._node_depth
def tree_depth(self, flag=None):
"""
Return the maximum depth of the tree
"""
depth = self.get_depth()
if self.has_children():
for child in self.get_children(flag=flag):
                d = child.tree_depth(flag=flag)
if d > depth:
depth = d
return depth
def in_forest(self):
"""
Determine whether a (ROOT)cell lies within a forest
"""
return self._in_forest
def get_forest(self):
"""
Returns the forest containing the node
"""
return self._forest
def plant_in_forest(self, forest, position):
"""
Modify own attributes to reflect node's containment within a forest
"""
assert self.get_node_type() == 'ROOT', \
'Only ROOT nodes are in the forest.'
self._node_position = position
self._node_address = [position]
self._in_forest = True
self._forest = forest
def remove_from_forest(self):
"""
Remove node from forest
"""
self._in_forest = False
self._node_position = None
self._node_address = []
self._forest = None
def is_regular(self):
"""
Determine whether node is a regular tree, that is all subnodes
have the same number of children.
"""
return self._is_regular
def mark(self, flag=None, recursive=False, reverse=False):
"""
Mark Tree and its progeny/ancestors
Inputs:
flag: int, optional label used to mark node
recursive: bool, also mark all sub-/super nodes
"""
if flag is None:
#
# No flag specified: add "True" flag
#
self._flags.add(True)
else:
#
# Add given flag
#
self._flags.add(flag)
#
# Add flag to progeny/parents
#
if recursive:
if reverse:
#
# Mark ancestors
#
if self.has_parent():
parent = self.get_parent()
parent.mark(flag=flag, recursive=recursive, \
reverse=reverse)
else:
#
# Mark progeny
#
if self.has_children():
for child in self.get_children():
child.mark(flag=flag, recursive=recursive)
def unmark(self, flag=None, recursive=False, reverse=False):
"""
Unmark Cell
Inputs:
flag: label to be removed
recursive: bool, also unmark all subcells
"""
#
# Remove label from own list
#
if flag is None:
# No flag specified -> delete all
self._flags.clear()
else:
# Remove specified flag (if present)
if flag in self._flags: self._flags.remove(flag)
#
# Remove label from children if applicable
#
if recursive:
if reverse:
#
# Unmark ancestors
#
if self.has_parent():
parent = self.get_parent()
parent.unmark(flag=flag, recursive=recursive, \
reverse=reverse)
else:
#
# Unmark progeny
#
if self.has_children():
for child in self.get_children():
child.unmark(flag=flag, recursive=recursive)
def is_marked(self,flag=None):
"""
Check whether cell is marked
        Input: flag, label for Tree: usually one of the following:
            True (catchall), 'split' (split node), 'count' (counting)
"""
if flag is None:
# No flag -> check whether set is empty
if self._flags:
return True
else:
return False
else:
            # Check whether the given label is contained in the node's set
return flag in self._flags
def has_parent(self, flag=None):
"""
Returns True if node has (flagged) parent node, False otherwise
"""
if flag is not None:
return self._parent is not None and self._parent.is_marked(flag)
else:
return self._parent is not None
def get_parent(self, flag=None):
"""
Return cell's parent, or first ancestor with given flag (None if there
are none).
"""
if flag is None:
if self.has_parent():
return self._parent
else:
            if self.has_parent():
parent = self._parent
if parent.is_marked(flag):
return parent
else:
return parent.get_parent(flag=flag)
def get_root(self):
"""
Find the ROOT cell for a given cell
"""
if self._node_type == 'ROOT':
return self
else:
return self._parent.get_root()
def has_children(self, position=None, flag=None):
"""
Determine whether node has children
Inputs:
position: int, position of the child node within self
flag: str/int/bool, required marker for positive answer
Output:
has_children: bool, true if self has (marked) children, false
otherwise.
"""
if position is None:
#
# Check for any children
#
if flag is None:
return any(child is not None for child in self._children)
else:
#
# Check for flagged children
#
for child in self._children:
if child is not None and child.is_marked(flag):
return True
return False
else:
#
# Check for child in specific position
#
# Ensure position is valid
assert position < self._n_children, \
'Position exceeds the number of children.'
if flag is None:
#
# No flag specified
#
return self._children[position] is not None
else:
#
# With flag
#
return (self._children[position] is not None) and \
self._children[position].is_marked(flag)
def get_child(self, position):
"""
Return the child in a given position
"""
        assert -self.n_children() <= position < self.n_children(), \
            'Input "position" exceeds number of children.'
return self._children[position]
def get_children(self, flag=None, reverse=False):
"""
Iterator: Returns (flagged) children, in (reverse) order
Inputs:
flag: [None], optional marker
reverse: [False], option to list children in reverse order
(useful for the 'traverse' function).
Note: Only returns children that are not None
Use this to obtain a consistent iteration of children
"""
if self.has_children(flag=flag):
if not reverse:
#
# Go in usual order
#
for child in self._children:
if child is not None:
if flag is None:
yield child
elif child.is_marked(flag):
yield child
else:
#
# Go in reverse order
#
for child in reversed(self._children):
if child is not None:
if flag is None:
yield child
elif child.is_marked(flag):
yield child
def n_children(self):
"""
Returns the number of children
"""
return self._n_children
def remove(self):
"""
Remove node (self) from parent's list of children
"""
assert self.get_node_type() != 'ROOT', 'Cannot delete ROOT node.'
self.get_parent()._children[self._node_position] = None
def add_child(self):
"""
Add a child to current node (only works if node is not regular).
"""
assert not self.is_regular(),\
'Regular tree: add children by method "split".'
child = Tree(parent=self, regular=False, position=self.n_children())
self._children.append(child)
self._n_children += 1
def delete_children(self, position=None):
"""
Delete all sub-nodes of given node
"""
#
# Change children to None
#
if position is None:
for child in self.get_children():
child.remove()
else:
assert position < self.n_children(), \
'Position exceeds number of children '
child = self._children[position]
child.remove()
#
# Change node type from LEAF to BRANCH
#
if self._node_type == 'BRANCH' and not self.has_children():
self._node_type = 'LEAF'
def split(self, n_children=None):
"""
Split node into subnodes
"""
if self.is_regular():
#
# Regular tree: Number of grandchildren inherited
#
for i in range(self.n_children()):
#
# Instantiate Children
#
self._children[i] = Tree(parent=self, position=i)
else:
#
# Not a regular tree: Must specify number of children
#
assert self.n_children() == 0, \
'Cannot split irregular tree with children. ' + \
'Use "add_child" method.'
for i in range(n_children):
#
# Instantiate Children
#
self.add_child()
def traverse(self, queue=None, flag=None, mode='depth-first'):
"""
Iterator: Return current cell and all its (flagged) sub-cells
Inputs:
flag [None]: cell flag
mode: str, type of traversal
'depth-first' [default]: Each cell's progeny is visited before
proceeding to next cell.
'breadth-first': All cells at a given depth are returned before
proceeding to the next level.
Output:
all_nodes: list, of all nodes in tree (marked with flag).
"""
if queue is None:
queue = deque([self])
while len(queue) != 0:
if mode == 'depth-first':
node = queue.pop()
elif mode == 'breadth-first':
node = queue.popleft()
else:
raise Exception('Input "mode" must be "depth-first"'+\
' or "breadth-first".')
if node.has_children():
reverse = True if mode=='depth-first' else False
for child in node.get_children(reverse=reverse):
queue.append(child)
if flag is not None:
if node.is_marked(flag):
yield node
else:
yield node
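    # Illustrative usage (editor's sketch): a regular binary tree split
    # once yields the nodes below in depth-first order.
    #   root = Tree(n_children=2)
    #   root.split()
    #   [n.get_node_address() for n in root.traverse()]   # -> [[], [0], [1]]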
def get_leaves(self, flag=None, subtree_flag=None, mode='breadth-first'):
"""
Return all marked LEAF nodes (nodes with no children) of current subtree
Inputs:
*flag: If flag is specified, return all leaf nodes within rooted
subtree marked with flag (or an empty list if there are none).
*subtree_flag: Label specifying the rooted subtree (rs) within which
to search for (flagged) leaves.
*mode: Method by which to traverse the tree ('breadth-first' or
'depth-first').
Outputs:
leaves: list, of LEAF nodes.
Note:
The rooted subtree must contain all ancestors of a marked node
"""
#
# Get all leaves of the subtree
#
leaves = []
for node in self.traverse(flag=subtree_flag, mode=mode):
#
# Iterate over all sub-nodes within subtree
#
if not node.has_children(flag=subtree_flag):
#
# Nodes without marked children are the subtree leaves
#
leaves.append(node)
#
# Return marked leaves
#
if flag is None:
return leaves
else:
return [leaf for leaf in leaves if leaf.is_marked(flag)]
def make_rooted_subtree(self, flag):
"""
Mark all ancestors of flagged node with same flag, to turn flag into
a subtree marker.
"""
#
# Search through all nodes
#
for node in self.get_root().traverse(mode='breadth-first'):
if node.is_marked(flag):
#
# If node is flagged, mark all its ancestors
#
ancestor = node
while ancestor.has_parent():
ancestor = ancestor.get_parent()
ancestor.mark(flag)
def is_rooted_subtree(self, flag):
"""
Determine whether a given flag defines a rooted subtree
Note: This takes roughly the same amount of work as make_rooted_subtree
"""
#
# Search through all nodes
#
for node in self.get_root().traverse(mode='breadth-first'):
if node.is_marked(flag):
#
# Check that ancestors of flagged node are also marked
#
ancestor = node
while ancestor.has_parent():
ancestor = ancestor.get_parent()
if not ancestor.is_marked(flag):
#
# Ancestor not marked: not a rooted subtree
#
return False
#
# No problems: it's a rooted subtree
#
return True
def find_node(self, address):
"""
Locate node by its address
"""
node = self.get_root()
if address != []:
#
# Not the ROOT node
#
for a in address:
if node.has_children() and a in range(node.n_children()):
node = node.get_child(a)
else:
return None
return node
def nearest_ancestor(self, flag):
"""
Returns the nearest ancestor with given flag
"""
if flag is None:
return self
candidate = self
while not candidate.is_marked(flag):
if candidate.get_depth()==0:
return None
else:
candidate = candidate.get_parent()
return candidate
def contains(self, tree):
"""
Determine whether self contains a given node
"""
if tree.get_depth() < self.get_depth():
return False
elif tree == self:
return True
else:
while tree.get_depth() > self.get_depth():
tree = tree.get_parent()
if self == tree:
return True
#
# Reached the end
#
return False
def coarsen(self, subforest_flag=None, coarsening_flag=None,
new_label=None, clean_up=True, debug=False):
"""
Coarsen tree by
"""
if subforest_flag is not None:
#
# Subforest specified
#
if not self.is_marked(subforest_flag):
#
# Tree not in subforest, nothing to coarsen
#
return
#
# Check whether to coarsen
#
coarsen = False
if coarsening_flag is not None:
#
# Check whether tree is flagged (if applicable)
#
if self.is_marked(coarsening_flag):
coarsen = True
else:
#
# Are children LEAF nodes?
#
if self.has_children(flag=subforest_flag):
#
# Check if children are in subforest
#
for child in self.get_children():
#
# All children have to be LEAF nodes
#
coarsen = True
if child.get_node_type()!='LEAF':
coarsen = False
break
if new_label is not None:
#
# Apply new label to node (regardless of whether to coarsen)
#
self.mark(new_label)
if coarsen:
#
# Coarsen tree
#
if new_label is not None:
#
# If new_label specified, don't mess with children
#
pass
elif subforest_flag is not None:
#
# Remove subforest flag from children
#
for child in self.get_children():
child.unmark(subforest_flag)
else:
#
# Delete children
#
self.delete_children()
if coarsening_flag is not None and clean_up:
#
# Remove coarsening flag if necessary
#
self.unmark(coarsening_flag)
else:
#
# Recursion step, check children
#
if self.has_children(flag=subforest_flag):
for child in self.get_children():
child.coarsen(subforest_flag=subforest_flag,
coarsening_flag=coarsening_flag,
new_label=new_label,
clean_up=clean_up,
debug=debug)
class Forest(object):
"""
Collection of Trees
"""
def __init__(self, trees=None, n_trees=None):
"""
Constructor
"""
if trees is not None:
#
# List of trees specified
#
assert type(trees) is list, 'Trees should be passed as a list.'
self._trees = []
for tree in trees:
self.add_tree(tree)
elif n_trees is not None:
#
# No trees specified, only the number of slots
#
            assert type(n_trees) is int and n_trees > 0,\
                'Input "n_trees" should be a positive integer.'
self._trees = [None]*n_trees
else:
#
# No trees specified: create an empty list.
#
self._trees = []
def n_children(self):
"""
Return the number of trees
"""
return len(self._trees)
def is_regular(self):
"""
Determine whether the forest contains only regular trees
"""
for tree in self._trees:
if not tree.is_regular():
return False
return True
def depth(self):
"""
Determine the depth of the largest tree in the forest
"""
current_depth = 0
for tree in self.get_children():
new_depth = tree.tree_depth()
if new_depth > current_depth:
current_depth = new_depth
return current_depth
def traverse(self, flag=None, mode='depth-first'):
"""
Iterator: Visit every (flagged) node in the forest
Inputs:
flag [None]: node flag
mode: str, type of traversal
'depth-first' [default]: Each node's progeny is visited before
proceeding to next cell.
'breadth-first': All nodes at a given depth are returned before
proceeding to the next level.
Output:
all_nodes: list, of all nodes in tree (marked with flag).
"""
if mode=='depth-first':
queue = deque(reversed(self._trees))
elif mode=='breadth-first':
queue = deque(self._trees)
else:
raise Exception('Input "mode" must be "depth-first"'+\
' or "breadth-first".')
while len(queue) != 0:
if mode == 'depth-first':
node = queue.pop()
elif mode == 'breadth-first':
node = queue.popleft()
if node.has_children():
reverse = True if mode=='depth-first' else False
for child in node.get_children(reverse=reverse):
queue.append(child)
if flag is not None:
if node.is_marked(flag):
yield node
else:
yield node
def get_leaves(self, flag=None, subforest_flag=None, mode='breadth-first'):
"""
Return all marked LEAF nodes (nodes with no children) of current subtree
Inputs:
*flag: If flag is specified, return all leaf nodes within rooted
subtree marked with flag (or an empty list if there are none).
*subforest_flag: Label specifying the rooted subtrees (rs) within which
to search for (flagged) leaves.
Outputs:
leaves: list, of LEAF nodes.
Note:
The rooted subtree must contain all ancestors of a marked node
"""
#
# Get all leaves of the subtree
#
leaves = []
for node in self.traverse(flag=subforest_flag, mode=mode):
if not node.has_children(flag=subforest_flag):
leaves.append(node)
#
# Return marked leaves
#
if flag is None:
return leaves
else:
return [leaf for leaf in leaves if leaf.is_marked(flag)]
def root_subtrees(self, flag):
"""
Mark all ancestors of flagged node with same flag, to turn flag into
a subtree marker.
Note: If no node is flagged, then only flag the root nodes.
"""
#
# Search through all nodes
#
for root_node in self.get_children():
#
# Mark all root nodes with flag
#
root_node.mark(flag)
for node in root_node.traverse():
#
# Look for marked subnodes
#
if node.is_marked(flag):
#
# If node is flagged, mark all its ancestors & siblings
#
ancestor = node
while ancestor.has_parent():
ancestor = ancestor.get_parent()
# Mark ancestor
ancestor.mark(flag)
for child in ancestor.get_children():
# Mark siblings
child.mark(flag)
def subtrees_rooted(self, flag):
"""
Determine whether a given flag defines a rooted subtree
Note: This takes roughly the same amount of work as make_rooted_subtree
"""
if flag is None:
#
            # The forest as a whole is always a union of rooted subtrees
#
return True
#
# Search through all nodes
#
for root_node in self.get_children():
#
# Check if root nodes are marked
#
if not root_node.is_marked(flag):
return False
else:
for node in root_node.traverse():
if node.is_marked(flag):
#
                    # Check that ancestors and siblings of flagged node
# are also marked
#
ancestor = node
while ancestor.has_parent():
ancestor = ancestor.get_parent()
if not ancestor.is_marked(flag):
#
# Ancestor not marked: not a rooted subtree
#
return False
for child in ancestor.get_children():
if not child.is_marked(flag):
#
# Sibling not marked
#
return False
#
# No problems: it's a forest of rooted subtrees
#
return True
def find_node(self, address):
"""
Locate a tree node by its address
Inputs:
address: list of branches along which to find node in tree
"""
# Reverse address
address = address[::-1]
node = self
while len(address)>0:
a = address.pop()
if node.has_children():
if a not in range(node.n_children()):
return None
else:
node = node.get_child(a)
return node
def has_children(self, flag=None):
"""
Determine whether the forest contains any trees
"""
if len(self._trees) > 0:
if flag is None:
return True
else:
return any(tree for tree in self.get_children(flag=flag))
else:
return False
def get_child(self, position):
"""
Returns the tree at a given position
"""
        assert type(position) is int, \
            'Input "position" should be a nonnegative integer.'
        assert position < len(self._trees),\
            'Input "position" exceeds number of trees.'
return self._trees[position]
def get_children(self, flag=None, reverse=False):
"""
Iterate over (all) (flagged) trees in the forest
"""
if not reverse:
if flag is None:
return self._trees
else:
children = []
for tree in self._trees:
if tree.is_marked(flag):
children.append(tree)
return children
        else:
            if flag is None:
                return self._trees[::-1]
            else:
                children = []
                for tree in reversed(self._trees):
                    if tree.is_marked(flag):
                        children.append(tree)
                return children
def add_tree(self, tree):
"""
Add a new tree to the current forest
"""
assert isinstance(tree, Tree), \
'Can only add trees to the forest.'
self._trees.append(tree)
tree.plant_in_forest(self, self.n_children()-1)
def remove_tree(self, position):
"""
Remove a tree from the forest.
"""
        assert type(position) is int, \
            'Input "position" should be an integer.'
assert position < len(self._trees), \
'Input "position" exceeds number of trees.'
tree = self.get_child(position)
tree.remove_from_forest()
del self._trees[position]
def record(self, flag):
"""
Mark all trees in current forest with flag
"""
for tree in self.get_children():
tree.mark(flag, recursive=True)
def coarsen(self, subforest_flag=None, coarsening_flag=None,
new_label=None, clean_up=True, debug=False):
"""
Coarsen (sub)forest (delimited by 'subforest_flag', by (possibly)
merging (=deleting or unlabeling the siblings of) children of nodes
marked with 'coarsening_flag' and labeling said nodes with new_label.
If subforest_flag is None, coarsen all nodes
If new_label is None, then:
- either remove subforest flag (if there is one), or
- delete child nodes
Inputs:
*subforest_flag: flag, specifying the subforest being coarsened.
*coarsening_flag: flag, specyfying nodes in subforest whose children
are to be deleted/unmarked.
*new_label: flag, specifying the new subforest.
*clean_up: bool, remove coarsening_flag after use.
"""
#
# Ensure the subforest is rooted
#
if subforest_flag is not None:
self.root_subtrees(subforest_flag)
for tree in self.get_children():
tree.coarsen(subforest_flag=subforest_flag,
coarsening_flag=coarsening_flag,
new_label=new_label,
clean_up=clean_up, debug=debug)
"""
if coarsening_flag is not None:
#
# Coarsen
#
for tree in self.get_children():
coarsened = False
if tree.is_marked(coarsening_flag):
#
# Coarsen tree
#
if new_label is not None:
#
# Mark tree with new label and move on
#
tree.mark(new_label)
continue
elif subforest_flag is not None:
#
# Remove subforest flag from progeny
#
for child in tree.get_children(subforest_flag):
child.unmark(subforest_flag, recursive=True)
else:
#
# Delete children
#
tree.delete_children()
# Record coarsened
coarsened = True
elif tree.get_node_type()=='LEAF':
#
# Already a leaf: no need to coarsen
#
coarsened = True
while not coarsened:
if subforest_flag is not None:
if tree.has_children(subforest_flag):
for child in tree.get_children():
else:
pass
for child in tree.get_children():
pass
else:
#
# Don't coarsen yet, go to children
#
if tree.has_children(subforest_flag):
for child in tree.get_children():
pass
if clean_up:
to_clean = []
#
# Look for marked leaves within the submesh
#
for leaf in self.get_leaves(subforest_flag=subforest_flag):
#
# During coarsening, some leaves may already be unmarked
#
if debug:
print('leaf info')
leaf.info()
if subforest_flag is not None:
if not leaf.is_marked(subforest_flag):
continue
#
# Find nodes that must be coarsened
#
if not leaf.has_parent():
if debug:
print('ROOT Node')
#
# Leaf without parent is a ROOT: must be part of the new mesh.
#
if new_label is not None:
#
# Mark leaf with new_label
#
leaf.mark(new_label)
if clean_up and coarsening_flag is not None:
#
# Remove coarsening flag
#
to_clean.append(leaf)
# On to the next leaf
continue
#
# Can get parent
#
parent = leaf.get_parent()
if debug:
print('LEAF has parent')
parent.info()
#
# Determine whether to coarsen
#
if coarsening_flag is None:
coarsen = True
elif parent.is_marked(coarsening_flag):
coarsen = True
if clean_up:
#
# Remove coarsening flag
#
parent.unmark(coarsening_flag, recursive=True)
else:
coarsen = False
if debug:
print('Coarsen', coarsen)
if not coarsen:
#
# Don't coarsen
#
if new_label is not None:
#
# Apply new label to leaf and siblings
#
for child in parent.get_children():
child.mark(new_label)
# Move to the next LEAF
continue
else:
#
# Coarsen
#
if subforest_flag is None and new_label is None:
#
# Delete marked node's children
#
parent.delete_children()
if debug:
print('Deleting children')
parent.info()
elif new_label is None:
#
# Remove 'subforest_label' from leaf and siblings
#
for child in parent.get_children():
child.unmark(subforest_flag)
if debug:
print('Removing subforest_flag')
for child in parent.get_children():
print(child.is_marked(subforest_flag))
else:
#
# Mark parents with new_label
#
parent.mark(new_label)
if debug:
print('Marking parent with new label')
parent.info()
if clean_up and coarsening_flag is not None:
#
# Remove coarsening flag
#
parent.unmark(coarsening_flag)
if debug:
print('removing flag', coarsening_flag)
parent.info()
#
# Apply new label to coarsened submesh if necessary
#
if new_label is not None:
self.root_subtrees(new_label)
"""
def refine(self, subforest_flag=None, refinement_flag=None, new_label=None,
clean_up=True):
"""
Refine (sub)forest (delimited by 'subforest_flag'), by (possibly)
splitting (subforest)nodes with refinement_flag and marking their
children (with new_label).
Inputs:
subforest_flag: flag, used to specify the subforest being refined
refinement_flag: flag, specifying the nodes within the submesh that
are being refined.
new_label: flag, new label to be applied to refined submesh
clean_up: bool, remove the "refinement_flag" once the cell is split.
"""
#
# Ensure that the subforest is rooted
#
if subforest_flag is not None:
self.root_subtrees(subforest_flag)
#
# Look for marked leaves within the submesh
#
for leaf in self.get_leaves(subforest_flag=subforest_flag):
#
# Mark tree with new label to ensure new forest contains old subforest
#
if new_label is not None:
leaf.mark(new_label)
#
# If the refinement flag is used, ensure that the node is marked
# before continuing.
#
if refinement_flag is not None:
if not leaf.is_marked(refinement_flag):
continue
#
# Add new children if necessary
#
if not leaf.has_children():
leaf.split()
#
# Label each (new) child
#
for child in leaf.get_children():
if new_label is None and subforest_flag is None:
#
# No labels specified: do nothing
#
continue
elif new_label is None:
#
# No new label given, use the subforest label
#
child.mark(subforest_flag)
else:
#
# New label given, mark child with new label
#
child.mark(new_label)
#
# Remove refinement flag
#
if refinement_flag is not None and clean_up:
leaf.unmark(refinement_flag)
#
# Label ancestors of newly labeled children
#
if new_label is not None:
self.root_subtrees(new_label)
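# Illustrative workflow (editor's sketch; the flag names are hypothetical):
#   forest = Forest([Tree(n_children=2)])
#   forest.record('coarse')                    # label the current forest
#   for leaf in forest.get_leaves(flag='coarse'):
#       leaf.mark('refine')
#   forest.refine(subforest_flag='coarse',
#                 refinement_flag='refine',
#                 new_label='fine')            # 'fine' marks the refined subforest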
class Vertex(object):
"""
Description:
Attributes:
coordinates: double, tuple (x,y)
flag: boolean
Methods:
"""
def __init__(self, coordinates):
"""
Description: Constructor
Inputs:
coordinates: double tuple, x- and y- coordinates of vertex
on_boundary: boolean, true if on boundary
"""
if isinstance(coordinates, numbers.Real):
#
# Coordinate passed as a real number 1D
#
dim = 1
coordinates = (coordinates,) # recast coordinates as tuple
elif type(coordinates) is tuple:
#
# Coordinate passed as a tuple
#
dim = len(coordinates)
assert dim <= 2, 'Only 1D and 2D meshes supported.'
else:
raise Exception('Enter coordinates as a number or a tuple.')
self.__coordinate = coordinates
self._flags = set()
self.__dim = dim
self.__periodic_pair = set()
self.__is_periodic = False
def coordinates(self):
"""
Return coordinates tuple
"""
return self.__coordinate
def dim(self):
"""
Return the dimension of the vertex
"""
return self.__dim
def mark(self, flag=None):
"""
Mark Vertex
Inputs:
flag: int, optional label
"""
if flag is None:
self._flags.add(True)
else:
self._flags.add(flag)
def unmark(self, flag=None):
"""
Unmark Vertex
Inputs:
flag: label to be removed
"""
#
# Remove label from own list
#
if flag is None:
# No flag specified -> delete all
self._flags.clear()
else:
# Remove specified flag (if present)
if flag in self._flags: self._flags.remove(flag)
def is_marked(self,flag=None):
"""
Check whether Vertex is marked
        Input: flag, label for Vertex: usually one of the following:
            True (catchall), 'split' (split cell), 'count' (counting)
"""
if flag is None:
# No flag -> check whether set is empty
if self._flags:
return True
else:
return False
else:
            # Check whether the given label is contained in the vertex's set
return flag in self._flags
def is_periodic(self):
"""
Determine whether a Vertex lies on a periodic boundary
"""
return self.__is_periodic
def set_periodic(self, periodic=True):
"""
Label vertex periodic
"""
self.__is_periodic = periodic
def set_periodic_pair(self, cell_vertex_pair):
"""
Pair a periodic vertex with its periodic counterpart. The periodic
pair can be accessed by specifying the neighboring interval (in 1D)
or cell (in 2D).
Inputs:
cell_vertex_pair: (cell, vertex) tuple, where
In 1D: cell is the Interval on which the paired vertex resides
In 2D: cell is the Cell adjoining the paired vertex
vertex: Vertex, the periodic counterpart of self
See also: get_periodic_pair
"""
assert self.is_periodic(), 'Vertex should be periodic.'
if self.dim()==1:
#
# 1D: There is only one pairing for the entire mesh
#
interval, vertex = cell_vertex_pair
assert isinstance(vertex, Vertex), \
'Input "vertex" should be of class "Vertex".'
assert isinstance(interval, Interval), \
'Input "interval" should be of class "Interval".'
assert vertex.is_periodic(), \
'Input "vertex" should be periodic.'
#
# 1D: Store periodic pair
#
self.__periodic_pair.add((interval, vertex))
elif self.dim()==2:
#
# 2D
#
c_nb, v_nb = cell_vertex_pair
assert isinstance(v_nb, Vertex), \
'Input "cell_vertex_pair[1]" should be of class "Vertex".'
assert isinstance(c_nb, Cell), \
'Input "cell_vertex_pair[0]" should be of class "HalfEdge".'
assert v_nb.is_periodic(), \
'Input "cell_vertex_pair[1]" should be periodic.'
#
# Collect all possible c/v pairs in a set
#
cell_vertex_pairs = v_nb.get_periodic_pair().union(set([cell_vertex_pair]))
assert len(cell_vertex_pairs)!=0, 'Set of pairs should be nonempty'
for c_nb, v_nb in cell_vertex_pairs:
#
# Check whether v_nb already in list
#
in_list = False
for c, v in self.get_periodic_pair():
if v==v_nb and c.contains(c_nb):
#
# Vertex already appears in list
#
in_list = True
break
if not in_list:
#
# Not in list, add it
#
self.__periodic_pair.add((c_nb, v_nb))
def get_periodic_pair(self, cell=None):
"""
Returns the other vertex that is mapped onto self through periodicity
Input:
cell: Cell/HalfEdge in which paired vertex resides
"""
if cell is None:
#
# Return all cell, vertex pairs
#
return self.__periodic_pair
else:
#
# Return all paired vertices within a given cell
#
vertices = [v for c, v in self.__periodic_pair if c==cell]
return vertices
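#
# Minimal Vertex usage sketch (1D and 2D construction, marking):
#
def _example_vertex():
    v1 = Vertex(0.5)           # 1D: coordinates recast as the tuple (0.5,)
    v2 = Vertex((0.5, 1.0))    # 2D vertex
    assert v1.dim() == 1 and v2.dim() == 2
    # Flags are stored in a set; mark() with no argument adds True
    v2.mark('on_boundary')
    assert v2.is_marked('on_boundary') and not v2.is_marked('split')
    v2.unmark('on_boundary')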
class HalfEdge(Tree):
"""
Description: Half-Edge in Quadtree mesh
Attributes:
base: Vertex, at base
head: Vertex, at head
twin: HalfEdge, in adjoining cell pointing from head to base
cell: QuadCell, lying to half edge's left
Methods:
"""
def __init__(self, base, head, cell=None, previous=None, nxt=None,
twin=None, parent=None, position=None, n_children=2,
regular=True, forest=None, flag=None, periodic=False):
"""
Constructor
Inputs:
base: Vertex, at beginning
head: Vertex, at end
parent: HalfEdge, parental
cell: QuadCell, lying to the left of half edge
previous: HalfEdge, whose head is self's base
nxt: HalfEdge, whose base is self's head
twin: Half-Edge, in adjoining cell pointing from head to base
position: int, position within parental HalfEdge
n_children: int, number of sub-HalfEdges
regular: bool, do all tree subnodes have the same no. of children?
forest: Forest, clever list of trees containing self
flag: (set of) int/string/bool, used to mark half-edge
periodic: bool, True if HalfEdge lies on a periodic boundary
"""
#
# Initialize Tree structure
#
Tree.__init__(self, n_children=n_children, regular=regular,
parent=parent, position=position, forest=forest, flag=flag)
#
# Assign head and base
#
self.set_vertices(base, head)
#
# Check parent
#
if parent is not None:
assert isinstance(parent, HalfEdge), \
'Parent should be a HalfEdge.'
#
# Assign incident cell
#
if cell is not None:
assert isinstance(cell, Cell), \
'Input "cell" should be a Cell object.'
self.__cell = cell
else:
# Initialize to None so that cell() is always well-defined
self.__cell = None
#
# Assign previous half-edge
#
if previous is not None:
assert isinstance(previous, HalfEdge), \
'Input "previous" should be a HalfEdge object.'
assert self.base()==previous.head(),\
'Own base should equal previous head.'
self.__previous = previous
else:
# Initialize to None so that previous() is always well-defined
self.__previous = None
#
# Assign next half-edge
#
if nxt is not None:
assert isinstance(nxt, HalfEdge), \
'Input "nxt" should be a HalfEdge object.'
assert self.head()==nxt.base(), \
'Own head should equal base of next.'
self.__next = nxt
else:
# Initialize to None so that next() is always well-defined
self.__next = None
#
# Mark periodic
#
self.__is_periodic = periodic
#
# Assign twin half-edge
#
if twin is not None:
assert isinstance(twin, HalfEdge), \
'Input "twin" should be a HalfEdge object.'
self.assign_twin(twin)
else:
self.__twin = None
def is_periodic(self):
"""
Returns True if the HalfEdge lies on a periodic boundary
"""
return self.__is_periodic
def set_periodic(self, periodic=True):
"""
Flag HalfEdge as periodic
"""
self.__is_periodic = periodic
def pair_periodic_vertices(self):
"""
Pair up HalfEdge vertices that are periodic
"""
if self.is_periodic():
#
# Pair up periodic vertices along half_edge
#
cell = self.cell()
cell_nb = self.twin().cell()
assert cell_nb is not None,\
'Periodic HalfEdge: Neighboring cell should not be None.'
#
# Pair up adjacent vertices
#
for v, v_nb in [(self.base(), self.twin().head()),
(self.head(), self.twin().base())]:
# Label vertices 'periodic'
v.set_periodic()
v_nb.set_periodic()
# Add own vertex-cell pair to own set of periodic pairs
v.set_periodic_pair((cell, v))
v_nb.set_periodic_pair((cell_nb, v_nb))
# Add adjoining vertex-cell pair to set of periodic pairs
v.set_periodic_pair((cell_nb, v_nb))
v_nb.set_periodic_pair((cell, v))
def base(self):
"""
Returns half-edge's base vertex
"""
return self.__base
def head(self):
"""
Returns half-edge's head vertex
"""
return self.__head
def get_vertices(self):
"""
Returns all half-edge vertices
"""
return [self.__base, self.__head]
def set_vertices(self, base, head):
"""
Define base and head vertices
"""
assert isinstance(base, Vertex) and isinstance(head, Vertex),\
'Inputs "base" and "head" should be Vertex objects.'
self.__base = base
self.__head = head
def cell(self):
"""
Returns the cell containing half-edge
"""
return self.__cell
def assign_cell(self, cell):
"""
Assign cell to half-edge
"""
self.__cell = cell
def twin(self):
"""
Returns the half-edge's twin
"""
return self.__twin
def assign_twin(self, twin):
"""
Assigns twin to half-edge
"""
if not self.is_periodic():
assert self.base()==twin.head() and self.head()==twin.base(),\
'Own head vertex should be equal to twin base vertex & vice versa.'
self.__twin = twin
def delete_twin(self):
"""
Deletes half-edge's twin
"""
self.__twin = None
def make_twin(self):
"""
Construct a twin HalfEdge
"""
assert not self.is_periodic(), \
'Twin HalfEdge of a periodic HalfEdge may have different vertices.'
if self.has_parent() and self.get_parent().twin() is not None:
twin_parent = self.get_parent().twin()
twin_position = 1-self.get_node_position()
else:
twin_parent = None
twin_position = None
twin = HalfEdge(self.head(), self.base(), parent=twin_parent,
position=twin_position)
self.assign_twin(twin)
twin.assign_twin(self)
return twin
def next(self):
"""
Returns the next half-edge, whose base is current head
"""
return self.__next
def assign_next(self, nxt):
"""
Assigns half edge to next
"""
if nxt is None:
return
else:
if not self.is_periodic():
assert self.head() == nxt.base(), \
'Own head vertex is not equal to next base vertex.'
self.__next = nxt
if nxt.previous() != self:
nxt.assign_previous(self)
def previous(self):
"""
Returns previous half-edge, whose head is current base
"""
return self.__previous
def assign_previous(self, previous):
"""
Assigns half-edge to previous
"""
if previous is None:
return
else:
if not self.is_periodic():
assert self.base() == previous.head(), \
'Own base vertex is not equal to previous head vertex.'
self.__previous = previous
if previous.next()!=self:
previous.assign_next(self)
def split(self):
"""
Refine current half-edge (overwrite Tree.split)
Note:
This function could potentially be generalized to HalfEdges with
multiple children (already implemented for Intervals).
"""
#
# Check if twin has been split
#
twin_split = False
twin = self.twin()
if twin is not None and twin.has_children():
t0, t1 = twin.get_children()
twin_split = True
else:
t0, t1 = None, None
#
# Determine whether to inherit midpoint vertex
#
if twin_split and not self.is_periodic():
#
# Share twin's midpoint Vertex
#
vm = t0.head()
else:
#
# Compute new midpoint vertex
#
x = convert_to_array([self.base().coordinates(),\
self.head().coordinates()])
xm = 0.5*(x[0,:]+x[1,:])
vm = Vertex(tuple(xm))
#
# Define own children and combine with twin children
#
c0 = HalfEdge(self.base(), vm, parent=self, twin=t1, position=0, periodic=self.is_periodic())
c1 = HalfEdge(vm, self.head(), parent=self, twin=t0, position=1, periodic=self.is_periodic())
#
# Assign new HalfEdges to twins if necessary
#
if twin_split:
t0.assign_twin(c1)
t1.assign_twin(c0)
#
# Save the babies
#
self._children[0] = c0
self._children[1] = c1
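#
# Usage sketch (illustrative): splitting a HalfEdge whose twin was
# already split reuses the twin's midpoint Vertex:
#
#   he = HalfEdge(Vertex((0, 0)), Vertex((1, 0)))
#   twin = he.make_twin()
#   twin.split()
#   he.split()
#   c0, c1 = he.get_children()
#   t0, t1 = twin.get_children()
#   assert c0.head() is t0.head()   # shared midpoint at (0.5, 0)
#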
def to_vector(self):
"""
Returns the vector associated with the HalfEdge
"""
x = convert_to_array([self.base().coordinates(),\
self.head().coordinates()])
return x[1,:] - x[0,:]
def length(self):
"""
Returns the HalfEdge's length
"""
return np.linalg.norm(self.to_vector())
def unit_normal(self):
"""
Returns the unit normal vector of HalfEdge, pointing to the right
Note: This only works in 2D
"""
x0, y0 = self.base().coordinates()
x1, y1 = self.head().coordinates()
u = np.array([y1-y0, x0-x1])
return u/np.linalg.norm(u, 2)
def contains_points(self, points):
"""
Determine whether points lie on a HalfEdge
Inputs:
points: double, tuple, list of tuples, or (n,dim) array
Output:
in_half_edge: bool, (n,) array, True where the point lies on
the HalfEdge.
"""
tol = 1e-10
x0 = convert_to_array(self.base().coordinates())
v = self.to_vector()
dim = x0.shape[1]
p = convert_to_array(points, dim)
n_points = p.shape[0]
in_half_edge = np.ones(n_points, dtype=bool)
if np.abs(v[0])<tol:
#
# Vertical line
#
assert np.abs(v[1])>tol, 'Half-edge is too short'
# Locate y-coordinate along segment
t = (p[:,1]-x0[:,1])/v[1]
# Discard points whose location parameter t is not in [0,1]
in_half_edge[np.abs(t-0.5)>0.5] = False
# Discard points whose x-values don't lie on Edge
in_half_edge[np.abs(p[:,0]-x0[0,0])>tol] = False
elif dim==1 or np.abs(v[1])<tol:
#
# Horizontal line
#
assert np.abs(v[0])>tol, 'Half-edge is too short'
# Locate x-coordinate along line
t = (p[:,0]-x0[:,0])/v[0]
# Check that t in [0,1]
in_half_edge[np.abs(t-0.5)>0.5] = False
if dim > 1:
# Check distance between y-values
in_half_edge[np.abs(p[:,1]-x0[0,1])>tol] = False
else:
#
# Skew line
#
s = (p[:,0]-x0[:,0])/v[0]
t = (p[:,1]-x0[:,1])/v[1]
# Check coordinates have same location parameters
in_half_edge[np.abs(t-s)>tol] = False
# Check that location parameter lies in [0,1]
in_half_edge[np.abs(t-0.5)>0.5] = False
return in_half_edge
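#
# Usage sketch (illustrative):
#
#   he = HalfEdge(Vertex((0, 0)), Vertex((1, 1)))
#   he.contains_points([(0.5, 0.5), (0.2, 0.8)])  # -> array([ True, False])
#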
def intersects_line_segment(self, line):
"""
Determine whether the HalfEdge intersects with a given line segment
Input:
line: double, list of two tuples
Output:
boolean, true if intersection, false otherwise.
Note: This only works in 2D
"""
# Express edge as p + t*r, t in [0,1]
p = np.array(self.base().coordinates())
r = np.array(self.head().coordinates()) - p
# Express line as q + u*s, u in [0,1]
q = np.array(line[0])
s = np.array(line[1]) - q
if abs(np.cross(r,s)) < 1e-14:
#
# Lines are parallel
#
if abs(np.cross(q-p,r)) < 1e-14:
#
# Lines are collinear
#
t0 = np.dot(q-p,r)/np.dot(r,r)
t1 = t0 + np.dot(s,r)/np.dot(r,r)
if (max(t0,t1) >= 0) and (min(t0,t1) <= 1):
#
# Line segments overlap
#
return True
else:
return False
else:
#
# Lines not collinear
#
return False
else:
#
# Lines not parallel
#
t = np.cross(q-p,s)/np.cross(r,s)
u = np.cross(p-q,r)/np.cross(s,r)
if 0 <= t <= 1 and 0 <= u <= 1:
#
# Line segments meet
#
return True
else:
return False
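#
# Usage sketch (illustrative):
#
#   he = HalfEdge(Vertex((0, 0)), Vertex((1, 0)))
#   he.intersects_line_segment([(0.5, -1), (0.5, 1)])  # True: crossing
#   he.intersects_line_segment([(2, 0), (3, 0)])       # False: collinear, disjoint
#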
def reference_map(self, x_in, mapsto='physical',
jac_p2r=False, jac_r2p=False,
hess_p2r=False, hess_r2p=False):
"""
Map points x from the reference interval to the physical HalfEdge or
vice versa.
Inputs:
x_in: double, (n,) array or a list of points to be mapped.
jac_p2r: bool, return jacobian of mapping from physical to
reference domain
jac_r2p: bool, return jacobian of mapping from reference to
physical domain
hess_p2r: bool, return hessian of mapping from physical to
reference domain
hess_r2p: bool, return hessian of mapping from reference to
physical domain
mapsto: str, 'physical' (map from reference to physical), or
'reference' (map from physical to reference).
Outputs:
x_trg: double, (n,) array of mapped points
mg: dictionary, of jacobians and hessians associated with the
mapping.
jac_p2r: double, n-list of physical-to-reference jacobians
jac_r2p: double, n-list of reference-to-physical jacobians
hess_p2r: double, n-list of physical-to-reference hessians
hess_r2p: double, n-list of reference-to-physical hessians
"""
#
# Preprocessing
#
if mapsto=='physical':
#
# Check that input is an array
#
assert type(x_in) is np.ndarray, \
'If "mapsto" is "physical", then input should '+\
'be an array.'
#
# Check that points contained in [0,1]
#
assert x_in.min()>=0 and x_in.max()<=1, \
'Reference points should be between 0 and 1.'
elif mapsto=='reference':
x_in = convert_to_array(x_in, dim=self.head().dim())
#
# Check that points lie on the HalfEdge
#
assert all(self.contains_points(x_in)), \
'Some points are not contained in the HalfEdge.'
#
# Compute mapped points
#
n = x_in.shape[0]
x0, y0 = self.base().coordinates()
x1, y1 = self.head().coordinates()
if mapsto == 'physical':
x_trg = [(x0 + (x1-x0)*xi, y0 + (y1-y0)*xi) for xi in x_in]
elif mapsto == 'reference':
if not np.isclose(x0, x1):
#
# Not a vertical line
#
x_trg = list((x_in[:,0]-x0)/(x1-x0))
elif not np.isclose(y0, y1):
#
# Not a horizontal line
#
x_trg = list((x_in[:,1]-y0)/(y1-y0))
#
# Compute the Jacobians and Hessians (if asked for)
#
if any([jac_r2p, jac_p2r, hess_r2p, hess_p2r]):
#
# Gradients of the mapping sought
#
# Initialize map gradients (mg) dictionary
mg = {}
if jac_r2p:
#
# Jacobian of mapping from reference to physical region
#
mg['jac_r2p'] = [np.array([[x1-x0],[y1-y0]])]*n
if jac_p2r:
#
# Jacobian of mapping from physical to reference region
# (an n-list, for consistency with 'jac_r2p')
#
mg['jac_p2r'] = [np.array([[1/(x1-x0), 1/(y1-y0)]])]*n
if hess_r2p:
#
# Hessian of mapping from reference to physical region
#
mg['hess_r2p'] = [np.zeros((2,2))]*n
if hess_p2r:
#
# Hessian of mapping from physical to reference region
#
mg['hess_p2r'] = [np.zeros((2,2))]*n
return x_trg, mg
else:
#
# No gradients of the mapping sought
#
return x_trg
"""
# TODO: Remove this...
#
# Compute the Jacobian
#
if jacobian:
if mapsto == 'physical':
#
# Derivative of mapping from refence to physical cell
#
jac = [np.array([[x1-x0],[y1-y0]])]*n
elif mapsto == 'reference':
#
# Derivative of inverse map
#
jac = np.array([[1/(x1-x0), 1/(y1-y0)]])
#
# Compute the Hessian (linear mapping, so Hessian = 0)
#
hess = np.zeros((2,2))
#
# Return output
#
if jacobian and hessian:
return x_trg, jac, hess
elif jacobian and not hessian:
return x_trg, jac
elif hessian and not jacobian:
return x_trg, hess
else:
return x_trg
"""
class Interval(HalfEdge):
"""
Interval Class (1D equivalent of a Cell)
"""
def __init__(self, vertex_left, vertex_right, n_children=2, \
regular=True, parent=None, position=None, forest=None, \
periodic=False):
"""
Constructor
"""
assert vertex_left.dim()==1 and vertex_right.dim()==1, \
'Inputs "vertex_left" and "vertex_right" should be one dimensional.'
HalfEdge.__init__(self, vertex_left, vertex_right, \
n_children=n_children, regular=regular,\
parent=parent, position=position, forest=forest,\
periodic=periodic)
def get_vertices(self):
"""
Return interval endpoints
"""
return [self.base(), self.head()]
def get_vertex(self, position):
"""
Return a given vertex
"""
assert position in [0,1], 'Position should be 0 or 1.'
return self.base() if position==0 else self.head()
def assign_previous(self, prev):
"""
Assign a previous interval
"""
if prev is not None:
assert isinstance(prev, Interval), \
'Input "prev" should be an Interval.'
HalfEdge.assign_previous(self, prev)
def assign_next(self, nxt):
"""
Assign the next interval
"""
if nxt is not None:
assert isinstance(nxt, Interval), \
'Input "nxt" should be an Interval.'
HalfEdge.assign_next(self,nxt)
def get_neighbor(self, pivot, subforest_flag=None, mode='physical'):
"""
Returns the neighboring interval
Input:
pivot: int, 0 (=left) or 1 (=right) or Vertex
subforest_flag (optional): marker to specify submesh
mode: str, specify the type of neighbor search. When intervals are
arranged within a forest, two adjoining intervals may be on
different refinement levels.
mode='physical': return the interval adjoining input interval
on the mesh
mode='level-wise': return the neighboring interval on the same
level in the forest.
"""
#
# Pivot is a vertex
#
if isinstance(pivot, Vertex):
if pivot==self.base():
pivot = 0
elif pivot==self.head():
pivot = 1
else:
raise Exception('Vertex not an interval endpoint')
if mode=='level-wise':
# =================================================================
# Return Level-wise Neighbor
# =================================================================
if pivot == 0:
#
# Left neighbor
#
nbr = self.previous()
if nbr is None:
#
# No previous, may still be periodic
#
v = self.base()
if v.is_periodic():
#
# Get coarsest cell periodically associated with v
#
for pair in v.get_periodic_pair():
nbr, dummy = pair
while nbr.get_depth()<self.get_depth():
#
# Search children until depth matches
#
if nbr.has_children(flag=subforest_flag):
nbr = nbr.get_child(0)
else:
#
# There are no children at same depth as interval
#
return None
#
# Found nbr at correct depth
#
return nbr
else:
#
# Return previous interval
#
return nbr
elif pivot == 1:
#
# Right neighbor
#
nbr = self.next()
if nbr is None:
#
# No next, may still be periodic
#
v = self.head()
if v.is_periodic():
#
# Get coarsest cell periodically associated with v
#
for pair in v.get_periodic_pair():
nbr, dummy = pair
while nbr.get_depth()<self.get_depth():
#
# Iterate through children until depth matches
#
if nbr.has_children(flag=subforest_flag):
nbr = nbr.get_child(1)
else:
#
# There are no cells matching cell's depth
#
return None
#
# Found nbr at correct depth
#
return nbr
else:
#
# Return next interval
#
return nbr
elif mode=='physical':
# =================================================================
# Return Physical Neighbor
# =================================================================
#
# Move left or right
#
if pivot == 0:
#
# Left neighbor
#
itv = self
prev = itv.previous()
#
# Go up the tree until there is a "previous"
#
while prev is None:
if itv.has_parent():
#
# Go up one level and check
#
itv = itv.get_parent()
prev = itv.previous()
else:
#
# No parent: check whether vertex is periodic
#
if itv.base().is_periodic():
for pair in itv.base().get_periodic_pair():
prev, dummy = pair
else:
return None
#
# Go down tree (to the right) as far as you can
#
nxt = prev
while nxt.has_children(flag=subforest_flag):
nxt = nxt.get_child(nxt.n_children()-1)
return nxt
elif pivot==1:
#
# Right neighbor
#
itv = self
nxt = itv.next()
#
# Go up the tree until there is a "next"
#
while nxt is None:
if itv.has_parent():
#
# Go up one level and check
#
itv = itv.get_parent()
nxt = itv.next()
else:
#
# No parent: check whether vertex is periodic
#
if itv.head().is_periodic():
for nxt, dummy in itv.head().get_periodic_pair():
pass
else:
return None
#
# Go down tree (to the left) as far as you can
#
prev = nxt
while prev.has_children(flag=subforest_flag):
prev = prev.get_child(0)
return prev
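#
# Usage sketch (illustrative): neighbor lookup across two adjoining
# intervals:
#
#   v0, v1, v2 = Vertex(0), Vertex(0.5), Vertex(1)
#   left, right = Interval(v0, v1), Interval(v1, v2)
#   left.assign_next(right)
#   left.get_neighbor(1)    # -> right
#   right.get_neighbor(0)   # -> left
#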
def split(self, n_children=None):
"""
Split a given interval into subintervals
"""
#
# Determine interval endpoints
#
x0, = self.base().coordinates()
x1, = self.head().coordinates()
n = self.n_children()
#
# Loop over children
#
for i in range(n):
#
# Determine children base and head Vertices
#
if i==0:
base = self.base()
if i==n-1:
head = self.head()
else:
head = Vertex(x0+(i+1)*(x1-x0)/n)
#
# Define new child interval
#
subinterval = Interval(base, head, parent=self, \
regular=self.is_regular(),\
position=i, n_children=n_children)
#
# Store in children
#
self._children[i] = subinterval
#
# The head of the current subinterval
# becomes the base of the next one
base = subinterval.head()
#
# Assign previous/next
#
for child in self.get_children():
i = child.get_node_position()
#
# Assign previous
#
if i != 0:
# Middle children
child.assign_previous(self.get_child(i-1))
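#
# Usage sketch (illustrative): bisecting an interval
#
#   itv = Interval(Vertex(0), Vertex(1))
#   itv.split()
#   c0, c1 = itv.get_children()
#   c0.head() is c1.base()   # True: children share the midpoint Vertex(0.5)
#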
def bin_points(self, points, i_points=None, subforest_flag=None):
"""
Determine the set of smallest subintervals (within submesh) that
contain the set of points, as well as the indices of these.
Inputs:
points: set of points
i_points: indices of these points
subforest_flag: submesh flag
Outputs:
bins: (cell, index) tuples of cells containing subsets of the
points, and the points' indices.
"""
assert all(self.contains_points(points)), \
'Not all points contained in cell'
sf = subforest_flag
# Convert points to array
x = convert_to_array(points)
if i_points is None:
i_points = np.arange(x.shape[0])
bins = []
#
# Cell is not in submesh
#
if not (sf is None or self.is_marked(flag=sf)):
#
# Move up tree until in submesh
#
if self.has_parent():
cell = self.get_parent()
bins.extend(cell.bin_points(x, i_points, subforest_flag=sf))
return bins
#
# Cell in submesh
#
if self.has_children(flag=sf):
#
# Points must be contained in some child cells
#
for child in self.get_children(flag=sf):
in_cell = child.contains_points(x)
if any(in_cell):
# Extract the points in child and bin
y = x[in_cell]
i_y = i_points[in_cell]
c_bin = child.bin_points(y,i_y, subforest_flag=sf)
bins.extend(c_bin)
# Remove points contained in child from list
x = x[~in_cell]
i_points = i_points[~in_cell]
else:
#
# Base case
#
bins.append((self, i_points))
return bins
return bins
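#
# Usage sketch (illustrative): binning points into the leaves of a
# split interval (indices refer to positions in the input list):
#
#   itv = Interval(Vertex(0), Vertex(1))
#   itv.split()
#   itv.bin_points([0.25, 0.75])
#   # -> [(itv.get_child(0), array([0])), (itv.get_child(1), array([1]))]
#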
def contains_points(self, points):
"""
Determine which of the points in x are contained in the interval.
Inputs:
points: double, collection of 1D points
Outputs:
in_cell: bool, (n_points,) array whose ith entry is True if point i
is contained in interval, False otherwise.
"""
# Get interval endpoints
x0, = self.base().coordinates()
x1, = self.head().coordinates()
# Convert points to (n_points,1) array
x = convert_to_array(points,1)
in_cell = np.ones(x.shape, dtype=bool)
in_cell[x<x0] = False
in_cell[x>x1] = False
return in_cell.ravel()
def reference_map(self, x_in, mapsto='physical',
jac_r2p=False, jac_p2r=False,
hess_r2p=False, hess_p2r=False,
jacobian=False, hessian=False):
"""
Map points x from the reference to the physical Interval or vice versa
Inputs:
x_in: double, (n,) array or a list of points to be mapped
jac_p2r: bool, return jacobian of mapping from physical to
reference domain
jac_r2p: bool, return jacobian of mapping from reference to
physical domain
hess_p2r: bool, return hessian of mapping from physical to
reference domain
hess_r2p: bool, return hessian of mapping from reference to
physical domain
mapsto: str, 'physical' (map from reference to physical), or
'reference' (map from physical to reference).
Outputs:
x_trg: double, (n,) array of mapped points
mg: dictionary, of jacobians and hessians associated with the
mapping.
jac_p2r: double, n-list of physical-to-reference jacobians
jac_r2p: double, n-list of reference-to-physical jacobians
hess_p2r: double, n-list of physical-to-reference hessians
hess_r2p: double, n-list of reference-to-physical hessians
"""
#
# Convert input to array
#
x_in = convert_to_array(x_in,dim=1)
#
# Compute mapped points
#
n = len(x_in)
x0, = self.get_vertex(0).coordinates()
x1, = self.get_vertex(1).coordinates()
#
# Compute target point
#
if mapsto == 'physical':
x_trg = x0 + (x1-x0)*x_in
elif mapsto == 'reference':
x_trg = (x_in-x0)/(x1-x0)
#
# Compute the Jacobians and Hessians (if asked for)
#
if any([jac_r2p, jac_p2r, hess_r2p, hess_p2r]):
#
# Gradients of the mapping sought
#
# Initialize map gradients (mg) dictionary
mg = {}
if jac_r2p:
#
# Jacobian of mapping from reference to physical region
#
mg['jac_r2p'] = [(x1-x0)]*n
if jac_p2r:
#
# Jacobian of mapping from physical to reference region
#
mg['jac_p2r'] = [1/(x1-x0)]*n
if hess_r2p:
#
# Hessian of mapping from reference to physical region
#
mg['hess_r2p'] = list(np.zeros(n))
if hess_p2r:
#
# Hessian of mapping from physical to reference region
#
mg['hess_p2r'] = list(np.zeros(n))
return x_trg, mg
else:
#
# No gradients of the mapping sought
#
return x_trg
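#
# Usage sketch for Interval.reference_map (illustrative only):
#
def _example_interval_reference_map():
    itv = Interval(Vertex(2.0), Vertex(4.0))
    # Map reference points 0 and 0.5 onto [2,4]; the reference-to-physical
    # Jacobian is constant and equal to the interval length
    x_phys, mg = itv.reference_map(np.array([0.0, 0.5]), jac_r2p=True)
    assert np.allclose(x_phys.ravel(), [2.0, 3.0])
    assert mg['jac_r2p'][0] == 2.0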
class Cell(Tree):
"""
Cell object: A two dimensional polygon
"""
def __init__(self, half_edges, n_children=0, parent=None, position=None, grid=None):
"""
Constructor
Inputs:
half_edges: HalfEdge, list of half-edges that determine the cell
n_children: int, number of sub-cells within cell
"""
Tree.__init__(self, n_children=n_children, parent=parent, \
position=position, forest=grid)
# =====================================================================
# Half-Edges
# =====================================================================
assert type(half_edges) is list, 'Input "half_edges" should be a list.'
#
# 2D Cells are constructed from lists of HalfEdges
#
for he in half_edges:
assert isinstance(he, HalfEdge), 'Not a HalfEdge.'
self._half_edges = half_edges
for he in self._half_edges:
# Assign self as incident cell
he.assign_cell(self)
#
# String half-edges together
#
n_hes = self.n_half_edges()
for i in range(n_hes):
he_nxt = self._half_edges[(i+1)%n_hes]
he_cur = self._half_edges[i]
he_cur.assign_next(he_nxt)
he_nxt.assign_previous(he_cur)
#
# Check that base of first halfedge coincides with head of last
#
assert half_edges[0].base()==half_edges[-1].head(),\
'HalfEdges should form a closed loop.'
#
# Check winding order
#
self.check_winding_order()
def n_half_edges(self):
"""
Return the number of half_edges
"""
return len(self._half_edges)
def get_half_edge(self, position):
"""
Return specific half_edge
"""
assert position>=0 and position<self.n_half_edges(),\
'Input "position" incompatible with number of HalfEdges'
return self._half_edges[position]
def get_half_edges(self):
"""
Iterate over half-edges
"""
return self._half_edges
def incident_half_edge(self, vertex, reverse=False):
"""
Returns the edge whose head (base) is the given vertex
"""
assert isinstance(vertex, Vertex), \
'Input "vertex" should be of type Vertex.'
for half_edge in self.get_half_edges():
if reverse:
#
# HalfEdge's base coincides with vertex
#
if half_edge.base()==vertex:
return half_edge
else:
#
# HalfEdge's head coincides with vertex
#
if half_edge.head()==vertex:
return half_edge
#
# No such HalfEdge
#
return None
def area(self):
"""
Determine the area of the polygon
"""
area = 0
for half_edge in self.get_half_edges():
x0, y0 = half_edge.base().coordinates()
x1, y1 = half_edge.head().coordinates()
area += (x0+x1)*(y1-y0)
return 0.5*area
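#
# Usage sketch (illustrative): the signed (shoelace) area of a
# counter-clockwise unit square is +1; a negative value would indicate
# clockwise winding.
#
#   vs = [Vertex((0, 0)), Vertex((1, 0)), Vertex((1, 1)), Vertex((0, 1))]
#   hes = [HalfEdge(vs[i], vs[(i + 1) % 4]) for i in range(4)]
#   Cell(hes).area()   # -> 1.0
#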
def bounding_box(self):
"""
Returns the cell's bounding box in the form of a tuple (x0,x1,y0,y1),
so that the cell is contained in the rectangle [x0,x1]x[y0,y1]
"""
xy = convert_to_array(self.get_vertices(), 2)
x0 = np.min(xy[:,0], axis=0)
x1 = np.max(xy[:,0], axis=0)
y0 = np.min(xy[:,1], axis=0)
y1 = np.max(xy[:,1], axis=0)
return x0, x1, y0, y1
def check_winding_order(self):
"""
Check whether the winding order is correct
"""
winding_error = 'Cell vertices not ordered correctly.'
area = self.area()
assert area > 0, winding_error
def n_vertices(self):
"""
Return the number of vertices
"""
return self.n_half_edges()
def get_vertex(self, position):
"""
Return a specific vertex
"""
assert position < self.n_vertices(), 'Input "position" incorrect.'
half_edge = self.get_half_edge(position)
return half_edge.base()
def get_vertices(self):
"""
Returns the vertices of the current cell.
Outputs:
vertices: list of vertices
"""
return [half_edge.base() for half_edge in self.get_half_edges()]
def get_neighbors(self, pivot, flag=None):
"""
Returns all neighboring cells about a given pivot
Input:
pivot: Vertex/HalfEdge,
- If the pivot is a HalfEdge, then neighbors are cells
containing the twin HalfEdge
- If it's a Vertex, then the neighbors are all cells (of
the "same" size) that contain the vertex
flag: marker - only return neighbors with given marker
Output:
neighbor(s):
- If the pivot is a HalfEdge, then return a Cell/None
- If the pivot is a Vertex, then return a list of Cells
Note: Neighbors are chosen via shared edges, which means that
"+" is a neighbor of "o" (shared edge), while "x" is not
(shared vertex only):

   Not OK       Not OK          OK
   -----         -----      -------------
   | x |         | x |      | + |       |
   ---*----    ----*----    -----   x
      | x |    | x | x |    | o |       |
      -----    ---------    -------------
"""
if isinstance(pivot, HalfEdge):
# =================================================================
# Direction is given by a HalfEdge
# =================================================================
twin = pivot.twin()
if twin is not None:
#
# Halfedge has a twin
#
neighbor = twin.cell()
if flag is not None:
if neighbor.is_marked(flag):
return neighbor
else:
return None
else:
return neighbor
elif isinstance(pivot, Vertex):
# =================================================================
# Direction is determined by a Vertex
# =================================================================
#
# Anti-clockwise
#
neighbors = []
cell = self
while True:
#
# Get neighbor
#
half_edge = cell.incident_half_edge(pivot)
neighbor = cell.get_neighbors(half_edge)
#
# Move on
#
if neighbor is None:
break
elif neighbor==self:
#
# Full rotation or no neighbors
#
return neighbors
else:
#
# Got a neighbor
#
neighbors.append(neighbor)
cell = neighbor
if pivot.is_periodic() and len(pivot.get_periodic_pair(cell))!=0:
pivot = pivot.get_periodic_pair(cell)[0]
#
# Clockwise
#
neighbors_clockwise = []
cell = self
while True:
#
# Get neighbor
#
half_edge = cell.incident_half_edge(pivot, reverse=True)
neighbor = cell.get_neighbors(half_edge)
#
# Move on
#
if neighbor is None:
break
elif neighbor==self:
#
# Full rotation or no neighbors
#
return neighbors
else:
#
# Got a neighbor
#
neighbors_clockwise.append(neighbor)
cell = neighbor
if pivot.is_periodic() and len(pivot.get_periodic_pair(cell))!=0:
pivot = pivot.get_periodic_pair(cell)[0]
#
# Combine clockwise and anticlockwise neighbors
#
neighbors.extend(reversed(neighbors_clockwise))
if flag is not None:
return [nb for nb in neighbors if nb.is_marked(flag)]
else:
return neighbors
def contains_points(self, points, tol=1e-10):
"""
Determine whether the given cell contains a point
Input:
point: tuple (x,y), list of tuples, or (n,2) array
Output:
in_cell: boolean array (n,), True where the cell contains the
corresponding point, False otherwise
"""
xy = convert_to_array(points, 2)
x,y = xy[:,0], xy[:,1]
n_points = len(x)
in_cell = np.ones(n_points, dtype=bool)
for half_edge in self.get_half_edges():
#
# Traverse vertices in counter-clockwise order
#
x0, y0 = half_edge.base().coordinates()
x1, y1 = half_edge.head().coordinates()
# Determine which points lie outside cell
pos_means_left = (y-y0)*(x1-x0) - (x-x0)*(y1-y0)
in_cell[pos_means_left<-tol] = False
"""
if len(in_cell)==1:
return in_cell[0]
else:
return in_cell
"""
return in_cell
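#
# Usage sketch (illustrative), reusing the unit-square cell from the
# area() sketch above:
#
#   cell.contains_points([(0.5, 0.5), (1.5, 0.5)])  # -> array([ True, False])
#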
def intersects_line_segment(self, line):
"""
Determine whether cell intersects with a given line segment
Input:
line: double, list of two tuples (x0,y0) and (x1,y1)
Output:
intersects: bool, true if line segment and cell intersect
Modified: 06/04/2016
"""
#
# Check whether line is contained in rectangle
#
if all(self.contains_points([line[0], line[1]])):
return True
#
# Check whether line intersects with any cell half_edge
#
for half_edge in self.get_half_edges():
if half_edge.intersects_line_segment(line):
return True
#
# If function has not terminated yet, there is no intersection
#
return False
class QuadCell(Cell, Tree):
"""
Quadrilateral cell
"""
def __init__(self, half_edges, parent=None, position=None, grid=None):
"""
Constructor
"""
assert len(half_edges)==4, 'QuadCells contain only 4 HalfEdges.'
Cell.__init__(self, half_edges, n_children=4, parent=parent,
position=position, grid=grid)
#
# Check whether cell's parent is a rectangle
#
if self.has_parent():
is_rectangle = self.get_parent().is_rectangle()
else:
#
# No parent: tentatively assume a rectangle; the geometric
# check below falsifies this if necessary
#
is_rectangle = True
for i in range(4):
he = half_edges[i].to_vector()
he_nxt = half_edges[(i+1)%4].to_vector()
on_axis = min(abs(he)) <1e-12
perpendicular = abs(np.dot(he, he_nxt)) < 1e-12
if not (perpendicular and on_axis):
is_rectangle = False
break
self._is_rectangle = is_rectangle
def is_rectangle(self):
"""
Is the cell a rectangle?
"""
return self._is_rectangle
def split(self, flag=None):
"""
Split QuadCell into 4 subcells (and mark children with flag)
"""
assert not self.has_children(), 'Cell already split.'
#
# Middle Vertex
#
xx = convert_to_array(self.get_vertices())
v_m = Vertex((np.mean(xx[:,0]),np.mean(xx[:,1])))
interior_half_edges = []
for half_edge in self.get_half_edges():
#
# Split each half_edge
#
if not half_edge.has_children():
half_edge.split()
#
# Form new HalfEdges to and from the center
#
h_edge_up = HalfEdge(half_edge.get_child(0).head(),v_m)
h_edge_down = h_edge_up.make_twin()
# Add to list
interior_half_edges.append([h_edge_up, h_edge_down])
#
# Form new cells using new half_edges
#
i = 0
for half_edge in self.get_half_edges():
#
# Define Child's HalfEdges
#
h1 = half_edge.get_child(0)
h2 = interior_half_edges[i][0]
h3 = interior_half_edges[(i-1)%self.n_half_edges()][1]
h4 = half_edge.previous().get_child(1)
hes = deque([h1, h2, h3, h4])
hes.rotate(i)
hes = list(hes)
#
# Define new QuadCell
#
self._children[i] = QuadCell(hes, parent=self, position=i)
# Increment counter
i += 1
if flag is not None:
for child in self.get_children():
child.mark(flag)
#
# Pair up periodic vertices
#
for half_edge in self.get_half_edges():
for he_child in half_edge.get_children():
if he_child.is_periodic() and he_child.twin() is not None:
he_child.pair_periodic_vertices()
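#
# Usage sketch (illustrative): splitting a QuadCell yields four
# children that share the cell's midpoint vertex:
#
#   vs = [Vertex((0, 0)), Vertex((1, 0)), Vertex((1, 1)), Vertex((0, 1))]
#   quad = QuadCell([HalfEdge(vs[i], vs[(i + 1) % 4]) for i in range(4)])
#   quad.split()
#   len(list(quad.get_children()))   # -> 4
#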
def bin_points(self, points, i_points=None, subforest_flag=None):
"""
Returns a list of the smallest flagged subcells containing at least
one point, together with the indices of the included points
Inputs:
points: points in cell, to be categorized
i_points: point indices (if contained within a larger array).
subforest_flag: submesh indicator
Outputs:
bins: list of (cell, i_points) pairs enumerating all cells
that contain points, and the indices of these.
"""
#
# Check that cell contains points
#
assert all(self.contains_points(points)), \
'Not all points contained in cell'
sf = subforest_flag
# Convert points to array
x = convert_to_array(points)
if i_points is None:
i_points = np.arange(x.shape[0])
bins = []
#
# Cell is not in submesh
#
if not (sf is None or self.is_marked(flag=sf)):
#
# Move up tree until in submesh
#
if self.has_parent():
cell = self.get_parent()
bins.extend(cell.bin_points(x, i_points, subforest_flag=sf))
return bins
#
# Cell is in submesh
#
if self.has_children(flag=sf):
#
# Points must be contained in some child cells
#
for child in self.get_children(flag=sf):
in_cell = child.contains_points(x)
if any(in_cell):
# Extract the points in child and bin
y = x[in_cell]
i_y = i_points[in_cell]
c_bin = child.bin_points(y,i_y, subforest_flag=sf)
bins.extend(c_bin)
# Remove points contained in child from list
x = x[~in_cell]
i_points = i_points[~in_cell]
else:
#
# Base case
#
bins.append((self, i_points))
return bins
return bins
def reference_map(self, x_in, mapsto='physical',
jac_p2r=False, jac_r2p=False,
hess_p2r=False, hess_r2p=False,
jacobian=False, hessian=False):
"""
Bilinear map between reference cell [0,1]^2 and physical QuadCell
Inputs:
x_in: double, (n,) array or a list of points.
jac_p2r: bool, return jacobian of mapping from physical to
reference domain
jac_r2p: bool, return jacobian of mapping from reference to
physical domain
hess_p2r: bool, return hessian of mapping from physical to
reference domain
hess_r2p: bool, return hessian of mapping from reference to
physical domain
mapsto: str, 'physical' (map from reference to physical), or
'reference' (map from physical to reference).
Outputs:
x_trg: double, (n,) array of mapped points
mg: dictionary, of jacobians and hessians associated with the
mapping.
jac_p2r: double, n-list of (2,2) physical-to-reference
jacobians.
jac_r2p: double, n-list of (2,2) reference-to-physical
jacobians.
hess_p2r: double, n-list of (2,2,2) physical-to-reference
hessians.
hess_r2p: double, n-list of (2,2,2) reference-to-physical
hessians.
"""
#
# Convert input to array
#
x_in = convert_to_array(x_in, dim=2)
n = x_in.shape[0]
assert x_in.shape[1]==2, 'Input "x" has incorrect dimension.'
#
# Get cell corner vertices
#
x_verts = convert_to_array(self.get_vertices())
p_sw_x, p_sw_y = x_verts[0,:]
p_se_x, p_se_y = x_verts[1,:]
p_ne_x, p_ne_y = x_verts[2,:]
p_nw_x, p_nw_y = x_verts[3,:]
if mapsto=='physical':
#
# Map points from [0,1]^2 to the physical cell, using bilinear
# nodal basis functions
#
# Points in reference domain
s, t = x_in[:,0], x_in[:,1]
# Mapped points
x = p_sw_x*(1-s)*(1-t) + p_se_x*s*(1-t) +\
p_ne_x*s*t + p_nw_x*(1-s)*t
y = p_sw_y*(1-s)*(1-t) + p_se_y*s*(1-t) +\
p_ne_y*s*t + p_nw_y*(1-s)*t
# Store points in an array
x_trg = np.array([x,y]).T
elif mapsto=='reference':
#
# Map from physical- to reference domain using Newton iteration
#
# Points in physical domain
x, y = x_in[:,0], x_in[:,1]
if self.is_rectangle():
#
# Cell is a rectangle - the inverse mapping is explicit
#
s = (x-p_sw_x)/(p_se_x-p_sw_x)
t = (y-p_sw_y)/(p_nw_y-p_sw_y)
x_trg = np.array([s,t]).T
else:
#
# Cell is quadrilateral - the inverse mapping must be estimated
#
# Initialize points in reference domain
s, t = 0.5*np.ones(n), 0.5*np.ones(n)
n_iterations = 5
for dummy in range(n_iterations):
#
# Compute residual
#
rx = p_sw_x*(1-s)*(1-t) + p_se_x*s*(1-t) \
+ p_ne_x*s*t + p_nw_x*(1-s)*t - x
ry = p_sw_y*(1-s)*(1-t) + p_se_y*s*(1-t) \
+ p_ne_y*s*t + p_nw_y*(1-s)*t - y
#
# Compute jacobian
#
drx_ds = -p_sw_x*(1-t) + p_se_x*(1-t) + p_ne_x*t - p_nw_x*t # J11
dry_ds = -p_sw_y*(1-t) + p_se_y*(1-t) + p_ne_y*t - p_nw_y*t # J21
drx_dt = -p_sw_x*(1-s) - p_se_x*s + p_ne_x*s + p_nw_x*(1-s) # J12
dry_dt = -p_sw_y*(1-s) - p_se_y*s + p_ne_y*s + p_nw_y*(1-s) # J22
#
# Newton Update:
#
Det = drx_ds*dry_dt - drx_dt*dry_ds
s -= ( dry_dt*rx - drx_dt*ry)/Det
t -= (-dry_ds*rx + drx_ds*ry)/Det
#
# Project onto [0,1]^2
#
s = np.minimum(np.maximum(s,0),1)
t = np.minimum(np.maximum(t,0),1)
x_trg = np.array([s,t]).T
#
# Compute the Jacobians and Hessians (if asked for)
#
if any([jac_r2p, jac_p2r, hess_r2p, hess_p2r]):
#
# Gradients of the mapping sought
#
# Initialize map gradients (mg) dictionary
mg = {}
if jac_r2p or jac_p2r:
#
# Compute Jacobian of the forward mapping
#
xs = -p_sw_x*(1-t) + p_se_x*(1-t) + p_ne_x*t - p_nw_x*t # J11
ys = -p_sw_y*(1-t) + p_se_y*(1-t) + p_ne_y*t - p_nw_y*t # J21
xt = -p_sw_x*(1-s) - p_se_x*s + p_ne_x*s + p_nw_x*(1-s) # J12
yt = -p_sw_y*(1-s) - p_se_y*s + p_ne_y*s + p_nw_y*(1-s) # J22
if jac_r2p:
#
# Jacobian of mapping from reference to physical region
#
mg['jac_r2p'] = [np.array([[xs[i], xt[i]], [ys[i], yt[i]]])\
for i in range(n)]
if jac_p2r:
#
# Jacobian of mapping from physical to reference region
#
# Compute matrix inverse of jacobian for backward mapping
Det = xs*yt-xt*ys
sx = yt/Det
sy = -xt/Det
tx = -ys/Det
ty = xs/Det
mg['jac_p2r'] = [np.array([[sx[i], sy[i]],[tx[i], ty[i]]])\
for i in range(n)]
if hess_r2p:
#
# Hessian of mapping from reference to physical region
#
if self.is_rectangle():
# Linear mapping (no curvature)
hr2p = [np.zeros((2,2,2)) for dummy in range(n)]
else:
hr2p = []
# Nonlinear mapping: compute curvature for each point
for i in range(n):
h = np.zeros((2,2,2))
xts = p_sw_x - p_se_x + p_ne_x - p_nw_x
yts = p_sw_y - p_se_y + p_ne_y - p_nw_y
h[:,:,0] = np.array([[0, xts], [xts, 0]])
h[:,:,1] = np.array([[0, yts], [yts, 0]])
hr2p.append(h)
# Store result
mg['hess_r2p'] = hr2p
if hess_p2r:
#
# Hessian of mapping from physical to reference region
#
if self.is_rectangle():
# Linear mapping (no curvature)
hp2r = [np.zeros((2,2,2)) for dummy in range(n)]
else:
# Nonlinear mapping: compute curvature for each point
hp2r = []
Dx = p_sw_x - p_se_x + p_ne_x - p_nw_x
Dy = p_sw_y - p_se_y + p_ne_y - p_nw_y
dxt_dx = Dx*sx
dxt_dy = Dx*sy
dyt_dx = Dy*sx
dyt_dy = Dy*sy
dxs_dx = Dx*tx
dxs_dy = Dx*ty
dys_dx = Dy*tx
dys_dy = Dy*ty
dDet_dx = dxs_dx*yt + dyt_dx*xs - dys_dx*xt - dxt_dx*ys
dDet_dy = dxs_dy*yt + dyt_dy*xs - dys_dy*xt - dxt_dy*ys
sxx = dyt_dx/Det - yt*dDet_dx/Det**2
sxy = dyt_dy/Det - yt*dDet_dy/Det**2
syy = -dxt_dy/Det + xt*dDet_dy/Det**2
txx = -dys_dx/Det + ys*dDet_dx/Det**2
txy = -dys_dy/Det + ys*dDet_dy/Det**2
tyy = dxs_dy/Det - xs*dDet_dy/Det**2
for i in range(n):
h = np.zeros((2,2,2))
h[:,:,0] = np.array([[sxx[i], sxy[i]],
[sxy[i], syy[i]]])
h[:,:,1] = np.array([[txx[i], txy[i]],
[txy[i], tyy[i]]])
hp2r.append(h)
# Store result
mg['hess_p2r'] = hp2r
#
# Return points and gradients
#
return x_trg, mg
else:
#
# No gradients of the mapping sought
#
return x_trg
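#
# Usage sketch for QuadCell.reference_map (illustrative only):
#
def _example_quadcell_reference_map():
    # Build a rectangular QuadCell on [0,2]^2; the center of the
    # reference square maps to the cell's midpoint, and the inverse map
    # is explicit because the cell is a rectangle.
    vs = [Vertex((0, 0)), Vertex((2, 0)), Vertex((2, 2)), Vertex((0, 2))]
    cell = QuadCell([HalfEdge(vs[i], vs[(i + 1) % 4]) for i in range(4)])
    x_phys = cell.reference_map(np.array([[0.5, 0.5]]))
    assert np.allclose(x_phys, [[1.0, 1.0]])
    x_ref = cell.reference_map(np.array([[1.0, 1.0]]), mapsto='reference')
    assert np.allclose(x_ref, [[0.5, 0.5]])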
class RVertex(Vertex):
"""
Vertex on the reference cell
"""
def __init__(self, coordinates):
"""
Constructor
"""
Vertex.__init__(self, coordinates)
self.__pos = {0: None, 1: {0: None, 1: None, 2: None, 3: None}}
self.__basis_index = None
def set_pos(self, pos, level=0, child=None):
"""
Set the position of the Dof Vertex
Inputs:
pos: int, a number not exceeding the element's number of dofs
level: int in {0,1}, number specifying the refinement level
( 0 = coarse, 1 = fine ).
child: int in {0,1,2,3}, number specifying the child cell
"""
assert level in [0,1], 'Level should be either 0 or 1.'
if level==0:
self.__pos[level] = pos
if level==1:
assert child in [0,1,2,3], 'Level=1. Child should be specified.'
self.__pos[level][child] = pos
def get_pos(self, level, child=None, debug=False):
"""
Return the dof vertex's position at a given level for a given child
"""
if debug:
print(self.__pos)
if level==1:
assert child is not None, 'On fine level, child must be specified.'
return self.__pos[level][child]
else:
return self.__pos[level]
def set_basis_index(self, idx):
"""
Set the index of the basis function associated with this dof vertex
"""
self.__basis_index = idx
class RHalfEdge(HalfEdge):
"""
HalfEdge for reference element
"""
def __init__(self, base, head, dofs_per_edge,
parent=None, position=None, twin=None):
"""
Constructor
"""
HalfEdge.__init__(self, base, head, parent=parent, \
position=position, twin=twin)
#
# Assign edge dof vertices
#
self.__dofs_per_edge = dofs_per_edge
self.assign_edge_dof_vertices()
def get_edge_dof_vertices(self, pos=None):
"""
Returns all dof vertices associated with HalfEdge
"""
if pos is None:
return self.__edge_dof_vertices
else:
return self.__edge_dof_vertices[pos]
def assign_edge_dof_vertices(self):
"""
Create the dof vertices interior to this HalfEdge, inheriting them
from the twin (reversed) or from the parent where possible
"""
if self.twin() is not None:
#
# Use RHalfEdge's twin's dof vertices
#
assert isinstance(self.twin(),RHalfEdge), \
'Twin should also be an RHalfEdge'
# Copy before reversing, to avoid reversing the twin's own
# list in place
edge_dofs = list(self.twin().get_edge_dof_vertices())
edge_dofs.reverse()
else:
#
# Make new dof Vertices
#
dofs_per_edge = self.n_dofs()
x0, y0 = self.base().coordinates()
x1, y1 = self.head().coordinates()
edge_dofs = []
if dofs_per_edge!=0:
h = 1/(dofs_per_edge+1)
for i in range(dofs_per_edge):
#
# Compute coordinates for dof vertex
#
t = (i+1)*h
x = x0 + t*(x1-x0)
y = y0 + t*(y1-y0)
v = RVertex((x,y))
if self.has_parent():
#
# Check if vertex already exists
#
for v_p in self.get_parent().get_edge_dof_vertices():
if np.allclose(v.coordinates(),v_p.coordinates()):
v = v_p
edge_dofs.append(v)
#
# Store edge dof vertices
#
self.__edge_dof_vertices = edge_dofs
def make_twin(self):
"""
Construct the twin RHalfEdge and link both half-edges to each other
"""
twin = RHalfEdge(self.head(), self.base(), self.n_dofs(), twin=self)
# Link self back to the new twin (the constructor only links one way)
self.assign_twin(twin)
return twin
def n_dofs(self):
"""
Returns the number of dofs associated with the HalfEdge
"""
return self.__dofs_per_edge
def split(self):
"""
Refine current half-edge (overwrite Tree.split)
"""
#
# Compute new midpoint vertex
#
x = convert_to_array([self.base().coordinates(),\
self.head().coordinates()])
xm = 0.5*(x[0,:]+x[1,:])
vm = RVertex(tuple(xm))
for v in self.get_edge_dof_vertices():
if np.allclose(vm.coordinates(), v.coordinates()):
vm = v
#
# Define own children independently of neighbor
#
c0 = RHalfEdge(self.base(), vm, self.n_dofs(), parent=self, position=0)
c1 = RHalfEdge(vm, self.head(), self.n_dofs(), parent=self, position=1)
#
# Save the babies
#
self._children[0] = c0
self._children[1] = c1
class RQuadCell(QuadCell):
"""
Quadrilateral Reference Cell
"""
def __init__(self, element, half_edges=None, parent=None, position=None):
"""
Constructor
"""
#
# Check if the element is correct
#
self.element = element
# Extract numbers of degrees of freedom
dofs_per_vertex = element.n_dofs('vertex')
assert dofs_per_vertex<=1, \
'Only elements with at most one dof per vertex supported'
#
# Determine Cell's RHalfEdges
#
if parent is None:
#
# Corner Vertices
#
vertices = [RVertex((0,0)), RVertex((1,0)),
RVertex((1,1)), RVertex((0,1))]
#
# Reference HalfEdges
#
dofs_per_edge = element.n_dofs('edge')
half_edges = []
for i in range(4):
he = RHalfEdge(vertices[i], vertices[(i+1)%4], dofs_per_edge)
half_edges.append(he)
else:
assert half_edges is not None, 'Cell has parent. Specify RefHalfEdges.'
# Define Quadcell
QuadCell.__init__(self, half_edges, parent=parent, position=position)
#
# Assign cell dof vertices
#
self.assign_cell_dof_vertices()
if not self.has_parent():
#
# Assign positions on coarse level
#
self.assign_dof_positions(0)
#
# Split
#
self.split()
#
# Assign positions
#
self.assign_dof_positions(1)
def split(self):
"""
Split refQuadCell into 4 subcells
"""
assert not self.has_children(), 'Cell already split.'
#
# Middle Vertex
#
xx = convert_to_array(self.get_vertices())
v_m = RVertex((np.mean(xx[:,0]),np.mean(xx[:,1])))
# Check if this vertex is contained in cell
for v_p in self.get_dof_vertices():
if np.allclose(v_m.coordinates(), v_p.coordinates()):
# Vertex already exists
v_m = v_p
break
dofs_per_edge = self.element.n_dofs('edge')
interior_half_edges = []
for half_edge in self.get_half_edges():
#
# Split each half_edge
#
if not half_edge.has_children():
half_edge.split()
#
# Form new HalfEdges to and from the center
#
h_edge_up = RHalfEdge(half_edge.get_child(0).head(),v_m, dofs_per_edge)
h_edge_down = h_edge_up.make_twin()
# Add to list
interior_half_edges.append([h_edge_up, h_edge_down])
#
# Form new cells using new half_edges
#
i = 0
for half_edge in self.get_half_edges():
#
# Define Child's HalfEdges
#
h1 = half_edge.get_child(0)
h2 = interior_half_edges[i][0]
h3 = interior_half_edges[(i-1)%self.n_half_edges()][1]
h4 = half_edge.previous().get_child(1)
hes = deque([h1, h2, h3, h4])
hes.rotate(i)
hes = list(hes)
#
# Define new QuadCell
#
self._children[i] = RQuadCell(self.element, hes, parent=self, position=i)
# Increment counter
i += 1
def assign_cell_dof_vertices(self):
"""
Assign interior dof vertices to cell
"""
dofs_per_cell = self.element.n_dofs('cell')
cell_dofs = []
if dofs_per_cell!=0:
n = int(np.sqrt(dofs_per_cell)) # number of dofs per direction
x0, x1, y0, y1 = self.bounding_box()
h = 1/(n+1) # subcell width
for i in range(n): # y-coordinates
for j in range(n): # x-coordinates
#
# Compute new Vertex
#
v_c = RVertex((x0+(j+1)*h*(x1-x0),y0+(i+1)*h*(y1-y0)))
#
# Check if vertex exists within parent cell
#
inherits_dof_vertex = False
if self.has_parent():
for v_p in self.get_parent().get_cell_dof_vertices():
if np.allclose(v_c.coordinates(), v_p.coordinates()):
cell_dofs.append(v_p)
inherits_dof_vertex = True
break
if not inherits_dof_vertex:
cell_dofs.append(v_c)
self.__cell_dof_vertices = cell_dofs
def get_cell_dof_vertices(self, pos=None):
"""
Return the interior dof vertices
"""
if pos is None:
return self.__cell_dof_vertices
else:
return self.__cell_dof_vertices[pos]
def assign_dof_positions(self, level):
"""
"""
if level==0:
#
# Level 0: Assign positions to vertices on coarse level
#
self.__dof_vertices = {0: [], 1: {0: [], 1: [], 2: [], 3: []}}
count = 0
# Corner dof vertices
for vertex in self.get_vertices():
if self.element.n_dofs('vertex')!=0:
vertex.set_pos(count, level)
self.__dof_vertices[level].append(vertex)
count += 1
# HalfEdge dof vertices
for half_edge in self.get_half_edges():
for vertex in half_edge.get_edge_dof_vertices():
vertex.set_pos(count, level)
self.__dof_vertices[level].append(vertex)
count += 1
# Cell dof vertices
for vertex in self.get_cell_dof_vertices():
vertex.set_pos(count, level)
self.__dof_vertices[level].append(vertex)
count += 1
elif level==1:
#
# Assign positions to child vertices
#
coarse_dofs = [i for i in range(self.element.n_dofs())]
for i_child in range(4):
#
# Add all dof vertices to one list
#
child = self.get_child(i_child)
child_dof_vertices = []
# Dofs at Corners
for vertex in child.get_vertices():
if self.element.n_dofs('vertex')!=0:
child_dof_vertices.append(vertex)
# Dofs on HalfEdges
for half_edge in child.get_half_edges():
for vertex in half_edge.get_edge_dof_vertices():
child_dof_vertices.append(vertex)
# Dofs in Cell
for vertex in child.get_cell_dof_vertices():
child_dof_vertices.append(vertex)
count = 0
for vertex in child_dof_vertices:
if not self.element.torn_element():
#
# Continuous Element (Dof Vertex can be inherited multiple times)
#
vertex.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vertex)
count += 1
else:
#
# Discontinuous Element (Dof Vertex can be inherited once)
#
if vertex in self.__dof_vertices[0]:
i_vertex = self.__dof_vertices[0].index(vertex)
if i_vertex in coarse_dofs:
#
# Use vertex within child cell
#
vertex.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vertex)
count += 1
# Delete the entry (preventing reuse).
coarse_dofs.pop(coarse_dofs.index(i_vertex))
else:
#
# Vertex has already been used, make a new one
#
vcopy = RVertex(vertex.coordinates())
vcopy.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vcopy)
count += 1
else:
#
# Not contained in coarse vertex set
#
vertex.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vertex)
count += 1
def get_dof_vertices(self, level=0, child=None, pos=None):
"""
Returns all dof vertices in cell
"""
if level==0:
return self.__dof_vertices[0]
elif level==1:
assert child is not None, 'On level 1, child must be specified.'
if pos is None:
return self.__dof_vertices[1][child]
else:
return self.__dof_vertices[1][child][pos]
class RInterval(Interval):
def __init__(self, element, base=None, head=None,
parent=None, position=None):
"""
Constructor
"""
assert element.dim()==1, 'Element must be one dimensional'
self.element = element
if parent is None:
base = RVertex(0)
head = RVertex(1)
else:
assert isinstance(head, RVertex), 'Input "head" must be an RVertex.'
assert isinstance(base, RVertex), 'Input "base" must be an RVertex.'
Interval.__init__(self, base, head, parent=parent, position=position)
#
# Assign cell dof vertices
#
self.assign_cell_dof_vertices()
if not self.has_parent():
#
# Assign positions on coarse level
#
self.assign_dof_positions(0)
#
# Split
#
self.split()
#
# Assign positions
#
self.assign_dof_positions(1)
def split(self):
"""
Split a given interval into 2 subintervals
"""
#
# Determine interval endpoints
#
x0, = self.base().coordinates()
x1, = self.head().coordinates()
n = self.n_children()
#
# Loop over children
#
for i in range(n):
#
# Determine children base and head Vertices
#
if i==0:
base = self.base()
if i==n-1:
head = self.head()
else:
head = RVertex(x0+(i+1)*(x1-x0)/n)
#
# Check whether Vertex appears in parent
#
for v_p in self.get_dof_vertices():
if np.allclose(head.coordinates(), v_p.coordinates()):
head = v_p
#
# Define new child interval
#
subinterval = RInterval(self.element, base, head, \
parent=self, position=i)
#
# Store in children
#
self._children[i] = subinterval
#
# The head of the current subinterval
# becomes the base of the next one
base = subinterval.head()
#
# Assign previous/next
#
for child in self.get_children():
i = child.get_node_position()
#
# Assign previous
#
if i==0:
# Leftmost child: inherit parent's previous interval
child.assign_previous(self.previous())
else:
# Middle child: previous is the preceding sibling
child.assign_previous(self.get_child(i-1))
#
# Assign next
#
if i==n-1:
# Rightmost child: inherit parent's next interval
child.assign_next(self.next())
def assign_cell_dof_vertices(self):
"""
Create the dof vertices interior to the interval, inheriting them
from the parent where possible
"""
dofs_per_cell = self.element.n_dofs('edge')
cell_dofs = []
if dofs_per_cell !=0:
#
# Compute coordinates for cell dof vertices
#
x0, = self.base().coordinates()
x1, = self.head().coordinates()
h = 1/(dofs_per_cell+1)
for i in range(dofs_per_cell):
x = x0 + (i+1)*h*(x1-x0)
v_c = RVertex(x)
#
# Check if vertex exists within parent cell
#
inherits_dof_vertex = False
if self.has_parent():
for v_p in self.get_parent().get_cell_dof_vertices():
if np.allclose(v_c.coordinates(), v_p.coordinates()):
cell_dofs.append(v_p)
inherits_dof_vertex = True
break
if not inherits_dof_vertex:
cell_dofs.append(v_c)
self.__cell_dof_vertices = cell_dofs
def get_cell_dof_vertices(self, pos=None):
"""
Returns the Dofs associated with the interior of the cell
Note: This function is only used during construction
"""
if pos is None:
return self.__cell_dof_vertices
else:
return self.__cell_dof_vertices[pos]
def get_dof_vertices(self, level=0, child=None, pos=None):
"""
Returns all dof vertices in cell
Inputs:
level: int 0/1, 0=coarse, 1=fine
child: int, child node position within parent (0/1)
pos: int, 0,...n_dofs-1, dof number within cell
"""
if level==0:
return self.__dof_vertices[0]
elif level==1:
assert child is not None, 'On level 1, child must be specified.'
if pos is None:
return self.__dof_vertices[1][child]
else:
return self.__dof_vertices[1][child][pos]
def assign_dof_positions(self, level):
"""
Assigns a number to each dof vertex in the interval.
Note: We only deal with bisection
"""
if level==0:
#
# Level 0: Assign position to vertices on coarse level
#
self.__dof_vertices = {0: [], 1: {0: [], 1: []}}
count = 0
#
# Add endpoints
#
dpv = self.element.n_dofs('vertex')
if dpv != 0:
for vertex in self.get_vertices():
vertex.set_pos(count, level)
self.__dof_vertices[level].append(vertex)
count += 1
#
# Add cell dof vertices
#
for vertex in self.get_cell_dof_vertices():
vertex.set_pos(count, level)
self.__dof_vertices[level].append(vertex)
count += 1
elif level==1:
#
# Assign positions to child vertices
#
coarse_dofs = [i for i in range(self.element.n_dofs())]
for i_child in range(2):
#
# Add all dof vertices to a list
#
child = self.get_child(i_child)
child_dof_vertices = []
# Dofs at corners
for vertex in child.get_vertices():
if self.element.n_dofs('vertex')!=0:
child_dof_vertices.append(vertex)
# Dofs in Interval
for vertex in child.get_cell_dof_vertices():
child_dof_vertices.append(vertex)
#
# Inspect each vertex in the child, to see
# whether it is duplicated in the parent.
#
count = 0
for vertex in child_dof_vertices:
if not self.element.torn_element():
#
# Continuous Element (Dof Vertex can be inherited multiple times)
#
vertex.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vertex)
count += 1
else:
#
# Discontinuous Element (Dof Vertex can be inherited once)
#
if vertex in self.__dof_vertices[0]:
i_vertex = self.__dof_vertices[0].index(vertex)
if i_vertex in coarse_dofs:
#
# Use vertex within child cell
#
vertex.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vertex)
count += 1
# Delete the entry (preventing reuse).
coarse_dofs.pop(coarse_dofs.index(i_vertex))
else:
#
# Vertex has already been used, make a new one
#
vcopy = RVertex(vertex.coordinates())
vcopy.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vcopy)
count += 1
else:
#
# Not contained in coarse vertex set
#
vertex.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vertex)
count += 1
'''
class Mesh(object):
"""
Mesh class, consisting of a grid (a doubly connected edge list), as well
as a list of root cells, -half-edges and vertices.
Attributes:
Methods:
"""
def __init__(self, grid):
"""
Constructor
Inputs:
grid: DCEL object, doubly connected edge list specifying
the mesh topology.
"""
self.__grid = grid
# =====================================================================
# Vertices
# =====================================================================
n_vertices = grid.points['n']
vertices = []
for i in range(n_vertices):
vertices.append(Vertex(grid.points['coordinates'][i]))
# =====================================================================
# Half-edges
# =====================================================================
n_he = grid.half_edges['n']
#
# Define Half-Edges via base and head vertices
#
half_edges = []
for i in range(n_he):
i_base, i_head = grid.half_edges['connectivity'][i]
v_base = grid.points['coordinates'][i_base]
v_head = grid.points['coordinates'][i_head]
half_edges.append(HalfEdge(Vertex(v_base), Vertex(v_head)))
#
# Specify relations among Half-Edges
#
for i in range(n_he):
he = half_edges[i]
i_prev = grid.half_edges['prev'][i]
i_next = grid.half_edges['next'][i]
i_twin = grid.half_edges['twin'][i]
he.assign_next(half_edges[i_next])
he.assign_prev(half_edges[i_prev])
if i_twin != -1:
he.assign_twin(half_edges[i_twin])
# =====================================================================
# Cells
# =====================================================================
n_faces = grid.faces['n']
cells = []
for i in range(n_faces):
cell_type = grid.faces['type'][i]
if cell_type == 'interval':
cell = BCell()
pass
elif cell_type == 'triangle':
#cell = TriCell()
pass
elif cell_type == 'quadrilateral':
cell = QuadCell()
else:
unknown_cell_type = 'Unknown cell type. Use "interval", '+\
'"triangle", or "quadrilateral".'
raise Exception(unknown_cell_type)
cells.append(cell)
if grid is not None:
#
# grid specified
#
#assert all(i is None for i in [node, cell, dim]),\
#'Grid specified: All other inputs should be None.'
#
# ROOT node
#
dim = grid.dim()
if dim == 1:
node = BiNode(grid=grid)
elif dim == 2:
node = QuadNode(grid=grid)
else:
raise Exception('Only dimensions 1 and 2 supported.')
#
# Cells
#
node.split()
for pos in node._child_positions:
#
# ROOT cells
#
if dim == 1:
cell = BiCell(grid=grid, position=pos)
elif dim == 2:
cell = QuadCell(grid=grid, position=pos)
child = node.children[pos]
child.link(cell)
#
# Mark nodes, edges, and vertices
#
elif cell is not None:
#
# Cell specified
#
assert all(i is None for i in [node, grid, dim]),\
'Cell specified: All other inputs should be None.'
#
# ROOT node linked to cell
#
dim = cell.dim()
if dim == 1:
node = BiNode(bicell=cell)
elif dim == 2:
node = QuadNode(quadcell=cell)
else:
raise Exception('Only dimensions 1 and 2 supported.')
elif node is not None:
#
# Tree specified
#
assert all(i is None for i in [cell, grid, dim]),\
'Tree specified: All other inputs should be None.'
#
# Default cell
#
dim = node.dim()
if dim == 1:
cnr_vtcs = [0,1]
cell = BiCell(corner_vertices=cnr_vtcs)
elif dim == 2:
cnr_vtcs = [0,1,0,1]
cell = QuadCell(corner_vertices=cnr_vtcs)
node.link(cell)
elif dim is not None:
#
# Dimension specified
#
assert all(i is None for i in [node, cell, grid]),\
'Dimension specified: All other inputs should be None.'
#
# Default cell
#
if dim == 1:
cnr_vtcs = [0,1]
cell = BiCell(corner_vertices=cnr_vtcs)
elif dim == 2:
cnr_vtcs = [0,1,0,1]
cell = QuadCell(corner_vertices=cnr_vtcs)
#
# Default node, linked to cell
#
if dim == 1:
node = BiNode(bicell=cell)
elif dim==2:
node = QuadNode(quadcell=cell)
else:
raise Exception('Only dimensions 1 or 2 supported.')
else:
#
# Default cell
#
cnr_vtcs = [0,1,0,1]
cell = QuadCell(corner_vertices=cnr_vtcs)
node = QuadNode(quadcell=cell)
dim = 2
self.__root_node = node
self.grid = grid
self.__mesh_count = 0
self.__dim = dim
def dim(self):
"""
Return the spatial dimension of the region
"""
return self.__dim
def depth(self):
"""
Return the maximum refinement level
"""
return self.root_node().tree_depth()
def n_nodes(self, flag=None):
"""
Return the number of cells
"""
        if hasattr(self, '_Mesh__n_cells'):  # account for name mangling of __n_cells
return self.__n_cells
else:
self.__n_cells = len(self.__root_node.get_leaves(flag=flag))
return self.__n_cells
def root_node(self):
"""
Return tree node used for mesh
"""
return self.__root_node
def boundary(self, entity, flag=None):
"""
Returns a set of all boundary entities (vertices/edges)
Input:
entity: str, 'vertices', 'edges', or 'quadcells'
flag:
TODO: Add support for tricells
"""
boundary = set()
for node in self.root_node().get_leaves(flag=flag):
cell = node.cell()
for direction in ['W','E','S','N']:
#
# Look in 4 directions
#
if node.get_neighbor(direction) is None:
if entity=='quadcells':
boundary.add(cell)
break
edge = cell.get_edges(direction)
if entity=='edges':
boundary.add(edge)
if entity=='vertices':
for v in edge.vertices():
boundary.add(np.array(v.coordinates()))
return boundary
def bounding_box(self):
"""
Returns the mesh's bounding box
Output:
box: double, [x_min, x_max, y_min, y_max] if mesh is 2d
and [x_min, x_max] if mesh is 1d.
"""
root = self.root_node()
if root.grid is not None:
#
# DCEL on coarsest level
#
grid = root.grid
if self.dim() == 1:
x_min, x_max = grid.points['coordinates'][[0,-1]]
return [x_min, x_max]
elif self.dim() == 2:
#
# Determine bounding box from boundary points
#
i_vbnd = grid.get_boundary_points()
v_bnd = []
for k in i_vbnd:
v_bnd.append( \
grid.points['coordinates'][i_vbnd[k]].coordinates())
v_bnd = np.array(v_bnd)
x_min, x_max = v_bnd[:,0].min(), v_bnd[:,0].max()
y_min, y_max = v_bnd[:,1].min(), v_bnd[:,1].max()
return [x_min, x_max, y_min, y_max]
else:
#
# No DCEL: Use Cell
#
cell = root.cell()
if cell.dim()==1:
x_min, x_max = cell.get_vertices(pos='corners', as_array=True)
return [x_min, x_max]
elif cell.dim()==2:
vbnd = cell.get_vertices(pos='corners', as_array=True)
x_min, x_max = vbnd[:,0].min(), vbnd[:,0].max()
y_min, y_max = vbnd[:,1].min(), vbnd[:,1].max()
return [x_min, x_max, y_min, y_max]
else:
raise Exception('Only 1D and 2D supported.')
def unmark_all(self, flag=None, nodes=False, cells=False, edges=False,
vertices=False, all_entities=False):
"""
Unmark all nodes, cells, edges, or vertices.
"""
if all_entities:
#
# Unmark everything
#
nodes = True
cells = True
edges = True
vertices = True
for node in self.root_node().traverse():
if nodes:
#
# Unmark node
#
node.unmark(flag=flag, recursive=True)
if cells:
#
# Unmark quad cell
#
node.cell().unmark(flag=flag, recursive=True)
if edges:
#
# Unmark quad edges
#
for edge in node.cell().edges.values():
edge.unmark(flag=flag)
if vertices:
#
# Unmark quad vertices
#
for vertex in node.cell().vertices.values():
vertex.unmark(flag=flag)
def iter_quadedges(self, flag=None, nested=False):
"""
Iterate over cell edges
Output:
quadedge_list, list of all active cell edges
"""
quadedge_list = []
#
# Unmark all edges
#
        self.unmark_all(edges=True)
for cell in self.iter_quadcells(flag=flag, nested=nested):
for edge_key in [('NW','SW'),('SE','NE'),('SW','SE'),('NE','NW')]:
edge = cell.edges[edge_key]
if not(edge.is_marked()):
#
# New edge: add it to the list
#
quadedge_list.append(edge)
edge.mark()
#
# Unmark all edges again
#
        self.unmark_all(edges=True)
return quadedge_list
def quadvertices(self, coordinate_array=True, flag=None, nested=False):
"""
Iterate over quad cell vertices
Inputs:
coordinate_array: bool, if true, return vertices as arrays
nested: bool, traverse tree depthwise
Output:
quadvertex_list, list of all active cell vertices
"""
quadvertex_list = []
#
# Unmark all vertices
#
        self.unmark_all(vertices=True)
for cell in self.iter_quadcells(flag=flag, nested=nested):
for direction in ['SW','SE','NW','NE']:
vertex = cell.vertices[direction]
if not(vertex.is_marked()):
#
# New vertex: add it to the list
#
quadvertex_list.append(vertex)
vertex.mark()
        self.unmark_all(vertices=True)
if coordinate_array:
return np.array([v.coordinates() for v in quadvertex_list])
else:
return quadvertex_list
def refine(self, flag=None):
"""
Refine mesh by splitting marked LEAF nodes
"""
for leaf in self.root_node().get_leaves(flag=flag):
leaf.split()
def coarsen(self, flag=None):
"""
Coarsen mesh by merging marked LEAF nodes.
Inputs:
flag: str/int, marker flag.
If flag is specified, merge a node if all
of its children are flagged.
If no flag is specified, merge nodes so that
mesh depth is reduced by 1.
"""
root = self.root_node()
if flag is None:
tree_depth = root.tree_depth()
for leaf in root.get_leaves():
if leaf.depth == tree_depth:
leaf.parent.merge()
else:
for leaf in root.get_leaves(flag=flag):
parent = leaf.parent
if all(child.is_marked(flag=flag) \
for child in parent.get_children()):
parent.merge()
def record(self,flag=None):
"""
Mark all mesh nodes with flag
"""
count = self.__mesh_count
for node in self.root_node().traverse(mode='breadth-first'):
if flag is None:
node.mark(count)
else:
node.mark(flag)
self.__mesh_count += 1
def n_meshes(self):
"""
Return the number of recorded meshes
"""
return self.__mesh_count
'''
class DCEL(object):
"""
Description: Doubly connected edge list
Attributes:
__dim: int, dimension of grid
format: str, version of mesh file
is_rectangular: bool, specifying whether 2D grid has rectangular faces
subregions: struct, encoding the mesh's subregions, with fields:
n: int, number of subregions
dim: int, dimension of subregion
tags: int, tags of subregions
names: str, names of subregions
points: struct, encoding the mesh's vertices, with fields:
n: int, number of points
n_dofs: int, number of dofs associated with point
tags: tags associated with vertices
phys: int list, indicating membership to one of the
physical subregions listed above.
geom: int list, indicating membership to certain
geometric entities.
partition: int, list indicating membership to certain
mesh partitions.
half_edge: int array, pointing to a half-edge based at
point.
coordinates: double, list of tuples
edges: struct, encoding the mesh's edges associated with
specific subregions, w. fields:
n: int, number of edges
n_dofs: int, number of dofs associated with edge
tags: struct, tags associated with edges (see points)
connectivity: int, list of sets containing edge vertices
half_edge: int, array pointing to associated half-edge
Edges: Edge list in same order as connectivity
half_edges: struct, encoding the mesh's half-edges
n: int, number of half-edges
n_dofs: int, number of dofs associated with half_edge
tags: struct, tags associated with half-edges (see points)
connectivity: int, list pointing to initial and final
vertices [v1,v2].
prev: int, array pointing to the preceding half-edge
next: int, array pointing to the next half-edge
twin: int, array pointing to the reversed half-edge
edge: int, array pointing to an associated edge
face: int, array pointing to an incident face
faces: struct, encoding the mesh's faces w. fields:
n: int, number of faces
n_dofs: int, list containing number of dofs per face
type: str, type of face (interval, triangle, or quadrilateral)
tags: tags associated with faces (same as for points)
connectivity: int, list of indices of vertices that make
up faces.
half_edge: int, array pointing to a half-edge on the boundary
Methods:
__init__
initialize_grid_structure
rectangular_grid
grid_from_gmsh
determine_half_edges
dim
get_neighbor
contains_node
Note: The grid can be used to describe the connectivity associated with a
ROOT Tree.
"""
def __init__(self, box=None, resolution=None, periodic=None, dim=None,
x=None, connectivity=None, file_path=None, file_format='gmsh'):
"""
Constructor
Inputs:
box: list of endpoints for rectangular mesh
1D [x_min, x_max]
2D [x_min, x_max, y_min, y_max]
resolution: tuple, with number of cells in each direction
dim: int, spatial dimension of the grid
x: double, (n,) array of points in for constructing a grid
connectivity: int, list of cell connectivities
file_path: str, path to mesh file
file_format: str, type of mesh file (currently only gmsh)
periodic: int, set containing integers 0 and/or 1.
0 in periodic: make periodic in x-direction
1 in periodic: make periodic in y-direction
"""
#
# Initialize struct
#
self.is_rectangular = False
self.is_periodic = False
self.resolution = resolution
self.initialize_grid_structure()
if file_path is not None:
# =================================================================
# Import grid from gmsh
# =================================================================
assert file_format=='gmsh', \
'For input file_format, use "gmsh".'
#
# Import grid from gmsh
#
self.grid_from_gmsh(file_path)
elif x is not None:
# =================================================================
# Generate grid from connectivity
# =================================================================
self.grid_from_connectivity(x, connectivity)
else:
# =================================================================
# Rectangular Grid
# =================================================================
#
# Determine dimension
#
if dim is None:
if resolution is not None:
assert type(resolution) is tuple, \
'Input "resolution" should be a tuple.'
dim = len(resolution)
elif box is not None:
assert type(box) is list, 'Input "box" should be a list.'
if len(box) == 2:
dim = 1
elif len(box) == 4:
dim = 2
else:
box_length = 'Box should be a list of length 2 or 4.'
raise Exception(box_length)
else:
raise Exception('Unable to verify dimension of grid')
self.__dim = dim
#
# Specify box
#
if box is None:
#
# Default boundary box
#
if dim==1:
box = [0,1]
elif dim==2:
box = [0,1,0,1]
#
# Specify resolution
#
if resolution is None:
#
# Default resolution
#
if dim==1:
resolution = (1,)
elif dim==2:
resolution = (1,1)
self.is_rectangular = True
self.rectangular_grid(box=box, resolution=resolution)
# =====================================================================
# Generate doubly connected edge list
# =====================================================================
self.determine_half_edges()
#
# Add periodicity
#
self.periodic_coordinates = {}
if periodic is not None:
if self.dim()==2:
assert self.is_rectangular, \
'Only rectangular meshes can be made periodic'
self.make_periodic(periodic, box)
self.is_periodic = True
def initialize_grid_structure(self):
"""
Initialize empty grid.
"""
self.format = None
# Subregions
self.subregions = {'dim': [], 'n': None, 'names': [], 'tags': []}
# Points
self.points = {'half_edge': [], 'n': None, 'tags': {}, 'n_dofs': None,
'coordinates': []}
# Edges
# TODO: Remove
self.edges = {'n': None, 'tags': {}, 'n_dofs': None, 'connectivity': []}
# Half-Edges
self.half_edges = {'n': None, 'tags': {}, 'n_dofs': None,
'connectivity': [], 'prev': [], 'next': [],
'twin': [], 'edge': [], 'face': [], 'position': []}
# Faces
self.faces = {'n': None, 'type': [], 'tags': {}, 'n_dofs': [],
'connectivity': []}
def rectangular_grid(self, box, resolution):
"""
Construct a grid on a rectangular region
Inputs:
box: int, tuple giving bounding vertices of rectangular domain:
(x_min, x_max) in 1D, (x_min, x_max, y_min, y_max) in 2D.
resolution: int, tuple giving the number of cells in each direction
"""
assert type(resolution) is tuple, \
'Input "resolution" should be a tuple.'
dim = len(resolution)
if dim == 1:
# =================================================================
# One dimensional grid
# =================================================================
# Generate DCEL
x_min, x_max = box
n_points = resolution[0] + 1
x = np.linspace(x_min, x_max, n_points)
# Store grid information
self.__dim = 1
self.points['coordinates'] = [(xi,) for xi in x]
self.points['n'] = n_points
elif dim == 2:
# =================================================================
# Two dimensional grid
# =================================================================
self.__dim = 2
x_min, x_max, y_min, y_max = box
nx, ny = resolution
n_points = (nx+1)*(ny+1)
self.points['n'] = n_points
#
# Record vertices
#
x = np.linspace(x_min, x_max, nx+1)
y = np.linspace(y_min, y_max, ny+1)
for i_y in range(ny+1):
for i_x in range(nx+1):
self.points['coordinates'].append((x[i_x],y[i_y]))
#
# Face connectivities
#
# Vertex indices
idx = np.arange((nx+1)*(ny+1)).reshape(ny+1,nx+1).T
for i_y in range(ny):
for i_x in range(nx):
fv = [idx[i_x,i_y], idx[i_x+1,i_y],
idx[i_x+1,i_y+1], idx[i_x,i_y+1]]
self.faces['connectivity'].append(fv)
self.faces['n'] = nx*ny
self.faces['type'] = ['quadrilateral']*self.faces['n']
else:
raise Exception('Only 1D/2D supported.')
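        # Worked numbering example (derived from the loops above, not from
        # the source): resolution=(2,1) on the unit square yields 6 points,
        # numbered row by row,
        #
        #   3---4---5
        #   |   |   |
        #   0---1---2
        #
        # and two counterclockwise face connectivities [0,1,4,3] and [1,2,5,4].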
def grid_from_connectivity(self, x, connectivity):
"""
Construct grid from connectivity information
"""
points = self.points
x = convert_to_array(x, dim=1)
dim = x.shape[1]
if dim==1:
#
# 1D
#
self.__dim = 1
#
# Store points
#
x = np.sort(x, axis=0) # ensure the vector is sorted
points['coordinates'] = [(xi[0],) for xi in x]
points['n'] = len(x)
elif dim==2:
#
# 2D
#
self.__dim = 2
#
# Store points
#
n_points = x.shape[0]
points['coordinates'] = [(x[i,0],x[i,1]) for i in range(n_points)]
points['n'] = n_points
#
# Store faces
#
faces = self.faces
assert connectivity is not None, 'Specify connectivity.'
assert type(connectivity) is list, \
'Connectivity should be passed as a list.'
n_faces = len(connectivity)
faces['n'] = n_faces
for i in range(n_faces):
assert type(connectivity[i]) is list, \
'Connectivity entries should be lists'
faces['connectivity'].append(connectivity[i])
faces['n_dofs'].append(len(connectivity[i]))
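        # Usage sketch (illustrative values, matching the checks above):
        #
        #   dcel = DCEL(x=[0.0, 0.5, 1.0])    # sorted 1D point grid
        #
        #   pts = [(0,0), (1,0), (1,1), (0,1)]
        #   dcel = DCEL(x=pts, connectivity=[[0, 1, 2, 3]])  # one quad face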
def grid_from_gmsh(self, file_path):
"""
Import computational mesh from a .gmsh file and store it in the grid.
Input:
file_path: str, path to gmsh file
"""
points = self.points
edges = self.edges
faces = self.faces
subregions = self.subregions
#
# Initialize tag categories
#
for entity in [points, edges, faces]:
entity['tags'] = {'phys': [], 'geom': [], 'partition': []}
with open(file_path, 'r') as infile:
while True:
line = infile.readline()
#
# Mesh format
#
if line == '$MeshFormat\n':
# Read next line
line = infile.readline()
self.format = line.rstrip()
# TODO: Put an assert statement here to check version
while line != '$EndMeshFormat\n':
line = infile.readline()
line = infile.readline()
#
# Subregions
#
if line == '$PhysicalNames\n':
#
# Record number of subregions
#
line = infile.readline()
subregions['n'] = int(line.rstrip())
line = infile.readline()
while True:
if line == '$EndPhysicalNames\n':
line = infile.readline()
break
#
# Record names, dimensions, and tags of subregions
#
words = line.split()
name = words[2].replace('"','')
subregions['names'].append(name)
subregions['dim'].append(int(words[0]))
subregions['tags'].append(int(words[1]))
line = infile.readline()
# TODO: Is this necessary?
# =============================================================
# Cell Vertices
# =============================================================
if line == '$Nodes\n':
#
# Record number of nodes
#
line = infile.readline()
points['n'] = int(line.rstrip())
line = infile.readline()
while True:
if line == '$EndNodes\n':
line = infile.readline()
break
#
# Record vertex coordinates
#
words = line.split()
vtx = (float(words[1]),float(words[2]))
points['coordinates'].append(vtx)
line = infile.readline()
# =============================================================
# Faces
# =============================================================
if line == '$Elements\n':
next(infile) # skip 'number of elements' line
line = infile.readline()
n_faces = 0 # count number of faces
while True:
"""
General format for elements
$Elements
n_elements
el_number | el_type* | num_tags** | ...
tag1 .. tag_num_tags |...
node_number_list
*el_type: element type
points: 15 (1 node point)
lines: 1 (2 node line), 0 --------- 1
8 (3 node 2nd order line), 0 --- 2 --- 1
26 (4 node 3rd order line) 0 - 2 - 3 - 1
triangles: 2 (3 node 1st order triangle)
9 (6 node 2nd order triangle)
21 (9 node 3rd order triangle)
quadrilateral: 3 (4 node first order quadrilateral)
10 (9 node second order quadrilateral)
**num_tags:
1st tag - physical entity to which element belongs
(often 0)
2nd tag - number of elementary geometrical entity to
which element belongs (as defined in the
.geo file).
3rd tag - number of the mesh partition to which the
element belongs.
"""
if line == '$EndElements\n':
faces['n'] = n_faces
line = infile.readline()
break
words = line.split()
#
# Identify entity
#
element_type = int(words[1])
if element_type==15:
#
# Point (1 node)
#
dofs_per_entity = 1
entity = points
                        elif element_type==1:
#
# Linear edge (2 nodes)
#
dofs_per_entity = 2
entity = edges
elif element_type==8:
#
# Quadratic edge (3 nodes)
#
dofs_per_entity = 3
entity = edges
elif element_type==26:
#
# Cubic edge (4 nodes)
#
dofs_per_entity = 4
entity = edges
elif element_type==2:
#
# Linear triangular element (3 nodes)
#
dofs_per_entity = 3
entity = faces
entity['type'].append('triangle')
n_faces += 1
elif element_type==9:
#
# Quadratic triangular element (6 nodes)
#
dofs_per_entity = 6
entity = faces
entity['type'].append('triangle')
n_faces += 1
elif element_type==21:
#
# Cubic triangle (10 nodes)
#
dofs_per_entity = 10
entity = faces
entity['type'].append('triangle')
n_faces += 1
elif element_type==3:
#
# Linear quadrilateral (4 nodes)
#
dofs_per_entity = 4
entity = faces
entity['type'].append('quadrilateral')
n_faces += 1
elif element_type==10:
#
# Quadratic quadrilateral (9 nodes)
#
dofs_per_entity = 9
entity = faces
entity['type'].append('quadrilateral')
n_faces += 1
entity['n_dofs'] = dofs_per_entity
#
# Record tags
#
num_tags = int(words[2])
if num_tags > 0:
#
# Record Physical Entity tag
#
entity['tags']['phys'].append(int(words[3]))
else:
#
# Tag not included ... delete
#
entity['tags'].pop('phys', None)
if num_tags > 1:
#
# Record Geometrical Entity tag
#
entity['tags']['geom'].append(int(words[4]))
else:
#
# Tag not included ... delete
#
entity['tags'].pop('geom', None)
if num_tags > 2:
#
# Record Mesh Partition tag
#
entity['tags']['partition'].append(int(words[5]))
else:
#
# Tag not included ... delete
#
entity['tags'].pop('partition', None)
if dofs_per_entity > 1:
#
# Connectivity
#
i_begin = 3 + num_tags
i_end = 3 + num_tags + dofs_per_entity
connectivity = [int(words[i])-1 for i in \
np.arange(i_begin,i_end) ]
entity['connectivity'].append(connectivity)
line = infile.readline()
if line == '':
break
#
# Check for mixed Faces
#
if len(set(faces['type']))>1:
raise Warning('Face types are mixed')
#
# Turn Edge connectivities into sets
#
for i in range(len(edges['connectivity'])):
edges['connectivity'][i] = frozenset(edges['connectivity'][i])
#
# There are faces, dimension = 2
#
if n_faces > 0:
self.__dim = 2
def determine_half_edges(self):
"""
Returns a doubly connected edge list.
The grid should already have the following specified:
1D: points
2D: points, faces
"""
#
# Update Point Fields
#
n_points = self.points['n']
        self.points['half_edge'] = np.full((n_points,), -1, dtype=int)
# =====================================================================
# Initialize Half-Edges
# =====================================================================
if self.dim()==1:
#
# 1D mesh
#
n_he = self.points['n']-1
elif self.dim()==2:
#
# 2D mesh
#
n_faces = self.faces['n']
n_he = 0
for i in range(n_faces):
n_he += len(self.faces['connectivity'][i])
self.half_edges['n'] = n_he
        self.half_edges['connectivity'] = np.full((n_he,2), -1, dtype=int)
        self.half_edges['prev'] = np.full((n_he,), -1, dtype=int)
        self.half_edges['next'] = np.full((n_he,), -1, dtype=int)
        self.half_edges['twin'] = np.full((n_he,), -1, dtype=int)
        self.half_edges['edge'] = np.full((n_he,), -1, dtype=int)
        self.half_edges['face'] = np.full((n_he,), -1, dtype=int)
# =====================================================================
# Define Half-Edges
# =====================================================================
if self.dim()==1:
#
# 1D: Define HE's and link with others and points
#
n_points = self.points['n']
for i in range(n_points-1):
# Connectivity
self.half_edges['connectivity'][i] = [i,i+1]
# Previous and next half_edge in the DCEL
# NOTE: Here (unlike 2D), prev and next are used to
# navigate in the grid.
self.half_edges['prev'][i] = i-1
self.half_edges['next'][i] = i+1 if i+1<n_points-1 else -1
# Incident half_edge to left endpoint
self.points['half_edge'][i] = i
'''
#
# Twin
#
# Define twin half-edge
self.half_edges['connectivity'][n_points-1+i] = [i+1,i]
self.half_edges['twin'][i] = n_points-1+i
self.half_edges['twin'][n_points-1+i] = i
# Incident half-edge to right endpoint
self.points['half_edge'][i+1] = n_points + i
# Next and previous
self.half_edges['next'][n_points-1+i] = i-1
self.half_edges['prev'][n_points-1+i] = \
i+1 if i+1<n_points else -1
'''
elif self.dim()==2:
#
# 2D: Define HE's and link with others, faces, and points
#
n_faces = self.faces['n']
            self.faces['half_edge'] = np.full((n_faces,), -1, dtype=int)
#
# Loop over faces
#
half_edge_count = 0
for i_fce in range(n_faces):
fc = self.faces['connectivity'][i_fce]
n_sides = len(fc)
#
# Face's half-edge numbers
#
fhe = [half_edge_count + j for j in range(n_sides)]
#
# Update face information
#
self.faces['half_edge'][i_fce] = fhe[0]
for i in range(n_sides):
#
# Update half-edge information
#
#
# Connectivity
#
hec = [fc[i%n_sides], fc[(i+1)%n_sides]]
self.half_edges['connectivity'][fhe[i],:] = hec
'''
DEBUG
if fhe[i] >= n_he:
print('Half-edge index exceeds matrix dimensions.')
print('Number of faces: {0}'.format(self.faces['n']))
print('Number of half-edges: 3x#faces =' + \
' {0}'.format(3*self.faces['n']))
print('#Half-Edges recorded: {0}'+\
''.format(self.half_edges['n']))
'''
#
# Previous Half-Edge
#
self.half_edges['prev'][fhe[i]] = fhe[(i-1)%n_sides]
#
# Next Half-Edge
#
self.half_edges['next'][fhe[i]] = fhe[(i+1)%n_sides]
#
# Face
#
self.half_edges['face'][fhe[i]] = i_fce
#
# Points
#
self.points['half_edge'][fc[i%n_sides]] = fhe[i]
#
# Update half-edge count
#
half_edge_count += n_sides
hec = self.half_edges['connectivity']
# =====================================================================
# Determine twin half_edges
# =====================================================================
for i in range(n_he):
#
# Find the row whose reversed entries match current entry
#
row = np.argwhere((hec[:,0]==hec[i,1]) & (hec[:,1]==hec[i,0]))
if len(row) == 1:
#
# Update twin field
#
                self.half_edges['twin'][i] = int(row[0, 0])
"""
# =====================================================================
# Link with Edges
# =====================================================================
#
# Update Edge Fields
#
# TODO: Delete when safe to do so!!
edge_set = set(self.edges['connectivity'])
self.edges['half_edge'] = [None]*len(edge_set)
for i_he in range(n_he):
#
# Loop over half-edges
#
hec = self.half_edges['connectivity'][i_he]
'''
DEBUG
#print('Size of edge_set: {0}'.format(len(edge_set)))
#print('Size of edge connectivity: {0}'.format(len(self.edges['connectivity'])))
'''
if set(hec) in edge_set:
'''
DEBUG
print('Set {0} is in edge_set. Locating it'.format(hec))
'''
#
# Edge associated with Half-Edge exists
#
i_edge = self.edges['connectivity'].index(set(hec))
'''
DEBUG
print('Location: {0}'.format(i_edge))
print('Here it is: {0}'.format(self.edges['connectivity'][i_edge]))
#print('Linking half edge with edge:')
#print('Half-edge: {0}'.format(self.edges['connectivity'][i_edge]))
#print('Edge: {0}'.format(self.half_edges['connectivity'][fhe[i]]))
#print(len(self.edges['half_edge']))
#print('Length of edge_set {0}'.format(len(edge_set)))
#print(edge_set)
'''
#
# Link edge to half edge
#
self.edges['half_edge'][i_edge] = i_he
else:
#print('Set {0} is not in edge_set \n '.format(hec))
#
# Add edge
#
new_edge = frozenset(hec)
self.edges['connectivity'].append(new_edge)
edge_set.add(new_edge)
i_edge =len(self.edges['connectivity'])-1
#
# Assign empty tags
#
for tag in self.edges['tags'].values():
tag.append(None)
#
# Link edge to half-edge
#
                self.edges['half_edge'].append(i_he)
#
# Link half-edge to edge
#
            self.half_edges['edge'][i_he] = i_edge
#
# Update size of edge list
#
self.edges['n'] = len(self.edges['connectivity'])
"""
def dim(self):
"""
Returns the underlying dimension of the grid
"""
return self.__dim
def get_neighbor(self, i_entity, i_direction):
"""
Returns the neighbor of an entity in a given direction
Inputs:
i_entity: int, index of the entity whose neighbor we seek
In 1D: i_entity indexes a half_edge
In 2D: i_entity indexes a face
i_direction: int, index of an entity specifying a direction
In 1D: i_direction indexes an interval endpoint
In 2D: i_direction indexes a half_edge
"""
if self.dim() == 1:
#
# 1D grid
#
hec = self.half_edges['connectivity'][i_entity]
assert i_direction in hec, \
'Point index not in connectivity of this Half-Edge.'
if i_direction == hec[0]:
#
# Left endpoint: go to previous half-edge
#
i_nbr = self.half_edges['prev'][i_entity]
elif i_direction == hec[1]:
#
# Right endpoint: go to next Half-Edge
#
i_nbr = self.half_edges['next'][i_entity]
elif self.dim() == 2:
#
# 2D grid: use half_edges
#
assert self.half_edges['face'][i_direction] == i_entity,\
'Cell not incident to Half-Edge.'
i_nbr_he = self.half_edges['twin'][i_direction]
i_nbr = self.half_edges['face'][i_nbr_he]
if i_nbr != -1:
return i_nbr
else:
return None
def get_boundary_half_edges(self):
"""
Returns a list of the boundary half_edge indices
"""
assert self.dim()==2, 'Half edges only present in 2D grids.'
bnd_hes_conn = []
bnd_hes = []
#
# Locate half-edges on the boundary
#
for i_he in range(self.half_edges['n']):
if self.half_edges['twin'][i_he] == -1:
bnd_hes.append(i_he)
bnd_hes_conn.append(self.half_edges['connectivity'][i_he])
#
# Group and sort half-edges
#
bnd_hes_sorted = [deque([he]) for he in bnd_hes]
        while True:
            merger_activity = False
            for g1 in bnd_hes_sorted:
                #
                # Check whether g1 can absorb another deque in bnd_hes_sorted
                # (reset the activity marker once per sweep, not per deque)
                #
                for g2 in bnd_hes_sorted:
                    if g2 is g1:
                        # Skip self-comparison: a closed boundary loop would
                        # otherwise be merged with itself
                        continue
#
# Does g1's head align with g2's tail?
#
if self.half_edges['connectivity'][g1[-1]][1]==\
self.half_edges['connectivity'][g2[0]][0]:
# Remove g2 from list
if len(bnd_hes_sorted) > 1:
g2 = bnd_hes_sorted.pop(bnd_hes_sorted.index(g2))
g1.extend(g2)
merger_activity = True
#
# Does g1's tail align with g2's head?
#
elif self.half_edges['connectivity'][g1[0]][0]==\
self.half_edges['connectivity'][g2[-1]][1]:
if len(bnd_hes_sorted) > 1:
                            g2 = bnd_hes_sorted.pop(bnd_hes_sorted.index(g2))
                            g2.reverse()  # extendleft reverses order, so pre-reverse g2
                            g1.extendleft(g2)
merger_activity = True
if not merger_activity:
break
#
# Multiple boundary segments
#
return [list(segment) for segment in bnd_hes_sorted]
"""
bnd_hes_sorted = []
i_he_left = bnd_hes.pop()
i_he_right = i_he_left
he_conn_left = bnd_hes_conn.pop()
he_conn_right = he_conn_left
        subbnd_hes_sorted = deque([i_he_left])
while len(bnd_hes)>0:
added_to_left = False
added_to_right = False
for i in range(len(bnd_hes)):
if bnd_hes_conn[i][0] == he_conn_right[1]:
#
# Base vertex of he in list matches
# head vertex of popped he.
#
i_he_right = bnd_hes.pop(i)
he_conn_right = bnd_hes_conn.pop(i)
subbnd_hes_sorted.append(i_he_right)
added_to_right = True
elif bnd_hes_conn[i][1] == he_conn_left[0]:
#
# Head vertex of he in list matches
# base vertex of popped he.
#
                i_he_left = bnd_hes.pop(i)
he_conn_left = bnd_hes_conn.pop(i)
subbnd_hes_sorted.appendleft(i_he_left)
added_to_left = True
if added_to_left and added_to_right:
break
if not added_to_left and not added_to_right:
# Could not find any half-edges to add
#
# Add boundary segment to sorted hes
#
bnd_hes_sorted.extend(ihe for ihe in subbnd_hes_sorted)
#
# Reinitialize subbnd_hes_sorted
#
i_he_left = bnd_hes.pop()
i_he_right = i_he_left
he_conn_left = bnd_hes_conn.pop()
he_conn_right = he_conn_left
                subbnd_hes_sorted = deque([i_he_left])
return bnd_hes_sorted
"""
'''
def get_boundary_edges(self):
"""
Returns a list of the boundary edge indices
TODO: Get rid of this
"""
bnd_hes_sorted = self.get_boundary_half_edges()
#
# Extract boundary edges
#
        bnd_edges = [self.half_edges['edge'][i]
                     for segment in bnd_hes_sorted for i in segment]
return bnd_edges
'''
def get_boundary_points(self):
"""
Returns a list of boundary point indices
"""
if self.dim() == 1:
#
# One dimensional grid (assume sorted)
#
bnd_points = [0, self.points['n']-1]
elif self.dim() == 2:
#
# Two dimensional grid
#
bnd_points = []
for i_he in self.get_boundary_half_edges():
#
# Add initial point of each boundary half edge
#
bnd_points.append(self.half_edges['connectivity'][i_he][0])
else:
raise Exception('Only dimensions 1 and 2 supported.')
return bnd_points
def make_periodic(self, coordinates, box):
"""
Make a rectangular DCEL periodic by assigning the correct twins to
HalfEdges on the boundary.
Inputs:
Coordinates: set, containing 0 (x-direction) and/or 1 (y-direction).
TODO: Cannot make periodic (1,1) DCEL objects
"""
if self.dim()==1:
#
# In 1D, first half-edge becomes "next" of last half-edge
#
self.half_edges['next'][-1] = 0
self.half_edges['prev'][0] = self.half_edges['n']-1
elif self.dim()==2:
#
# In 2D, must align vertices on both side of the box
#
x_min, x_max, y_min, y_max = box
if 0 in coordinates:
#
# Make periodic in the x-direction
#
left_hes = []
right_hes = []
for segment in self.get_boundary_half_edges():
for he in segment:
#
# Record coordinates of half-edge's base and head
#
i_base, i_head = self.half_edges['connectivity'][he][:]
                        x_base, y_base = self.points['coordinates'][i_base]
                        x_head, y_head = self.points['coordinates'][i_head]
if np.isclose(x_base,x_max) and np.isclose(x_head,x_max):
#
# If x-values are near x_max, it's on the right
#
right_hes.append((he, y_base, y_head))
elif np.isclose(x_base,x_min) and np.isclose(x_head,x_min):
#
# If x-values are near x_min, it's on the left
#
left_hes.append((he, y_base, y_head))
#
# Look for twin half-edges
#
                n_left = len(left_hes)
                n_right = len(right_hes)
                assert n_left==n_right, \
                    'Number of half-edges on either side of domain differ. '+\
                    'Cannot make periodic.'
while len(left_hes)>0:
l_he, l_ybase, l_yhead = left_hes.pop()
for ir in range(len(right_hes)):
#
# For each halfedge on the left, check if there is a
# corresponding one on the right.
#
r_he, r_ybase, r_yhead = right_hes[ir]
if np.isclose(l_ybase, r_yhead) and np.isclose(l_yhead, r_ybase):
self.half_edges['twin'][l_he] = r_he
self.half_edges['twin'][r_he] = l_he
del right_hes[ir]
break
assert len(right_hes)==0, \
'All HalfEdges on the left should be matched with '+\
'one on the right.'
if 1 in coordinates:
#
# Make periodic in the y-direction
                #
top_hes = []
bottom_hes = []
for segment in self.get_boundary_half_edges():
for he in segment:
#
# Record coordinates of half-edge's base and head
#
i_base, i_head = self.half_edges['connectivity'][he]
                        x_base, y_base = self.points['coordinates'][i_base]
                        x_head, y_head = self.points['coordinates'][i_head]
if np.isclose(y_base,y_max) and np.isclose(y_head,y_max):
#
# If y-values are near y_max, it's on the top
#
top_hes.append((he, x_base, x_head))
elif np.isclose(y_base,y_min) and np.isclose(y_head,y_min):
#
# If y-values are near y_min, it's on the bottom
#
bottom_hes.append((he, x_base, x_head))
#
# Look for twin half-edges
#
while len(bottom_hes)>0:
b_he, b_xbase, b_xhead = bottom_hes.pop()
for it in range(len(top_hes)):
#
# For each halfedge on the left, check if there is a
# corresponding one on the right.
#
t_he, t_xbase, t_xhead = top_hes[it]
if np.isclose(t_xbase, b_xhead) and np.isclose(t_xhead, b_xbase):
self.half_edges['twin'][b_he] = t_he
self.half_edges['twin'][t_he] = b_he
del top_hes[it]
break
                assert len(top_hes)==0, \
                    'All HalfEdges on the bottom should be matched with '+\
                    'one on the top.'
self.periodic_coordinates = coordinates
class Mesh(object):
"""
Mesh class
"""
def __init__(self, dcel=None, box=None, resolution=None, periodic=None,
dim=None, x=None, connectivity=None, file_path=None,
file_format='gmsh'):
# =====================================================================
# Doubly connected Edge List
# =====================================================================
if dcel is None:
#
# Initialize doubly connected edge list if None
#
dcel = DCEL(box=box, resolution=resolution, periodic=periodic,
dim=dim, x=x, connectivity=connectivity,
file_path=file_path, file_format=file_format)
else:
assert isinstance(dcel,DCEL)
self.dcel = dcel
#
# Determine mesh dimension
#
dim = dcel.dim()
self._dim = dim
# =====================================================================
# Vertices
# =====================================================================
vertices = []
n_points = dcel.points['n']
for i in range(n_points):
vertices.append(Vertex(dcel.points['coordinates'][i]))
self.vertices = vertices
def dim(self):
"""
Returns the dimension of the mesh (1 or 2)
"""
return self._dim
class Mesh1D(Mesh):
"""
1D Mesh Class
"""
def __init__(self, dcel=None, box=None, resolution=None, periodic=False,
x=None, connectivity=None, file_path=None, file_format='gmsh'):
#
# Convert input "periodic" to something intelligible for DCEL
#
if periodic is True:
periodic = {0}
else:
periodic = None
Mesh.__init__(self, dcel=dcel, box=box, resolution=resolution,
periodic=periodic, dim=1, x=x, connectivity=connectivity,
file_path=file_path, file_format=file_format)
assert self.dim()==1, 'Mesh dimension not 1.'
# =====================================================================
# Intervals
# =====================================================================
intervals = []
n_intervals = self.dcel.half_edges['n']
for i in range(n_intervals):
#
# Make list of intervals
#
i_vertices = self.dcel.half_edges['connectivity'][i]
v_base = self.vertices[i_vertices[0]]
v_head = self.vertices[i_vertices[1]]
interval = Interval(v_base, v_head)
intervals.append(interval)
#
# Align intervals (assign next)
#
for i in range(n_intervals):
i_nxt = self.dcel.half_edges['next'][i]
if i_nxt!=-1:
if intervals[i].head() != intervals[i_nxt].base():
assert self.dcel.is_periodic, 'DCEL should be periodic'
#
# Intervals linked by periodicity
#
itv_1, vtx_1 = intervals[i], intervals[i].head()
itv_2, vtx_2 = intervals[i_nxt], intervals[i_nxt].base()
# Mark intervals periodic
itv_1.set_periodic()
itv_2.set_periodic()
# Mark vertices periodic
vtx_1.set_periodic()
vtx_2.set_periodic()
# Associate vertices with one another
vtx_1.set_periodic_pair((itv_2, vtx_2))
vtx_2.set_periodic_pair((itv_1, vtx_1))
else:
intervals[i].assign_next(intervals[i_nxt])
#
# Store intervals in Forest
#
self.cells = Forest(intervals)
self.__periodic_coordinates = self.dcel.periodic_coordinates
def is_periodic(self):
"""
Returns true if the mesh is periodic
"""
return 0 in self.__periodic_coordinates
def bin_points(self, points, i_points=None, subforest_flag=None):
"""
Determine a list of LEAF cells in the submesh, each of which contains
at least one point in points. Return the list of tuples of LEAF cells
and point indices.
Inputs:
points: Set of admissible points
subforest_flag: submesh flag
Outputs:
bins: tuple of (cell, index) pairs detailing the bins and indices
of points.
"""
x = convert_to_array(points)
n_points = x.shape[0]
if i_points is None:
i_points = np.arange(n_points)
else:
assert n_points==len(i_points)
bins = []
for cell in self.cells.get_children(flag=subforest_flag):
in_cell = cell.contains_points(x)
if any(in_cell):
#
# Cell contains (some) points
#
# Isolate points in cell and their indices
y = x[in_cell] # subset of points
y_idx = i_points[in_cell] # index of subset
# Recursion step
c_bin = cell.bin_points(y, y_idx, subforest_flag)
bins.extend(c_bin)
# Eliminate points from list
x = x[~in_cell]
i_points = i_points[~in_cell]
assert len(x)==0, 'Some points are not in domain.'
return bins
def get_boundary_vertices(self):
"""
Returns the mesh endpoint vertices
"""
if self.is_periodic():
return None
else:
v0 = self.cells.get_child(0).base()
v1 = self.cells.get_child(-1).head()
return v0, v1
def get_boundary_cells(self, subforest_flag=None):
"""
Returns the mesh endpoint cells
"""
if self.is_periodic():
#
# Periodic Mesh: No cells on the boundary
#
return None
else:
            cell_left, cell_right = None, None
            for cell in self.cells.get_leaves(subforest_flag=subforest_flag):
#
# Iterate over cells
#
if cell.get_neighbor(0, subforest_flag=subforest_flag) is None:
#
# Cannot find a left neighbor: found left boundary cell
#
cell_left = cell
if cell.get_neighbor(1, subforest_flag=subforest_flag) is None:
#
# Cannot find a right neighbor: found right boundary cell
#
cell_right = cell
return cell_left, cell_right
def bounding_box(self):
"""
Returns the interval endpoints
"""
if self.is_periodic():
#
# Periodic meshes have no boundary vertices, get them explicitly
#
v0 = self.cells.get_child(0).base()
v1 = self.cells.get_child(-1).head()
else:
v0, v1 = self.get_boundary_vertices()
x0, = v0.coordinates()
x1, = v1.coordinates()
return x0, x1
def mark_region(self, flag, f, entity_type='vertex', strict_containment=True,
on_boundary=False, subforest_flag=None):
"""
Flags all entities of specified type within specified 1D region in mesh
Inputs:
flag: str/int/tuple, marker
f: boolean function whose input is a number x and whose
output is True if the point is contained in the region to be
marked, False otherwise.
entity_type: str, entity to be marked ('cell', 'vertex')
strict_containment: bool, if True, an entity is marked only
if all its vertices are contained in the region. If False,
one vertex suffices
on_boundary: bool, if True, consider only entities on the boundary
subforest_flag: str/int/tuple, mesh marker.
"""
if on_boundary:
#
# Entity adjacent to boundary
#
if entity_type=='vertex':
#
# Vertices
#
for v in self.get_boundary_vertices():
x, = v.coordinates()
if f(x):
#
# Vertex in region -> mark it
#
v.mark(flag)
elif entity_type=='cell':
#
# Intervals
#
for cell in self.get_boundary_cells(subforest_flag=subforest_flag):
#
# Iterate over boundary cells
#
if strict_containment:
#
# Only mark interval if all vertices are in region
#
mark = True
for v in cell.get_vertices():
x, = v.coordinates()
if not f(x):
#
                            # One vertex outside region -> don't mark interval
#
mark = False
break
else:
#
# Mark interval if any vertex is in region
#
mark = False
for v in cell.get_vertices():
x, = v.coordinates()
if f(x):
#
# One vertex in region -> mark interval
#
mark = True
break
if mark:
#
# Mark interval if necessary
#
cell.mark(flag)
else:
#
# Region not adjacent to boundary
#
for cell in self.cells.get_leaves(subforest_flag=subforest_flag):
if entity_type=='vertex':
#
# Mark vertices
#
for v in cell.get_vertices():
x, = v.coordinates()
if f(x):
#
# Vertex is in region -> mark it
#
v.mark(flag)
elif entity_type=='cell':
#
# Mark intervals
#
if strict_containment:
mark = True
for v in cell.get_vertices():
x, = v.coordinates()
if not f(x):
#
# One cell vertex outside region -> don't mark
#
mark = False
break
else:
mark = False
for v in cell.get_vertices():
x, = v.coordinates()
if f(x):
#
# One vertex in region -> mark interval
#
mark = True
break
if mark:
#
# Mark interval if necessary
#
cell.mark(flag)
def get_region(self, flag=None, entity_type='vertex', on_boundary=False,
subforest_flag=None, return_cells=False):
"""
Returns a list of entities marked with the specified flag in 1D mesh
Inputs:
flag: str/int/tuple, entity marker
entity_type: str, type of entity to be returned
('vertex', 'cell', or 'half_edge')
on_boundary: bool, if True, seek region only along boundary
subforest_flag: str/int/tuple, submesh flag
return_cells: bool, if True, return tuples of the form
(entity, cell), i.e. include the cell containing the entity.
Outputs:
            region_entities: list of Cells/Intervals/HalfEdges/Vertices
                located within the region.
"""
region_entities = set()
if on_boundary:
#
# Restrict region to boundary
#
cells = self.get_boundary_cells(subforest_flag=subforest_flag)
bnd_vertices = self.get_boundary_vertices()
else:
#
# Region within 1D domain
#
cells = self.cells.get_leaves(subforest_flag=subforest_flag)
for cell in cells:
#
# Iterate over cells
#
if entity_type=='vertex':
#
# Vertex
#
for v in cell.get_vertices():
add_entity = flag is None or v.is_marked(flag)
if on_boundary:
#
# Additional check when on boundary
#
add_entity = add_entity and v in bnd_vertices
if add_entity:
#
# Add vertex to set
#
if return_cells:
#
# Add (vertex, cell) tuple
#
region_entities.add((v,cell))
else:
#
# Add only vertex
#
region_entities.add(v)
elif entity_type=='cell':
#
# Intervals
#
add_entity = flag is None or cell.is_marked(flag)
if add_entity:
#
# Add cell to set
#
if return_cells:
#
# Add (cell, cell) tuple
#
region_entities.add((cell, cell))
else:
#
# Add only cell
#
region_entities.add(cell)
return list(region_entities)
def record(self, subforest_flag):
"""
Record current mesh (intervals)
Input:
subforest_flag: str/int/tuple, name of mesh
"""
self.cells.record(subforest_flag)
class Mesh2D(Mesh):
"""
2D Mesh class
"""
def __init__(self, dcel=None, box=None, resolution=None, x=None,
periodic=None, connectivity=None, file_path=None,
file_format='gmsh'):
Mesh.__init__(self, dcel=dcel, box=box, resolution=resolution,
periodic=periodic, dim=2, x=x, connectivity=connectivity,
file_path=file_path, file_format=file_format)
self._is_rectangular = self.dcel.is_rectangular
self._periodic_coordinates = self.dcel.periodic_coordinates
# ====================================================================
# HalfEdges
# ====================================================================
half_edges = []
n_hes = self.dcel.half_edges['n']
for i in range(n_hes):
i_vertices = self.dcel.half_edges['connectivity'][i]
v_base = self.vertices[i_vertices[0]]
v_head = self.vertices[i_vertices[1]]
half_edge = HalfEdge(v_base, v_head)
half_edges.append(half_edge)
#
# Assign twins (2D)
#
for i_he in range(n_hes):
i_twin = self.dcel.half_edges['twin'][i_he]
if i_twin!=-1:
#
# HalfEdge has twin
#
he_nodes = self.dcel.half_edges['connectivity'][i_he]
twin_nodes = self.dcel.half_edges['connectivity'][i_twin]
if not all(he_nodes == list(reversed(twin_nodes))):
#
# Heads and Bases don't align, periodic boundary
#
assert self.is_periodic(), 'Mesh is not periodic.'\
'All HalfEdges should align.'
half_edges[i_he].set_periodic()
half_edges[i_twin].set_periodic()
half_edges[i_he].assign_twin(half_edges[i_twin])
half_edges[i_twin].assign_twin(half_edges[i_he])
#
# Store HalfEdges in Forest.
#
self.half_edges = Forest(half_edges)
# =====================================================================
# Cells
# =====================================================================
cells = []
n_cells = self.dcel.faces['n']
is_quadmesh = True
for ic in range(n_cells):
i_he_pivot = self.dcel.faces['half_edge'][ic]
i_he = i_he_pivot
one_rotation = False
i_hes = []
while not one_rotation:
i_hes.append(i_he)
i_he = self.dcel.half_edges['next'][i_he]
if i_he==i_he_pivot:
one_rotation = True
if len(i_hes)==4:
cells.append(QuadCell([half_edges[i] for i in i_hes]))
else:
cells.append(Cell([half_edges[i] for i in i_hes]))
is_quadmesh = False
self._is_quadmesh = is_quadmesh
self.cells = Forest(cells)
# =====================================================================
# Pair Periodic Vertices
# =====================================================================
for half_edge in self.half_edges.get_children():
# Pair periodic vertices
#
if half_edge.is_periodic():
half_edge.pair_periodic_vertices()
def is_rectangular(self):
"""
Check whether the Mesh is rectangular
"""
return self._is_rectangular
def is_periodic(self, coordinates=None):
"""
        Check whether the Mesh is periodic in the x- and/or y-direction
        Input:
            coordinates: int, set containing 0 (x-direction) and/or 1 (y-direction);
                if coordinates is None, check for periodicity in any direction
"""
if coordinates is None:
return 0 in self._periodic_coordinates or 1 in self._periodic_coordinates
else:
is_periodic = True
for i in coordinates:
if i not in self._periodic_coordinates:
return False
return is_periodic
def is_quadmesh(self):
"""
Check if the mesh is a quadmesh
"""
return self._is_quadmesh
def locate_point(self, point, flag=None):
"""
Returns the smallest (flagged) cell containing a given point
or None if current cell doesn't contain the point
Input:
point: Vertex
Output:
cell: smallest cell that contains x
"""
for cell in self.cells.get_children():
if flag is None:
if cell.contains_points(point):
return cell
else:
if cell.is_marked(flag) and cell.contains_points(point):
return cell
def get_boundary_segments(self, subforest_flag=None, flag=None):
"""
Returns a list of segments of boundary half edges
Inputs:
subforest_flag: optional flag (int/str) specifying the submesh
within which boundary segments are sought.
Note: This flag is applied to the cells in the submesh, not the edges
flag: optional flag (int/str) specifying boundary segments
Notes:
- The subforest flag specified above refers to the mesh cells,
not to the half-edges
- This implementation assumes that the boundary edges on the coarsest
mesh are a good representation of the computational region.
"""
bnd_hes = []
#
# Locate half-edges on the boundary (coarsest level)
#
for he in self.half_edges.get_children():
if he.twin() is None:
bnd_hes.append(he)
#
# Group and sort half-edges
#
bnd_hes_sorted = [deque([he]) for he in bnd_hes]
while True:
merger_activity = False
            for g1 in bnd_hes_sorted:
                #
                # Check whether g1 can absorb another deque in bnd_hes_sorted
                # (the activity marker is reset once per sweep, above)
                #
                for g2 in bnd_hes_sorted:
                    if g2 is g1:
                        # Skip self-comparison: a closed boundary loop would
                        # otherwise be merged with itself
                        continue
#
# Does g1's head align with g2's tail?
#
if g1[-1].head()==g2[0].base():
# Remove g2 from list
g2 = bnd_hes_sorted.pop(bnd_hes_sorted.index(g2))
g1.extend(g2)
merger_activity = True
#
# Does g1's tail align with g2's head?
#
elif g1[0].base()==g2[-1].head():
g2 = bnd_hes_sorted.pop(bnd_hes_sorted.index(g2))
g2.reverse()
g1.extendleft(g2)
merger_activity = True
if not merger_activity or len(bnd_hes_sorted)==1:
break
#
# Multiple boundary segments
#
bnd = [list(segment) for segment in bnd_hes_sorted]
#
# Get edges on finest level (allowed by submesh)
#
for segment in bnd:
hes_todo = [he for he in segment]
while len(hes_todo)>0:
#
# Pop out first half-edge in list
#
he = hes_todo.pop(0)
if he.cell().has_children(flag=subforest_flag):
#
# Half-Edge has valid sub-edges:
# Replace he in list with these.
#
i_he = segment.index(he)
del segment[i_he]
for che in he.get_children():
segment.insert(i_he, che)
i_he += 1
#
# Add che's to the list of he's to do
#
hes_todo.append(che)
#
# Throw out he's that are not flagged
#
            if flag is not None:
                #
                # Keep only flagged half-edges; filter into a new list to
                # avoid deleting from the list while iterating over it
                #
                segment[:] = [he for he in segment if he.is_marked(flag)]
return bnd
def get_boundary_vertices(self, flag=None, subforest_flag=None):
"""
Returns the Vertices on the boundary
"""
vertices = []
for segment in self.get_boundary_segments(subforest_flag=subforest_flag,
flag=flag):
for he in segment:
vertices.append(he.base())
return vertices
def mark_region(self, flag, f, entity_type='vertex', strict_containment=True,
on_boundary=False, subforest_flag=None):
"""
This method marks all entities within a 2D region.
Inputs:
flag: str, int, tuple marker
f: boolean function whose inputs are an x and a y vector and whose
output is True if the point is contained in the region to be
marked, False otherwise.
entity_type: str, entity to be marked ('cell', 'half_edge', 'vertex')
strict_containment: bool, if True, an entity is marked only
if all its vertices are contained in the region. If False,
one vertex suffices
on_boundary: bool, if True, consider only entities on the boundary
subforest_flag: str/int/tuple, mesh marker.
"""
if on_boundary:
#
# Iterate only over boundary segments
#
for segment in self.get_boundary_segments(subforest_flag=subforest_flag):
#
# Iterate over boundary segments
#
for he in segment:
#
# Iterate over half_edges within each segment
#
if entity_type=='vertex':
#
# Mark vertices
#
for v in he.get_vertices():
#
# Iterate over half-edge vertices
#
x,y = v.coordinates()
if f(x,y):
#
# Mark
#
v.mark(flag)
elif entity_type=='half_edge':
#
# Mark Half-Edges
#
if strict_containment:
#
# All vertices must be within region
#
mark = True
for v in he.get_vertices():
x,y = v.coordinates()
if not f(x,y):
#
# One vertex not in region, don't mark edge
#
mark = False
break
else:
#
# Only one vertex need be in the region
#
mark = False
for v in he.get_vertices():
x,y = v.coordinates()
if f(x,y):
#
# One vertex in region is enough
#
mark = True
break
if mark:
#
# Mark half_edge
#
he.mark(flag)
elif entity_type=='cell':
#
# Mark Cells
#
cell = he.cell()
if strict_containment:
mark = True
for v in cell.get_vertices():
x,y = v.coordinates()
if not f(x,y):
#
# One vertex not in region -> don't mark
#
mark = False
break
else:
mark = False
for v in cell.get_vertices():
x,y = v.coordinates()
if f(x,y):
#
# One vertex in region -> mark
#
mark = True
break
if mark:
#
# Mark cell
#
cell.mark(flag)
else:
raise Exception('Entity %s not supported'%(entity_type))
else:
#
# Region may lie within interior of the domain
#
for cell in self.cells.get_leaves(subforest_flag=subforest_flag):
#
# Iterate over mesh cells
#
if entity_type=='vertex':
#
# Mark vertices
#
for v in cell.get_vertices():
x,y = v.coordinates()
if f(x,y):
#
# Mark vertex
#
v.mark(flag)
elif entity_type=='half_edge':
#
# Mark half-edges
#
for he in cell.get_half_edges():
if strict_containment:
mark = True
for v in he.get_vertices():
x,y = v.coordinates()
if not f(x,y):
#
# Single vertex outside region disqualifies half_edge
#
mark = False
break
else:
mark = False
for v in he.get_vertices():
x,y = v.coordinates()
if f(x,y):
#
# Single vertex in region -> mark half_edge
#
mark = True
break
if mark:
#
# Mark half_edge
#
he.mark(flag)
elif entity_type=='cell':
#
# Mark cells
#
if strict_containment:
#
# All vertices must be in region
#
mark = True
for v in cell.get_vertices():
x,y = v.coordinates()
if not f(x,y):
mark = False
break
else:
#
# Only one vertex need be in region
#
mark = False
for v in cell.get_vertices():
x,y = v.coordinates()
if f(x,y):
mark = True
break
if mark:
#
# Mark cell
#
cell.mark(flag)
def tear_region(self, flag, subforest_flag=None):
"""
Tear the domain along an interior half-edge region.
As a consequence,
- Vertices on either side of the half-edge are separate
(although they still have the same coordinates).
- Adjoining half-edges along the region will no longer be
neighbors of each other.
Inputs:
flag: str/int/tuple, flag specifying the region of half-edges
subforest_flag: str/int/tuple, flag specifying the submesh
"""
#
# Iterate over half-edges along region
#
for he in self.get_region(flag=flag, entity_type='half_edge',
subforest_flag=subforest_flag):
#
# Assign New Vertices to half-edge
#
base = Vertex(he.base().coordinates())
head = Vertex(he.head().coordinates())
he.set_vertices(base, head)
#
# Disassociate from neighboring half-edge
#
twin = he.twin()
twin.delete_twin()
he.delete_twin()
def get_region(self, flag=None, entity_type='vertex', on_boundary=False,
subforest_flag=None, return_cells=False):
"""
Returns a list of entities marked with the specified flag
Inputs:
flag: str/int/tuple, entity marker
entity_type: str, type of entity to be returned
('vertex', 'cell', or 'half_edge')
on_boundary: bool, if True, seek region only along boundary
subforest_flag: str/int/tuple, submesh flag
return_cells: bool, if True, return a list of tuples of the form
(entity, cell)
Outputs:
            region_entities: list of Cells/Intervals/HalfEdges/Vertices
                located within the region.
"""
region_entities = set()
if on_boundary:
#
# Region is a subset of the boundary
#
for segment in self.get_boundary_segments(subforest_flag=subforest_flag):
#
# Iterate over boundary segments
#
for he in segment:
#
# Iterate over boundary edges
#
if entity_type=='cell':
#
# Get cell associated with half-edge
#
cell = he.cell()
#
# Add cell to set
#
add_entity = flag is None or cell.is_marked(flag)
if add_entity:
if return_cells:
#
# Return containing cell as cell
#
region_entities.add((cell, cell))
else:
#
# Return only entity
#
region_entities.add(cell)
elif entity_type=='half_edge':
#
# Half-edge
#
add_entity = flag is None or he.is_marked(flag)
if add_entity:
if return_cells:
#
# Return half-edge and cell
#
cell = he.cell()
region_entities.add((he, cell))
else:
#
# Return only entity
#
region_entities.add(he)
elif entity_type=='vertex':
#
# Vertices
#
for v in he.get_vertices():
                            add_entity = flag is None or v.is_marked(flag)
if add_entity:
if return_cells:
#
# Return containing cell and entity
#
cell = he.cell()
region_entities.add((v, cell))
else:
#
# Return only entity
#
region_entities.add(v)
else:
#
# Iterate over entire mesh.
#
for cell in self.cells.get_leaves(subforest_flag=subforest_flag):
#
# Iterate over mesh cells
#
if entity_type=='cell':
#
# Cells
#
add_entity = flag is None or cell.is_marked(flag)
if add_entity:
if return_cells:
#
# Return containing cell as cell
#
region_entities.add((cell, cell))
else:
#
# Return only entity
#
region_entities.add(cell)
elif entity_type=='half_edge':
#
# Half-Edges
#
for he in cell.get_half_edges():
add_entity = flag is None or he.is_marked(flag)
if add_entity:
if return_cells:
#
# Return half-edge and cell
#
region_entities.add((he, cell))
else:
#
# Return only entity
#
region_entities.add(he)
elif entity_type=='vertex':
#
# Vertices
#
for he in cell.get_half_edges():
for v in he.get_vertices():
add_entity = flag is None or v.is_marked(flag)
if add_entity:
if return_cells:
#
# Return containing cell and entity
#
region_entities.add((v, cell))
else:
#
# Return only entity
#
region_entities.add(v)
return region_entities
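    # Usage sketch (hypothetical flag name): collect flagged entities of a
    # given type, optionally restricted to the boundary. Note the result is
    # a set, and (entity, cell) tuples are returned when return_cells=True.
    #
    # >>> mesh = Mesh2D(resolution=(2, 2))
    # >>> cells = mesh.get_region(flag='wet', entity_type='cell')
    # >>> pairs = mesh.get_region(flag='wet', entity_type='vertex',
    # ...                         on_boundary=True, return_cells=True)
    # >>> for v, cell in pairs:
    # ...     print(v.coordinates())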
def bounding_box(self):
"""
Returns the bounding box of the mesh
"""
xy = convert_to_array(self.vertices, dim=2)
x0, x1 = xy[:,0].min(), xy[:,0].max()
y0, y1 = xy[:,1].min(), xy[:,1].max()
return x0, x1, y0, y1
def record(self, subforest_flag):
"""
Mark all cells and half-edges within current mesh with subforest_flag
"""
self.cells.record(subforest_flag)
self.half_edges.record(subforest_flag)
'''
def get_boundary_edges(self, flag=None):
"""
        Returns the half-edges on the boundary
"""
bnd_hes_unsorted = []
#
# Locate ROOT half-edges on the boundary
#
for he in self.half_edges.get_children():
if he.twin() is None:
bnd_hes_unsorted.append(he)
n_bnd = len(bnd_hes_unsorted)
#
# Sort half-edges
#
he = bnd_hes_unsorted.pop()
bnd_hes_sorted = [he]
while n_bnd>0:
for i in range(n_bnd):
nxt_he = bnd_hes_unsorted[i]
if he.head()==nxt_he.base():
bnd_hes_sorted.append(nxt_he)
he = bnd_hes_unsorted.pop(i)
n_bnd -= 1
break
#
# Get LEAF half-edges
#
bnd_hes = []
for he in bnd_hes_sorted:
            bnd_hes.extend(he.get_leaves(flag=flag))
        return bnd_hes
    '''
class QuadMesh(Mesh2D):
"""
Two dimensional mesh with quadrilateral cells.
    Note:
        When coarsening and refining a QuadMesh, the HalfEdges are not
        deleted; use submeshes instead.
"""
def __init__(self, dcel=None, box=None, resolution=None, x=None,
periodic=None, connectivity=None, file_path=None,
file_format='gmsh'):
#
# Initialize 2D Mesh.
#
Mesh2D.__init__(self, dcel=dcel, box=box, resolution=resolution,
periodic=periodic, x=x, connectivity=connectivity,
file_path=file_path, file_format=file_format)
self.cells = Forest(self.cells.get_children())
def bin_points(self, points, i_points=None, subforest_flag=None):
"""
Determine a list of LEAF cells in the submesh, each of which contains
at least one point in points. Return the list of tuples of LEAF cells
and point indices.
        Inputs:
            points: set of admissible points
            i_points: indices of the points (defaults to 0, ..., n_points-1)
            subforest_flag: submesh flag
        Outputs:
            bins: list of (cell, index) tuples detailing the bins and
                the indices of the points they contain.
"""
x = convert_to_array(points)
n_points = x.shape[0]
if i_points is None:
i_points = np.arange(n_points)
else:
assert n_points==len(i_points)
bins = []
for cell in self.cells.get_children(flag=subforest_flag):
in_cell = cell.contains_points(x)
if any(in_cell):
#
# Cell contains (some) points
#
# Isolate points in cell and their indices
y = x[in_cell] # subset of points
y_idx = i_points[in_cell] # index of subset
# Recursion step
c_bin = cell.bin_points(y, y_idx, subforest_flag)
bins.extend(c_bin)
# Eliminate points from list
x = x[~in_cell]
i_points = i_points[~in_cell]
assert len(x)==0, 'Some points are not in domain.'
return bins
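    # Usage sketch (assumes the default unit-square domain so that the
    # sample points lie inside the mesh; points outside trigger the
    # assertion above):
    #
    # >>> mesh = QuadMesh(resolution=(2, 2))
    # >>> points = np.array([[0.1, 0.2], [0.7, 0.9]])
    # >>> for cell, idx in mesh.bin_points(points):
    # ...     print(cell, idx)  # LEAF cell containing points[idx]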
def is_balanced(self, subforest_flag=None):
"""
Check whether the mesh is balanced
Inputs:
flag (optional): marker, allowing for the restriction to
a submesh.
"""
for cell in self.cells.get_leaves(subforest_flag=subforest_flag):
for half_edge in cell.get_half_edges():
nb = cell.get_neighbors(half_edge, flag=subforest_flag)
if nb is not None and nb.has_children(flag=subforest_flag):
twin = half_edge.twin()
for the_child in twin.get_children():
if the_child.cell().has_children(flag=subforest_flag):
return False
return True
def balance(self, subforest_flag=None):
"""
        Ensure that cells in the (sub)mesh conform to the 2:1 rule
"""
assert self.cells.subtrees_rooted(subforest_flag)
#
# Get all LEAF cells
#
leaves = set(self.cells.get_leaves(subforest_flag=subforest_flag)) # set: no duplicates
while len(leaves)>0:
leaf = leaves.pop()
#
# For each Cell
#
is_split = False
for half_edge in leaf.get_half_edges():
#
# Look for neighbors in each direction
#
nb = leaf.get_neighbors(half_edge, flag=subforest_flag)
if nb is not None and nb.has_children(flag=subforest_flag):
#
# Check if neighbor has children (still fine)
#
twin = half_edge.twin()
for the_child in twin.get_children():
if the_child.cell().has_children(flag=subforest_flag):
#
# Neighbor has grandchildren
#
if not leaf.has_children(flag=subforest_flag):
#
# LEAF does not have any flagged children
#
if leaf.has_children():
#
# LEAF has children (just not flagged)
#
for child in leaf.get_children():
child.mark(subforest_flag)
else:
#
# LEAF needs new children.
#
leaf.split(flag=subforest_flag)
#
# Add children to the leaf nodes to be considered
#
for child in leaf.get_children():
leaves.add(child)
#
# If LEAF is split, add all its neighbors to leaves
# to be considered for splitting.
#
for half_edge in leaf.get_half_edges():
hep = half_edge.get_parent()
if hep is not None:
hep_twin = hep.twin()
if hep_twin is not None:
leaves.add(hep_twin.cell())
#
# Current LEAF cell has been split, move on to next one
#
is_split = True
break
if is_split:
#
# LEAF already split, no need to check other directions
#
break
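    # Usage sketch: after local refinement a mesh may violate the 2:1 rule;
    # balance() keeps splitting offending LEAF cells until is_balanced()
    # holds (hypothetical refinement step elided).
    #
    # >>> mesh = QuadMesh(resolution=(2, 2))
    # >>> # ... refine a single cell a few times ...
    # >>> if not mesh.is_balanced():
    # ...     mesh.balance()
    # >>> mesh.is_balanced()
    # True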
def remove_supports(self, subforest_flag=None, coarsening_flag=None):
"""
Given a submesh (subforest_flag) and a coarsening_flag,
Input:
subforest_flag: flag specifying the submesh to be considered
coarsening_flag: flag specifying the cells to be removed
during coarsening
TODO: Unfinished. Loop over cells to be coarsened. Check if it's
safe to coarsen neighbors.
"""
#
# Get all flagged LEAF nodes
#
        # NOTE: method is unfinished (see TODO); restrict the leaves to
        # cells marked for coarsening within the submesh.
        leaves = self.cells.get_leaves(flag=coarsening_flag,
                                       subforest_flag=subforest_flag)
while len(leaves) > 0:
#
# For each LEAF
#
leaf = leaves.pop()
#
# Check if leaf is a support leaf
#
if subforest_flag is None:
is_support = leaf.is_marked('support')
else:
is_support = leaf.is_marked((subforest_flag, 'support'))
if is_support:
#
            # Check whether it's safe to delete the support cell
#
safe_to_coarsen = True
for half_edge in leaf.get_half_edges():
                nb = leaf.get_neighbors(half_edge, flag=subforest_flag)
if nb is not None and nb.has_children(flag=subforest_flag):
#
# Neighbor has (flagged) children, coarsening will lead
# to an unbalanced tree
#
safe_to_coarsen = False
break
if safe_to_coarsen:
#
                    # Remove support by marking the leaf with the coarsening flag
                    #
                    leaf.mark(coarsening_flag)
leaves.append(leaf.get_parent())
'''
class TriCell(object):
"""
TriCell object
Attributes:
Methods:
"""
def __init__(self, vertices, parent=None):
"""
Inputs:
vertices: Vertex, list of three vertices (ordered counter-clockwise)
parent: QuadCell that contains triangle
"""
v = []
e = []
assert len(vertices) == 3, 'Must have exactly 3 vertices.'
for i in range(3):
#
            # Define vertices and Half-Edges with minimum information
#
v.append(Vertex(vertices[i],2))
#
        # Some edge on the outer boundary
#
self.outer_component = e[0]
for i in range(3):
#
# Half edge originating from v[i]
#
v[i].incident_edge = e[i]
#
# Edges preceding/following e[i]
#
j = np.remainder(i+1,3)
e[i].next = e[j]
e[j].previous = e[i]
#
# Incident face
#
e[i].incident_face = self
self.parent_node = parent
self.__vertices = v
self.__edges = [
Edge(vertices[0], vertices[1], parent=self), \
Edge(vertices[1], vertices[2], parent=self), \
Edge(vertices[2], vertices[0], parent=self)
]
self.__element_no = None
self._flags = set()
def vertices(self,n):
return self.__vertices[n]
def edges(self):
return self.__edges
def area(self):
"""
Compute the area of the triangle
"""
v = self.__vertices
a = [v[1].coordinates()[i] - v[0].coordinates()[i] for i in range(2)]
b = [v[2].coordinates()[i] - v[0].coordinates()[i] for i in range(2)]
return 0.5*abs(a[0]*b[1]-a[1]*b[0])
def unit_normal(self, edge):
#p = ((y1-y0)/nnorm,(x0-x1)/nnorm)
pass
def number(self, num, overwrite=False):
"""
Assign a number to the triangle
"""
        if self.__element_no is None or overwrite:
self.__element_no = num
else:
raise Warning('Element already numbered. Overwrite disabled.')
return
def get_neighbor(self, edge, tree):
"""
Find neighboring triangle across edge wrt a given tree
"""
pass
def mark(self, flag=None):
"""
Mark TriCell
Inputs:
flag: optional label used to mark cell
"""
if flag is None:
self._flags.add(True)
else:
self._flags.add(flag)
def unmark(self, flag=None, recursive=False):
"""
Remove label from TriCell
Inputs:
flag: label to be removed
recursive: bool, also unmark all subcells
"""
#
# Remove label from own list
#
if flag is None:
# No flag specified -> delete all
self._flags.clear()
else:
# Remove specified flag (if present)
if flag in self._flags: self._flags.remove(flag)
#
# Remove label from children if applicable
#
if recursive and self.has_children():
for child in self.children.values():
child.unmark(flag=flag, recursive=recursive)
def is_marked(self,flag=None):
"""
Check whether cell is marked
Input: flag, label for QuadCell: usually one of the following:
True (catchall), 'split' (split cell), 'count' (counting)
TODO: Possible to add/remove set? Useful?
"""
if flag is None:
# No flag -> check whether set is empty
if self._flags:
return True
else:
return False
else:
            # Check whether the given label is contained in the cell's set
return flag in self._flags
'''
|
hvanwyk/quadmesh
|
src/mesh.py
|
Python
|
mit
| 263,904
|
[
"VisIt"
] |
f994c6d04d02a07b541299532eb10bcd5f453419e2723ef56d342f152c0e8668
|
# -*- coding: utf-8 -*-
#########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2018 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU General Public License version 3 or later.
## See the file COPYING.LIB for the full notice.
#########################################################################
import math
import numpy as np
import moose
import moose.fixXreacs as fixXreacs
def makeModel():
# create container for model
num = 1 # number of compartments
model = moose.Neutral( '/model' )
compartment = moose.CylMesh( '/model/compartment' )
compartment.x1 = 1.0e-6 # Set it to a 1 micron single-voxel cylinder
# create molecules and reactions
s = moose.Pool( '/model/compartment/s' )
t = moose.Pool( '/model/compartment/t' )
rXfer = moose.Reac( '/model/compartment/rXfer' )
#####################################################################
# Put in endo compartment. Add molecule s
endo = moose.EndoMesh( '/model/endo' )
endo.isMembraneBound = True
endo.surround = compartment
es = moose.Pool( '/model/endo/s' )
et = moose.Pool( '/model/endo/t' )
#####################################################################
moose.connect( rXfer, 'sub', s, 'reac' )
moose.connect( rXfer, 'sub', t, 'reac' )
moose.connect( rXfer, 'prd', es, 'reac' )
moose.connect( rXfer, 'prd', et, 'reac' )
rXfer.Kf = 0.02 # 0.02/mM/sec
rXfer.Kb = 0.02 # 0.02/mM/sec
#####################################################################
fixXreacs.fixXreacs( '/model' )
#fixXreacs.restoreXreacs( '/model' )
#fixXreacs.fixXreacs( '/model' )
#####################################################################
# Make solvers
ksolve = moose.Ksolve( '/model/compartment/ksolve' )
dsolve = moose.Dsolve( '/model/dsolve' )
eksolve = moose.Ksolve( '/model/endo/ksolve' )
edsolve = moose.Dsolve( '/model/endo/dsolve' )
stoich = moose.Stoich( '/model/compartment/stoich' )
stoich.compartment = compartment
stoich.ksolve = ksolve
stoich.dsolve = dsolve
stoich.path = "/model/compartment/##"
assert( dsolve.numPools == 4 )
s.vec.concInit = [1.0]*num
t.vec.concInit = [1.0]*num
estoich = moose.Stoich( '/model/endo/stoich' )
estoich.compartment = endo
estoich.ksolve = eksolve
estoich.dsolve = edsolve
estoich.path = "/model/endo/##"
assert( edsolve.numPools == 2 )
edsolve.buildMeshJunctions( dsolve )
plot1 = moose.Table2( '/model/plot1' )
plot2 = moose.Table2( '/model/plot2' )
moose.connect( '/model/plot1', 'requestOut', s, 'getN' )
moose.connect( '/model/plot2', 'requestOut', es, 'getN' )
plot3 = moose.Table2( '/model/plot3' )
plot4 = moose.Table2( '/model/plot4' )
moose.connect( '/model/plot3', 'requestOut', s, 'getConc' )
moose.connect( '/model/plot4', 'requestOut', es, 'getConc' )
def almostEq( a, b ):
#print a, b, (a-b)/(a+b)
return abs(a-b)/(a+b) < 5e-5
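# Example: almostEq compares the relative difference |a-b|/(a+b) against a
# 5e-5 tolerance, so it suits quantities of similar (nonzero) magnitude:
#
# >>> almostEq( 1.0, 1.0000001 )
# True
# >>> almostEq( 1.0, 1.1 )
# False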
def test_xreac6():
for i in range( 10, 18):
moose.setClock( i, 0.01 )
runtime = 100
makeModel()
moose.reinit()
moose.start( runtime )
    assert( almostEq( moose.element( '/model/compartment/s' ).conc,
            moose.element( '/model/endo/s' ).conc ) )
moose.delete( '/model' )
def main():
test_xreac6()
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
|
dilawar/moose-core
|
tests/core/test_Xreacs6.py
|
Python
|
gpl-3.0
| 3,592
|
[
"MOOSE"
] |
d7e6c11e1616f7218cab7b032f086f28664591afa8af71ade1d3687c6a94883f
|
'''This script will automate running blastx and blastn for putative
viral sequences.'''
import argparse
import os
from datetime import datetime
from subprocess import Popen
from shlex import split
from multiprocessing import cpu_count
from blastResults import sigHits, subsetSig
from version import version
def ublast(infile, outdir, dbdir, threads):
# Calls ublast against protein db for all input and against dna for
# significant hits
cont = True
print("\n\tRunning ublast against protein sequences...\n")
cmd = ("./usearch -ublast {} -db {}viralRefProt.udb -evalue 1e-5 \
-maxaccepts 1 -maxrejects 5 -threads {} -blast6out {}ublastX.outfmt6").format(infile,
dbdir, threads, outdir)
try:
us = Popen(split(cmd))
us.communicate()
if us.returncode == 0:
print("\n\tFinished running ublast against protein sequences.")
except:
print(("\tError: ublast protein search failure on {}.").format(infile))
cont = False
    if cont:
hits = sigHits(outdir + "ublastX.outfmt6")
query = subsetSig(infile, hits, outdir)
print("\n\tRunning ublast against nucleotide database...\n")
cmd = ("./usearch -ublast {} -db {}viralRefSeq.udb -evalue 1e-5 \
-maxaccepts 1 -maxrejects 5 -threads {} -strand both -blast6out \
{}ublastN.outfmt6").format(query, dbdir, threads, outdir)
try:
us = Popen(split(cmd))
us.communicate()
if us.returncode == 0:
print("\n\tFinished running ublast against DNA sequences.")
except:
print(("\tError: ublast DNA search failure on {}.").format(infile))
return True
def blastSeqs(infile, outdir, dbdir, threads):
# Calls blastx on all input and blastn on all hits with e < 10^-5
starttime = datetime.now()
print("\n\tRunning blastx against protein database...")
cont = True
cmd = ("blastx -query {} -db {}viralRefProt.faa -num_threads {} -max_target_seqs 1 \
-outfmt 6 -evalue 0.00001 -out {}blastx.outfmt6").format(infile, dbdir, threads, outdir)
try:
bs = Popen(split(cmd))
bs.communicate()
if bs.returncode == 0:
print(("\tBlastx runtime: {}").format(datetime.now()-starttime))
except:
print(("\tError: Blastx failure on {}.").format(infile))
cont = False
    if cont:
hits = sigHits(outdir + "blastx.outfmt6")
query = subsetSig(infile, hits, outdir)
starttime = datetime.now()
print("\n\tRunning blastn against nucleotide database...")
cmd = ("blastn -query {} -db {}viralRefSeq.fna -num_threads {} -max_target_seqs 1 \
-outfmt 6 -out {}blastn.outfmt6").format(query, dbdir, threads, outdir)
try:
bs = Popen(split(cmd))
bs.communicate()
if bs.returncode == 0:
print(("\tBlastn runtime: {}").format(datetime.now()-starttime))
except:
print(("\tError: Blastn failure on {}.").format(infile))
return True
def ublastDb(indir):
# Makes ublast databases
print("\tBuilding ublast databases...\n")
cmd = "./usearch -makeudb_ublast {} -output {}"
infile = indir + "viralRefSeq.fna"
outfile = indir + "viralRefSeq.udb"
try:
ndb = Popen(split(cmd.format(infile, outfile)))
ndb.communicate()
except:
print("\tError: Failed to build ublast DNA database.")
infile = indir + "viralRefProt.faa"
outfile = indir + "viralRefProt.udb"
try:
pdb = Popen(split(cmd.format(infile, outfile)))
pdb.communicate()
except:
print("\tError: Failed to build ublast protein database.")
def makeDB(indir):
# Makes protein and nucleotide blast databases
infile = indir + "viralRefSeq.fna"
print("\tConstructing BLAST databases...")
cmd = ("makeblastdb -in {} -parse_seqids -dbtype {}")
try:
mdb = Popen(split(cmd.format(infile, "nucl")))
mdb.communicate()
except:
print("\tError: Failed to build BLAST nucleotide database.")
infile = indir + "viralRefProt.faa"
try:
mdb = Popen(split(cmd.format(infile, "prot")))
mdb.communicate()
except:
print("\tError: Failed to build BLAST protein database.")
def main():
starttime = datetime.now()
parser = argparse.ArgumentParser(description = "This script will \
automate running blastx and blastn for putative viral sequences. \
Be sure to export the path to blast on your machine before using.")
parser.add_argument("-v", action = "store_true",
help = "Prints version info and exits.")
parser.add_argument("-i", help = "Path to fasta file of query sequences.")
parser.add_argument("-o", help = "Path to working/output directory.")
parser.add_argument("-d", help = "Path to directory containing database files.")
parser.add_argument("--blast", action = "store_true", default = False,
help = "Runs BLAST (default is ublast).")
parser.add_argument("-p", type = int, default = 1,
help = "Number of threads to run BLAST or ublast with.")
parser.add_argument("--ublastdb", action = "store_true",
help = "Creates new ublast DNA and protein databases. Place source fasta files \
into the directory specified with -d.")
parser.add_argument("--blastdb", action = "store_true",
help = "Creates new blast DNA and protein databases. Place source fasta files \
into the directory specified with -d.")
args = parser.parse_args()
if args.v:
version()
# Add trailing slash to database directory
if args.d[-1] != "/":
args.d += "/"
if args.ublastdb:
ublastDb(args.d)
elif args.blastdb:
makeDB(args.d)
else:
if args.o[-1] != "/":
# Add trailing slash
args.o += "/"
if not os.path.isdir(args.o):
os.mkdir(args.o)
if args.p > cpu_count():
# Prevent too many threads from being called
args.p = cpu_count()
        if args.blast:
done = blastSeqs(args.i, args.o, args.d, args.p)
else:
done = ublast(args.i, args.o, args.d, args.p)
print(("\tTotal runtime: {}\n").format(datetime.now() - starttime))
if __name__ == "__main__":
main()
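# Usage sketch (hypothetical paths; assumes BLAST+ or the usearch binary is
# available and the databases in db/ were built with makeDB or ublastDb;
# note that blastSeqs/ublast expect directory paths with trailing slashes):
#
# >>> blastSeqs("contigs.fasta", "results/", "db/", 4)
#
# or, equivalently, from the shell:
# python blastSeqs.py -i contigs.fasta -o results -d db -p 4 --blast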
|
icwells/Kiwi
|
bin/blastSeqs.py
|
Python
|
gpl-3.0
| 5,672
|
[
"BLAST"
] |
1afdb302440a458dd9ecb9631bfeb0af7627a2afa77d7b9bfca24bc16c5a7475
|
"""Handle user related queries."""
import base64
import collections
import logging
import re
import zlib
from google.appengine.api import app_identity
from google.appengine.api import mail
from google.appengine.api import namespace_manager
from google.appengine.api import oauth
from google.appengine.api import users
from google.appengine.ext import ndb
import flask
import pusher
from appengine import building, pusher_client
from common import public_creds, utils
from pi import simple_pusher
SCOPE = "https://www.googleapis.com/auth/userinfo.email"
def get_user_object():
"""Get the appengine user object (using login or oauth), or return None."""
user_object = users.get_current_user()
if user_object is not None:
return user_object
try:
user_object = oauth.get_current_user(SCOPE)
# When running locally app engine does this for some silly
# reason.
if user_object.email() == 'example@example.com'\
and user_object.user_id() == '0':
return None
return user_object
except oauth.OAuthRequestError:
return None
# We're not going to put these in a namespace.
class Person(ndb.Model):
# key_name is userid
email = ndb.StringProperty(required=False)
buildings = ndb.StringProperty(repeated=True)
def to_dict(self):
"""Return a dict representation of this object."""
values = super(Person, self).to_dict()
values['id'] = self.key.string_id()
values['logout_url'] = users.create_logout_url('/')
# If we are running in local mode,
# tell the UI to connect somewhere
# special for push updates.
# TO DO: get hostname (socket doesn't work)
if pusher_client.should_use_local():
values['ws'] = 'ws://localhost:%d/' % (
simple_pusher.WEBSOCKET_PORT)
# What buildings have we shared with whom?
values['sharing'] = collections.defaultdict(list)
for other_person in Person.query(
Person.buildings.IN(self.buildings)).iter():
for other_building in other_person.buildings:
if other_building not in self.buildings:
continue
values['sharing'][other_building].append(
{'email': other_person.email,
'user_id': other_person.key.string_id()})
# Are there any pending invites?
for invite in Invite.query(Invite.building.IN(
self.buildings)).iter():
values['sharing'][invite.building].append(
{'email': invite.email,
'invite_id': invite.key.id()})
return values
# Users are keyed by this magic id we get from appengine
# we don't know this id yet, so we can't create a person
# object for them. Instead, we'll create an invite object
# and create the person object when they first login.
class Invite(ndb.Model):
email = ndb.StringProperty(required=False)
building = ndb.StringProperty(repeated=False)
# pylint: disable=invalid-name
blueprint = flask.Blueprint('user', __name__)
@blueprint.before_request
def authentication():
"""Ensure user is authenticated, but don't switch namespace."""
user_object = get_user_object()
if user_object is not None:
return
# Special case use_invite endpoint to login.
if flask.request.endpoint in {'user.use_invite'}:
return flask.redirect(users.create_login_url(flask.request.url))
flask.abort(401)
# Intentionally don't call get_person, just in case
# this is a call to /api/invite/<id>, in which
# case we don't want to create a new user and building.
def get_person(buildings=None):
"""Return the user_id for the current logged in user."""
user = get_user_object()
assert user is not None
assert user.email() is not None
assert namespace_manager.get_namespace() == ''
# if the person is not found,
# we'll create a new one with the
# building id set to the user id.
user_id = user.user_id()
if buildings is None:
buildings = [user_id]
person = Person.get_or_insert(
user_id, email=user.email(),
buildings=buildings)
return person
@blueprint.route('/', methods=['GET'])
def get_user_request():
"""Return the user's json."""
assert namespace_manager.get_namespace() == ''
person = get_person()
values = person.to_dict()
return flask.jsonify(objects=[values])
def send_event(building_id=None, **kwargs):
"""Post events back to the pi."""
batch = flask.g.get('user_events', None)
if batch is None:
batch = []
setattr(flask.g, 'user_events', batch)
if building_id is None:
building_id = building.get_id()
batch.append((building_id, kwargs))
def push_events():
"""Push all the events that have been caused by this request."""
events = flask.g.get('user_events', None)
setattr(flask.g, 'user_events', None)
if events is None:
return
# partition events by building
events_by_building = collections.defaultdict(list)
for building_id, event in events:
    # The UI can deal with compressed and uncompressed events.
    # Let's compress the event and base64 encode it; if that is
    # shorter, send that. Otherwise send the original event.
encoded_event = flask.json.dumps(event)
compressed_event = zlib.compress(encoded_event)
compressed_event = base64.b64encode(compressed_event)
if len(encoded_event) > len(compressed_event):
event = {'c': compressed_event}
events_by_building[building_id].append(event)
pusher_shim = pusher_client.get_client(encoder=flask.json.JSONEncoder)
for building_id, events in events_by_building.iteritems():
channel_id = 'private-%s' % building_id
for batch in utils.limit_json_batch(events, max_size=8000):
logging.info('Sending %d events to user on channel %s',
len(batch), channel_id)
pusher_shim.push(channel_id, batch)
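# Sketch of the size trade-off used in push_events above (standalone,
# Python 2 to match this module; zlib + base64 shrinks large, repetitive
# JSON payloads but can inflate small ones, hence the length comparison):
#
# >>> import base64, json, zlib
# >>> event = {'cls': 'user', 'event': 'update', 'obj': {'x': 'y' * 500}}
# >>> encoded = json.dumps(event)
# >>> packed = base64.b64encode(zlib.compress(encoded))
# >>> len(packed) < len(encoded)
# True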
# UI calls /api/user/channel_auth with its
# (id & secret) auth and channel name == private-user_id.
# pusher client library makes a callback
# to this end point to check the client
# can use said channel.
@blueprint.route('/channel_auth', methods=['POST'])
def pusher_client_auth_callback():
"""Authenticate a given socket for a given channel."""
# We know the user is authenticated at this point
person = get_person()
socket_id = flask.request.form.get('socket_id')
channel_name = flask.request.form.get('channel_name')
match = re.match(r'private-(\d+)', channel_name)
building_id = match.group(1)
if building_id not in person.buildings:
logging.error('User %s is not allowed channel %s!',
person.key.string_id(), channel_name)
flask.abort(401)
from common import creds
client = pusher.Pusher(
app_id=creds.PUSHER_APP_ID,
key=public_creds.pusher_key, secret=creds.PUSHER_SECRET)
auth = client[channel_name].authenticate(socket_id)
return flask.jsonify(**auth)
@blueprint.route('/invite', methods=['POST'])
def invite_handler():
"""Invite a user to this building"""
body = flask.request.get_json()
if body is None:
flask.abort(400, 'JSON body and mime type required.')
# We know the user is authenticated at this point
person = get_person()
# who is the invitee?
invitee_email = body.get('email', None)
if invitee_email is None:
flask.abort(400, 'Field invitee expected')
# figure out what building we're inviting someone too
building_id = flask.request.headers.get('building-id', person.buildings[0])
if building_id not in person.buildings:
flask.abort(401)
invite = Invite(email=invitee_email, building=building_id)
invite.put()
app_id = app_identity.get_application_id()
body = """
Dear %s:
An Awesomation house has been shared with you. Visit
http://%s.appspot.com/api/user/invite/%d and sign in using your Google Account
for access.
""" % (invitee_email, app_id, invite.key.id())
logging.info("Sending email: '%s'", body)
mail.send_mail(
sender=person.email, to=invitee_email,
subject="An Awesomation house has been shared with you.",
body=body)
send_event(building_id=building_id, cls='user',
id=person.key.string_id(),
event='update', obj=person.to_dict())
return ('', 204)
@blueprint.route('/invite/<object_id>', methods=['GET'])
def use_invite(object_id):
"""Given an invite, add the building to the current users account."""
invite = Invite.get_by_id(int(object_id))
if not invite:
flask.abort(404)
# We don't check the email on the invite, as sometimes the domain
# doesn't match (ie @googlemail.com vs @gmail.com). We just
# assume if they got this code then they can have access.
person = get_person(buildings=[invite.building])
# There is a chance the above call didn't add the building
  # as it will only do so for people that don't already exist.
# In this case, we have to add it.
current_buildings = set(person.buildings)
all_buildings = current_buildings | set([invite.building])
if all_buildings != current_buildings:
person.buildings.extend(all_buildings - current_buildings)
person.put()
invite.key.delete()
return flask.redirect('/')
@blueprint.route('/invite/<object_id>', methods=['DELETE'])
def delete_invite(object_id):
"""Delete the given invite."""
invite = Invite.get_by_id(int(object_id))
if not invite:
flask.abort(404)
person = get_person()
if invite.building not in person.buildings:
flask.abort(401)
invite.key.delete()
send_event(building_id=invite.building,
cls='user', id=person.key.string_id(),
event='delete', obj=person.to_dict())
return ('', 204)
|
tomwilkie/awesomation
|
src/appengine/user.py
|
Python
|
mit
| 9,492
|
[
"VisIt"
] |
6602790979d2fd71b848e8ba19dfe0a5e998636b00c3eeb9968cb46898cdfe44
|
# -*- coding: utf-8 -*-
"""
Data conversion utilities for dataframe
=====================================
Convert cytoscape.js style graphs from/to dataframe object.
"""
import pandas as pd
from . import cytoscapejs as cyjs
def from_dataframe(df, source_col='source', target_col='target', interaction_col='interaction',
name='From DataFrame', edge_attr_cols=[]):
"""
Utility to convert Pandas DataFrame object into Cytoscape.js JSON
:param df: the data frame object.
:param source_col: the column name of source column.
:param target_col: the column name of target column.
:param interaction_col: the column name of interaction column.
:param name: The network name.
    :param edge_attr_cols: This parameter is currently unused.
:return: the Cytoscape.js JSON
"""
network = cyjs.get_empty_network(name=name)
nodes = set()
for index, row in df.iterrows():
s = row[source_col]
t = row[target_col]
if s not in nodes:
nodes.add(s)
source = get_node(s)
network['elements']['nodes'].append(source)
if t not in nodes:
nodes.add(t)
target = get_node(t)
network['elements']['nodes'].append(target)
network['elements']['edges'].append(get_edge(s, t, interaction=row[interaction_col]))
return network
def to_dataframe(network, interaction='interaction', default_interaction='-'):
"""
    This method converts Cytoscape.js JSON into a Pandas DataFrame.
    :param network: The Cytoscape.js JSON
    :param interaction: The column name of the interaction
    :param default_interaction: The default value of the interaction.
    :return : The Pandas DataFrame object.
"""
edges = network['elements']['edges']
network_array = []
for edge in edges:
edge_data = edge['data']
source = edge_data['source']
target = edge_data['target']
if interaction in edge_data:
itr = edge_data[interaction]
else:
itr = default_interaction
row = (source, itr, target)
network_array.append(row)
return pd.DataFrame(network_array, columns=['source', 'interaction',
'target'])
def get_node(id):
"""
Get the node information in JSON format.
:param id: The network node id or name.
:return : the json object about node.
"""
node = {
'data': {
'id': str(id),
'name': str(id)
}
}
return node
def get_edge(source, target, interaction):
"""
Get the edge information in JSON format.
:param source: the id or name of source node.
:param target: the id or name of target node.
:param interaction: the interaction of edge.
:return : the JSON value about edge.
"""
if interaction is None:
itr = '-'
else:
itr = interaction
edge = {
'data': {
'source': source,
'target': target,
'interaction': itr
}
}
return edge
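# Round-trip sketch: a three-column DataFrame maps onto Cytoscape.js
# elements and back (column names are the defaults expected above):
#
# >>> import pandas as pd
# >>> df = pd.DataFrame([('a', 'binds', 'b')],
# ...                   columns=['source', 'interaction', 'target'])
# >>> net = from_dataframe(df)
# >>> to_dataframe(net)
#   source interaction target
# 0      a       binds      b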
|
idekerlab/cyrest-examples
|
py2cytoscape_doc/py2cytoscape/util/dataframe.py
|
Python
|
mit
| 3,100
|
[
"Cytoscape"
] |
3437a86434f22e994987024a6eed1c5849517c806bb13a521f3fc814b2da83bc
|
# encoding:utf-8
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2009 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
MODULE_VERSION="4.2"
#------------------------------------------------------------------------
#
# libcairo
#
#------------------------------------------------------------------------
register(GENERAL,
id = 'libcairodoc',
name = "Cairodoc lib",
description = _("Provides a library for using Cairo to "
"generate documents."),
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'libcairodoc.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
#load_on_reg = True
)
#------------------------------------------------------------------------
#
# libgedcom
#
#------------------------------------------------------------------------
register(GENERAL,
id = 'libgedcom',
name = "GEDCOM library",
description = _("Provides GEDCOM processing functionality"),
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'libgedcom.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
)
#------------------------------------------------------------------------
#
# librecurse
#
#------------------------------------------------------------------------
register(GENERAL,
id='librecurse',
name="Recursive lib",
description= _("Provides recursive routines for reports"),
version='1.0',
gramps_target_version=MODULE_VERSION,
status=STABLE,
fname='librecurse.py',
authors=["The Gramps project"],
authors_email=["http://gramps-project.org"],
)
#------------------------------------------------------------------------
#
# libgrampsxml
#
#------------------------------------------------------------------------
register(GENERAL,
id = 'libgrampsxml',
name = "Grampsxml lib",
description = _("Provides common functionality for Gramps XML "
"import/export."),
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'libgrampsxml.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
#load_on_reg = True
)
#------------------------------------------------------------------------
#
# libholiday
#
#------------------------------------------------------------------------
register(GENERAL,
id = 'libholiday',
name = "holiday lib",
description = _("Provides holiday information for different countries.") ,
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'libholiday.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
#load_on_reg = True
)
#------------------------------------------------------------------------
#
# libhtmlbackend
#
#------------------------------------------------------------------------
register(GENERAL,
id = 'libhtmlbackend',
name = "htmlbackend lib",
description = _("Manages a HTML file implementing DocBackend.") ,
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'libhtmlbackend.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
#load_on_reg = True
)
#------------------------------------------------------------------------
#
# libhtmlconst
#
#------------------------------------------------------------------------
register(GENERAL,
id = 'libhtmlconst',
name = "htmlconst lib",
description = _("Common constants for html files.") ,
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'libhtmlconst.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
#load_on_reg = True
)
#------------------------------------------------------------------------
#
# libhtml
#
#------------------------------------------------------------------------
register(GENERAL,
id = 'libhtml',
name = "html lib",
description = _("Manages an HTML DOM tree.") ,
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'libhtml.py',
authors = ["Gerald Britton"],
authors_email = ["gerald.britton@gmail.com"],
#load_on_reg = True
)
#------------------------------------------------------------------------
#
# libmapservice
#
#------------------------------------------------------------------------
register(GENERAL,
id = 'libmapservice',
name = "mapservice lib",
description = _("Provides base functionality for map services.") ,
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'libmapservice.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
)
#------------------------------------------------------------------------
#
# libnarrate
#
#------------------------------------------------------------------------
register(GENERAL,
id = 'libnarrate',
name = "narration lib",
description = _("Provides Textual Narration.") ,
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'libnarrate.py',
authors = ["Brian Matherly"],
authors_email = ["brian@gramps-project.org"],
)
#------------------------------------------------------------------------
#
# libodfbackend
#
#------------------------------------------------------------------------
register(GENERAL,
id = 'libodfbackend',
name = "odfbackend lib",
description = _("Manages an ODF file implementing DocBackend.") ,
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'libodfbackend.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
)
#------------------------------------------------------------------------
#
# libpersonview
#
#------------------------------------------------------------------------
register(GENERAL,
id = 'libpersonview',
name = "person list lib",
description = _("Provides the Base needed for the List People views.") ,
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'libpersonview.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
)
#------------------------------------------------------------------------
#
# libplaceview
#
#------------------------------------------------------------------------
register(GENERAL,
id = 'libplaceview',
name = "place list lib",
description = _("Provides the Base needed for the List Place views.") ,
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'libplaceview.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
)
#------------------------------------------------------------------------
#
# libsubstkeyword
#
#------------------------------------------------------------------------
register(GENERAL,
id = 'libsubstkeyword',
name = "Substitution Values",
description = _("Provides variable substitution on display lines.") ,
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'libsubstkeyword.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
)
#------------------------------------------------------------------------
#
# libtreebase
#
#------------------------------------------------------------------------
register(GENERAL,
id = 'libtreebase',
name = "Graphical report lib",
description = _("Provides the base needed for the ancestor and "
"descendant graphical reports.") ,
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'libtreebase.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
)
|
pmghalvorsen/gramps_branch
|
gramps/plugins/lib/libplugins.gpr.py
|
Python
|
gpl-2.0
| 8,402
|
[
"Brian"
] |
c96cd5db64350545fb05ef59239b6538067cdf0a440f589d83b82ff3dd2410bc
|
# Draw a cone and write it out to sys.argv[1] in a few different ways. All
# output files should be bit-for-bit reproducible, i.e., no embedded
# timestamps, hostnames, floating point error, etc.
from __future__ import print_function
import os
import platform
import sys
import mpi4py.MPI
import paraview.simple as pv
# Version information.
print("ParaView %d.%d.%d on Python %s"
% (pv.paraview.servermanager.vtkSMProxyManager.GetVersionMajor(),
pv.paraview.servermanager.vtkSMProxyManager.GetVersionMinor(),
pv.paraview.servermanager.vtkSMProxyManager.GetVersionPatch(),
platform.python_version()))
# Even if you start multiple pvbatch using MPI, this script is only
# executed by rank 0. Check this assumption.
assert mpi4py.MPI.COMM_WORLD.rank == 0
# Output directory provided on command line.
outdir = sys.argv[1]
# Render a cone.
pv.Cone()
pv.Show()
pv.Render()
print("rendered")
# PNG image (serial).
filename = "%s/cone.png" % outdir
pv.SaveScreenshot(filename)
print(filename)
# Legacy VTK file (ASCII, serial).
filename = "%s/cone.vtk" % outdir
pv.SaveData(filename, FileType="Ascii")
print(filename)
# XML VTK files (parallel).
filename=("%s/cone.pvtp" % outdir)
writer = pv.XMLPPolyDataWriter(FileName=filename)
writer.UpdatePipeline()
print(filename)
# Done.
print("done")
|
hpc/charliecloud
|
examples/paraview/cone.py
|
Python
|
apache-2.0
| 1,329
|
[
"ParaView",
"VTK"
] |
f5e0e13bef8eb95bc4d66891049f58a84e469fca84f37c7c10ccec22c4e9b37e
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkImageSpatialAlgorithm(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkImageSpatialAlgorithm(), 'Processing.',
('vtkImageData',), ('vtkImageData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkImageSpatialAlgorithm.py
|
Python
|
bsd-3-clause
| 505
|
[
"VTK"
] |
8c88793e28a2932bfe546d282b5fc5e7d15c614875ee6707294616a2309514c0
|
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2007, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
from Products.DataCollector.plugins.CollectorPlugin import SnmpPlugin, GetMap
class CanopyDeviceMap(SnmpPlugin):
"""Map mib elements from Motorola mib to get hw and os products.
"""
maptype = "CanopyDeviceMap"
snmpGetMap = GetMap({
#'' : 'manufacturer',
'.1.3.6.1.4.1.161.19.3.3.1.1.0' : 'setHWProductKey',
'.1.3.6.1.4.1.161.19.3.3.1.3.0' : 'setHWSerialNumber',
'.1.3.6.1.4.1.161.19.3.3.1.2.0': 'setOSProductKey',
})
def process(self, device, results, log):
"""collect snmp information from this device"""
log.info('processing %s for device %s', self.name(), device.id)
getdata, tabledata = results
if getdata['setHWProductKey'] is None: return None
om = self.objectMap(getdata)
return om
|
anksp21/Community-Zenpacks
|
ZenPacks.AndreaConsadori.Motorola/ZenPacks/AndreaConsadori/Motorola/modeler/plugins/CanopyDeviceMap.py
|
Python
|
gpl-2.0
| 1,326
|
[
"VisIt"
] |
45a50bb54a059b37c3dc91a43994ef85d24f3e2479f026c95f777356b73d601a
|
"""
Test the Studio help links.
"""
from unittest import skip
from nose.plugins.attrib import attr
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.studio.asset_index import AssetIndexPage
from common.test.acceptance.pages.studio.course_info import CourseUpdatesPage
from common.test.acceptance.pages.studio.edit_tabs import PagesPage
from common.test.acceptance.pages.studio.import_export import (
ExportCoursePage,
ExportLibraryPage,
ImportCoursePage,
ImportLibraryPage
)
from common.test.acceptance.pages.studio.index import DashboardPage, HomePage, IndexPage
from common.test.acceptance.pages.studio.library import LibraryPage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage
from common.test.acceptance.pages.studio.settings import SettingsPage
from common.test.acceptance.pages.studio.settings_advanced import AdvancedSettingsPage
from common.test.acceptance.pages.studio.settings_certificates import CertificatesPage
from common.test.acceptance.pages.studio.settings_graders import GradingPage
from common.test.acceptance.pages.studio.settings_group_configurations import GroupConfigurationsPage
from common.test.acceptance.pages.studio.textbook_upload import TextbookUploadPage
from common.test.acceptance.pages.studio.users import CourseTeamPage, LibraryUsersPage
from common.test.acceptance.pages.studio.utils import click_css, click_studio_help, studio_help_links
from common.test.acceptance.tests.helpers import (
AcceptanceTest,
assert_nav_help_link,
assert_side_bar_help_link,
url_for_help
)
from common.test.acceptance.tests.studio.base_studio_test import ContainerBase, StudioCourseTest, StudioLibraryTest
def _get_expected_documentation_url(path):
"""
    Returns the expected URL for the Building and Running a Course
    documentation.
"""
return url_for_help('course_author', path)
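# For example, the sign-in tests below expect (the concrete host in the
# returned URL comes from url_for_help and the test settings):
#
# >>> _get_expected_documentation_url('/getting_started/index.html')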
@attr(shard=10)
class StudioHelpTest(StudioCourseTest):
"""Tests for Studio help."""
def test_studio_help_links(self):
"""Test that the help links are present and have the correct content."""
page = DashboardPage(self.browser)
page.visit()
click_studio_help(page)
links = studio_help_links(page)
expected_links = [{
'href': u'http://docs.edx.org/',
'text': u'edX Documentation',
'sr_text': u'Access documentation on http://docs.edx.org'
}, {
'href': u'https://open.edx.org/',
'text': u'Open edX Portal',
'sr_text': u'Access the Open edX Portal'
}, {
'href': u'https://www.edx.org/course/overview-creating-edx-course-edx-edx101#.VO4eaLPF-n1',
'text': u'Enroll in edX101',
'sr_text': u'Enroll in edX101: Overview of Creating an edX Course'
}, {
'href': u'https://www.edx.org/course/creating-course-edx-studio-edx-studiox',
'text': u'Enroll in StudioX',
'sr_text': u'Enroll in StudioX: Creating a Course with edX Studio'
}, {
'href': u'mailto:partner-support@example.com',
'text': u'Contact Us',
'sr_text': 'Send an email to partner-support@example.com'
}]
for expected, actual in zip(expected_links, links):
self.assertEqual(expected['href'], actual.get_attribute('href'))
self.assertEqual(expected['text'], actual.text)
self.assertEqual(
expected['sr_text'],
actual.find_element_by_xpath('following-sibling::span').text
)
@attr(shard=10)
class SignInHelpTest(AcceptanceTest):
"""
Tests help links on 'Sign In' page
"""
def setUp(self):
super(SignInHelpTest, self).setUp()
self.index_page = IndexPage(self.browser)
self.index_page.visit()
def test_sign_in_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Sign In' page.
Given that I am on the 'Sign In" page.
And I want help about the sign in
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
sign_in_page = self.index_page.click_sign_in()
expected_url = _get_expected_documentation_url('/getting_started/index.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=sign_in_page,
href=expected_url,
signed_in=False
)
@attr(shard=10)
class SignUpHelpTest(AcceptanceTest):
"""
Tests help links on 'Sign Up' page.
"""
def setUp(self):
super(SignUpHelpTest, self).setUp()
self.index_page = IndexPage(self.browser)
self.index_page.visit()
def test_sign_up_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Sign Up' page.
Given that I am on the 'Sign Up" page.
And I want help about the sign up
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
sign_up_page = self.index_page.click_sign_up()
expected_url = _get_expected_documentation_url('/getting_started/index.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=sign_up_page,
href=expected_url,
signed_in=False
)
@attr(shard=10)
class HomeHelpTest(StudioCourseTest):
"""
Tests help links on 'Home'(Courses tab) page.
"""
def setUp(self): # pylint: disable=arguments-differ
super(HomeHelpTest, self).setUp()
self.home_page = HomePage(self.browser)
self.home_page.visit()
def test_course_home_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Home'(Courses tab) page.
Given that I am on the 'Home'(Courses tab) page.
And I want help about the courses
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/getting_started/CA_get_started_Studio.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.home_page,
href=expected_url
)
def test_course_home_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on 'Home'(Courses tab) page.
Given that I am on the 'Home'(Courses tab) page.
And I want help about the courses
And I click the 'Getting Started with edX Studio' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/getting_started/CA_get_started_Studio.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.home_page,
href=expected_url,
help_text='Getting Started with edX Studio',
as_list_item=True
)
@attr(shard=10)
class NewCourseHelpTest(AcceptanceTest):
"""
Test help links while creating a new course.
"""
def setUp(self):
super(NewCourseHelpTest, self).setUp()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPage(self.browser)
self.auth_page.visit()
self.dashboard_page.visit()
self.assertTrue(self.dashboard_page.new_course_button.present)
self.dashboard_page.click_new_course_button()
def test_course_create_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Create a New Course' page in the dashboard.
Given that I am on the 'Create a New Course' page in the dashboard.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/getting_started/CA_get_started_Studio.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.dashboard_page,
href=expected_url
)
def test_course_create_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on 'Create a New Course' page in the dashboard.
Given that I am on the 'Create a New Course' page in the dashboard.
And I want help about the process
And I click the 'Getting Started with edX Studio' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/getting_started/CA_get_started_Studio.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.dashboard_page,
href=expected_url,
help_text='Getting Started with edX Studio',
as_list_item=True
)
@attr(shard=10)
class NewLibraryHelpTest(AcceptanceTest):
"""
Test help links while creating a new library
"""
def setUp(self):
super(NewLibraryHelpTest, self).setUp()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPage(self.browser)
self.auth_page.visit()
self.dashboard_page.visit()
self.assertTrue(self.dashboard_page.has_new_library_button)
self.dashboard_page.click_new_library()
def test_library_create_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Create a New Library' page in the dashboard.
Given that I am on the 'Create a New Library' page in the dashboard.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/getting_started/CA_get_started_Studio.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.dashboard_page,
href=expected_url
)
def test_library_create_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on 'Create a New Library' page in the dashboard.
Given that I am on the 'Create a New Library' page in the dashboard.
And I want help about the process
And I click the 'Getting Started with edX Studio' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/getting_started/CA_get_started_Studio.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.dashboard_page,
href=expected_url,
help_text='Getting Started with edX Studio',
as_list_item=True
)
@attr(shard=10)
class LibraryTabHelpTest(AcceptanceTest):
"""
Test help links on the library tab present at dashboard.
"""
def setUp(self):
super(LibraryTabHelpTest, self).setUp()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPage(self.browser)
self.auth_page.visit()
self.dashboard_page.visit()
def test_library_tab_nav_help(self):
"""
        Scenario: Help link in navigation bar is working on 'Home'(Libraries tab) page.
        Given that I am on the 'Home'(Libraries tab) page.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
self.assertTrue(self.dashboard_page.has_new_library_button)
click_css(self.dashboard_page, '#course-index-tabs .libraries-tab', 0, False)
expected_url = _get_expected_documentation_url('/getting_started/CA_get_started_Studio.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.dashboard_page,
href=expected_url
)
@attr(shard=10)
class LibraryHelpTest(StudioLibraryTest):
"""
Test help links on a Library page.
"""
def setUp(self):
super(LibraryHelpTest, self).setUp()
self.library_page = LibraryPage(self.browser, self.library_key)
self.library_user_page = LibraryUsersPage(self.browser, self.library_key)
def test_library_content_nav_help(self):
"""
        Scenario: Help link in navigation bar is working on the content
        library page (click a library on the Library list page).
        Given that I am on the content library page (click a library on the Library list page).
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
self.library_page.visit()
expected_url = _get_expected_documentation_url('/course_components/libraries.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.library_page,
href=expected_url
)
def test_library_content_side_bar_help(self):
"""
        Scenario: Help link in sidebar links is working on the
        content library page (click a library on the Library list page).
        Given that I am on the content library page (click a library on the Library list page).
And I want help about the process
And I click the 'Learn more about content libraries' in the sidebar links
Then Help link should open.
And help url should be correct
"""
self.library_page.visit()
expected_url = _get_expected_documentation_url('/course_components/libraries.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.library_page,
href=expected_url,
help_text='Learn more about content libraries'
)
def test_library_user_access_setting_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'User Access'
settings page of library.
Given that I am on the 'User Access' settings page of library.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct.
"""
self.library_user_page.visit()
expected_url = _get_expected_documentation_url(
'/course_components/libraries.html#give-other-users-access-to-your-library'
)
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.library_user_page,
href=expected_url,
)
@attr(shard=10)
class LibraryImportHelpTest(StudioLibraryTest):
"""
Test help links on a Library import and export pages.
"""
def setUp(self):
super(LibraryImportHelpTest, self).setUp()
self.library_import_page = ImportLibraryPage(self.browser, self.library_key)
self.library_import_page.visit()
def test_library_import_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Library import page.
Given that I am on the Library import page.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_components/libraries.html#import-a-library')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.library_import_page,
href=expected_url
)
def test_library_import_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on Library import page.
Given that I am on the Library import page.
And I want help about the process
And I click the 'Learn more about importing a library' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_components/libraries.html#import-a-library')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.library_import_page,
href=expected_url,
help_text='Learn more about importing a library'
)
@attr(shard=10)
class LibraryExportHelpTest(StudioLibraryTest):
"""
Test help links on a Library export pages.
"""
def setUp(self):
super(LibraryExportHelpTest, self).setUp()
self.library_export_page = ExportLibraryPage(self.browser, self.library_key)
self.library_export_page.visit()
def test_library_export_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Library export page.
Given that I am on the Library export page.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_components/libraries.html#export-a-library')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.library_export_page,
href=expected_url
)
def test_library_export_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on Library export page.
Given that I am on the Library export page.
And I want help about the process
And I click the 'Learn more about exporting a library' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_components/libraries.html#export-a-library')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.library_export_page,
href=expected_url,
help_text='Learn more about exporting a library'
)
@attr(shard=10)
class CourseOutlineHelpTest(StudioCourseTest):
"""
Tests help links on course outline page.
"""
def setUp(self): # pylint: disable=arguments-differ
super(CourseOutlineHelpTest, self).setUp()
self.course_outline_page = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_outline_page.visit()
@skip("This scenario depends upon TNL-5460")
def test_course_outline_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Course Outline page
Given that I am on the Course Outline page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/developing_course/course_outline.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.course_outline_page,
href=expected_url
)
def test_course_outline_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on Course Outline page
Given that I am on the Course Outline page.
And I want help about the process
And I click the 'Learn more about the course outline' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/developing_course/course_outline.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.course_outline_page,
href=expected_url,
help_text='Learn more about the course outline',
index=0
)
@attr(shard=10)
class CourseUpdateHelpTest(StudioCourseTest):
"""
Test help links on Course Update page
"""
def setUp(self): # pylint: disable=arguments-differ
super(CourseUpdateHelpTest, self).setUp()
self.course_update_page = CourseUpdatesPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_update_page.visit()
def test_course_update_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Course Update' page
Given that I am on the 'Course Update' page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_assets/handouts_updates.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.course_update_page,
href=expected_url,
)
@attr(shard=10)
class AssetIndexHelpTest(StudioCourseTest):
"""
Test help links on Course 'Files & Uploads' page
"""
def setUp(self): # pylint: disable=arguments-differ
super(AssetIndexHelpTest, self).setUp()
self.course_asset_index_page = AssetIndexPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_asset_index_page.visit()
def test_asset_index_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Files & Uploads' page
Given that I am on the 'Files & Uploads' page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_assets/course_files.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.course_asset_index_page,
href=expected_url,
)
def test_asset_index_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on 'Files & Uploads' page
Given that I am on the 'Files & Uploads' page.
And I want help about the process
And I click the 'Learn more about managing files' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_assets/course_files.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.course_asset_index_page,
href=expected_url,
help_text='Learn more about managing files'
)
@attr(shard=10)
class CoursePagesHelpTest(StudioCourseTest):
"""
Test help links on Course 'Pages' page
"""
def setUp(self): # pylint: disable=arguments-differ
super(CoursePagesHelpTest, self).setUp()
self.course_pages_page = PagesPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_pages_page.visit()
def test_course_page_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Pages' page
Given that I am on the 'Pages' page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_assets/pages.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.course_pages_page,
href=expected_url,
)
@attr(shard=10)
class UploadTextbookHelpTest(StudioCourseTest):
"""
Test help links on Course 'Textbooks' page
"""
def setUp(self): # pylint: disable=arguments-differ
super(UploadTextbookHelpTest, self).setUp()
self.course_textbook_upload_page = TextbookUploadPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_textbook_upload_page.visit()
def test_course_textbook_upload_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Textbooks' page
Given that I am on the 'Textbooks' page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_assets/textbooks.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.course_textbook_upload_page,
href=expected_url,
)
def test_course_textbook_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on 'Textbooks' page
Given that I am on the 'Textbooks' page
And I want help about the process
And I click the 'Learn more about textbooks' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_assets/textbooks.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.course_textbook_upload_page,
href=expected_url,
help_text='Learn more about textbooks'
)
@attr(shard=10)
class StudioUnitHelpTest(ContainerBase):
"""
Tests help links on Unit page.
"""
def setUp(self, is_staff=True):
super(StudioUnitHelpTest, self).setUp(is_staff=is_staff)
def populate_course_fixture(self, course_fixture):
"""
Populates the course fixture.
We are modifying 'advanced_modules' setting of the
course.
Also add a section with a subsection and a unit.
"""
course_fixture.add_advanced_settings(
{u"advanced_modules": {"value": ["split_test"]}}
)
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit')
)
)
)
def test_unit_page_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Unit page.
Given that I am on the Unit page.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
unit_page = self.go_to_unit_page()
expected_url = _get_expected_documentation_url('/developing_course/course_units.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=unit_page,
href=expected_url,
)
@attr(shard=10)
class SettingsHelpTest(StudioCourseTest):
"""
Tests help links on Schedule and Details Settings page
"""
def setUp(self, is_staff=False, test_xss=True):
super(SettingsHelpTest, self).setUp()
self.settings_page = SettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.settings_page.visit()
def test_settings_page_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Settings page.
Given that I am on the Settings page.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/set_up_course/setting_up_student_view.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.settings_page,
href=expected_url,
)
@attr(shard=10)
class GradingPageHelpTest(StudioCourseTest):
"""
Tests help links on Grading page
"""
def setUp(self, is_staff=False, test_xss=True):
super(GradingPageHelpTest, self).setUp()
self.grading_page = GradingPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.grading_page.visit()
def test_grading_page_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Grading page.
Given that I am on the Grading page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/grading/index.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.grading_page,
href=expected_url,
)
@attr(shard=10)
class CourseTeamSettingsHelpTest(StudioCourseTest):
"""
Tests help links on Course Team settings page
"""
def setUp(self, is_staff=False, test_xss=True):
super(CourseTeamSettingsHelpTest, self).setUp()
self.course_team_settings_page = CourseTeamPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_team_settings_page.visit()
def test_course_team_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Course Team settings page
Given that I am on the Course Team settings page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/set_up_course/course_staffing.html#add-course-team-members')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.course_team_settings_page,
href=expected_url,
)
@attr(shard=10)
class CourseGroupConfigurationHelpTest(StudioCourseTest):
"""
Tests help links on course Group Configurations settings page
"""
def setUp(self, is_staff=False, test_xss=True):
super(CourseGroupConfigurationHelpTest, self).setUp()
self.course_group_configuration_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_group_configuration_page.visit()
def test_course_group_conf_nav_help(self):
"""
Scenario: Help link in navigation bar is working on
Group Configurations settings page
Given that I am on the Group Configurations settings page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/index.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.course_group_configuration_page,
href=expected_url,
)
def test_course_group_conf_content_group_side_bar_help(self):
"""
Scenario: Help link in side bar under the 'content group' is working
on Group Configurations settings page
Given that I am on the Group Configurations settings page
And I want help about the process
And I click the 'Learn More' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_features/cohorts/cohorted_courseware.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.course_group_configuration_page,
href=expected_url,
help_text='Learn More'
)
@attr(shard=10)
class AdvancedSettingHelpTest(StudioCourseTest):
"""
Tests help links on course Advanced Settings page.
"""
def setUp(self, is_staff=False, test_xss=True):
super(AdvancedSettingHelpTest, self).setUp()
self.advanced_settings = AdvancedSettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.advanced_settings.visit()
def test_advanced_settings_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Advanced Settings page.
Given that I am on the Advanced Settings page.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/index.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.advanced_settings,
href=expected_url,
)
@attr(shard=10)
class CertificatePageHelpTest(StudioCourseTest):
"""
Tests help links on course Certificate settings page.
"""
def setUp(self, is_staff=False, test_xss=True):
super(CertificatePageHelpTest, self).setUp()
self.certificates_page = CertificatesPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.certificates_page.visit()
def test_certificate_page_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Certificate settings page
Given that I am on the Certificate settings page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/set_up_course/creating_course_certificates.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.certificates_page,
href=expected_url,
)
def test_certificate_page_side_bar_help(self):
"""
Scenario: Help link in side bar is working on Certificate settings page
Given that I am on the Certificate settings page
And I want help about the process
And I click the 'Learn more about certificates' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/set_up_course/creating_course_certificates.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.certificates_page,
href=expected_url,
help_text='Learn more about certificates',
)
@attr(shard=10)
class GroupExperimentConfigurationHelpTest(ContainerBase):
"""
Tests help links on course Group Configurations settings page
It is related to Experiment Group Configurations on the page.
"""
def setUp(self): # pylint: disable=arguments-differ
super(GroupExperimentConfigurationHelpTest, self).setUp()
self.group_configuration_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# self.create_poorly_configured_split_instance()
self.group_configuration_page.visit()
def populate_course_fixture(self, course_fixture):
"""
Populates the course fixture.
We are modifying 'advanced_modules' setting of the
course.
"""
course_fixture.add_advanced_settings(
{u"advanced_modules": {"value": ["split_test"]}}
)
def test_course_group_configuration_experiment_side_bar_help(self):
"""
Scenario: Help link in side bar under the 'Experiment Group Configurations'
is working on Group Configurations settings page
Given that I am on the Group Configurations settings page
And I want help about the process
And I click the 'Learn More' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url(
'/course_features/content_experiments/content_experiments_configure.html'
'#set-up-group-configurations-in-edx-studio'
)
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.group_configuration_page,
href=expected_url,
help_text='Learn More',
)
@attr(shard=10)
class ToolsImportHelpTest(StudioCourseTest):
"""
Tests help links on tools import pages.
"""
def setUp(self, is_staff=False, test_xss=True):
super(ToolsImportHelpTest, self).setUp()
self.import_page = ImportCoursePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.import_page.visit()
def test_tools_import_nav_help(self):
"""
Scenario: Help link in navigation bar is working on the course import page
Given that I am on the course import page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/releasing_course/export_import_course.html#import-a-course')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.import_page,
href=expected_url,
)
def test_tools_import_side_bar_help(self):
"""
Scenario: Help link in side bar is working on the course import page
Given that I am on the course import page
And I want help about the process
And I click the 'Learn more about importing a course' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/releasing_course/export_import_course.html#import-a-course')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.import_page,
href=expected_url,
help_text='Learn more about importing a course',
)
@attr(shard=10)
class ToolsExportHelpTest(StudioCourseTest):
"""
Tests help links on tools export pages.
"""
def setUp(self, is_staff=False, test_xss=True):
super(ToolsExportHelpTest, self).setUp()
self.export_page = ExportCoursePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.export_page.visit()
def test_tools_export_nav_help(self):
"""
Scenario: Help link in navigation bar is working on the course export page
Given that I am on the course export page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/releasing_course/export_import_course.html#export-a-course')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.export_page,
href=expected_url,
)
def test_tools_export_side_bar_help(self):
"""
Scenario: Help link in side bar is working on the course export page
Given that I am on the course export page
And I want help about the process
And I click the 'Learn more about exporting a course' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/releasing_course/export_import_course.html#export-a-course')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.export_page,
href=expected_url,
help_text='Learn more about exporting a course',
)
@attr(shard=10)
class StudioWelcomeHelpTest(AcceptanceTest):
"""
Tests help link on 'Welcome' page (user not logged in)
"""
def setUp(self):
super(StudioWelcomeHelpTest, self).setUp()
self.index_page = IndexPage(self.browser)
self.index_page.visit()
def test_welcome_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Welcome' page (User not logged in).
Given that I am on the 'Welcome' page.
And I want help about edX
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/getting_started/index.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.index_page,
href=expected_url,
signed_in=False
)
|
fintech-circle/edx-platform
|
common/test/acceptance/tests/studio/test_studio_help.py
|
Python
|
agpl-3.0
| 43,324
|
[
"VisIt"
] |
ba6ae35172fba1d1d702e2413fb74697e10a5288e4afa7784ba00cff3310a319
|
import sys
import numpy
from python.src.data_processing.normalizer import Normalizer
from python.src.neural_networks.neural_network import NeuralNetwork
from python.src.neurons.receive_all_neuron import ReceiveAllNeuron
from python.src.neurons.output_only_neuron import OutputOnlyNeuron
from python.src.neurons.activation_functions.linear_activation import LinearActivation
class FeedForwardNN(NeuralNetwork):
def __init__(self, normalizer=None, structure=None,
has_bias_nodes=True, is_regression=False):
# Avoid shared mutable default arguments; create fresh defaults per call.
normalizer = normalizer if normalizer is not None else Normalizer()
structure = structure if structure is not None else [1, 5, 1]
NeuralNetwork.__init__(self, normalizer)
self.structure = structure
self.has_bias_nodes = has_bias_nodes
self.is_regression = is_regression
self.bias_nodes = []
num_layers = len(structure)
last_layer_index = num_layers - 1
for layer_index in range(num_layers):
num_neurons = structure[layer_index]
if has_bias_nodes and layer_index != last_layer_index:
num_neurons += 1
self.add_layer(self.create_layer(
num_neurons, lambda neuron_index: self.neuron_constructor(
last_layer_index, layer_index, neuron_index)))
self.connect_neurons()
@property
def num_inputs(self):
input_count = NeuralNetwork.num_inputs.fget(self)
if self.has_bias_nodes:
input_count -= 1
return input_count
def neuron_constructor(self, last_layer_index, current_layer_index,
neuron_index):
if (self.has_bias_nodes and
current_layer_index != last_layer_index and
neuron_index == self.structure[current_layer_index]):
bias_node = OutputOnlyNeuron()
self.bias_nodes.append(bias_node)
return bias_node
#if self.is_regression and current_layer_index == last_layer_index:
# return ReceiveAllNeuron(activation=LinearActivation())
return ReceiveAllNeuron()
def connect_neurons(self):
# Connect the neurons
for iLayer in range(self.num_layers - 1):
for sender in self.layers[iLayer]:
for receiver in self.layers[iLayer + 1]:
sender.connect_to(receiver)
def prepare_for_input(self):
for neuron in self.neurons:
neuron.reset()
def receive_inputs(self, inputs):
if len(inputs) != self.num_inputs:
raise ValueError("Inputs lenth must equal num_inputs")
self.prepair_for_input()
if self.has_bias_nodes:
for bias in self.bias_nodes:
bias.receive_signal(1)
for i, value in enumerate(inputs):
neuron = self.input_layer[i]
neuron.receive_signal(self.normalizer.norm_input(value))
return [self.normalizer.norm_output(output.output)
for output in self.output_layer]
# TASK
# 1) [Done] Implement a simple feed forward neural network
# 2) Implement a backpropagation training algorithm for the simple network
# 3) Train the simple network to represent a simple fourth-degree polynomial function
"""
http://www.iro.umontreal.ca/~bengioy/dlbook/mlp.html#pf2
f*(x) = b + V sigmoid(c + W x),
"""
|
DomenicD/dom_ml_playground
|
python/src/neural_networks/feed_forward.py
|
Python
|
mit
| 3,406
|
[
"NEURON"
] |
4ba0131741da363b410507c6de92ea0417aa2ae6dcd89868bf2cd2f15a056eb5
|
from __future__ import unicode_literals
import os
import bson
import logging
import pymongo
import datetime
import requests
import functools
from modularodm import fields, Q
from modularodm.exceptions import NoResultsFound
from dateutil.parser import parse as parse_date
from framework.guid.model import Guid
from framework.mongo import StoredObject
from framework.mongo.utils import unique_on
from framework.analytics import get_basic_counters
from website import util
from website.files import utils
from website.files import exceptions
from website.project.commentable import Commentable
__all__ = (
'File',
'Folder',
'FileNode',
'FileVersion',
'StoredFileNode',
'TrashedFileNode',
)
PROVIDER_MAP = {}
logger = logging.getLogger(__name__)
class TrashedFileNode(StoredObject, Commentable):
"""The graveyard for all deleted FileNodes"""
__indices__ = [{
'unique': False,
'key_or_list': [
('node', pymongo.ASCENDING),
('is_file', pymongo.ASCENDING),
('provider', pymongo.ASCENDING),
]
}]
_id = fields.StringField(primary=True)
last_touched = fields.DateTimeField()
history = fields.DictionaryField(list=True)
versions = fields.ForeignField('FileVersion', list=True)
node = fields.ForeignField('node', required=True)
parent = fields.AbstractForeignField(default=None)
is_file = fields.BooleanField(default=True)
provider = fields.StringField(required=True)
name = fields.StringField(required=True)
path = fields.StringField(required=True)
materialized_path = fields.StringField(required=True)
checkout = fields.AbstractForeignField('User')
deleted_by = fields.AbstractForeignField('User')
deleted_on = fields.DateTimeField(auto_now_add=True)
tags = fields.ForeignField('Tag', list=True)
suspended = fields.BooleanField(default=False)
visit = fields.IntegerField(default=0)
copied_from = fields.ForeignField('StoredFileNode', default=None)
@property
def deep_url(self):
"""Allows deleted files to resolve to a view
that will provide a nice error message and http.GONE
"""
return self.node.web_url_for('addon_deleted_file', trashed_id=self._id)
# For Comment API compatibility
@property
def target_type(self):
"""The object "type" used in the OSF v2 API."""
return 'files'
@property
def root_target_page(self):
"""The comment page type associated with TrashedFileNodes."""
return 'files'
@property
def is_deleted(self):
return True
def belongs_to_node(self, node_id):
"""Check whether the file is attached to the specified node."""
return self.node._id == node_id
def get_extra_log_params(self, comment):
return {'file': {'name': self.name, 'url': comment.get_comment_page_url()}}
def restore(self, recursive=True, parent=None):
"""Recreate a StoredFileNode from the data in this object
Will re-point all guids and finally remove itself
:raises KeyExistsException:
"""
data = self.to_storage()
data.pop('deleted_on')
data.pop('deleted_by')
data.pop('suspended')
if parent:
data['parent'] = parent._id
elif data['parent']:
# parent is an AbstractForeignField, so it gets stored as tuple
data['parent'] = data['parent'][0]
restored = FileNode.resolve_class(self.provider, int(self.is_file))(**data)
if not restored.parent:
raise ValueError('No parent to restore to')
restored.save()
# repoint guid
for guid in Guid.find(Q('referent', 'eq', self)):
guid.referent = restored
guid.save()
if recursive:
for child in TrashedFileNode.find(Q('parent', 'eq', self)):
child.restore(recursive=recursive, parent=restored)
TrashedFileNode.remove_one(self)
return restored
def get_guid(self):
"""Attempt to find a Guid that points to this object.
:rtype: Guid or None
"""
try:
# Note sometimes multiple GUIDs can exist for
# a single object. Just go with the first one
return Guid.find(Q('referent', 'eq', self))[0]
except IndexError:
return None
@unique_on(['node', 'name', 'parent', 'is_file', 'provider', 'path'])
class StoredFileNode(StoredObject, Commentable):
"""The storage backend for FileNode objects.
This class should generally not be used or created manually as FileNode
contains all the helpers required.
A FileNode wraps a StoredFileNode to provide a usable abstraction layer
"""
__indices__ = [{
'unique': False,
'key_or_list': [
('path', pymongo.ASCENDING),
('node', pymongo.ASCENDING),
('is_file', pymongo.ASCENDING),
('provider', pymongo.ASCENDING),
]
}, {
'unique': False,
'key_or_list': [
('node', pymongo.ASCENDING),
('is_file', pymongo.ASCENDING),
('provider', pymongo.ASCENDING),
]
}, {
'unique': False,
'key_or_list': [
('parent', pymongo.ASCENDING),
]
}]
_id = fields.StringField(primary=True, default=lambda: str(bson.ObjectId()))
# The last time the touch method was called on this FileNode
last_touched = fields.DateTimeField()
# A list of dictionaries sorted by the 'modified' key
# The raw output of the metadata request deduped by etag
# Added regardless of whether it can be pinned to a version or not
history = fields.DictionaryField(list=True)
# A concrete version of a FileNode, must have an identifier
versions = fields.ForeignField('FileVersion', list=True)
node = fields.ForeignField('Node', required=True)
parent = fields.ForeignField('StoredFileNode', default=None)
copied_from = fields.ForeignField('StoredFileNode', default=None)
is_file = fields.BooleanField(default=True)
provider = fields.StringField(required=True)
name = fields.StringField(required=True)
path = fields.StringField(required=True)
materialized_path = fields.StringField(required=True)
visit = fields.IntegerField(default=0)
# The User that has this file "checked out"
# Should only be used for OsfStorage
checkout = fields.AbstractForeignField('User')
# Tags for a file, currently only used for OsfStorage
tags = fields.ForeignField('Tag', list=True)
# For Django compatibility
@property
def pk(self):
return self._id
# For Django compatibility
# TODO Find a better way
@property
def node_id(self):
return self.node._id
@property
def deep_url(self):
return self.wrapped().deep_url
@property
def absolute_api_v2_url(self):
path = '/files/{}/'.format(self._id)
return util.api_v2_url(path)
# For Comment API compatibility
@property
def target_type(self):
"""The object "type" used in the OSF v2 API."""
return 'files'
@property
def root_target_page(self):
"""The comment page type associated with StoredFileNodes."""
return 'files'
@property
def is_deleted(self):
if self.provider == 'osfstorage':
return False
def belongs_to_node(self, node_id):
"""Check whether the file is attached to the specified node."""
return self.node._id == node_id
def get_extra_log_params(self, comment):
return {'file': {'name': self.name, 'url': comment.get_comment_page_url()}}
# used by django and DRF
def get_absolute_url(self):
return self.absolute_api_v2_url
def wrapped(self):
"""Wrap self in a FileNode subclass
"""
return FileNode.resolve_class(self.provider, int(self.is_file))(self)
def get_guid(self, create=False):
"""Attempt to find a Guid that points to this object.
One will be created if requested.
:param Boolean create: Should we generate a GUID if there isn't one? Default: False
:rtype: Guid or None
"""
try:
# Note sometimes multiple GUIDs can exist for
# a single object. Just go with the first one
return Guid.find(Q('referent', 'eq', self))[0]
except IndexError:
if not create:
return None
return Guid.generate(self)
class FileNodeMeta(type):
"""Keeps track of subclasses of the ``FileNode`` object
Inserts all into the PROVIDER_MAP following the pattern:
{
provider: [ProviderFolder, ProviderFile, ProviderFileNode]
}
"""
def __init__(cls, name, bases, dct):
super(FileNodeMeta, cls).__init__(name, bases, dct)
if hasattr(cls, 'provider'):
cls_map = PROVIDER_MAP.setdefault(cls.provider, [None, None, None])
index = int(getattr(cls, 'is_file', 2))
if cls_map[index] is not None:
raise ValueError('Conflicting providers')
cls_map[index] = cls
class FileNode(object):
"""The base class for the entire files storage system.
Use for querying on all files and folders in the database
Note: This is a proxy object for StoredFileNode
"""
FOLDER, FILE, ANY = 0, 1, 2
__metaclass__ = FileNodeMeta
@classmethod
def create(cls, **kwargs):
"""A layer of abstraction around the creation of FileNodes.
Provides hook in points for subclasses
This is used only for GUID creation.
"""
assert hasattr(cls, 'is_file') and hasattr(cls, 'provider'), 'Must have is_file and provider to call create'
kwargs['is_file'] = cls.is_file
kwargs['provider'] = cls.provider
return cls(**kwargs)
@classmethod
def get_or_create(cls, node, path):
"""Tries to find a FileNode with node and path
See FileNode.create
Note: Osfstorage overrides this method due to odd database constraints
"""
path = '/' + path.lstrip('/')
try:
# Note: Possible race condition here
# Currently create-then-find is not really feasible as create would require a
# call to save which we choose not to call to avoid filling the database
# with notfound/googlebot files/url. Raising 404 errors may roll back the transaction however
return cls.find_one(Q('node', 'eq', node) & Q('path', 'eq', path))
except NoResultsFound:
return cls.create(node=node, path=path)
@classmethod
def get_file_guids(cls, materialized_path, provider, node):
guids = []
materialized_path = '/' + materialized_path.lstrip('/')
if materialized_path.endswith('/'):
folder_children = cls.find(Q('provider', 'eq', provider) &
Q('node', 'eq', node) &
Q('materialized_path', 'startswith', materialized_path))
for item in folder_children:
if item.kind == 'file':
guid = item.get_guid()
if guid:
guids.append(guid._id)
else:
try:
file_obj = cls.find_one(Q('node', 'eq', node) & Q('materialized_path', 'eq', materialized_path))
except NoResultsFound:
return guids
guid = file_obj.get_guid()
if guid:
guids.append(guid._id)
return guids
@classmethod
def resolve_class(cls, provider, _type=2):
"""Resolve a provider and type to the appropriate subclass.
Usage:
>>> FileNode.resolve_class('box', FileNode.ANY) # BoxFileNode
>>> FileNode.resolve_class('dropbox', FileNode.FILE) # DropboxFile
:rtype: Subclass of FileNode
"""
try:
return PROVIDER_MAP[provider][int(_type)]
except IndexError:
raise exceptions.SubclassNotFound('_type must be 0, 1, or 2')
except KeyError:
raise exceptions.SubclassNotFound(provider)
@classmethod
def _filter(cls, qs=None):
"""Creates an odm query to limit the scope of whatever search method
to the given class.
:param qs RawQuery: An odm query or None
:rtype: RawQuery or None
"""
# Build a list of all possible constraints, leaving None when appropriate
# filter(None, ...) removes all falsey values
qs = filter(None, (qs,
Q('is_file', 'eq', cls.is_file) if hasattr(cls, 'is_file') else None,
Q('provider', 'eq', cls.provider) if hasattr(cls, 'provider') else None,
))
# If our list is empty return None; there are no filters to apply
if not qs:
return None
# Use reduce to & together all our queries. equavilent to:
# return q1 & q2 ... & qn
return functools.reduce(lambda q1, q2: q1 & q2, qs)
@classmethod
def find(cls, qs=None):
"""A proxy for StoredFileNode.find but applies class based contraints.
Wraps The MongoQuerySet in a GenWrapper this overrides the __iter__ of
MongoQuerySet to return wrapped objects
:rtype: GenWrapper<MongoQuerySet<cls>>
"""
return utils.GenWrapper(StoredFileNode.find(cls._filter(qs)))
@classmethod
def find_one(cls, qs):
"""A proxy for StoredFileNode.find_one but applies class based contraints.
:rtype: cls
"""
return StoredFileNode.find_one(cls._filter(qs)).wrapped()
@classmethod
def files_checked_out(cls, user):
"""
:param user: The user with checkedout files
:return: A queryset of all FileNodes checked out by user
"""
return cls.find(Q('checkout', 'eq', user))
@classmethod
def load(cls, _id):
"""A proxy for StoredFileNode.load requires the wrapped version of the found value
to be an instance of cls.
:rtype: cls
"""
inst = StoredFileNode.load(_id)
if not inst:
return None
inst = inst.wrapped()
assert isinstance(inst, cls), 'Loaded object {} is not of type {}'.format(inst, cls)
return inst
@property
def parent(self):
"""A proxy to self.stored_object.parent but forces it to be wrapped.
"""
if self.stored_object.parent:
return self.stored_object.parent.wrapped()
return None
@parent.setter
def parent(self, val):
"""A proxy to self.stored_object.parent but will unwrap it when need be
"""
if isinstance(val, FileNode):
val = val.stored_object
self.stored_object.parent = val
@property
def copied_from(self):
if self.stored_object.copied_from:
return self.stored_object.copied_from
return None
@copied_from.setter
def copied_from(self, val):
if isinstance(val, FileNode):
val = val.stored_object
self.stored_object.copied_from = val
@property
def deep_url(self):
"""The url that this filenodes guid should resolve to.
Implemented here so that subclasses may override it or path.
See OsfStorage or PathFollowingNode.
"""
return self.node.web_url_for('addon_view_or_download_file', provider=self.provider, path=self.path.strip('/'))
@property
def kind(self):
"""Whether this FileNode is a file or folder as a string.
Used for serialization and backwards compatibility
:rtype: str
:returns: 'file' or 'folder'
"""
return 'file' if self.is_file else 'folder'
def __init__(self, *args, **kwargs):
"""Contructor for FileNode's subclasses
If called with only a StoredFileNode it will be attached to self
Otherwise:
Injects provider and is_file when appropriate.
Creates a new StoredFileNode with kwargs, not saved.
Then attaches stored_object to self
"""
if args and isinstance(args[0], StoredFileNode):
assert len(args) == 1
assert len(kwargs) == 0
self.stored_object = args[0]
else:
if hasattr(self, 'provider'):
kwargs['provider'] = self.provider
if hasattr(self, 'is_file'):
kwargs['is_file'] = self.is_file
self.stored_object = StoredFileNode(*args, **kwargs)
def save(self):
"""A proxy to self.stored_object.save.
Implemented top level so that child class may override it
and just call super.save rather than self.stored_object.save
"""
return self.stored_object.save()
def serialize(self, **kwargs):
return {
'id': self._id,
'path': self.path,
'name': self.name,
'kind': self.kind,
}
def generate_waterbutler_url(self, **kwargs):
return util.waterbutler_api_url_for(
self.node._id,
self.provider,
self.path,
**kwargs
)
def delete(self, user=None, parent=None):
"""Move self into the TrashedFileNode collection
and remove it from StoredFileNode
:param user User or None: The user that deleted this FileNode
"""
trashed = self._create_trashed(user=user, parent=parent)
self._repoint_guids(trashed)
self.node.save()
StoredFileNode.remove_one(self.stored_object)
return trashed
def copy_under(self, destination_parent, name=None):
return utils.copy_files(self, destination_parent.node, destination_parent, name=name)
def move_under(self, destination_parent, name=None):
self.name = name or self.name
self.parent = destination_parent.stored_object
self._update_node(save=True) # Trust _update_node to save us
return self
def update(self, revision, data, save=True, user=None):
"""Note: User is a kwargs here because of special requirements of
dataverse and django
See dataversefile.update
"""
self.name = data['name']
self.materialized_path = data['materialized']
self.last_touched = datetime.datetime.utcnow()
if save:
self.save()
def _create_trashed(self, save=True, user=None, parent=None):
trashed = TrashedFileNode(
_id=self._id,
name=self.name,
path=self.path,
node=self.node,
parent=parent or self.parent,
history=self.history,
is_file=self.is_file,
checkout=self.checkout,
provider=self.provider,
versions=self.versions,
last_touched=self.last_touched,
materialized_path=self.materialized_path,
deleted_by=user
)
if save:
trashed.save()
return trashed
def _repoint_guids(self, updated):
for guid in Guid.find(Q('referent', 'eq', self)):
guid.referent = updated
guid.save()
def _update_node(self, recursive=True, save=True):
if self.parent is not None:
self.node = self.parent.node
if save:
self.save()
if recursive and not self.is_file:
for child in self.children:
child._update_node(save=save)
def __getattr__(self, name):
"""For the purpose of proxying all calls to the below stored_object
Saves typing out ~10 properties or so
"""
if 'stored_object' in self.__dict__:
try:
return getattr(self.stored_object, name)
except AttributeError:
pass # Avoids error message about the underlying object
return object.__getattribute__(self, name)
def __setattr__(self, name, val):
# Property setters are called after __setattr__ is called
# If the requested attribute is a property with a setter go ahead and use it
maybe_prop = getattr(self.__class__, name, None)
if isinstance(maybe_prop, property) and maybe_prop.fset is not None:
return object.__setattr__(self, name, val)
if 'stored_object' in self.__dict__:
return setattr(self.stored_object, name, val)
return object.__setattr__(self, name, val)
def __eq__(self, other):
return self.stored_object == getattr(other, 'stored_object', None)
def __repr__(self):
return '<{}(name={!r}, node={!r})>'.format(
self.__class__.__name__,
self.stored_object.name,
self.stored_object.node
)
class File(FileNode):
is_file = True
version_identifier = 'revision' # For backwards compatibility
def get_version(self, revision, required=False):
"""Find a version with identifier revision
:returns: FileVersion or None
:raises: VersionNotFoundError if required is True
"""
for version in reversed(self.versions):
if version.identifier == revision:
break
else:
if required:
raise exceptions.VersionNotFoundError(revision)
return None
return version
def update_version_metadata(self, location, metadata):
for version in reversed(self.versions):
if version.location == location:
version.update_metadata(metadata)
return
raise exceptions.VersionNotFoundError(location)
def touch(self, auth_header, revision=None, **kwargs):
"""The bread and butter of File, collects metadata about self
and creates versions and updates self when required.
If revision is None the created version is NOT and should NOT be saved,
as there is no identifying information to tell if it needs to be updated or not.
Hits Waterbutler's metadata endpoint and saves the returned data.
If a file cannot be rendered (e.g. figshare private files), a tuple of the FileVersion and
renderable HTML will be returned.
>>>isinstance(file_node.touch(), tuple) # This file cannot be rendered
:param str or None auth_header: If truthy it will set as the Authorization header
:returns: None if the file is not found otherwise FileVersion or (version, Error HTML)
"""
# For backwards compatibility
revision = revision or kwargs.get(self.version_identifier)
version = self.get_version(revision)
# Versions do not change. No need to refetch what we already know
if version is not None:
return version
headers = {}
if auth_header:
headers['Authorization'] = auth_header
resp = requests.get(
self.generate_waterbutler_url(revision=revision, meta=True, **kwargs),
headers=headers,
)
if resp.status_code != 200:
logger.warning('Unable to find {} got status code {}'.format(self, resp.status_code))
return None
return self.update(revision, resp.json()['data']['attributes'])
# TODO Switch back to head requests
# return self.update(revision, json.loads(resp.headers['x-waterbutler-metadata']))
def update(self, revision, data, user=None):
"""Using revision and data update all data pretaining to self
:param str or None revision: The revision that data points to
:param dict data: Metadata received from waterbutler
:returns: FileVersion
"""
self.name = data['name']
self.materialized_path = data['materialized']
version = FileVersion(identifier=revision)
version.update_metadata(data, save=False)
# Transform here so it can be sorted on later
if data['modified'] is not None and data['modified'] != '':
data['modified'] = parse_date(
data['modified'],
ignoretz=True,
default=datetime.datetime.utcnow() # Just in case nothing can be parsed
)
# If revision is None then version is the latest version
# Don't save the latest information
if revision is not None:
version.save()
self.versions.append(version)
for entry in self.history:
if entry['etag'] == data['etag']:
break
else:
# Insert into history if there is no matching etag
utils.insort(self.history, data, lambda x: x['modified'])
# Finally update last touched
self.last_touched = datetime.datetime.utcnow()
self.save()
return version
def get_download_count(self, version=None):
"""Pull the download count from the pagecounter collection
Limit to version if specified.
Currently only useful for OsfStorage
"""
parts = ['download', self.node._id, self._id]
if version is not None:
parts.append(version)
page = ':'.join([format(part) for part in parts])
_, count = get_basic_counters(page)
return count or 0
def serialize(self):
if not self.versions:
return dict(
super(File, self).serialize(),
size=None,
version=None,
modified=None,
contentType=None,
downloads=self.get_download_count(),
checkout=self.checkout._id if self.checkout else None,
)
version = self.versions[-1]
return dict(
super(File, self).serialize(),
size=version.size,
downloads=self.get_download_count(),
checkout=self.checkout._id if self.checkout else None,
version=version.identifier if self.versions else None,
contentType=version.content_type if self.versions else None,
modified=version.date_modified.isoformat() if version.date_modified else None,
)
class Folder(FileNode):
is_file = False
@property
def children(self):
"""Finds all Filenodes that view self as a parent
:returns: A GenWrapper for all children
:rtype: GenWrapper<MongoQuerySet<cls>>
"""
return FileNode.find(Q('parent', 'eq', self._id))
def delete(self, recurse=True, user=None, parent=None):
trashed = self._create_trashed(user=user, parent=parent)
if recurse:
for child in self.children:
child.delete(user=user, parent=trashed)
self._repoint_guids(trashed)
StoredFileNode.remove_one(self.stored_object)
return trashed
def append_file(self, name, path=None, materialized_path=None, save=True):
return self._create_child(name, FileNode.FILE, path=path, materialized_path=materialized_path, save=save)
def append_folder(self, name, path=None, materialized_path=None, save=True):
return self._create_child(name, FileNode.FOLDER, path=path, materialized_path=materialized_path, save=save)
def _create_child(self, name, kind, path=None, materialized_path=None, save=True):
child = FileNode.resolve_class(self.provider, kind)(
name=name,
node=self.node,
path=path or '/' + name,
parent=self.stored_object,
# Folders (not kind) get a trailing slash on the materialized path; files do not.
materialized_path=materialized_path or
os.path.join(self.materialized_path, name) + ('/' if not kind else '')
).wrapped()
if save:
child.save()
return child
def find_child_by_name(self, name, kind=2):
return FileNode.resolve_class(self.provider, kind).find_one(
Q('name', 'eq', name) &
Q('parent', 'eq', self)
)
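# A hypothetical usage sketch of the Folder helpers above (names are
# illustrative, not taken from the OSF codebase):
#
#   child = folder.append_file('data.csv')
#   found = folder.find_child_by_name('data.csv', kind=FileNode.FILE)
#   assert found == child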
class FileVersion(StoredObject):
"""A version of an OsfStorageFileNode. contains information
about where the file is located, hashes and datetimes
"""
__indices__ = [{
'unique': False,
'key_or_list': [
('_id', pymongo.ASCENDING),
('metadata.vault', pymongo.ASCENDING),
('metadata.archive', pymongo.ASCENDING),
('metadata.sha256', pymongo.ASCENDING),
]
}]
_id = fields.StringField(primary=True, default=lambda: str(bson.ObjectId()))
creator = fields.ForeignField('user')
identifier = fields.StringField(required=True)
# Date version record was created. This is the date displayed to the user.
date_created = fields.DateTimeField(auto_now_add=True)
# Dictionary specifying all information needed to locate file on backend
# {
# 'service': 'cloudfiles', # required
# 'container': 'osf', # required
# 'object': '20c53b', # required
# 'worker_url': '127.0.0.1',
# 'worker_host': 'upload-service-1',
# }
location = fields.DictionaryField(default=None, validate=utils.validate_location)
# Dictionary containing raw metadata from upload service response
# {
# 'size': 1024, # required
# 'content_type': 'text/plain', # required
# 'date_modified': '2014-11-07T20:24:15', # required
# 'md5': 'd077f2',
# }
metadata = fields.DictionaryField()
size = fields.IntegerField()
content_type = fields.StringField()
# Date file modified on third-party backend. Not displayed to user, since
# this date may be earlier than the date of upload if the file already
# exists on the backend
date_modified = fields.DateTimeField()
@property
def location_hash(self):
return self.location['object']
@property
def archive(self):
return self.metadata.get('archive')
def is_duplicate(self, other):
return self.location_hash == other.location_hash
def update_metadata(self, metadata, save=True):
self.metadata.update(metadata)
# metadata has no defined structure so only attempt to set attributes
# If they are not in this payload they'll be in the next
self.size = self.metadata.get('size', self.size)
self.content_type = self.metadata.get('contentType', self.content_type)
if self.metadata.get('modified'):
# TODO handle the timezone here the user that updates the file may see an
# Incorrect version
self.date_modified = parse_date(self.metadata['modified'], ignoretz=True)
if save:
self.save()
def _find_matching_archive(self, save=True):
"""Find another version with the same sha256 as this file.
If found copy its vault name and glacier id, no need to create additional backups.
Returns True if found, otherwise False
"""
if 'sha256' not in self.metadata:
return False # Don't bother searching for nothing
if 'vault' in self.metadata and 'archive' in self.metadata:
# Shouldn't ever happen, but we already have an archive
return True # We've found ourselves
qs = self.__class__.find(
Q('_id', 'ne', self._id) &
Q('metadata.vault', 'ne', None) &
Q('metadata.archive', 'ne', None) &
Q('metadata.sha256', 'eq', self.metadata['sha256'])
).limit(1)
if qs.count() < 1:
return False
other = qs[0]
try:
self.metadata['vault'] = other.metadata['vault']
self.metadata['archive'] = other.metadata['archive']
except KeyError:
return False
if save:
self.save()
return True
|
samchrisinger/osf.io
|
website/files/models/base.py
|
Python
|
apache-2.0
| 31,875
|
[
"VisIt"
] |
9fb0c6c981f103d314a44a0a7260dd11c34ebebae3ff378e47b64c59b9df496f
|
"""
Write output files specific for xray data.
-- CXIdb files
"""
import tables
import numpy as np
from mdtraj import io
def write_cxidb(filename, shotset, sample_name='thorshotset'):
"""
Write a shotset to disk in CXIdb format.
Parameters
----------
filename : str
The name of the file!
shotset : thor.xray.ShotSet
The shotset object to save
Optional Parameters
-------------------
sample_name : str
The name of the sample, to aid future researchers
References
----------
..[1] http://www.cxidb.org
"""
if not filename.endswith('.cxi'):
filename += '.cxi'
# this code is based on the diagram on pp 13 of the document "The CXI File
# Format for Coherent X-ray Imaging" (v 1.3, F. Maia, 2012)
f = tables.File(filename, mode='w')
# generate atoms
fa = tables.Atom.from_dtype(np.dtype(np.float64))
# generate all groups
f.create_group('/', 'entry_1')
f.create_group('/entry_1', 'sample_1')
f.create_group('/entry_1', 'data_1')
f.create_group('/entry_1', 'image_1')
f.create_group('/entry_1/image_1', 'detector_1')
f.create_group('/entry_1/image_1', 'source_1')
# CXI version
version = 110
f.create_carray('/', 'cxi_version', obj=[version])
# data name
f.create_carray('/entry_1/sample_1', 'name', obj=[sample_name])
# save data
pi_node = f.create_earray(where='/entry_1/image_1', name='intensities',
shape=(0, shotset.num_pixels),
atom=fa, filters=io.COMPRESSION,
expectedrows=shotset.num_shots)
for intx in shotset.intensities_iter:
pi_node.append(intx[None,:])
# link /entry_1/data_1/data to /entry_1/data_1/image_1 (not sure why?)
f.create_soft_link('/entry_1/data_1', 'data', '/entry_1/image_1/data')
# data attributes
f.create_carray('/entry_1/image_1', 'data_type', obj=['intensities'])
f.create_carray('/entry_1/image_1', 'data_space', obj=['diffraction'])
# detector
# THIS IS NOT CXIdb FORMAT -- but that format for detectors is a bit rough
# and I am not going to spend time on it right now
f.create_carray('/entry_1/image_1/detector_1', 'pixelmap', obj=shotset.detector.xyz)
if shotset.mask is not None:
f.create_carray('/entry_1/image_1/detector_1', 'mask', obj=shotset.mask)
# energy
f.create_carray('/entry_1/image_1/source_1', 'energy', obj=[shotset.detector.beam.energy])
# fft shifted (?)
f.create_carray('/entry_1/image_1', 'is_fft_shifted', obj=[0])
return
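# Hypothetical usage sketch (assumes `ss` is a thor.xray.ShotSet exposing
# the detector, beam energy, mask and intensities accessed above; file and
# sample names are illustrative):
#
#   write_cxidb('run42', ss, sample_name='lysozyme')
#   # -> writes run42.cxi with intensities under /entry_1/image_1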
|
tjlane/thor
|
src/python/write.py
|
Python
|
gpl-2.0
| 2,737
|
[
"MDTraj"
] |
ded483320de074c835e2dc63f0daad6b21150049d2870a1304d2ebdbb872abaf
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkQuadricClustering(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkQuadricClustering(), 'Processing.',
('vtkPolyData',), ('vtkPolyData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkQuadricClustering.py
|
Python
|
bsd-3-clause
| 495
|
[
"VTK"
] |
798e7f70de9de5b24a1d6b2b3d432277495febd9518014d3a1c832d40f27436f
|
# coding: utf8
""" Core functions for
- Uniform spanning trees
* :func:`ust_sampler_wilson`
* :func:`ust_sampler_aldous_broder`
- Descent processes :class:`Descent`:
* :func:`uniform_permutation`
- :class:`PoissonizedPlancherel` measure
* :func:`uniform_permutation`
* :func:`RSK`: Robinson-Schensted-Knuth correspondence
* :func:`xy_young_ru`: Young diagram -> Russian convention coordinates
* :func:`limit_shape`
.. seealso::
`Documentation on ReadTheDocs <https://dppy.readthedocs.io/en/latest/exotic_dpps/index.html>`_
"""
import functools # used for decorators to pass docstring
import numpy as np
from itertools import chain # create graph edges from path
# For class PoissonizedPlancherel
from bisect import bisect_right # for RSK
from dppy.utils import check_random_state
def ust_sampler_wilson(list_of_neighbors, root=None,
random_state=None):
try:
import networkx as nx
except ImportError:
raise ValueError('The networkx package is required to sample spanning trees (see setup.py).')
rng = check_random_state(random_state)
# Initialize the tree
wilson_tree_graph = nx.Graph()
nb_nodes = len(list_of_neighbors)
# Initialize the root, if root not specified start from any node
n0 = root if root else rng.choice(nb_nodes) # size=1)[0]
# -1 = not visited / 0 = in path / 1 = in tree
state = -np.ones(nb_nodes, dtype=int)
state[n0] = 1
nb_nodes_in_tree = 1
path, branches = [], [] # branches of tree, temporary path
while nb_nodes_in_tree < nb_nodes: # |Tree| = |V| - 1
# visit a neighbor of n0 uniformly at random
n1 = rng.choice(list_of_neighbors[n0]) # size=1)[0]
if state[n1] == -1: # not visited => continue the walk
path.append(n1) # add it to the path
state[n1] = 0 # mark it as in the path
n0 = n1 # continue the walk
if state[n1] == 0: # loop on the path => erase the loop
knot = path.index(n1) # find first appearance of n1 in the path
nodes_loop = path[knot + 1:] # identify nodes forming the loop
del path[knot + 1:] # erase the loop
state[nodes_loop] = -1 # mark loopy nodes as not visited
n0 = n1 # continue the walk
elif state[n1] == 1: # hits the tree => new branch
if nb_nodes_in_tree == 1:
branches.append([n1] + path) # initial branch of the tree
else:
branches.append(path + [n1]) # path as a new branch
state[path] = 1 # mark nodes in path as in the tree
nb_nodes_in_tree += len(path)
# Restart the walk from a random node among those not visited
nodes_not_visited = np.where(state == -1)[0]
if nodes_not_visited.size:
n0 = rng.choice(nodes_not_visited) # size=1)[0]
path = [n0]
tree_edges = list(chain.from_iterable(map(lambda x: zip(x[:-1], x[1:]),
branches)))
wilson_tree_graph.add_edges_from(tree_edges)
return wilson_tree_graph
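# Minimal usage sketch (assumes networkx is installed; the adjacency
# lists below describe a 4-cycle and are purely illustrative):
#
#   neighbors = [[1, 3], [0, 2], [1, 3], [0, 2]]
#   tree = ust_sampler_wilson(neighbors, root=0)
#   tree.number_of_edges()  # == 3, a spanning tree of 4 nodes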
def ust_sampler_aldous_broder(list_of_neighbors, root=None,
random_state=None):
try:
import networkx as nx
except ImportError:
raise ValueError('The networkx package is required to sample spanning trees (see setup.py).')
rng = check_random_state(random_state)
# Initialize the tree
aldous_tree_graph = nx.Graph()
nb_nodes = len(list_of_neighbors)
# Initialize the root, if root not specified start from any node
n0 = root if root else rng.choice(nb_nodes) # size=1)[0]
visited = np.zeros(nb_nodes, dtype=bool)
visited[n0] = True
nb_nodes_in_tree = 1
tree_edges = np.zeros((nb_nodes - 1, 2), dtype=int) # np.int is deprecated in modern numpy
while nb_nodes_in_tree < nb_nodes:
# visit a neighbor of n0 uniformly at random
n1 = rng.choice(list_of_neighbors[n0]) # size=1)[0]
if visited[n1]:
pass # continue the walk
else: # create edge (n0, n1) and continue the walk
tree_edges[nb_nodes_in_tree - 1] = [n0, n1]
visited[n1] = True # mark it as in the tree
nb_nodes_in_tree += 1
n0 = n1
aldous_tree_graph.add_edges_from(tree_edges)
return aldous_tree_graph
def uniform_permutation(N, random_state=None):
""" Draw a perputation :math:`\\sigma \\in \\mathfrak{S}_N` uniformly at random using Fisher-Yates' algorithm
.. seealso::
- `Fisher–Yates shuffle <https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle>`_
- `Numpy shuffle <https://github.com/numpy/numpy/blob/d429f0fe16c0407509b1f20d997bf94f1027f61b/numpy/random/mtrand.pyx#L4027>`_
"""
rng = check_random_state(random_state)
sigma = np.arange(N)
for i in range(N - 1, 0, -1): # reversed(range(1, N))
j = rng.randint(0, i + 1)
if j == i:
continue
sigma[j], sigma[i] = sigma[i], sigma[j]
# for i in range(N - 1):
# j = rng.randint(i, N)
# sigma[j], sigma[i] = sigma[i], sigma[j]
return sigma
def RSK(sequence):
"""Apply Robinson-Schensted-Knuth correspondence on a sequence of reals, e.g. a permutation, and return the corresponding insertion and recording tableaux.
:param sequence:
Sequence of real numbers
:type sequence:
array_like
:return:
:math:`P, Q` insertion and recording tableaux
:rtype:
list
.. seealso::
`RSK Wikipedia <https://en.wikipedia.org/wiki/Robinson%E2%80%93Schensted%E2%80%93Knuth_correspondence>`_
"""
P, Q = [], [] # Insertion/Recording tableau
for it, x in enumerate(sequence, start=1):
# Iterate along the rows of the tableau P to find a place for the bouncing x and record the position where it is inserted
for row_P, row_Q in zip(P, Q):
# If x finds a place at the end of a row of P
if x >= row_P[-1]:
row_P.append(x) # add the element at the end of the row of P
row_Q.append(it) # record its position in the row of Q
break
else:
# find place for x in the row of P to keep the row ordered
ind_insert = bisect_right(row_P, x)
# Swap x with the value in place
x, row_P[ind_insert] = row_P[ind_insert], x
# If no room for x at the end of any row of P create a new row
else:
P.append([x])
Q.append([it])
return P, Q
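# Worked example (a sketch): applying RSK to the permutation (3, 1, 2) gives
# insertion tableau P = [[1, 2], [3]] and recording tableau Q = [[1, 3], [2]];
# the two tableaux always share the same shape.
# P, Q = RSK([3, 1, 2])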
def xy_young_ru(young_diag):
    """ Compute the xy coordinates of the segments outlining the boxes of a Young diagram, using the Russian convention (rotated by 45 degrees and rescaled).
    :param young_diag:
        row lengths of the Young diagram
    :type young_diag:
        array_like
    :return:
        xy coordinates of the segments outlining the boxes
    :rtype:
        array_like
    """
def intertwine(arr_1, arr_2):
inter = np.empty((arr_1.size + arr_2.size,), dtype=arr_1.dtype)
inter[0::2], inter[1::2] = arr_1, arr_2
return inter
# horizontal lines
x_hor = intertwine(np.zeros_like(young_diag), young_diag)
y_hor = np.repeat(np.arange(1, young_diag.size + 1), repeats=2)
# vertical lines
uniq, ind = np.unique(young_diag[::-1], return_index=True)
gaps = np.ediff1d(uniq, to_begin=young_diag[-1])
x_vert = np.repeat(np.arange(1, 1 + gaps.sum()), repeats=2)
y_vert = np.repeat(young_diag.size - ind, repeats=gaps)
y_vert = intertwine(np.zeros_like(y_vert), y_vert)
xy_young_fr = np.column_stack(
[np.hstack([x_hor, x_vert]), np.hstack([y_hor, y_vert])])
rot_45_and_scale = np.array([[1.0, -1.0],
[1.0, 1.0]])
return xy_young_fr.dot(rot_45_and_scale.T)
def limit_shape(x):
""" Evaluate :math:`\\omega(x)` the limit-shape function :cite:`Ker96`
    .. math::
        \\omega(x) =
        \\begin{cases}
            |x|, &\\text{if } |x| \\geq 2\\\\
            \\frac{2}{\\pi} \\left( x \\arcsin\\left(\\frac{x}{2}\\right) + \\sqrt{4 - x^2} \\right), &\\text{otherwise}
        \\end{cases}
:param x:
points
:type x:
array_like
:return:
:math:`\\omega(x)`
:rtype:
array_like
.. seealso::
- :func:`plot_diagram <plot_diagram>`
- :cite:`Ker96`
"""
w_x = np.zeros_like(x)
abs_x_gt2 = np.abs(x) >= 2.0
w_x[abs_x_gt2] = np.abs(x[abs_x_gt2])
w_x[~abs_x_gt2] = x[~abs_x_gt2] * np.arcsin(0.5 * x[~abs_x_gt2])\
+ np.sqrt(4.0 - x[~abs_x_gt2]**2)
w_x[~abs_x_gt2] *= 2.0 / np.pi
return w_x
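# Quick sanity check (a sketch): the two branches agree at |x| = 2, and
# omega(0) = 4/pi.
# limit_shape(np.array([0.0, 2.0, 3.0]))  # -> array([1.2732..., 2., 3.])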
|
guilgautier/DPPy
|
dppy/exotic_dpps_core.py
|
Python
|
mit
| 8,692
|
[
"VisIt"
] |
4de09c1b6a27e6b009ece94ac0e6bc0fe98f279c32ea513873714fa6e0b784ed
|
#!/usr/bin/env python
import sys
import numpy as np
import nibabel as nib
from scipy.signal import convolve
from scipy.ndimage.morphology import grey_closing
from scipy.ndimage.morphology import generate_binary_structure
from mne import write_surface
from mcubes import marching_cubes
def mkoutersurf(image, radius, outfile):
#radius information is currently ignored
#it is a little tougher to deal with the morphology in python
fill = nib.load( image )
filld = fill.get_data()
filld[filld==1] = 255
gaussian = np.ones((2,2))*.25
image_f = np.zeros((256,256,256))
for slice in range(256):
temp = filld[:,:,slice]
image_f[:,:,slice] = convolve(temp, gaussian, 'same')
image2 = np.zeros((256,256,256))
image2[np.where(image_f <= 25)] = 0
image2[np.where(image_f > 25)] = 255
strel15 = generate_binary_structure(3, 1)
BW2 = grey_closing(image2, structure=strel15)
thresh = np.max(BW2)/2
BW2[np.where(BW2 <= thresh)] = 0
BW2[np.where(BW2 > thresh)] = 255
v, f = marching_cubes(BW2, 100)
v2 = np.transpose(
np.vstack( ( 128 - v[:,0],
v[:,2] - 128,
128 - v[:,1], )))
write_surface(outfile, v2, f)
if __name__=='__main__':
if not len(sys.argv) == 4:
raise ValueError("the fail at give argument correct,\n"
"volume filled first, diameter integral then, arguments file "
"output")
mkoutersurf( sys.argv[1], None, sys.argv[3] )
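# Example invocation (a sketch; file names are hypothetical):
#   python mkoutersurf.py lh_filled.mgz 15 lh_outer_smoothed
# The middle (radius) argument is parsed but ignored, as noted at the top
# of mkoutersurf().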
|
aestrivex/ielu
|
ielu/mkoutersurf.py
|
Python
|
gpl-3.0
| 1,540
|
[
"Gaussian"
] |
b1ff8734365a4bd674d5a6f3d798d5ba7c58e95cc8fb2ce63eb3249557fb38e4
|
#! usr/bin/env python
import optparse, os, csv, glob, sys
import MySQLdb
import PEATSA.Core as Core
import PEATSA.Core.Matrix
import matplotlib.pyplot as plt
import numpy as np
class ProteinComplexTool:
def __init__(self):
return
def DeltaStability(self,inputFile, mutationList, configurationFile, workingDirectory, outputDirectory):
'''Calculates the stability difference between a protein and set of mutants
Parameters:
inputFile: A PDB file of the protein
mutationList: A list of Data.MutationSet instances. Each represents a mutant of the protein.
configurationFile: The location of a proteinDesignTool.conf file - defaults to home directory.
workingDirectory: Where the calculation will be run.
outputDirectory: Where the results will be written.
Returns
A Data.DataSet instance containing one matrix, stabilityResults.
Each row of this matrix corresponds to a mutant defined in the mutationList argument.'''
#Create the ProteinDesignTool instance
tool = Core.ProteinDesignTool.ProteinDesignTool(configurationFile,
workingDirectory=workingDirectory,
pdbFile=inputFile,
outputDirectory=outputDirectory,
removeHeterogens=True)
#The above cleans the pdb file and copies it to the working directory.
#Use this pdb from now on.
inputFile = tool.pdbFile
#Create the mutants
mutantCollection = Core.Data.MutantCollection(pdbFile=inputFile,mutationList=mutationList,location=outputDirectory,temporary=True)
#Run stability calculation
#The results are added to the ProteinDesignTool instance's dataDirectory attribute
#This is an instance of Data.DataSet class
tool.runStabilityCalculation(mutantFiles=mutantCollection.mutantFiles())
#Clean up - Deletes files copied to the working directory for Uffbaps
tool.cleanUp()
return tool.dataDirectory
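    # Sketch of direct use (paths are hypothetical; mutList as produced by
    # createMutlist below):
    # tool = ProteinComplexTool()
    # data = tool.DeltaStability(inputFile='complex.pdb', mutationList=mutList,
    #                            configurationFile='proteinDesignTool.conf',
    #                            workingDirectory='.', outputDirectory='.')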
def remALT(self,pdbfile):
import Protool
x = Protool.structureIO()
x.readpdb('%s.pdb' % (pdbfile))
x.RemoveALT()
x.writepdb('%s.pdb' % (pdbfile), dont_write_HETATMS=1)
print 'Removed alternate residues'
def splitter(self,pdbDir,pdb,reactions_list,cur,db):
import string
if reactions_list == ['']:
# query the database
cur.execute("SELECT DISTINCT Chain_ID from pdb where PDB_ID = '%s';" % (pdb))
a = cur.fetchall() # fetch results
print 'a', a
expr=[]
chains = [i[0] for i in a]
for i in chains:
s=["segid "+i]
expr.append(s)
b = str(a) # convert from tuple to string
exclude = set(string.punctuation) # set of punctutation characters
b = ''.join(ch for ch in b if ch not in exclude) # remove punctuation from b
e = ''.join(b.split(' '))
self.do_split(pdbDir, pdb, expr, e)
return e
else:
expr1=[]
for c in reactions_list:
if len(c)>1:
s = ["segid "+i for i in c]
expr1.append(s)
else:
expr1.append(["segid "+c])
self.do_split(pdbDir,pdb, expr1, reactions_list)
return reactions_list
def do_split(self,pdbDir,pdb, expr, e):
import MDAnalysis
u = MDAnalysis.Universe(pdbDir, permissive=False)
for i in range(len(expr)):
print expr[i]
Z = u.selectAtoms(*expr[i])
Z.write('%s_%s.pdb' % (pdb,e[i]))
print 'Extracted chain(s)', e[i],'from', pdb
def createMutlist(self,pdb):
mutList = Core.Data.CreateScanList(pdb, mutation='ALA', skipResidueTypes=['ALA', 'GLY'])
return mutList
def displayResults(self,pdb,split_list,comp_list,cur,db):
width=0.5
cur.execute("SELECT * FROM results_%s;" % (split_list[0]))
complexResults = cur.fetchall()
mutations = [i[0] for i in complexResults] # Mutation list
complexScores = [i[1] for i in complexResults] # dG scores of pdb complex
count = len(mutations) # Number of calcs
ind = np.arange(count)
if len(split_list)>1: # For binding calcs, no matter in what order chains were split
chainResults = []
for i in split_list[1:]:
cur.execute("select * from results_%s;" % (i))
chainResults.append(cur.fetchall())
chainScores = [i[1] for y in chainResults for i in y] # dG scores of chains split from pdb
ddG = []
cur.execute("create table if not exists ddG_%s_%s(mutation VARCHAR(10), ddG FLOAT);" % (pdb, comp_list))
for i in range(len(complexScores)):
ddG.append(complexScores[i] - chainScores[i])
for i in range(len(mutations)):
print "ddG", mutations[i], ddG[i]
cur.execute("insert into ddG_%s_%s (mutation, ddG) VALUES (%s%s%s, %s%s%s);" % (pdb,comp_list, '"', mutations[i], '"', '"',ddG[i],'"'))
plt.plot(ind+(width/2), ddG, 'o-')
plt.axhline(linewidth=2, color='r')
plt.title("ddG Binding calculations for ALA scan of %s" % (split_list[0]))
else:
for i in range(len(mutations)):
print mutations[i], complexScores[i]
plt.bar(ind,complexScores,width,color='r')
plt.title("dG Stability calculations for ALA scan of %s" % (split_list[0]))
plt.xticks(ind+(width/2), mutations, rotation=90, fontsize=8)
plt.show()
sys.exit()
def main():
# Run program
# Connect to local database containing info about BMP pdbs
db = MySQLdb.connect(host="localhost", user = "root", passwd = "samsung", db = "sat")
cur = db.cursor()
cur.execute("SELECT VERSION()")
ver = cur.fetchone()
print "MySQLdb connection successful"
print "MySQL server version:", ver[0]
# Show pdbs in database
cur.execute("SELECT distinct PDB_ID from pdb;")
print "PDBs in database:"
a = cur.fetchall()
b = ','.join([i[0] for i in a])
print b
# Option to select pdb, config file, working dir etc..
parser = optparse.OptionParser()
# PDB option
parser.add_option("-p", "--pdb", help="Choose all or a pdb id", dest="pdb", default ="all")
# Mutation List or ALA scan option
parser.add_option("-m", "--mutationList", help="Location of mutation list file", dest="mutList", default="ALA")
# Configuration File
parser.add_option("-c", "--configurationFile", help="Location of configuration file", dest="configFile", default="/home/satnam/proteinDesignTool.conf")
# Output Directory
parser.add_option("-o", "--outputDirectory", help="Location of output directory", dest="outputDir", default=os.getcwd())
# Working Directory
parser.add_option("-w", "--workingDirectory", help="Location of working directory", dest="workingDir", default=os.getcwd())
# Choose option for user-defined calculations
parser.add_option("-u", "--userCalcs", help="Choose True or False if you would like to specifiy the calculations, otherwise each chain will be split", dest="userCalcOpt", default=False)
# Show Results Option
parser.add_option("-s", "--showResults", help="Shows previous results? True or False. If they don't exist, they will be calculated.", dest="showResults", default=True)
# Delete results from database
parser.add_option("-d", "--deleteResults", help="Deletes all results for the specified pdb from the database. Default False.", dest="deleteResults", default=False)
(opts, args) = parser.parse_args()
# Instantiate the class
run = ProteinComplexTool()
# pdb name/file handling
pdb = opts.pdb
pdbFile = ''.join((pdb,'.pdb'))
pdbDir = os.path.join(opts.outputDir,pdbFile)
print pdbDir
# Checking if user selected PDB is in the database
if opts.pdb != None:
if opts.pdb not in b:
            sys.exit('PDB not in Database, choose one from list')
if opts.pdb in b:
print 'PDB in Database'
print 'Checking what calculations can be performed'
# Check what calcs can be done with user defined PDB
cur.execute("SELECT distinct Entity_ID, Chain_ID, Chain_name, type from pdb where PDB_ID = %s%s%s;" % ('"',pdb,'"'))
entity = [] # entities in the pdbfile
chains = []
for i in cur.fetchall():
print "Entity:",i[0], "Chain Name:",i[2], "Type:",i[3] , "Chain ID:", i[1]
entity.append(i[0])
chains.append(i[1])
entity.sort()
# Delete results
    if opts.deleteResults == 'True':
        cur.execute("SHOW tables like 'results_%s%s';" % (pdb, '%'))
        drop_tables = cur.fetchall()
        print drop_tables
        for (table_name,) in drop_tables:  # each row is a 1-tuple
            cur.execute("DROP TABLE %s;" % (table_name))
            print "Results for", table_name, "deleted"
else:
pass
# Remove Alternate Residues from pdb, will overwrite the file
run.remALT(pdb)
# User defined splitting of chains from PDB, can be left
# blank and the PDB will be split to individual chains
    reactions_list = ['']
    reactants = ''
    products = ''
    if opts.userCalcOpt != 'False':
        reactants = raw_input("What reactants are consumed (enter chain IDs in the form AB+C+D):")
        products = raw_input("What products are produced (enter chain IDs in the form ABC+D):")
# If user leaves input blank, then the default is to calculate
# every chain individually vs complex
if reactants == '':
reactants = '+'.join(chains)
else:
pass
print reactants
if products == '':
products = ''.join(chains)
else:
pass
reactants_list = reactants.split('+')
products_list = products.split('+')
# Split the pdb into chains, returns chains that have been split (A,B etc)
split_reactants = run.splitter(pdbDir,pdb,reactants_list,cur,db)
split_products = run.splitter(pdbDir,pdb,products_list,cur,db)
comp_list = split_products + split_reactants
comp_list = '_'.join(comp_list)
split_list = []
split_list_products = []
split_list_reactants = []
for i in split_reactants:
s = pdb+'_'+i
split_list_reactants.append(s)
for i in split_products:
s = pdb+'_'+i
split_list_products.append(s)
splitlist = split_list_products + split_list_reactants
for i in splitlist:
if i not in split_list:
split_list.append(i)
# Split_list is a list of the pdb and the individual pdbs
# that have been split
print split_list
# Show results
if opts.showResults == 'True':
count = 0
cur.execute("show tables;")
tables = cur.fetchall()
resTable = "".join(("results_",pdb))
for i in tables:
for y in i:
if y.startswith(resTable):
count +=1
if count != 0:
run.displayResults(pdb,split_list,comp_list,cur,db)
else:
pass
comp_list = comp_list +'_'+os.path.split(opts.mutList)[1]
# Run the calculations
# Load and check mutant list given by user, else do ALA scan
"""
if opts.mutList != "ALA":
mfile = Core.Data.MutationListFile(filename=opts.mutList,create=True)
mfile.removeDuplicates(autoUpdate=False)
mutList = mfile.mutantList()
else:
for i in split_list:
w_pdb = os.path.join(opts.outputDir,'%s.pdb' % (i))
mutList = Core.Data.CreateScanList(pdbFile=w_pdb, mutation='ALA', skipResidueTypes=['ALA', 'GLY'])
"""
for i in split_list:
w_pdb = os.path.join(opts.outputDir,'%s.pdb' % (i))
mutList = Core.Data.CreateScanList(pdbFile=w_pdb, mutation='ALA', skipResidueTypes=['ALA', 'GLY'])
results = run.DeltaStability(inputFile=w_pdb,
mutationList=mutList,
configurationFile=opts.configFile,
workingDirectory=opts.workingDir,
outputDirectory=opts.outputDir)
#Commit to database
cur.execute("create table if not exists results_%s(mutation VARCHAR(10), score FLOAT);" % (i))
for mutant in range(results.stabilityResults.numberOfRows()):
cur.execute("insert into results_%s (mutation, score) VALUES (%s%s%s, %s%s%s);" % (i, '"', results.stabilityResults[mutant][0], '"', '"', results.stabilityResults[mutant][-1],'"'))
print "Calculated ", i, "stability and results added to database"
# Display results
run.displayResults(pdb,split_list,comp_list,cur,db)
if __name__=='__main__':
main()
|
dmnfarrell/peat
|
sandbox/ProteinComplexTool.py
|
Python
|
mit
| 13,150
|
[
"MDAnalysis"
] |
0b1e7edc8e65fdeb7addb0c3bcb79029a2e52fbe0adc63b57485f06aa07793d7
|
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD
import warnings
import numpy as np
from ..base import BaseEstimator
from ..utils import check_random_state
from ..utils.graph import graph_laplacian
from .k_means_ import k_means
def spectral_embedding(adjacency, n_components=8, mode=None,
random_state=None):
"""Project the sample on the first eigen vectors of the graph Laplacian
The adjacency matrix is used to compute a normalized graph Laplacian
whose spectrum (especially the eigen vectors associated to the
smallest eigen values) has an interpretation in terms of minimal
number of cuts necessary to split the graph into comparably sized
components.
This embedding can also 'work' even if the ``adjacency`` variable is
not strictly the adjacency matrix of a graph but more generally
an affinity or similarity matrix between samples (for instance the
heat kernel of a euclidean distance matrix or a k-NN matrix).
    However, care must be taken to always make the affinity matrix symmetric
    so that the eigenvector decomposition works as expected.
Parameters
-----------
adjacency: array-like or sparse matrix, shape: (n_samples, n_samples)
The adjacency matrix of the graph to embed.
n_components: integer, optional
The dimension of the projection subspace.
mode: {None, 'arpack' or 'amg'}
The eigenvalue decomposition strategy to use. AMG (Algebraic
MultiGrid) is much faster, but requires pyamg to be
installed.
random_state: int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when mode == 'amg'.
Returns
--------
embedding: array, shape: (n_samples, n_components)
The reduced samples
Notes
------
    The graph should contain only one connected component, otherwise the
    results make little sense.
"""
from scipy import sparse
from ..utils.fixes import arpack_eigsh
from scipy.sparse.linalg import lobpcg
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
random_state = check_random_state(random_state)
n_nodes = adjacency.shape[0]
    # XXX: Should we check that the matrix given is symmetric?
if not amg_loaded:
warnings.warn('pyamg not available, using scipy.sparse')
if mode is None:
mode = ('amg' if amg_loaded else 'arpack')
laplacian, dd = graph_laplacian(adjacency,
normed=True, return_diag=True)
if (mode == 'arpack'
or not sparse.isspmatrix(laplacian)
or n_nodes < 5 * n_components):
# lobpcg used with mode='amg' has bugs for low number of nodes
# We need to put the diagonal at zero
if not sparse.isspmatrix(laplacian):
laplacian[::n_nodes + 1] = 0
else:
laplacian = laplacian.tocoo()
diag_idx = (laplacian.row == laplacian.col)
laplacian.data[diag_idx] = 0
# If the matrix has a small number of diagonals (as in the
# case of structured matrices comming from images), the
# dia format might be best suited for matvec products:
n_diags = np.unique(laplacian.row - laplacian.col).size
if n_diags <= 7:
# 3 or less outer diagonals on each side
laplacian = laplacian.todia()
else:
# csr has the fastest matvec and is thus best suited to
# arpack
laplacian = laplacian.tocsr()
lambdas, diffusion_map = arpack_eigsh(-laplacian, k=n_components,
which='LA')
embedding = diffusion_map.T[::-1] * dd
elif mode == 'amg':
# Use AMG to get a preconditionner and speed up the eigenvalue
# problem.
        laplacian = laplacian.astype(float)  # lobpcg needs native floats
ml = smoothed_aggregation_solver(laplacian.tocsr())
X = random_state.rand(laplacian.shape[0], n_components)
X[:, 0] = 1. / dd.ravel()
M = ml.aspreconditioner()
lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
largest=False)
embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1:
            raise ValueError("spectral embedding failed: only one "
                             "eigenvector was computed")
else:
raise ValueError("Unknown value for mode: '%s'."
"Should be 'amg' or 'arpack'" % mode)
return embedding
def spectral_clustering(affinity, k=8, n_components=None, mode=None,
random_state=None, n_init=10):
"""Apply k-means to a projection to the normalized laplacian
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Parameters
-----------
affinity: array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
        embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
        - symmetric k-nearest neighbours connectivity matrix of the samples.
k: integer, optional
Number of clusters to extract.
n_components: integer, optional, default is k
Number of eigen vectors to use for the spectral embedding
mode: {None, 'arpack' or 'amg'}
The eigenvalue decomposition strategy to use. AMG (Algebraic
MultiGrid) is much faster, but requires pyamg to be
installed.
random_state: int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when mode == 'amg'
and by the K-Means initialization.
n_init: int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
Returns
-------
labels: array of integers, shape: n_samples
The labels of the clusters.
centers: array of integers, shape: k
The indices of the cluster centers
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
Notes
------
    The graph should contain only one connected component, otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
random_state = check_random_state(random_state)
n_components = k if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
mode=mode, random_state=random_state)
maps = maps[1:]
_, labels, _ = k_means(maps.T, k, random_state=random_state,
n_init=n_init)
return labels
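# Illustrative usage (a minimal sketch): cluster the two obvious blocks of a
# small affinity matrix. Assumes numpy is imported as np, as above.
# affinity = np.array([[1., .9, .1, .0],
#                      [.9, 1., .0, .1],
#                      [.1, .0, 1., .9],
#                      [.0, .1, .9, 1.]])
# labels = spectral_clustering(affinity, k=2, mode='arpack', random_state=0)
# samples {0, 1} and {2, 3} end up in different clusters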
class SpectralClustering(BaseEstimator):
"""Apply k-means to a projection to the normalized laplacian
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Parameters
-----------
k: integer, optional
The dimension of the projection subspace.
mode: {None, 'arpack' or 'amg'}
The eigenvalue decomposition strategy to use. AMG (Algebraic
MultiGrid) is much faster, but requires pyamg to be installed.
random_state: int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when mode == 'amg'
and by the K-Means initialization.
n_init: int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
Methods
-------
fit(X):
Compute spectral clustering
Attributes
----------
labels_:
Labels of each point
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
"""
def __init__(self, k=8, mode=None, random_state=None, n_init=10):
self.k = k
self.mode = mode
self.random_state = random_state
self.n_init = n_init
def fit(self, X):
"""Compute the spectral clustering from the affinity matrix
Parameters
-----------
X: array-like or sparse matrix, shape: (n_samples, n_samples)
An affinity matrix describing the pairwise similarity of the
            data. It can also be an adjacency matrix of the graph to embed.
X must be symmetric and its entries must be positive or
zero. Zero means that elements have nothing in common,
whereas high values mean that elements are strongly similar.
Notes
------
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values means
very dissimilar elements, it can be transformed in a
similarity matrix that is well suited for the algorithm by
applying the gaussian (heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
"""
self.random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(X, k=self.k, mode=self.mode,
random_state=self.random_state,
n_init=self.n_init)
return self
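# Estimator-style usage (a sketch, mirroring the function above):
# model = SpectralClustering(k=2, random_state=0).fit(affinity)
# model.labels_  # cluster assignment for each sample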
|
ominux/scikit-learn
|
sklearn/cluster/spectral.py
|
Python
|
bsd-3-clause
| 11,249
|
[
"Gaussian"
] |
0c3ded1a17737f262356e0cd46a5b937df0946f3cfa78ac0089a70cf5cced0d0
|
""" Base class for MyProxy and VOMS
"""
import os
import tempfile
import DIRAC
from DIRAC import gConfig, S_OK, S_ERROR
from DIRAC.Core.Utilities import DErrno
from DIRAC.Core.Security.X509Chain import X509Chain # pylint: disable=import-error
from DIRAC.Core.Security import Locations
class BaseSecurity(object):
def __init__(self, server=False, serverCert=False, serverKey=False, timeout=False):
if timeout:
self._secCmdTimeout = timeout
else:
self._secCmdTimeout = 30
if not server:
self._secServer = gConfig.getValue("/DIRAC/VOPolicy/MyProxyServer", "myproxy.cern.ch")
else:
self._secServer = server
ckLoc = Locations.getHostCertificateAndKeyLocation()
if serverCert:
self._secCertLoc = serverCert
else:
if ckLoc:
self._secCertLoc = ckLoc[0]
else:
self._secCertLoc = "%s/etc/grid-security/servercert.pem" % DIRAC.rootPath
if serverKey:
self._secKeyLoc = serverKey
else:
if ckLoc:
self._secKeyLoc = ckLoc[1]
else:
self._secKeyLoc = "%s/etc/grid-security/serverkey.pem" % DIRAC.rootPath
self._secRunningFromTrustedHost = gConfig.getValue("/DIRAC/VOPolicy/MyProxyTrustedHost", "True").lower() in (
"y",
"yes",
"true",
)
self._secMaxProxyHours = gConfig.getValue("/DIRAC/VOPolicy/MyProxyMaxDelegationTime", 168)
def getMyProxyServer(self):
return self._secServer
def getServiceDN(self):
chain = X509Chain()
retVal = chain.loadChainFromFile(self._secCertLoc)
if not retVal["OK"]:
return retVal
return chain.getCertInChain(0)["Value"].getSubjectDN()
def _getExternalCmdEnvironment(self):
return dict(os.environ)
def _unlinkFiles(self, files):
if isinstance(files, (list, tuple)):
for fileName in files:
self._unlinkFiles(fileName)
else:
try:
os.unlink(files)
except Exception:
pass
def _generateTemporalFile(self):
try:
fd, filename = tempfile.mkstemp()
os.close(fd)
except IOError:
return S_ERROR(DErrno.ECTMPF)
return S_OK(filename)
def _getUsername(self, proxyChain):
retVal = proxyChain.getCredentials()
if not retVal["OK"]:
return retVal
credDict = retVal["Value"]
if not credDict["isProxy"]:
return S_ERROR(DErrno.EX509, "chain does not contain a proxy")
if not credDict["validDN"]:
return S_ERROR(DErrno.EDISET, "DN %s is not known in dirac" % credDict["subject"])
if not credDict["validGroup"]:
return S_ERROR(DErrno.EDISET, "Group %s is invalid for DN %s" % (credDict["group"], credDict["subject"]))
mpUsername = "%s:%s" % (credDict["group"], credDict["username"])
return S_OK(mpUsername)
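# Sketch of typical use (the MyProxy/VOMS helpers subclass this base and
# follow DIRAC's S_OK/S_ERROR return convention):
# sec = BaseSecurity(timeout=60)
# result = sec.getServiceDN()
# if result["OK"]:
#     print(result["Value"])  # subject DN of the host certificate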
|
DIRACGrid/DIRAC
|
src/DIRAC/Core/Security/BaseSecurity.py
|
Python
|
gpl-3.0
| 3,081
|
[
"DIRAC"
] |
38c0f3bd1b4be97cb5d896e9a4c0c8d58cd5cb97bb889642ae3c4297011453dc
|
# Copyright (c) 2012, James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt)
__doc__ = """
Inference over Gaussian process latent functions
In all our GP models, the consistency property means that we have a Gaussian
prior over a finite set of points f. This prior is
.. math:: N(f | 0, K)
where K is the kernel matrix.
We also have a likelihood (see GPy.likelihoods) which defines how the data are
related to the latent function: p(y | f). If the likelihood is also a Gaussian,
the inference over f is tractable (see exact_gaussian_inference.py).
If the likelihood object is something other than Gaussian, then exact inference
is not tractable. We then resort to a Laplace approximation (laplace.py) or
expectation propagation (ep.py).
The inference methods return a
:class:`~GPy.inference.latent_function_inference.posterior.Posterior`
instance, which is a simple
structure which contains a summary of the posterior. The model classes can then
use this posterior object for making predictions, optimizing hyper-parameters,
etc.
"""
class LatentFunctionInference(object):
def on_optimization_start(self):
"""
        This function gets called just before the optimization loop starts.
"""
pass
def on_optimization_end(self):
"""
        This function gets called just after the optimization loop ends.
"""
pass
class InferenceMethodList(LatentFunctionInference, list):
def on_optimization_start(self):
for inf in self:
inf.on_optimization_start()
def on_optimization_end(self):
for inf in self:
inf.on_optimization_end()
def __getstate__(self):
state = []
for inf in self:
state.append(inf)
return state
def __setstate__(self, state):
for inf in state:
self.append(inf)
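# Sketch of how the hooks compose (using inference objects imported below):
# methods = InferenceMethodList([ExactGaussianInference(), Laplace()])
# methods.on_optimization_start()  # forwarded to every inference method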
from exact_gaussian_inference import ExactGaussianInference
from laplace import Laplace
from GPy.inference.latent_function_inference.var_dtc import VarDTC
from expectation_propagation import EP
from expectation_propagation_dtc import EPDTC
from dtc import DTC
from fitc import FITC
from var_dtc_parallel import VarDTC_minibatch
# class FullLatentFunctionData(object):
#
#
# class EMLikeLatentFunctionInference(LatentFunctionInference):
# def update_approximation(self):
# """
# This function gets called when the
# """
#
# def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
# """
# Do inference on the latent functions given a covariance function `kern`,
# inputs and outputs `X` and `Y`, inducing_inputs `Z`, and a likelihood `likelihood`.
# Additional metadata for the outputs `Y` can be given in `Y_metadata`.
# """
# raise NotImplementedError, "Abstract base class for full inference"
#
# class VariationalLatentFunctionInference(LatentFunctionInference):
# def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
# """
# Do inference on the latent functions given a covariance function `kern`,
# inputs and outputs `X` and `Y`, inducing_inputs `Z`, and a likelihood `likelihood`.
# Additional metadata for the outputs `Y` can be given in `Y_metadata`.
# """
# raise NotImplementedError, "Abstract base class for full inference"
|
TianpeiLuke/GPy
|
GPy/inference/latent_function_inference/__init__.py
|
Python
|
bsd-3-clause
| 3,383
|
[
"Gaussian"
] |
083308155c65da2099613f6875b80d52c9f0b11310a0c6ded8646a8c9989409d
|
# -*- coding: utf-8 -*-
import re
from numbers import Integral
from collections import namedtuple
__all__ = ["countries"]
try:
basestring
except NameError:
basestring = str
Country = namedtuple('Country',
'name alpha2 alpha3 numeric apolitical_name')
_records = [
Country(u"Afghanistan", "AF", "AFG", "004", u"Afghanistan"),
Country(u"Åland Islands", "AX", "ALA", "248", u"Åland Islands"),
Country(u"Albania", "AL", "ALB", "008", u"Albania"),
Country(u"Algeria", "DZ", "DZA", "012", u"Algeria"),
Country(u"American Samoa", "AS", "ASM", "016", u"American Samoa"),
Country(u"Andorra", "AD", "AND", "020", u"Andorra"),
Country(u"Angola", "AO", "AGO", "024", u"Angola"),
Country(u"Anguilla", "AI", "AIA", "660", u"Anguilla"),
Country(u"Antarctica", "AQ", "ATA", "010", u"Antarctica"),
Country(u"Antigua and Barbuda", "AG", "ATG", "028",
u"Antigua and Barbuda"),
Country(u"Argentina", "AR", "ARG", "032", u"Argentina"),
Country(u"Armenia", "AM", "ARM", "051", u"Armenia"),
Country(u"Aruba", "AW", "ABW", "533", u"Aruba"),
Country(u"Australia", "AU", "AUS", "036", u"Australia"),
Country(u"Austria", "AT", "AUT", "040", u"Austria"),
Country(u"Azerbaijan", "AZ", "AZE", "031", u"Azerbaijan"),
Country(u"Bahamas", "BS", "BHS", "044", u"Bahamas"),
Country(u"Bahrain", "BH", "BHR", "048", u"Bahrain"),
Country(u"Bangladesh", "BD", "BGD", "050", u"Bangladesh"),
Country(u"Barbados", "BB", "BRB", "052", u"Barbados"),
Country(u"Belarus", "BY", "BLR", "112", u"Belarus"),
Country(u"Belgium", "BE", "BEL", "056", u"Belgium"),
Country(u"Belize", "BZ", "BLZ", "084", u"Belize"),
Country(u"Benin", "BJ", "BEN", "204", u"Benin"),
Country(u"Bermuda", "BM", "BMU", "060", u"Bermuda"),
Country(u"Bhutan", "BT", "BTN", "064", u"Bhutan"),
Country(u"Bolivia, Plurinational State of", "BO", "BOL", "068",
u"Bolivia, Plurinational State of"),
Country(u"Bonaire, Sint Eustatius and Saba", "BQ", "BES", "535",
u"Bonaire, Sint Eustatius and Saba"),
Country(u"Bosnia and Herzegovina", "BA", "BIH", "070",
u"Bosnia and Herzegovina"),
Country(u"Botswana", "BW", "BWA", "072", u"Botswana"),
Country(u"Bouvet Island", "BV", "BVT", "074", u"Bouvet Island"),
Country(u"Brazil", "BR", "BRA", "076", u"Brazil"),
Country(u"British Indian Ocean Territory", "IO", "IOT", "086",
u"British Indian Ocean Territory"),
Country(u"Brunei Darussalam", "BN", "BRN", "096",
u"Brunei Darussalam"),
Country(u"Bulgaria", "BG", "BGR", "100", u"Bulgaria"),
Country(u"Burkina Faso", "BF", "BFA", "854", u"Burkina Faso"),
Country(u"Burundi", "BI", "BDI", "108", u"Burundi"),
Country(u"Cambodia", "KH", "KHM", "116", u"Cambodia"),
Country(u"Cameroon", "CM", "CMR", "120", u"Cameroon"),
Country(u"Canada", "CA", "CAN", "124", u"Canada"),
Country(u"Cabo Verde", "CV", "CPV", "132", u"Cabo Verde"),
Country(u"Cayman Islands", "KY", "CYM", "136", u"Cayman Islands"),
Country(u"Central African Republic", "CF", "CAF", "140",
u"Central African Republic"),
Country(u"Chad", "TD", "TCD", "148", u"Chad"),
Country(u"Chile", "CL", "CHL", "152", u"Chile"),
Country(u"China", "CN", "CHN", "156", u"China"),
Country(u"Christmas Island", "CX", "CXR", "162", u"Christmas Island"),
Country(u"Cocos (Keeling) Islands", "CC", "CCK", "166",
u"Cocos (Keeling) Islands"),
Country(u"Colombia", "CO", "COL", "170", u"Colombia"),
Country(u"Comoros", "KM", "COM", "174", u"Comoros"),
Country(u"Congo", "CG", "COG", "178", u"Congo"),
Country(u"Congo, Democratic Republic of the", "CD", "COD", "180",
u"Congo, Democratic Republic of the"),
Country(u"Cook Islands", "CK", "COK", "184", u"Cook Islands"),
Country(u"Costa Rica", "CR", "CRI", "188", u"Costa Rica"),
Country(u"Côte d'Ivoire", "CI", "CIV", "384", u"Côte d'Ivoire"),
Country(u"Croatia", "HR", "HRV", "191", u"Croatia"),
Country(u"Cuba", "CU", "CUB", "192", u"Cuba"),
Country(u"Curaçao", "CW", "CUW", "531", u"Curaçao"),
Country(u"Cyprus", "CY", "CYP", "196", u"Cyprus"),
Country(u"Czechia", "CZ", "CZE", "203", u"Czechia"),
Country(u"Denmark", "DK", "DNK", "208", u"Denmark"),
Country(u"Djibouti", "DJ", "DJI", "262", u"Djibouti"),
Country(u"Dominica", "DM", "DMA", "212", u"Dominica"),
Country(u"Dominican Republic", "DO", "DOM", "214", u"Dominican Republic"),
Country(u"Ecuador", "EC", "ECU", "218", u"Ecuador"),
Country(u"Egypt", "EG", "EGY", "818", u"Egypt"),
Country(u"El Salvador", "SV", "SLV", "222", u"El Salvador"),
Country(u"Equatorial Guinea", "GQ", "GNQ", "226", u"Equatorial Guinea"),
Country(u"Eritrea", "ER", "ERI", "232", u"Eritrea"),
Country(u"Estonia", "EE", "EST", "233", u"Estonia"),
Country(u"Ethiopia", "ET", "ETH", "231", u"Ethiopia"),
Country(u"Falkland Islands (Malvinas)", "FK", "FLK", "238",
u"Falkland Islands (Malvinas)"),
Country(u"Faroe Islands", "FO", "FRO", "234", u"Faroe Islands"),
Country(u"Fiji", "FJ", "FJI", "242", u"Fiji"),
Country(u"Finland", "FI", "FIN", "246", u"Finland"),
Country(u"France", "FR", "FRA", "250", u"France"),
Country(u"French Guiana", "GF", "GUF", "254", u"French Guiana"),
Country(u"French Polynesia", "PF", "PYF", "258", u"French Polynesia"),
Country(u"French Southern Territories", "TF", "ATF", "260",
u"French Southern Territories"),
Country(u"Gabon", "GA", "GAB", "266", u"Gabon"),
Country(u"Gambia", "GM", "GMB", "270", u"Gambia"),
Country(u"Georgia", "GE", "GEO", "268", u"Georgia"),
Country(u"Germany", "DE", "DEU", "276", u"Germany"),
Country(u"Ghana", "GH", "GHA", "288", u"Ghana"),
Country(u"Gibraltar", "GI", "GIB", "292", u"Gibraltar"),
Country(u"Greece", "GR", "GRC", "300", u"Greece"),
Country(u"Greenland", "GL", "GRL", "304", u"Greenland"),
Country(u"Grenada", "GD", "GRD", "308", u"Grenada"),
Country(u"Guadeloupe", "GP", "GLP", "312", u"Guadeloupe"),
Country(u"Guam", "GU", "GUM", "316", u"Guam"),
Country(u"Guatemala", "GT", "GTM", "320", u"Guatemala"),
Country(u"Guernsey", "GG", "GGY", "831", u"Guernsey"),
Country(u"Guinea", "GN", "GIN", "324", u"Guinea"),
Country(u"Guinea-Bissau", "GW", "GNB", "624", u"Guinea-Bissau"),
Country(u"Guyana", "GY", "GUY", "328", u"Guyana"),
Country(u"Haiti", "HT", "HTI", "332", u"Haiti"),
Country(u"Heard Island and McDonald Islands", "HM", "HMD", "334",
u"Heard Island and McDonald Islands"),
Country(u"Holy See", "VA", "VAT", "336", u"Holy See"),
Country(u"Honduras", "HN", "HND", "340", u"Honduras"),
Country(u"Hong Kong", "HK", "HKG", "344", u"Hong Kong"),
Country(u"Hungary", "HU", "HUN", "348", u"Hungary"),
Country(u"Iceland", "IS", "ISL", "352", u"Iceland"),
Country(u"India", "IN", "IND", "356", u"India"),
Country(u"Indonesia", "ID", "IDN", "360", u"Indonesia"),
Country(u"Iran, Islamic Republic of", "IR", "IRN", "364",
u"Iran, Islamic Republic of"),
Country(u"Iraq", "IQ", "IRQ", "368", u"Iraq"),
Country(u"Ireland", "IE", "IRL", "372", u"Ireland"),
Country(u"Isle of Man", "IM", "IMN", "833", u"Isle of Man"),
Country(u"Israel", "IL", "ISR", "376", u"Israel"),
Country(u"Italy", "IT", "ITA", "380", u"Italy"),
Country(u"Jamaica", "JM", "JAM", "388", u"Jamaica"),
Country(u"Japan", "JP", "JPN", "392", u"Japan"),
Country(u"Jersey", "JE", "JEY", "832", u"Jersey"),
Country(u"Jordan", "JO", "JOR", "400", u"Jordan"),
Country(u"Kazakhstan", "KZ", "KAZ", "398", u"Kazakhstan"),
Country(u"Kenya", "KE", "KEN", "404", u"Kenya"),
Country(u"Kiribati", "KI", "KIR", "296", u"Kiribati"),
Country(u"Korea, Democratic People's Republic of", "KP", "PRK", "408",
u"Korea, Democratic People's Republic of"),
Country(u"Korea, Republic of", "KR", "KOR", "410", u"Korea, Republic of"),
Country(u"Kuwait", "KW", "KWT", "414", u"Kuwait"),
Country(u"Kyrgyzstan", "KG", "KGZ", "417", u"Kyrgyzstan"),
Country(u"Lao People's Democratic Republic", "LA", "LAO", "418",
u"Lao People's Democratic Republic"),
Country(u"Latvia", "LV", "LVA", "428", u"Latvia"),
Country(u"Lebanon", "LB", "LBN", "422", u"Lebanon"),
Country(u"Lesotho", "LS", "LSO", "426", u"Lesotho"),
Country(u"Liberia", "LR", "LBR", "430", u"Liberia"),
Country(u"Libya", "LY", "LBY", "434", u"Libya"),
Country(u"Liechtenstein", "LI", "LIE", "438", u"Liechtenstein"),
Country(u"Lithuania", "LT", "LTU", "440", u"Lithuania"),
Country(u"Luxembourg", "LU", "LUX", "442", u"Luxembourg"),
Country(u"Macao", "MO", "MAC", "446", u"Macao"),
Country(u"Macedonia, the former Yugoslav Republic of", "MK", "MKD", "807",
u"Macedonia, the former Yugoslav Republic of"),
Country(u"Madagascar", "MG", "MDG", "450", u"Madagascar"),
Country(u"Malawi", "MW", "MWI", "454", u"Malawi"),
Country(u"Malaysia", "MY", "MYS", "458", u"Malaysia"),
Country(u"Maldives", "MV", "MDV", "462", u"Maldives"),
Country(u"Mali", "ML", "MLI", "466", u"Mali"),
Country(u"Malta", "MT", "MLT", "470", u"Malta"),
Country(u"Marshall Islands", "MH", "MHL", "584", u"Marshall Islands"),
Country(u"Martinique", "MQ", "MTQ", "474", u"Martinique"),
Country(u"Mauritania", "MR", "MRT", "478", u"Mauritania"),
Country(u"Mauritius", "MU", "MUS", "480", u"Mauritius"),
Country(u"Mayotte", "YT", "MYT", "175", u"Mayotte"),
Country(u"Mexico", "MX", "MEX", "484", u"Mexico"),
Country(u"Micronesia, Federated States of", "FM", "FSM", "583",
u"Micronesia, Federated States of"),
Country(u"Moldova, Republic of", "MD", "MDA", "498",
u"Moldova, Republic of"),
Country(u"Monaco", "MC", "MCO", "492", u"Monaco"),
Country(u"Mongolia", "MN", "MNG", "496", u"Mongolia"),
Country(u"Montenegro", "ME", "MNE", "499", u"Montenegro"),
Country(u"Montserrat", "MS", "MSR", "500", u"Montserrat"),
Country(u"Morocco", "MA", "MAR", "504", u"Morocco"),
Country(u"Mozambique", "MZ", "MOZ", "508", u"Mozambique"),
Country(u"Myanmar", "MM", "MMR", "104", u"Myanmar"),
Country(u"Namibia", "NA", "NAM", "516", u"Namibia"),
Country(u"Nauru", "NR", "NRU", "520", u"Nauru"),
Country(u"Nepal", "NP", "NPL", "524", u"Nepal"),
Country(u"Netherlands", "NL", "NLD", "528", u"Netherlands"),
Country(u"New Caledonia", "NC", "NCL", "540", u"New Caledonia"),
Country(u"New Zealand", "NZ", "NZL", "554", u"New Zealand"),
Country(u"Nicaragua", "NI", "NIC", "558", u"Nicaragua"),
Country(u"Niger", "NE", "NER", "562", u"Niger"),
Country(u"Nigeria", "NG", "NGA", "566", u"Nigeria"),
Country(u"Niue", "NU", "NIU", "570", u"Niue"),
Country(u"Norfolk Island", "NF", "NFK", "574", u"Norfolk Island"),
Country(u"Northern Mariana Islands", "MP", "MNP", "580",
u"Northern Mariana Islands"),
Country(u"Norway", "NO", "NOR", "578", u"Norway"),
Country(u"Oman", "OM", "OMN", "512", u"Oman"),
Country(u"Pakistan", "PK", "PAK", "586", u"Pakistan"),
Country(u"Palau", "PW", "PLW", "585", u"Palau"),
Country(u"Palestine, State of", "PS", "PSE", "275",
u"Palestine"),
Country(u"Panama", "PA", "PAN", "591", u"Panama"),
Country(u"Papua New Guinea", "PG", "PNG", "598",
u"Papua New Guinea"),
Country(u"Paraguay", "PY", "PRY", "600", u"Paraguay"),
Country(u"Peru", "PE", "PER", "604", u"Peru"),
Country(u"Philippines", "PH", "PHL", "608", u"Philippines"),
Country(u"Pitcairn", "PN", "PCN", "612", u"Pitcairn"),
Country(u"Poland", "PL", "POL", "616", u"Poland"),
Country(u"Portugal", "PT", "PRT", "620", u"Portugal"),
Country(u"Puerto Rico", "PR", "PRI", "630", u"Puerto Rico"),
Country(u"Qatar", "QA", "QAT", "634", u"Qatar"),
Country(u"Réunion", "RE", "REU", "638", u"Réunion"),
Country(u"Romania", "RO", "ROU", "642", u"Romania"),
Country(u"Russian Federation", "RU", "RUS", "643",
u"Russian Federation"),
Country(u"Rwanda", "RW", "RWA", "646", u"Rwanda"),
Country(u"Saint Barthélemy", "BL", "BLM", "652",
u"Saint Barthélemy"),
Country(u"Saint Helena, Ascension and Tristan da Cunha",
"SH", "SHN", "654",
u"Saint Helena, Ascension and Tristan da Cunha"),
Country(u"Saint Kitts and Nevis", "KN", "KNA", "659",
u"Saint Kitts and Nevis"),
Country(u"Saint Lucia", "LC", "LCA", "662", u"Saint Lucia"),
Country(u"Saint Martin (French part)", "MF", "MAF", "663",
u"Saint Martin (French part)"),
Country(u"Saint Pierre and Miquelon", "PM", "SPM", "666",
u"Saint Pierre and Miquelon"),
Country(u"Saint Vincent and the Grenadines", "VC", "VCT", "670",
u"Saint Vincent and the Grenadines"),
Country(u"Samoa", "WS", "WSM", "882", u"Samoa"),
Country(u"San Marino", "SM", "SMR", "674", u"San Marino"),
Country(u"Sao Tome and Principe", "ST", "STP", "678",
u"Sao Tome and Principe"),
Country(u"Saudi Arabia", "SA", "SAU", "682", u"Saudi Arabia"),
Country(u"Senegal", "SN", "SEN", "686", u"Senegal"),
Country(u"Serbia", "RS", "SRB", "688", u"Serbia"),
Country(u"Seychelles", "SC", "SYC", "690", u"Seychelles"),
Country(u"Sierra Leone", "SL", "SLE", "694", u"Sierra Leone"),
Country(u"Singapore", "SG", "SGP", "702", u"Singapore"),
Country(u"Sint Maarten (Dutch part)", "SX", "SXM", "534",
u"Sint Maarten (Dutch part)"),
Country(u"Slovakia", "SK", "SVK", "703", u"Slovakia"),
Country(u"Slovenia", "SI", "SVN", "705", u"Slovenia"),
Country(u"Solomon Islands", "SB", "SLB", "090", u"Solomon Islands"),
Country(u"Somalia", "SO", "SOM", "706", u"Somalia"),
Country(u"South Africa", "ZA", "ZAF", "710", u"South Africa"),
Country(u"South Georgia and the South Sandwich Islands",
"GS", "SGS", "239",
u"South Georgia and the South Sandwich Islands",),
Country(u"South Sudan", "SS", "SSD", "728", u"South Sudan"),
Country(u"Spain", "ES", "ESP", "724", u"Spain"),
Country(u"Sri Lanka", "LK", "LKA", "144", u"Sri Lanka"),
Country(u"Sudan", "SD", "SDN", "729", u"Sudan"),
Country(u"Suriname", "SR", "SUR", "740", u"Suriname"),
Country(u"Svalbard and Jan Mayen", "SJ", "SJM", "744",
u"Svalbard and Jan Mayen"),
Country(u"Swaziland", "SZ", "SWZ", "748", u"Swaziland"),
Country(u"Sweden", "SE", "SWE", "752", u"Sweden"),
Country(u"Switzerland", "CH", "CHE", "756", u"Switzerland"),
Country(u"Syrian Arab Republic", "SY", "SYR", "760",
u"Syrian Arab Republic"),
Country(u"Taiwan, Province of China", "TW", "TWN", "158",
u"Taiwan"),
Country(u"Tajikistan", "TJ", "TJK", "762", u"Tajikistan"),
Country(u"Tanzania, United Republic of", "TZ", "TZA", "834",
u"Tanzania, United Republic of"),
Country(u"Thailand", "TH", "THA", "764", u"Thailand"),
Country(u"Timor-Leste", "TL", "TLS", "626", u"Timor-Leste"),
Country(u"Togo", "TG", "TGO", "768", u"Togo"),
Country(u"Tokelau", "TK", "TKL", "772", u"Tokelau"),
Country(u"Tonga", "TO", "TON", "776", u"Tonga"),
Country(u"Trinidad and Tobago", "TT", "TTO", "780",
u"Trinidad and Tobago"),
Country(u"Tunisia", "TN", "TUN", "788", u"Tunisia"),
Country(u"Turkey", "TR", "TUR", "792", u"Turkey"),
Country(u"Turkmenistan", "TM", "TKM", "795", u"Turkmenistan"),
Country(u"Turks and Caicos Islands", "TC", "TCA", "796",
u"Turks and Caicos Islands"),
Country(u"Tuvalu", "TV", "TUV", "798", u"Tuvalu"),
Country(u"Uganda", "UG", "UGA", "800", u"Uganda"),
Country(u"Ukraine", "UA", "UKR", "804", u"Ukraine"),
Country(u"United Arab Emirates", "AE", "ARE", "784",
u"United Arab Emirates"),
Country(u"United Kingdom of Great Britain and Northern Ireland",
"GB", "GBR", "826",
u"United Kingdom of Great Britain and Northern Ireland"),
Country(u"United States", "US", "USA", "840", u"United States"),
Country(u"United States Minor Outlying Islands", "UM", "UMI", "581",
u"United States Minor Outlying Islands"),
Country(u"Uruguay", "UY", "URY", "858", u"Uruguay"),
Country(u"Uzbekistan", "UZ", "UZB", "860", u"Uzbekistan"),
Country(u"Vanuatu", "VU", "VUT", "548", u"Vanuatu"),
Country(u"Venezuela, Bolivarian Republic of", "VE", "VEN", "862",
u"Venezuela, Bolivarian Republic of"),
Country(u"Viet Nam", "VN", "VNM", "704", u"Viet Nam"),
Country(u"Virgin Islands, British", "VG", "VGB", "092",
u"Virgin Islands, British"),
Country(u"Virgin Islands, U.S.", "VI", "VIR", "850",
u"Virgin Islands, U.S."),
Country(u"Wallis and Futuna", "WF", "WLF", "876", u"Wallis and Futuna"),
Country(u"Western Sahara", "EH", "ESH", "732", u"Western Sahara"),
Country(u"Yemen", "YE", "YEM", "887", u"Yemen"),
Country(u"Zambia", "ZM", "ZMB", "894", u"Zambia"),
Country(u"Zimbabwe", "ZW", "ZWE", "716", u"Zimbabwe")]
def _build_index(idx):
return dict((r[idx].upper(), r) for r in _records)
# Internal country indexes
_by_alpha2 = _build_index(1)
_by_alpha3 = _build_index(2)
_by_numeric = _build_index(3)
_by_name = _build_index(0)
_by_apolitical_name = _build_index(4)
# Documented accessors for the country indexes
countries_by_alpha2 = _by_alpha2
countries_by_alpha3 = _by_alpha3
countries_by_numeric = _by_numeric
countries_by_name = _by_name
countries_by_apolitical_name = _by_apolitical_name
NOT_FOUND = object()
class _CountryLookup(object):
def get(self, key, default=NOT_FOUND):
if isinstance(key, Integral):
r = _by_numeric.get("%03d" % key, default)
elif isinstance(key, basestring):
k = key.upper()
if len(k) == 2:
r = _by_alpha2.get(k, default)
elif len(k) == 3 and re.match(r"[0-9]{3}", k):
r = _by_numeric.get(k, default)
elif len(k) == 3:
r = _by_alpha3.get(k, default)
elif k in _by_name:
r = _by_name.get(k, default)
else:
r = _by_apolitical_name.get(k, default)
else:
r = default
        if r is NOT_FOUND:
raise KeyError(key)
return r
__getitem__ = get
def __len__(self):
return len(_records)
def __iter__(self):
return iter(_records)
def __contains__(self, item):
try:
self.get(item)
return True
except KeyError:
return False
countries = _CountryLookup()
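# Lookup sketch: keys may be alpha-2, alpha-3, numeric (int or 3-digit
# string), full name, or apolitical name; matching is case-insensitive.
# countries.get("us").alpha3 == "USA"
# countries["BWA"].name == u"Botswana"
# countries[840] is countries["840"]  # ints are zero-padded to 3 digits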
|
repotvsupertuga/tvsupertuga.repository
|
script.module.streamlink.base/resources/lib/streamlink/utils/iso3166/__init__.py
|
Python
|
gpl-2.0
| 18,795
|
[
"BWA"
] |
2562343df29eaf5934602681a227db87060ac81b8582e9e6c8f502261a1aee55
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
if __name__ == '__main__':
import argparse
from Bio import AlignIO
from Bio.Align import MultipleSeqAlignment
from Bio.SeqRecord import SeqRecord
from krpy import krio
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_file', type=unicode,
help='Input alignment file path.')
parser.add_argument('-o', '--output_file', type=unicode,
help='Output alignment file path.')
parser.add_argument('-f', '--format', type=unicode,
help='Alignment file format.')
parser.add_argument('-n', '--names', type=unicode,
help='')
parser.add_argument('-a', '--action', type=unicode,
help='keep or remove')
# fasta, phylip-relaxed
# http://biopython.org/wiki/AlignIO
input_file = None
output_file = None
format = None
args = parser.parse_args()
if args.input_file:
input_file = args.input_file
if args.output_file:
output_file = args.output_file
if args.format:
format = args.format
if args.names:
names = krio.read_table_file(
path=args.names,
has_headers=False,
headers=None,
delimiter=',',
quotechar=None,
stripchar='"',
rettype='set')
if not names:
names = args.names.split(',')
if args.action:
action = args.action
if input_file and output_file and format:
alignment = AlignIO.read(input_file, format)
good_sequences = list()
if action == 'remove':
for a in alignment:
if a.id not in names:
sequence_record = SeqRecord(seq=a.seq, id=a.id, name='', description='')
good_sequences.append(sequence_record)
else:
print('Removing ' + a.id)
elif action == 'keep':
for a in alignment:
if a.id in names:
sequence_record = SeqRecord(seq=a.seq, id=a.id, name='', description='')
good_sequences.append(sequence_record)
else:
print('Removing ' + a.id)
new_aln = MultipleSeqAlignment(good_sequences)
AlignIO.write(new_aln, output_file, format)
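# Example invocation (a sketch; file names and ids are hypothetical):
#   python remove-keep-given-seqs-in-alignment.py -i aln.phy -o out.phy \
#       -f phylip-relaxed -n seq1,seq2 -a remove
# -n also accepts the path to a file of ids, parsed with krio.read_table_file.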
|
karolisr/krpy
|
krpy/tools/remove-keep-given-seqs-in-alignment.py
|
Python
|
gpl-3.0
| 2,490
|
[
"Biopython"
] |
03de2967d9b951435e96de61c5d05fe6f44632c668dcca4dcd01df7f259fc250
|
import os
from ase import Atoms
from ase.io import read, write
from ase.calculators.exciting import Exciting
from ase.units import Bohr, Hartree
a = Atoms('N3O',
[(0, 0, 0), (1, 0, 0), (0, 0, 1), (0.5, 0.5, 0.5)],
pbc=True)
write('geo.exi', a)
b = read('geo.exi')
print(a)
print(a.get_positions())
print(b)
print(b.get_positions())
calculator = Exciting(dir='excitingtestfiles',
kpts=(4, 4, 3),
maxscl=3,
#bin='/fshome/chm/git/exciting/bin/excitingser'
)
|
grhawk/ASE
|
tools/ase/test/exciting/exciting.py
|
Python
|
gpl-2.0
| 564
|
[
"ASE",
"exciting"
] |
79d9a4be31b20604ffce758e3af4c3a598f376632041b6f9857d16658337c0c0
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 16.3.8
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'}
DOCUMENTATION = '''
---
module: avi_sslprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of SSLProfile Avi RESTful Object
description:
- This module is used to configure SSLProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
accepted_ciphers:
description:
- Ciphers suites represented as defined by U(http://www.openssl.org/docs/apps/ciphers.html).
- Default value when not specified in API or module is interpreted by Avi Controller as AES:3DES:RC4.
accepted_versions:
description:
- Set of versions accepted by the server.
cipher_enums:
description:
- Cipher_enums of sslprofile.
description:
description:
- User defined description for the object.
dhparam:
description:
- Dh parameters used in ssl.
- At this time, it is not configurable and is set to 2048 bits.
enable_ssl_session_reuse:
description:
- Enable ssl session re-use.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
name:
description:
- Name of the object.
required: true
prefer_client_cipher_ordering:
description:
- Prefer the ssl cipher ordering presented by the client during the ssl handshake over the one specified in the ssl profile.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
send_close_notify:
description:
- Send 'close notify' alert message for a clean shutdown of the ssl connection.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
ssl_rating:
description:
- Sslrating settings for sslprofile.
ssl_session_timeout:
description:
- The amount of time before an ssl session expires.
- Default value when not specified in API or module is interpreted by Avi Controller as 86400.
tags:
description:
- List of tag.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create SSL profile with list of allowed ciphers
avi_sslprofile:
controller: ''
username: ''
password: ''
accepted_ciphers: >
ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA:
ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:
AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:
AES256-SHA:DES-CBC3-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:
ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA
accepted_versions:
- type: SSL_VERSION_TLS1
- type: SSL_VERSION_TLS1_1
- type: SSL_VERSION_TLS1_2
cipher_enums:
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384
- TLS_RSA_WITH_AES_128_GCM_SHA256
- TLS_RSA_WITH_AES_256_GCM_SHA384
- TLS_RSA_WITH_AES_128_CBC_SHA256
- TLS_RSA_WITH_AES_256_CBC_SHA256
- TLS_RSA_WITH_AES_128_CBC_SHA
- TLS_RSA_WITH_AES_256_CBC_SHA
- TLS_RSA_WITH_3DES_EDE_CBC_SHA
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
name: PFS-BOTH-RSA-EC
send_close_notify: true
ssl_rating:
compatibility_rating: SSL_SCORE_EXCELLENT
performance_rating: SSL_SCORE_EXCELLENT
security_score: '100.0'
tenant_ref: Demo
'''
RETURN = '''
obj:
description: SSLProfile (api/sslprofile) object
returned: success, changed
type: dict
'''
from pkg_resources import parse_version
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.avi import avi_common_argument_spec
HAS_AVI = True
try:
import avi.sdk
sdk_version = getattr(avi.sdk, '__version__', None)
if ((sdk_version is None) or (sdk_version and
(parse_version(sdk_version) < parse_version('16.3.5.post1')))):
# It allows the __version__ to be '' as that value is used in development builds
raise ImportError
from avi.sdk.utils.ansible_utils import avi_ansible_api
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
accepted_ciphers=dict(type='str',),
accepted_versions=dict(type='list',),
cipher_enums=dict(type='list',),
description=dict(type='str',),
dhparam=dict(type='str',),
enable_ssl_session_reuse=dict(type='bool',),
name=dict(type='str', required=True),
prefer_client_cipher_ordering=dict(type='bool',),
send_close_notify=dict(type='bool',),
ssl_rating=dict(type='dict',),
ssl_session_timeout=dict(type='int',),
tags=dict(type='list',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=16.3.5.post1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'sslprofile',
set([]))
if __name__ == '__main__':
main()
|
0x46616c6b/ansible
|
lib/ansible/modules/network/avi/avi_sslprofile.py
|
Python
|
gpl-3.0
| 7,426
|
[
"VisIt"
] |
89e9dd372040398ddda83e93a8b52b954c09f32a53f7062c0d4d4c312d1bb9bd
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS.
"""
from datetime import datetime, timedelta
from flaky import flaky
from textwrap import dedent
from unittest import skip
from nose.plugins.attrib import attr
import pytz
import urllib
from bok_choy.promise import EmptyPromise
from ..helpers import (
UniqueCourseTest,
EventsTestMixin,
load_data_str,
generate_course_key,
select_option_by_value,
element_has_text,
select_option_by_text,
get_selected_option_text
)
from ...pages.lms import BASE_URL
from ...pages.lms.account_settings import AccountSettingsPage
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.create_mode import ModeCreationPage
from ...pages.common.logout import LogoutPage
from ...pages.lms.course_info import CourseInfoPage
from ...pages.lms.tab_nav import TabNavPage
from ...pages.lms.course_nav import CourseNavPage
from ...pages.lms.progress import ProgressPage
from ...pages.lms.dashboard import DashboardPage
from ...pages.lms.problem import ProblemPage
from ...pages.lms.video.video import VideoPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.studio.settings import SettingsPage
from ...pages.lms.login_and_register import CombinedLoginAndRegisterPage, ResetPasswordPage
from ...pages.lms.track_selection import TrackSelectionPage
from ...pages.lms.pay_and_verify import PaymentAndVerificationFlow, FakePaymentPage
from ...pages.lms.course_wiki import CourseWikiPage, CourseWikiEditPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc, CourseUpdateDesc
@attr('shard_8')
class ForgotPasswordPageTest(UniqueCourseTest):
"""
    Test that the forgot-password form is rendered if the URL contains
    'forgot-password-modal' in its hash.
"""
def setUp(self):
""" Initialize the page object """
super(ForgotPasswordPageTest, self).setUp()
self.user_info = self._create_user()
self.reset_password_page = ResetPasswordPage(self.browser)
def _create_user(self):
"""
Create a unique user
"""
auto_auth = AutoAuthPage(self.browser).visit()
user_info = auto_auth.user_info
LogoutPage(self.browser).visit()
return user_info
def test_reset_password_form_visibility(self):
# Navigate to the password reset page
self.reset_password_page.visit()
# Expect that reset password form is visible on the page
self.assertTrue(self.reset_password_page.is_form_visible())
def test_reset_password_confirmation_box_visibility(self):
# Navigate to the password reset page
self.reset_password_page.visit()
# Navigate to the password reset form and try to submit it
self.reset_password_page.fill_password_reset_form(self.user_info['email'])
self.reset_password_page.is_success_visible(".submission-success")
# Expect that we're shown a success message
self.assertIn("Password Reset Email Sent", self.reset_password_page.get_success_message())
@attr('shard_8')
class LoginFromCombinedPageTest(UniqueCourseTest):
"""Test that we can log in using the combined login/registration page.
Also test that we can request a password reset from the combined
login/registration page.
"""
def setUp(self):
"""Initialize the page objects and create a test course. """
super(LoginFromCombinedPageTest, self).setUp()
self.login_page = CombinedLoginAndRegisterPage(
self.browser,
start_page="login",
course_id=self.course_id
)
self.dashboard_page = DashboardPage(self.browser)
# Create a course to enroll in
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
def test_login_success(self):
# Create a user account
email, password = self._create_unique_user()
# Navigate to the login page and try to log in
self.login_page.visit().login(email=email, password=password)
# Expect that we reach the dashboard and we're auto-enrolled in the course
course_names = self.dashboard_page.wait_for_page().available_courses
self.assertIn(self.course_info["display_name"], course_names)
def test_login_failure(self):
# Navigate to the login page
self.login_page.visit()
# User account does not exist
self.login_page.login(email="nobody@nowhere.com", password="password")
# Verify that an error is displayed
self.assertIn("Email or password is incorrect.", self.login_page.wait_for_errors())
def test_toggle_to_register_form(self):
self.login_page.visit().toggle_form()
self.assertEqual(self.login_page.current_form, "register")
@flaky # ECOM-1165
def test_password_reset_success(self):
# Create a user account
email, password = self._create_unique_user() # pylint: disable=unused-variable
# Navigate to the password reset form and try to submit it
self.login_page.visit().password_reset(email=email)
# Expect that we're shown a success message
self.assertIn("Password Reset Email Sent", self.login_page.wait_for_success())
def test_password_reset_failure(self):
# Navigate to the password reset form
self.login_page.visit()
# User account does not exist
self.login_page.password_reset(email="nobody@nowhere.com")
# Expect that we're shown a failure message
self.assertIn(
"No user with the provided email address exists.",
self.login_page.wait_for_errors()
)
def test_third_party_login(self):
"""
Test that we can login using third party credentials, and that the
third party account gets linked to the edX account.
"""
# Create a user account
email, password = self._create_unique_user()
# Navigate to the login page
self.login_page.visit()
self.assertScreenshot('#login .login-providers', 'login-providers')
# Try to log in using "Dummy" provider
self.login_page.click_third_party_dummy_provider()
# The user will be redirected somewhere and then back to the login page:
msg_text = self.login_page.wait_for_auth_status_message()
self.assertIn("You have successfully signed into Dummy", msg_text)
self.assertIn("To link your accounts, sign in now using your edX password", msg_text)
# Now login with username and password:
self.login_page.login(email=email, password=password)
# Expect that we reach the dashboard and we're auto-enrolled in the course
course_names = self.dashboard_page.wait_for_page().available_courses
self.assertIn(self.course_info["display_name"], course_names)
# Now logout and check that we can log back in instantly (because the account is linked):
LogoutPage(self.browser).visit()
self.login_page.visit()
self.login_page.click_third_party_dummy_provider()
self.dashboard_page.wait_for_page()
self._unlink_dummy_account()
def test_hinted_login(self):
""" Test the login page when coming from course URL that specified which third party provider to use """
# Create a user account and link it to third party auth with the dummy provider:
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self._link_dummy_account()
LogoutPage(self.browser).visit()
# When not logged in, try to load a course URL that includes the provider hint ?tpa_hint=...
course_page = CoursewarePage(self.browser, self.course_id)
self.browser.get(course_page.url + '?tpa_hint=oa2-dummy')
# We should now be redirected to the login page
self.login_page.wait_for_page()
self.assertIn("Would you like to sign in using your Dummy credentials?", self.login_page.hinted_login_prompt)
self.assertScreenshot('#hinted-login-form', 'hinted-login')
self.login_page.click_third_party_dummy_provider()
# We should now be redirected to the course page
course_page.wait_for_page()
self._unlink_dummy_account()
def _link_dummy_account(self):
""" Go to Account Settings page and link the user's account to the Dummy provider """
account_settings = AccountSettingsPage(self.browser).visit()
field_id = "auth-oa2-dummy"
account_settings.wait_for_field(field_id)
self.assertEqual("Link", account_settings.link_title_for_link_field(field_id))
account_settings.click_on_link_in_link_field(field_id)
account_settings.wait_for_link_title_for_link_field(field_id, "Unlink")
def _unlink_dummy_account(self):
""" Verify that the 'Dummy' third party auth provider is linked, then unlink it """
# This must be done after linking the account, or we'll get cross-test side effects
account_settings = AccountSettingsPage(self.browser).visit()
field_id = "auth-oa2-dummy"
account_settings.wait_for_field(field_id)
self.assertEqual("Unlink", account_settings.link_title_for_link_field(field_id))
account_settings.click_on_link_in_link_field(field_id)
account_settings.wait_for_message(field_id, "Successfully unlinked")
def _create_unique_user(self):
"""
Create a new user with a unique name and email.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
email = "{user}@example.com".format(user=username)
password = "password"
# Create the user (automatically logs us in)
AutoAuthPage(
self.browser,
username=username,
email=email,
password=password
).visit()
# Log out
LogoutPage(self.browser).visit()
return (email, password)
@attr('shard_8')
class RegisterFromCombinedPageTest(UniqueCourseTest):
"""Test that we can register a new user from the combined login/registration page. """
def setUp(self):
"""Initialize the page objects and create a test course. """
super(RegisterFromCombinedPageTest, self).setUp()
self.register_page = CombinedLoginAndRegisterPage(
self.browser,
start_page="register",
course_id=self.course_id
)
self.dashboard_page = DashboardPage(self.browser)
# Create a course to enroll in
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
def test_register_success(self):
# Navigate to the registration page
self.register_page.visit()
# Fill in the form and submit it
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
email = "{user}@example.com".format(user=username)
self.register_page.register(
email=email,
password="password",
username=username,
full_name="Test User",
country="US",
favorite_movie="Mad Max: Fury Road",
terms_of_service=True
)
# Expect that we reach the dashboard and we're auto-enrolled in the course
course_names = self.dashboard_page.wait_for_page().available_courses
self.assertIn(self.course_info["display_name"], course_names)
def test_register_failure(self):
# Navigate to the registration page
self.register_page.visit()
# Enter a blank for the username field, which is required
# Don't agree to the terms of service / honor code.
# Don't specify a country code, which is required.
# Don't specify a favorite movie.
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
email = "{user}@example.com".format(user=username)
self.register_page.register(
email=email,
password="password",
username="",
full_name="Test User",
terms_of_service=False
)
# Verify that the expected errors are displayed.
errors = self.register_page.wait_for_errors()
self.assertIn(u'Please enter your Public username.', errors)
self.assertIn(u'You must agree to the edX Terms of Service and Honor Code.', errors)
self.assertIn(u'Please select your Country.', errors)
self.assertIn(u'Please tell us your favorite movie.', errors)
def test_toggle_to_login_form(self):
self.register_page.visit().toggle_form()
self.assertEqual(self.register_page.current_form, "login")
def test_third_party_register(self):
"""
Test that we can register using third party credentials, and that the
third party account gets linked to the edX account.
"""
# Navigate to the register page
self.register_page.visit()
self.assertScreenshot('#register .login-providers', 'register-providers')
# Try to authenticate using the "Dummy" provider
self.register_page.click_third_party_dummy_provider()
# The user will be redirected somewhere and then back to the register page:
msg_text = self.register_page.wait_for_auth_status_message()
self.assertEqual(self.register_page.current_form, "register")
self.assertIn("You've successfully signed into Dummy", msg_text)
self.assertIn("We just need a little more information", msg_text)
# Now the form should be pre-filled with the data from the Dummy provider:
self.assertEqual(self.register_page.email_value, "adama@fleet.colonies.gov")
self.assertEqual(self.register_page.full_name_value, "William Adama")
self.assertIn("Galactica1", self.register_page.username_value)
# Set country, accept the terms, and submit the form:
self.register_page.register(country="US", favorite_movie="Battlestar Galactica", terms_of_service=True)
# Expect that we reach the dashboard and we're auto-enrolled in the course
course_names = self.dashboard_page.wait_for_page().available_courses
self.assertIn(self.course_info["display_name"], course_names)
# Now logout and check that we can log back in instantly (because the account is linked):
LogoutPage(self.browser).visit()
login_page = CombinedLoginAndRegisterPage(self.browser, start_page="login")
login_page.visit()
login_page.click_third_party_dummy_provider()
self.dashboard_page.wait_for_page()
# Now unlink the account (To test the account settings view and also to prevent cross-test side effects)
account_settings = AccountSettingsPage(self.browser).visit()
field_id = "auth-oa2-dummy"
account_settings.wait_for_field(field_id)
self.assertEqual("Unlink", account_settings.link_title_for_link_field(field_id))
account_settings.click_on_link_in_link_field(field_id)
account_settings.wait_for_message(field_id, "Successfully unlinked")
@attr('shard_8')
class PayAndVerifyTest(EventsTestMixin, UniqueCourseTest):
"""Test that we can proceed through the payment and verification flow."""
def setUp(self):
"""Initialize the test.
Create the necessary page objects, create a test course and configure its modes,
create a user and log them in.
"""
super(PayAndVerifyTest, self).setUp()
self.track_selection_page = TrackSelectionPage(self.browser, self.course_id)
self.payment_and_verification_flow = PaymentAndVerificationFlow(self.browser, self.course_id)
self.immediate_verification_page = PaymentAndVerificationFlow(self.browser, self.course_id, entry_point='verify-now')
self.upgrade_page = PaymentAndVerificationFlow(self.browser, self.course_id, entry_point='upgrade')
self.fake_payment_page = FakePaymentPage(self.browser, self.course_id)
self.dashboard_page = DashboardPage(self.browser)
# Create a course
CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
).install()
# Add an honor mode to the course
ModeCreationPage(self.browser, self.course_id).visit()
# Add a verified mode to the course
ModeCreationPage(self.browser, self.course_id, mode_slug=u'verified', mode_display_name=u'Verified Certificate', min_price=10, suggested_prices='10,20').visit()
@skip("Flaky 02/02/2015")
def test_immediate_verification_enrollment(self):
# Create a user and log them in
student_id = AutoAuthPage(self.browser).visit().get_user_id()
# Navigate to the track selection page
self.track_selection_page.visit()
# Enter the payment and verification flow by choosing to enroll as verified
self.track_selection_page.enroll('verified')
# Proceed to the fake payment page
self.payment_and_verification_flow.proceed_to_payment()
# Submit payment
self.fake_payment_page.submit_payment()
# Proceed to verification
self.payment_and_verification_flow.immediate_verification()
# Take face photo and proceed to the ID photo step
self.payment_and_verification_flow.webcam_capture()
self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)
# Take ID photo and proceed to the review photos step
self.payment_and_verification_flow.webcam_capture()
self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)
# Submit photos and proceed to the enrollment confirmation step
self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)
# Navigate to the dashboard
self.dashboard_page.visit()
# Expect that we're enrolled as verified in the course
enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
self.assertEqual(enrollment_mode, 'verified')
def test_deferred_verification_enrollment(self):
# Create a user and log them in
student_id = AutoAuthPage(self.browser).visit().get_user_id()
# Navigate to the track selection page
self.track_selection_page.visit()
# Enter the payment and verification flow by choosing to enroll as verified
self.track_selection_page.enroll('verified')
# Proceed to the fake payment page
self.payment_and_verification_flow.proceed_to_payment()
# Submit payment
self.fake_payment_page.submit_payment()
# Navigate to the dashboard
self.dashboard_page.visit()
# Expect that we're enrolled as verified in the course
enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
self.assertEqual(enrollment_mode, 'verified')
def test_enrollment_upgrade(self):
# Create a user, log them in, and enroll them in the honor mode
student_id = AutoAuthPage(self.browser, course_id=self.course_id).visit().get_user_id()
# Navigate to the dashboard
self.dashboard_page.visit()
# Expect that we're enrolled as honor in the course
enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
self.assertEqual(enrollment_mode, 'honor')
# Click the upsell button on the dashboard
self.dashboard_page.upgrade_enrollment(self.course_info["display_name"], self.upgrade_page)
# Select the first contribution option appearing on the page
self.upgrade_page.indicate_contribution()
# Proceed to the fake payment page
self.upgrade_page.proceed_to_payment()
def only_enrollment_events(event):
"""Filter out all non-enrollment events."""
return event['event_type'].startswith('edx.course.enrollment.')
expected_events = [
{
'event_type': 'edx.course.enrollment.mode_changed',
'event': {
'user_id': int(student_id),
'mode': 'verified',
}
}
]
with self.assert_events_match_during(event_filter=only_enrollment_events, expected_events=expected_events):
# Submit payment
self.fake_payment_page.submit_payment()
# Navigate to the dashboard
self.dashboard_page.visit()
# Expect that we're enrolled as verified in the course
enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
self.assertEqual(enrollment_mode, 'verified')
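# --- Illustrative sketch (not part of the original tests) --------------------
# assert_events_match_during (EventsTestMixin) captures tracking events emitted
# inside the `with` block, keeps only those passing event_filter, and matches
# them against the expected partial dicts. A minimal, self-contained rendition
# of the filtering step, assuming events are plain dicts with an 'event_type':
def _enrollment_event_filter_sketch():
    """Hypothetical demonstration of the only_enrollment_events predicate."""
    captured = [
        {'event_type': 'edx.course.enrollment.mode_changed', 'event': {'mode': 'verified'}},
        {'event_type': 'page_view', 'event': {}},
    ]
    kept = [e for e in captured if e['event_type'].startswith('edx.course.enrollment.')]
    assert [e['event_type'] for e in kept] == ['edx.course.enrollment.mode_changed']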
@attr('shard_1')
class CourseWikiTest(UniqueCourseTest):
"""
Tests that verify the course wiki.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(CourseWikiTest, self).setUp()
# self.course_info['number'] must be shorter since we are accessing the wiki. See TNL-1751
self.course_info['number'] = self.unique_id[0:6]
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.course_wiki_page = CourseWikiPage(self.browser, self.course_id)
self.course_wiki_edit_page = CourseWikiEditPage(self.browser, self.course_id, self.course_info)
self.tab_nav = TabNavPage(self.browser)
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
# Access course wiki page
self.course_info_page.visit()
self.tab_nav.go_to_tab('Wiki')
def _open_editor(self):
self.course_wiki_page.open_editor()
self.course_wiki_edit_page.wait_for_page()
def test_edit_course_wiki(self):
"""
        The wiki page is editable by students by default.
        After accessing the course wiki,
        replace the content of the default page,
        then confirm the new content has been saved.
"""
content = "hello"
self._open_editor()
self.course_wiki_edit_page.replace_wiki_content(content)
self.course_wiki_edit_page.save_wiki_content()
actual_content = unicode(self.course_wiki_page.q(css='.wiki-article p').text[0])
self.assertEqual(content, actual_content)
@attr('shard_1')
class HighLevelTabTest(UniqueCourseTest):
"""
Tests that verify each of the high-level tabs available within a course.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(HighLevelTabTest, self).setUp()
# self.course_info['number'] must be shorter since we are accessing the wiki. See TNL-1751
self.course_info['number'] = self.unique_id[0:6]
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.progress_page = ProgressPage(self.browser, self.course_id)
self.course_nav = CourseNavPage(self.browser)
self.tab_nav = TabNavPage(self.browser)
self.video = VideoPage(self.browser)
# Install a course with sections/problems, tabs, updates, and handouts
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
course_fix.add_update(
CourseUpdateDesc(date='January 29, 2014', content='Test course update1')
)
course_fix.add_handout('demoPDF.pdf')
course_fix.add_children(
XBlockFixtureDesc('static_tab', 'Test Static Tab', data=r"static tab data with mathjax \(E=mc^2\)"),
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
XBlockFixtureDesc('problem', 'Test Problem 2', data=load_data_str('formula_problem.xml')),
XBlockFixtureDesc('html', 'Test HTML'),
)
),
XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 2'),
XBlockFixtureDesc('sequential', 'Test Subsection 3'),
)
).install()
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_course_info(self):
"""
Navigate to the course info page.
"""
# Navigate to the course info page from the progress page
self.progress_page.visit()
self.tab_nav.go_to_tab('Home')
# Expect just one update
self.assertEqual(self.course_info_page.num_updates, 1)
# Expect a link to the demo handout pdf
handout_links = self.course_info_page.handout_links
self.assertEqual(len(handout_links), 1)
self.assertIn('demoPDF.pdf', handout_links[0])
def test_progress(self):
"""
Navigate to the progress page.
"""
# Navigate to the progress page from the info page
self.course_info_page.visit()
self.tab_nav.go_to_tab('Progress')
        # We haven't answered any problems yet, so all scores are zero.
        # Only problems have scores, so there should be exactly 2 (earned, possible) tuples.
CHAPTER = 'Test Section'
SECTION = 'Test Subsection'
EXPECTED_SCORES = [(0, 3), (0, 1)]
actual_scores = self.progress_page.scores(CHAPTER, SECTION)
self.assertEqual(actual_scores, EXPECTED_SCORES)
def test_static_tab(self):
"""
Navigate to a static tab (course content)
"""
# From the course info page, navigate to the static tab
self.course_info_page.visit()
self.tab_nav.go_to_tab('Test Static Tab')
self.assertTrue(self.tab_nav.is_on_tab('Test Static Tab'))
def test_static_tab_with_mathjax(self):
"""
Navigate to a static tab (course content)
"""
# From the course info page, navigate to the static tab
self.course_info_page.visit()
self.tab_nav.go_to_tab('Test Static Tab')
self.assertTrue(self.tab_nav.is_on_tab('Test Static Tab'))
# Verify that Mathjax has rendered
self.tab_nav.mathjax_has_rendered()
def test_wiki_tab_first_time(self):
"""
Navigate to the course wiki tab. When the wiki is accessed for
the first time, it is created on the fly.
"""
course_wiki = CourseWikiPage(self.browser, self.course_id)
# From the course info page, navigate to the wiki tab
self.course_info_page.visit()
self.tab_nav.go_to_tab('Wiki')
self.assertTrue(self.tab_nav.is_on_tab('Wiki'))
# Assert that a default wiki is created
expected_article_name = "{org}.{course_number}.{course_run}".format(
org=self.course_info['org'],
course_number=self.course_info['number'],
course_run=self.course_info['run']
)
self.assertEqual(expected_article_name, course_wiki.article_name)
def test_courseware_nav(self):
"""
Navigate to a particular unit in the course.
"""
# Navigate to the course page from the info page
self.course_info_page.visit()
self.tab_nav.go_to_tab('Course')
# Check that the course navigation appears correctly
EXPECTED_SECTIONS = {
'Test Section': ['Test Subsection'],
'Test Section 2': ['Test Subsection 2', 'Test Subsection 3']
}
actual_sections = self.course_nav.sections
        for section, subsections in EXPECTED_SECTIONS.iteritems():
            self.assertIn(section, actual_sections)
            self.assertEqual(actual_sections[section], subsections)
# Navigate to a particular section
self.course_nav.go_to_section('Test Section', 'Test Subsection')
# Check the sequence items
EXPECTED_ITEMS = ['Test Problem 1', 'Test Problem 2', 'Test HTML']
actual_items = self.course_nav.sequence_items
self.assertEqual(len(actual_items), len(EXPECTED_ITEMS))
for expected in EXPECTED_ITEMS:
self.assertIn(expected, actual_items)
@attr('shard_1')
class PDFTextBooksTabTest(UniqueCourseTest):
"""
Tests that verify each of the textbook tabs available within a course.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(PDFTextBooksTabTest, self).setUp()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.tab_nav = TabNavPage(self.browser)
# Install a course with TextBooks
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
# Add PDF textbooks to course fixture.
for i in range(1, 3):
course_fix.add_textbook("PDF Book {}".format(i), [{"title": "Chapter Of Book {}".format(i), "url": ""}])
course_fix.install()
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_verify_textbook_tabs(self):
"""
        Test that multiple PDF textbooks load correctly in the LMS.
"""
self.course_info_page.visit()
        # Verify each PDF textbook tab by visiting it; the test fails if the correct tab does not load.
for i in range(1, 3):
self.tab_nav.go_to_tab("PDF Book {}".format(i))
@attr('shard_1')
class VisibleToStaffOnlyTest(UniqueCourseTest):
"""
Tests that content with visible_to_staff_only set to True cannot be viewed by students.
"""
def setUp(self):
super(VisibleToStaffOnlyTest, self).setUp()
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Subsection With Locked Unit').add_children(
XBlockFixtureDesc('vertical', 'Locked Unit', metadata={'visible_to_staff_only': True}).add_children(
XBlockFixtureDesc('html', 'Html Child in locked unit', data="<html>Visible only to staff</html>"),
),
XBlockFixtureDesc('vertical', 'Unlocked Unit').add_children(
                        XBlockFixtureDesc('html', 'Html Child in unlocked unit', data="<html>Visible to all</html>"),
)
),
XBlockFixtureDesc('sequential', 'Unlocked Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('html', 'Html Child in visible unit', data="<html>Visible to all</html>"),
)
),
XBlockFixtureDesc('sequential', 'Locked Subsection', metadata={'visible_to_staff_only': True}).add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc(
'html', 'Html Child in locked subsection', data="<html>Visible only to staff</html>"
)
)
)
)
).install()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.course_nav = CourseNavPage(self.browser)
def test_visible_to_staff(self):
"""
Scenario: All content is visible for a user marked is_staff (different from course staff)
Given some of the course content has been marked 'visible_to_staff_only'
And I am logged on with an account marked 'is_staff'
Then I can see all course content
"""
AutoAuthPage(self.browser, username="STAFF_TESTER", email="johndoe_staff@example.com",
course_id=self.course_id, staff=True).visit()
self.courseware_page.visit()
self.assertEqual(3, len(self.course_nav.sections['Test Section']))
self.course_nav.go_to_section("Test Section", "Subsection With Locked Unit")
self.assertEqual(["Html Child in locked unit", "Html Child in unlocked unit"], self.course_nav.sequence_items)
self.course_nav.go_to_section("Test Section", "Unlocked Subsection")
self.assertEqual(["Html Child in visible unit"], self.course_nav.sequence_items)
self.course_nav.go_to_section("Test Section", "Locked Subsection")
self.assertEqual(["Html Child in locked subsection"], self.course_nav.sequence_items)
def test_visible_to_student(self):
"""
Scenario: Content marked 'visible_to_staff_only' is not visible for students in the course
Given some of the course content has been marked 'visible_to_staff_only'
And I am logged on with an authorized student account
Then I can only see content without 'visible_to_staff_only' set to True
"""
AutoAuthPage(self.browser, username="STUDENT_TESTER", email="johndoe_student@example.com",
course_id=self.course_id, staff=False).visit()
self.courseware_page.visit()
self.assertEqual(2, len(self.course_nav.sections['Test Section']))
self.course_nav.go_to_section("Test Section", "Subsection With Locked Unit")
self.assertEqual(["Html Child in unlocked unit"], self.course_nav.sequence_items)
self.course_nav.go_to_section("Test Section", "Unlocked Subsection")
self.assertEqual(["Html Child in visible unit"], self.course_nav.sequence_items)
@attr('shard_1')
class TooltipTest(UniqueCourseTest):
"""
Tests that tooltips are displayed
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(TooltipTest, self).setUp()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.tab_nav = TabNavPage(self.browser)
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('static_tab', 'Test Static Tab'),
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
XBlockFixtureDesc('problem', 'Test Problem 2', data=load_data_str('formula_problem.xml')),
XBlockFixtureDesc('html', 'Test HTML'),
)
)
).install()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_tooltip(self):
"""
Verify that tooltips are displayed when you hover over the sequence nav bar.
"""
self.course_info_page.visit()
self.tab_nav.go_to_tab('Course')
self.courseware_page.verify_tooltips_displayed()
@attr('shard_1')
class PreRequisiteCourseTest(UniqueCourseTest):
"""
Tests that pre-requisite course messages are displayed
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(PreRequisiteCourseTest, self).setUp()
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
self.prc_info = {
'org': 'test_org',
'number': self.unique_id,
'run': 'prc_test_run',
'display_name': 'PR Test Course' + self.unique_id
}
CourseFixture(
self.prc_info['org'], self.prc_info['number'],
self.prc_info['run'], self.prc_info['display_name']
).install()
pre_requisite_course_key = generate_course_key(
self.prc_info['org'],
self.prc_info['number'],
self.prc_info['run']
)
self.pre_requisite_course_id = unicode(pre_requisite_course_key)
self.dashboard_page = DashboardPage(self.browser)
self.settings_page = SettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_dashboard_message(self):
"""
        Scenario: For any course with a pre-requisite course, the student dashboard should show
        appropriate messaging.
        Given that I am on the student dashboard
        When I view a course with a pre-requisite course set
        Then at the bottom of the course I should see a course requirements message.
"""
        # Visit the dashboard page and make sure there is no pre-requisite course message.
self.dashboard_page.visit()
self.assertFalse(self.dashboard_page.pre_requisite_message_displayed())
        # Log out and log back in as staff.
LogoutPage(self.browser).visit()
AutoAuthPage(self.browser, course_id=self.course_id, staff=True).visit()
        # Visit the course settings page and set a pre-requisite course.
self.settings_page.visit()
self._set_pre_requisite_course()
        # Log out and log back in as a student.
LogoutPage(self.browser).visit()
AutoAuthPage(self.browser, course_id=self.course_id, staff=False).visit()
        # Visit the dashboard page again; now it should show the pre-requisite course message.
self.dashboard_page.visit()
        EmptyPromise(lambda: len(self.dashboard_page.available_courses) > 0, 'Dashboard page loaded').fulfill()
self.assertTrue(self.dashboard_page.pre_requisite_message_displayed())
def _set_pre_requisite_course(self):
"""
        Set the pre-requisite course.
"""
select_option_by_value(self.settings_page.pre_requisite_course_options, self.pre_requisite_course_id)
self.settings_page.save_changes()
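# --- Illustrative sketch (not part of the original tests) --------------------
# test_dashboard_message waits with bok_choy's EmptyPromise, which polls a
# zero-argument check function until it returns a truthy value or the timeout
# elapses (raising BrokenPromise). A minimal, self-contained example of that
# contract, reusing the EmptyPromise imported at the top of this file:
def _empty_promise_sketch():
    """Hypothetical demonstration of EmptyPromise polling semantics."""
    state = {'calls': 0}
    def _ready():
        state['calls'] += 1
        return state['calls'] >= 3  # becomes truthy on the third poll
    EmptyPromise(_ready, 'check succeeded after polling', timeout=5).fulfill()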
@attr('shard_1')
class ProblemExecutionTest(UniqueCourseTest):
"""
Tests of problems.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(ProblemExecutionTest, self).setUp()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.course_nav = CourseNavPage(self.browser)
self.tab_nav = TabNavPage(self.browser)
# Install a course with sections and problems.
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
course_fix.add_asset(['python_lib.zip'])
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('problem', 'Python Problem', data=dedent(
"""\
<problem>
<script type="loncapa/python">
from number_helpers import seventeen, fortytwo
oneseven = seventeen()
def check_function(expect, ans):
if int(ans) == fortytwo(-22):
return True
else:
return False
</script>
<p>What is the sum of $oneseven and 3?</p>
<customresponse expect="20" cfn="check_function">
<textline/>
</customresponse>
</problem>
"""
))
)
)
).install()
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_python_execution_in_problem(self):
# Navigate to the problem page
self.course_info_page.visit()
self.tab_nav.go_to_tab('Course')
self.course_nav.go_to_section('Test Section', 'Test Subsection')
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name.upper(), 'PYTHON PROBLEM')
# Does the page have computation results?
self.assertIn("What is the sum of 17 and 3?", problem_page.problem_text)
# Fill in the answer correctly.
problem_page.fill_answer("20")
problem_page.click_check()
self.assertTrue(problem_page.is_correct())
# Fill in the answer incorrectly.
problem_page.fill_answer("4")
problem_page.click_check()
self.assertFalse(problem_page.is_correct())
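# --- Illustrative sketch (not part of the original tests) --------------------
# The <customresponse cfn="check_function"> problem above grades answers with a
# Python function called as check_function(expect, ans). A self-contained
# approximation of that grader; number_helpers ships inside python_lib.zip, so
# its results (seventeen() == 17, fortytwo(-22) == 20) are assumptions here:
def _check_function_sketch(expect, ans):
    """Hypothetical mirror of the loncapa grader used in 'Python Problem'."""
    return int(ans) == 20  # 17 + 3, matching fortytwo(-22) in the course code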
@attr('shard_1')
class EntranceExamTest(UniqueCourseTest):
"""
Tests that course has an entrance exam.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(EntranceExamTest, self).setUp()
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.settings_page = SettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_entrance_exam_section(self):
"""
        Scenario: Any course that has an entrance exam enabled should show an entrance exam chapter
        on the course page.
        Given that I am on the course page
        When I view the course that has an entrance exam
        Then there should be an "Entrance Exam" chapter.
"""
entrance_exam_link_selector = '.accordion .course-navigation .chapter .group-heading'
        # Visit the course page and make sure there is no entrance exam chapter.
self.courseware_page.visit()
self.courseware_page.wait_for_page()
self.assertFalse(element_has_text(
page=self.courseware_page,
css_selector=entrance_exam_link_selector,
text='Entrance Exam'
))
        # Log out and log back in as staff.
LogoutPage(self.browser).visit()
AutoAuthPage(self.browser, course_id=self.course_id, staff=True).visit()
        # Visit the course settings page and enable the entrance exam for the course.
self.settings_page.visit()
self.settings_page.wait_for_page()
self.assertTrue(self.settings_page.is_browser_on_page())
self.settings_page.entrance_exam_field.click()
self.settings_page.save_changes()
        # Log out and log back in as a student.
LogoutPage(self.browser).visit()
AutoAuthPage(self.browser, course_id=self.course_id, staff=False).visit()
        # Visit the courseware page and make sure there is an "Entrance Exam" section.
self.courseware_page.visit()
self.courseware_page.wait_for_page()
self.assertTrue(element_has_text(
page=self.courseware_page,
css_selector=entrance_exam_link_selector,
text='Entrance Exam'
))
@attr('shard_1')
class NotLiveRedirectTest(UniqueCourseTest):
"""
Test that a banner is shown when the user is redirected to
the dashboard from a non-live course.
"""
def setUp(self):
"""Create a course that isn't live yet and enroll for it."""
super(NotLiveRedirectTest, self).setUp()
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name'],
start_date=datetime(year=2099, month=1, day=1)
).install()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_redirect_banner(self):
"""
Navigate to the course info page, then check that we're on the
dashboard page with the appropriate message.
"""
url = BASE_URL + "/courses/" + self.course_id + "/" + 'info'
self.browser.get(url)
page = DashboardPage(self.browser)
page.wait_for_page()
self.assertIn(
'The course you are looking for does not start until',
page.banner_text
)
@attr('shard_1')
class EnrollmentClosedRedirectTest(UniqueCourseTest):
"""
Test that a banner is shown when the user is redirected to the
dashboard after trying to view the track selection page for a
course after enrollment has ended.
"""
def setUp(self):
"""Create a course that is closed for enrollment, and sign in as a user."""
super(EnrollmentClosedRedirectTest, self).setUp()
course = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
now = datetime.now(pytz.UTC)
course.add_course_details({
'enrollment_start': (now - timedelta(days=30)).isoformat(),
'enrollment_end': (now - timedelta(days=1)).isoformat()
})
course.install()
# Add an honor mode to the course
ModeCreationPage(self.browser, self.course_id).visit()
# Add a verified mode to the course
ModeCreationPage(
self.browser,
self.course_id,
mode_slug=u'verified',
mode_display_name=u'Verified Certificate',
min_price=10,
suggested_prices='10,20'
).visit()
def _assert_dashboard_message(self):
"""
Assert that the 'closed for enrollment' text is present on the
dashboard.
"""
page = DashboardPage(self.browser)
page.wait_for_page()
self.assertIn(
'The course you are looking for is closed for enrollment',
page.banner_text
)
def test_redirect_banner(self):
"""
Navigate to the course info page, then check that we're on the
dashboard page with the appropriate message.
"""
AutoAuthPage(self.browser).visit()
url = BASE_URL + "/course_modes/choose/" + self.course_id
self.browser.get(url)
self._assert_dashboard_message()
def test_login_redirect(self):
"""
Test that the user is correctly redirected after logistration when
attempting to enroll in a closed course.
"""
url = '{base_url}/register?{params}'.format(
base_url=BASE_URL,
params=urllib.urlencode({
'course_id': self.course_id,
'enrollment_action': 'enroll',
'email_opt_in': 'false'
})
)
self.browser.get(url)
register_page = CombinedLoginAndRegisterPage(
self.browser,
start_page="register",
course_id=self.course_id
)
register_page.wait_for_page()
register_page.register(
email="email@example.com",
password="password",
username="username",
full_name="Test User",
country="US",
favorite_movie="Mad Max: Fury Road",
terms_of_service=True
)
self._assert_dashboard_message()
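# --- Illustrative sketch (not part of the original tests) --------------------
# test_login_redirect builds the logistration URL by url-encoding the
# enrollment parameters. The same construction, reduced to its essentials:
def _register_url_sketch(base_url, course_id):
    """Hypothetical helper mirroring the URL built in test_login_redirect."""
    params = urllib.urlencode({
        'course_id': course_id,
        'enrollment_action': 'enroll',
        'email_opt_in': 'false',
    })
    return '{base_url}/register?{params}'.format(base_url=base_url, params=params)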
@attr('shard_1')
class LMSLanguageTest(UniqueCourseTest):
""" Test suite for the LMS Language """
def setUp(self):
super(LMSLanguageTest, self).setUp()
self.dashboard_page = DashboardPage(self.browser)
self.account_settings = AccountSettingsPage(self.browser)
AutoAuthPage(self.browser).visit()
def test_lms_language_change(self):
"""
        Scenario: Ensure that language selection works correctly.
        First I go to the user dashboard page in the LMS, where I can see that 'English' is selected by default.
        Then I choose 'Dummy Language' from the drop down (at the top of the page).
        Then I visit the student account settings page, where I can see the language has been updated to
        'Dummy Language' in both drop downs.
        After that I select the 'English' language and visit the dashboard page again.
        Then I can see that the top-level language selector persists its value of 'English'.
"""
self.dashboard_page.visit()
language_selector = self.dashboard_page.language_selector
self.assertEqual(
get_selected_option_text(language_selector),
u'English'
)
select_option_by_text(language_selector, 'Dummy Language (Esperanto)')
self.dashboard_page.wait_for_ajax()
self.account_settings.visit()
self.assertEqual(self.account_settings.value_for_dropdown_field('pref-lang'), u'Dummy Language (Esperanto)')
self.assertEqual(
get_selected_option_text(language_selector),
u'Dummy Language (Esperanto)'
)
        # Change back to English.
select_option_by_text(language_selector, 'English')
self.account_settings.wait_for_ajax()
self.assertEqual(self.account_settings.value_for_dropdown_field('pref-lang'), u'English')
self.dashboard_page.visit()
self.assertEqual(
get_selected_option_text(language_selector),
u'English'
)
@attr('a11y')
class CourseInfoA11yTest(UniqueCourseTest):
"""Accessibility test for course home/info page."""
def setUp(self):
super(CourseInfoA11yTest, self).setUp()
self.course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
self.course_fixture.add_update(
CourseUpdateDesc(date='January 29, 2014', content='Test course update1')
)
self.course_fixture.add_update(
CourseUpdateDesc(date='February 5th, 2014', content='Test course update2')
)
self.course_fixture.add_update(
CourseUpdateDesc(date='March 31st, 2014', content='Test course update3')
)
self.course_fixture.install()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_course_home_a11y(self):
self.course_info_page.visit()
self.course_info_page.a11y_audit.check_for_accessibility_errors()
|
devs1991/test_edx_docmode
|
common/test/acceptance/tests/lms/test_lms.py
|
Python
|
agpl-3.0
| 51,932
|
[
"VisIt"
] |
a525f20472e6f55fa92fcbf4bc6856a755aa75f2d90bb405fcf4f91b30b43374
|
#!/usr/bin/python3
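# Illustrative helper (not part of the original file): each key in the table
# below encodes five die rolls (digits 1-6, read in order) and maps to one
# passphrase word.
def roll_to_word(rolls):
    """Look up a word by its five-roll key, e.g. roll_to_word('11213') -> 'abut'.
    `dicewords` is defined below; Python resolves the name at call time."""
    return dicewords[rolls]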
dicewords = {"11111":"a",
"11112":"a&p",
"11113":"a's",
"11114":"aa",
"11115":"aaa",
"11116":"aaaa",
"11121":"aaron",
"11122":"ab",
"11123":"aba",
"11124":"ababa",
"11125":"aback",
"11126":"abase",
"11131":"abash",
"11132":"abate",
"11133":"abbas",
"11134":"abbe",
"11135":"abbey",
"11136":"abbot",
"11141":"abbott",
"11142":"abc",
"11143":"abe",
"11144":"abed",
"11145":"abel",
"11146":"abet",
"11151":"abide",
"11152":"abject",
"11153":"ablaze",
"11154":"able",
"11155":"abner",
"11156":"abo",
"11161":"abode",
"11162":"abort",
"11163":"about",
"11164":"above",
"11165":"abrade",
"11166":"abram",
"11211":"absorb",
"11212":"abuse",
"11213":"abut",
"11214":"abyss",
"11215":"ac",
"11216":"acadia",
"11221":"accra",
"11222":"accrue",
"11223":"ace",
"11224":"acetic",
"11225":"ache",
"11226":"acid",
"11231":"acidic",
"11232":"acm",
"11233":"acme",
"11234":"acorn",
"11235":"acre",
"11236":"acrid",
"11241":"act",
"11242":"acton",
"11243":"actor",
"11244":"acts",
"11245":"acuity",
"11246":"acute",
"11251":"ad",
"11252":"ada",
"11253":"adage",
"11254":"adagio",
"11255":"adair",
"11256":"adam",
"11261":"adams",
"11262":"adapt",
"11263":"add",
"11264":"added",
"11265":"addict",
"11266":"addis",
"11311":"addle",
"11312":"adele",
"11313":"aden",
"11314":"adept",
"11315":"adieu",
"11316":"adjust",
"11321":"adler",
"11322":"admit",
"11323":"admix",
"11324":"ado",
"11325":"adobe",
"11326":"adonis",
"11331":"adopt",
"11332":"adore",
"11333":"adorn",
"11334":"adult",
"11335":"advent",
"11336":"advert",
"11341":"advise",
"11342":"ae",
"11343":"aegis",
"11344":"aeneid",
"11345":"af",
"11346":"afar",
"11351":"affair",
"11352":"affine",
"11353":"affix",
"11354":"afire",
"11355":"afoot",
"11356":"afraid",
"11361":"africa",
"11362":"afro",
"11363":"aft",
"11364":"ag",
"11365":"again",
"11366":"agate",
"11411":"agave",
"11412":"age",
"11413":"agee",
"11414":"agenda",
"11415":"agent",
"11416":"agile",
"11421":"aging",
"11422":"agnes",
"11423":"agnew",
"11424":"ago",
"11425":"agone",
"11426":"agony",
"11431":"agree",
"11432":"ague",
"11433":"agway",
"11434":"ah",
"11435":"ahead",
"11436":"ahem",
"11441":"ahoy",
"11442":"ai",
"11443":"aid",
"11444":"aida",
"11445":"aide",
"11446":"aides",
"11451":"aiken",
"11452":"ail",
"11453":"aile",
"11454":"aim",
"11455":"ain't",
"11456":"ainu",
"11461":"air",
"11462":"aires",
"11463":"airman",
"11464":"airway",
"11465":"airy",
"11466":"aisle",
"11511":"aj",
"11512":"ajar",
"11513":"ajax",
"11514":"ak",
"11515":"akers",
"11516":"akin",
"11521":"akron",
"11522":"al",
"11523":"ala",
"11524":"alai",
"11525":"alamo",
"11526":"alan",
"11531":"alarm",
"11532":"alaska",
"11533":"alb",
"11534":"alba",
"11535":"album",
"11536":"alcoa",
"11541":"alden",
"11542":"alder",
"11543":"ale",
"11544":"alec",
"11545":"aleck",
"11546":"aleph",
"11551":"alert",
"11552":"alex",
"11553":"alexei",
"11554":"alga",
"11555":"algae",
"11556":"algal",
"11561":"alger",
"11562":"algol",
"11563":"ali",
"11564":"alia",
"11565":"alias",
"11566":"alibi",
"11611":"alice",
"11612":"alien",
"11613":"alight",
"11614":"align",
"11615":"alike",
"11616":"alive",
"11621":"all",
"11622":"allah",
"11623":"allan",
"11624":"allay",
"11625":"allen",
"11626":"alley",
"11631":"allied",
"11632":"allis",
"11633":"allot",
"11634":"allow",
"11635":"alloy",
"11636":"allure",
"11641":"ally",
"11642":"allyl",
"11643":"allyn",
"11644":"alma",
"11645":"almost",
"11646":"aloe",
"11651":"aloft",
"11652":"aloha",
"11653":"alone",
"11654":"along",
"11655":"aloof",
"11656":"aloud",
"11661":"alp",
"11662":"alpha",
"11663":"alps",
"11664":"also",
"11665":"alsop",
"11666":"altair",
"12111":"altar",
"12112":"alter",
"12113":"alto",
"12114":"alton",
"12115":"alum",
"12116":"alumni",
"12121":"alva",
"12122":"alvin",
"12123":"alway",
"12124":"am",
"12125":"ama",
"12126":"amass",
"12131":"amaze",
"12132":"amber",
"12133":"amble",
"12134":"ambush",
"12135":"amen",
"12136":"amend",
"12141":"ames",
"12142":"ami",
"12143":"amid",
"12144":"amide",
"12145":"amigo",
"12146":"amino",
"12151":"amiss",
"12152":"amity",
"12153":"amman",
"12154":"ammo",
"12155":"amoco",
"12156":"amok",
"12161":"among",
"12162":"amort",
"12163":"amos",
"12164":"amp",
"12165":"ampere",
"12166":"ampex",
"12211":"ample",
"12212":"amply",
"12213":"amra",
"12214":"amulet",
"12215":"amuse",
"12216":"amy",
"12221":"an",
"12222":"ana",
"12223":"and",
"12224":"andes",
"12225":"andre",
"12226":"andrew",
"12231":"andy",
"12232":"anent",
"12233":"anew",
"12234":"angel",
"12235":"angelo",
"12236":"anger",
"12241":"angie",
"12242":"angle",
"12243":"anglo",
"12244":"angola",
"12245":"angry",
"12246":"angst",
"12251":"angus",
"12252":"ani",
"12253":"anion",
"12254":"anise",
"12255":"anita",
"12256":"ankle",
"12261":"ann",
"12262":"anna",
"12263":"annal",
"12264":"anne",
"12265":"annex",
"12266":"annie",
"12311":"annoy",
"12312":"annul",
"12313":"annuli",
"12314":"annum",
"12315":"anode",
"12316":"ansi",
"12321":"answer",
"12322":"ant",
"12323":"ante",
"12324":"anti",
"12325":"antic",
"12326":"anton",
"12331":"anus",
"12332":"anvil",
"12333":"any",
"12334":"anyhow",
"12335":"anyway",
"12336":"ao",
"12341":"aok",
"12342":"aorta",
"12343":"ap",
"12344":"apart",
"12345":"apathy",
"12346":"ape",
"12351":"apex",
"12352":"aphid",
"12353":"aplomb",
"12354":"appeal",
"12355":"append",
"12356":"apple",
"12361":"apply",
"12362":"april",
"12363":"apron",
"12364":"apse",
"12365":"apt",
"12366":"aq",
"12411":"aqua",
"12412":"ar",
"12413":"arab",
"12414":"araby",
"12415":"arc",
"12416":"arcana",
"12421":"arch",
"12422":"archer",
"12423":"arden",
"12424":"ardent",
"12425":"are",
"12426":"area",
"12431":"arena",
"12432":"ares",
"12433":"argive",
"12434":"argo",
"12435":"argon",
"12436":"argot",
"12441":"argue",
"12442":"argus",
"12443":"arhat",
"12444":"arid",
"12445":"aries",
"12446":"arise",
"12451":"ark",
"12452":"arlen",
"12453":"arlene",
"12454":"arm",
"12455":"armco",
"12456":"army",
"12461":"arnold",
"12462":"aroma",
"12463":"arose",
"12464":"arpa",
"12465":"array",
"12466":"arrear",
"12511":"arrow",
"12512":"arson",
"12513":"art",
"12514":"artery",
"12515":"arthur",
"12516":"artie",
"12521":"arty",
"12522":"aruba",
"12523":"arum",
"12524":"aryl",
"12525":"as",
"12526":"ascend",
"12531":"ash",
"12532":"ashen",
"12533":"asher",
"12534":"ashley",
"12535":"ashy",
"12536":"asia",
"12541":"aside",
"12542":"ask",
"12543":"askew",
"12544":"asleep",
"12545":"aspen",
"12546":"aspire",
"12551":"ass",
"12552":"assai",
"12553":"assam",
"12554":"assay",
"12555":"asset",
"12556":"assort",
"12561":"assure",
"12562":"aster",
"12563":"astm",
"12564":"astor",
"12565":"astral",
"12566":"at",
"12611":"at&t",
"12612":"ate",
"12613":"athens",
"12614":"atlas",
"12615":"atom",
"12616":"atomic",
"12621":"atone",
"12622":"atop",
"12623":"attic",
"12624":"attire",
"12625":"au",
"12626":"aubrey",
"12631":"audio",
"12632":"audit",
"12633":"aug",
"12634":"auger",
"12635":"augur",
"12636":"august",
"12641":"auk",
"12642":"aunt",
"12643":"aura",
"12644":"aural",
"12645":"auric",
"12646":"austin",
"12651":"auto",
"12652":"autumn",
"12653":"av",
"12654":"avail",
"12655":"ave",
"12656":"aver",
"12661":"avert",
"12662":"avery",
"12663":"aviate",
"12664":"avid",
"12665":"avis",
"12666":"aviv",
"13111":"avoid",
"13112":"avon",
"13113":"avow",
"13114":"aw",
"13115":"await",
"13116":"awake",
"13121":"award",
"13122":"aware",
"13123":"awash",
"13124":"away",
"13125":"awe",
"13126":"awful",
"13131":"awl",
"13132":"awn",
"13133":"awoke",
"13134":"awry",
"13135":"ax",
"13136":"axe",
"13141":"axes",
"13142":"axial",
"13143":"axiom",
"13144":"axis",
"13145":"axle",
"13146":"axon",
"13151":"ay",
"13152":"aye",
"13153":"ayers",
"13154":"az",
"13155":"aztec",
"13156":"azure",
"13161":"b",
"13162":"b's",
"13163":"ba",
"13164":"babe",
"13165":"babel",
"13166":"baby",
"13211":"bach",
"13212":"back",
"13213":"backup",
"13214":"bacon",
"13215":"bad",
"13216":"bade",
"13221":"baden",
"13222":"badge",
"13223":"baffle",
"13224":"bag",
"13225":"baggy",
"13226":"bah",
"13231":"bahama",
"13232":"bail",
"13233":"baird",
"13234":"bait",
"13235":"bake",
"13236":"baku",
"13241":"bald",
"13242":"baldy",
"13243":"bale",
"13244":"bali",
"13245":"balk",
"13246":"balkan",
"13251":"balky",
"13252":"ball",
"13253":"balled",
"13254":"ballot",
"13255":"balm",
"13256":"balmy",
"13261":"balsa",
"13262":"bam",
"13263":"bambi",
"13264":"ban",
"13265":"banal",
"13266":"band",
"13311":"bandit",
"13312":"bandy",
"13313":"bane",
"13314":"bang",
"13315":"banish",
"13316":"banjo",
"13321":"bank",
"13322":"banks",
"13323":"bantu",
"13324":"bar",
"13325":"barb",
"13326":"bard",
"13331":"bare",
"13332":"barfly",
"13333":"barge",
"13334":"bark",
"13335":"barley",
"13336":"barn",
"13341":"barnes",
"13342":"baron",
"13343":"barony",
"13344":"barr",
"13345":"barre",
"13346":"barry",
"13351":"barter",
"13352":"barth",
"13353":"barton",
"13354":"basal",
"13355":"base",
"13356":"basel",
"13361":"bash",
"13362":"basic",
"13363":"basil",
"13364":"basin",
"13365":"basis",
"13366":"bask",
"13411":"bass",
"13412":"bassi",
"13413":"basso",
"13414":"baste",
"13415":"bat",
"13416":"batch",
"13421":"bate",
"13422":"bater",
"13423":"bates",
"13424":"bath",
"13425":"bathe",
"13426":"batik",
"13431":"baton",
"13432":"bator",
"13433":"batt",
"13434":"bauble",
"13435":"baud",
"13436":"bauer",
"13441":"bawd",
"13442":"bawdy",
"13443":"bawl",
"13444":"baxter",
"13445":"bay",
"13446":"bayda",
"13451":"bayed",
"13452":"bayou",
"13453":"bazaar",
"13454":"bb",
"13455":"bbb",
"13456":"bbbb",
"13461":"bc",
"13462":"bcd",
"13463":"bd",
"13464":"be",
"13465":"beach",
"13466":"bead",
"13511":"beady",
"13512":"beak",
"13513":"beam",
"13514":"bean",
"13515":"bear",
"13516":"beard",
"13521":"beast",
"13522":"beat",
"13523":"beau",
"13524":"beauty",
"13525":"beaux",
"13526":"bebop",
"13531":"becalm",
"13532":"beck",
"13533":"becker",
"13534":"becky",
"13535":"bed",
"13536":"bedim",
"13541":"bee",
"13542":"beebe",
"13543":"beech",
"13544":"beef",
"13545":"beefy",
"13546":"been",
"13551":"beep",
"13552":"beer",
"13553":"beet",
"13554":"befall",
"13555":"befit",
"13556":"befog",
"13561":"beg",
"13562":"began",
"13563":"beget",
"13564":"beggar",
"13565":"begin",
"13566":"begun",
"13611":"behind",
"13612":"beige",
"13613":"being",
"13614":"beirut",
"13615":"bel",
"13616":"bela",
"13621":"belch",
"13622":"belfry",
"13623":"belie",
"13624":"bell",
"13625":"bella",
"13626":"belle",
"13631":"belly",
"13632":"below",
"13633":"belt",
"13634":"bema",
"13635":"beman",
"13636":"bemoan",
"13641":"ben",
"13642":"bench",
"13643":"bend",
"13644":"bender",
"13645":"benny",
"13646":"bent",
"13651":"benz",
"13652":"berea",
"13653":"bereft",
"13654":"beret",
"13655":"berg",
"13656":"berlin",
"13661":"bern",
"13662":"berne",
"13663":"bernet",
"13664":"berra",
"13665":"berry",
"13666":"bert",
"14111":"berth",
"14112":"beryl",
"14113":"beset",
"14114":"bess",
"14115":"bessel",
"14116":"best",
"14121":"bestir",
"14122":"bet",
"14123":"beta",
"14124":"betel",
"14125":"beth",
"14126":"bethel",
"14131":"betsy",
"14132":"bette",
"14133":"betty",
"14134":"bevel",
"14135":"bevy",
"14136":"beware",
"14141":"bey",
"14142":"bezel",
"14143":"bf",
"14144":"bg",
"14145":"bh",
"14146":"bhoy",
"14151":"bi",
"14152":"bias",
"14153":"bib",
"14154":"bibb",
"14155":"bible",
"14156":"bicep",
"14161":"biceps",
"14162":"bid",
"14163":"biddy",
"14164":"bide",
"14165":"bien",
"14166":"big",
"14211":"biggs",
"14212":"bigot",
"14213":"bile",
"14214":"bilge",
"14215":"bilk",
"14216":"bill",
"14221":"billow",
"14222":"billy",
"14223":"bin",
"14224":"binary",
"14225":"bind",
"14226":"bing",
"14231":"binge",
"14232":"bingle",
"14233":"bini",
"14234":"biota",
"14235":"birch",
"14236":"bird",
"14241":"birdie",
"14242":"birth",
"14243":"bison",
"14244":"bisque",
"14245":"bit",
"14246":"bitch",
"14251":"bite",
"14252":"bitt",
"14253":"bitten",
"14254":"biz",
"14255":"bizet",
"14256":"bj",
"14261":"bk",
"14262":"bl",
"14263":"blab",
"14264":"black",
"14265":"blade",
"14266":"blair",
"14311":"blake",
"14312":"blame",
"14313":"blanc",
"14314":"bland",
"14315":"blank",
"14316":"blare",
"14321":"blast",
"14322":"blat",
"14323":"blatz",
"14324":"blaze",
"14325":"bleak",
"14326":"bleat",
"14331":"bled",
"14332":"bleed",
"14333":"blend",
"14334":"bless",
"14335":"blest",
"14336":"blew",
"14341":"blimp",
"14342":"blind",
"14343":"blink",
"14344":"blinn",
"14345":"blip",
"14346":"bliss",
"14351":"blithe",
"14352":"blitz",
"14353":"bloat",
"14354":"blob",
"14355":"bloc",
"14356":"bloch",
"14361":"block",
"14362":"bloke",
"14363":"blond",
"14364":"blonde",
"14365":"blood",
"14366":"bloom",
"14411":"bloop",
"14412":"blot",
"14413":"blotch",
"14414":"blow",
"14415":"blown",
"14416":"blue",
"14421":"bluet",
"14422":"bluff",
"14423":"blum",
"14424":"blunt",
"14425":"blur",
"14426":"blurt",
"14431":"blush",
"14432":"blvd",
"14433":"blythe",
"14434":"bm",
"14435":"bmw",
"14436":"bn",
"14441":"bo",
"14442":"boa",
"14443":"boar",
"14444":"board",
"14445":"boast",
"14446":"boat",
"14451":"bob",
"14452":"bobbin",
"14453":"bobby",
"14454":"bobcat",
"14455":"boca",
"14456":"bock",
"14461":"bode",
"14462":"body",
"14463":"bog",
"14464":"bogey",
"14465":"boggy",
"14466":"bogus",
"14511":"bogy",
"14512":"bohr",
"14513":"boil",
"14514":"bois",
"14515":"boise",
"14516":"bold",
"14521":"bole",
"14522":"bolo",
"14523":"bolt",
"14524":"bomb",
"14525":"bombay",
"14526":"bon",
"14531":"bona",
"14532":"bond",
"14533":"bone",
"14534":"bong",
"14535":"bongo",
"14536":"bonn",
"14541":"bonus",
"14542":"bony",
"14543":"bonze",
"14544":"boo",
"14545":"booby",
"14546":"boogie",
"14551":"book",
"14552":"booky",
"14553":"boom",
"14554":"boon",
"14555":"boone",
"14556":"boor",
"14561":"boost",
"14562":"boot",
"14563":"booth",
"14564":"booty",
"14565":"booze",
"14566":"bop",
"14611":"borax",
"14612":"border",
"14613":"bore",
"14614":"borg",
"14615":"boric",
"14616":"boris",
"14621":"born",
"14622":"borne",
"14623":"borneo",
"14624":"boron",
"14625":"bosch",
"14626":"bose",
"14631":"bosom",
"14632":"boson",
"14633":"boss",
"14634":"boston",
"14635":"botch",
"14636":"both",
"14641":"bottle",
"14642":"bough",
"14643":"bouncy",
"14644":"bound",
"14645":"bourn",
"14646":"bout",
"14651":"bovine",
"14652":"bow",
"14653":"bowel",
"14654":"bowen",
"14655":"bowie",
"14656":"bowl",
"14661":"box",
"14662":"boxy",
"14663":"boy",
"14664":"boyar",
"14665":"boyce",
"14666":"boyd",
"15111":"boyle",
"15112":"bp",
"15113":"bq",
"15114":"br",
"15115":"brace",
"15116":"bract",
"15121":"brad",
"15122":"brady",
"15123":"brae",
"15124":"brag",
"15125":"bragg",
"15126":"braid",
"15131":"brain",
"15132":"brainy",
"15133":"brake",
"15134":"bran",
"15135":"brand",
"15136":"brandt",
"15141":"brant",
"15142":"brash",
"15143":"brass",
"15144":"brassy",
"15145":"braun",
"15146":"brave",
"15151":"bravo",
"15152":"brawl",
"15153":"bray",
"15154":"bread",
"15155":"break",
"15156":"bream",
"15161":"breath",
"15162":"bred",
"15163":"breed",
"15164":"breeze",
"15165":"bremen",
"15166":"brent",
"15211":"brest",
"15212":"brett",
"15213":"breve",
"15214":"brew",
"15215":"brian",
"15216":"briar",
"15221":"bribe",
"15222":"brice",
"15223":"brick",
"15224":"bride",
"15225":"brief",
"15226":"brig",
"15231":"briggs",
"15232":"brim",
"15233":"brine",
"15234":"bring",
"15235":"brink",
"15236":"briny",
"15241":"brisk",
"15242":"broad",
"15243":"brock",
"15244":"broil",
"15245":"broke",
"15246":"broken",
"15251":"bronx",
"15252":"brood",
"15253":"brook",
"15254":"brooke",
"15255":"broom",
"15256":"broth",
"15261":"brow",
"15262":"brown",
"15263":"browse",
"15264":"bruce",
"15265":"bruit",
"15266":"brunch",
"15311":"bruno",
"15312":"brunt",
"15313":"brush",
"15314":"brute",
"15315":"bryan",
"15316":"bryant",
"15321":"bryce",
"15322":"bryn",
"15323":"bs",
"15324":"bstj",
"15325":"bt",
"15326":"btl",
"15331":"bu",
"15332":"bub",
"15333":"buck",
"15334":"bud",
"15335":"budd",
"15336":"buddy",
"15341":"budge",
"15342":"buena",
"15343":"buenos",
"15344":"buff",
"15345":"bug",
"15346":"buggy",
"15351":"bugle",
"15352":"buick",
"15353":"build",
"15354":"built",
"15355":"bulb",
"15356":"bulge",
"15361":"bulk",
"15362":"bulky",
"15363":"bull",
"15364":"bully",
"15365":"bum",
"15366":"bump",
"15411":"bun",
"15412":"bunch",
"15413":"bundy",
"15414":"bunk",
"15415":"bunny",
"15416":"bunt",
"15421":"bunyan",
"15422":"buoy",
"15423":"burch",
"15424":"bureau",
"15425":"buret",
"15426":"burg",
"15431":"buried",
"15432":"burke",
"15433":"burl",
"15434":"burly",
"15435":"burma",
"15436":"burn",
"15441":"burnt",
"15442":"burp",
"15443":"burr",
"15444":"burro",
"15445":"burst",
"15446":"burt",
"15451":"burton",
"15452":"burtt",
"15453":"bury",
"15454":"bus",
"15455":"busch",
"15456":"bush",
"15461":"bushel",
"15462":"bushy",
"15463":"buss",
"15464":"bust",
"15465":"busy",
"15466":"but",
"15511":"butane",
"15512":"butch",
"15513":"buteo",
"15514":"butt",
"15515":"butte",
"15516":"butyl",
"15521":"buxom",
"15522":"buy",
"15523":"buyer",
"15524":"buzz",
"15525":"buzzy",
"15526":"bv",
"15531":"bw",
"15532":"bx",
"15533":"by",
"15534":"bye",
"15535":"byers",
"15536":"bylaw",
"15541":"byline",
"15542":"byrd",
"15543":"byrne",
"15544":"byron",
"15545":"byte",
"15546":"byway",
"15551":"byword",
"15552":"bz",
"15553":"c",
"15554":"c's",
"15555":"ca",
"15556":"cab",
"15561":"cabal",
"15562":"cabin",
"15563":"cable",
"15564":"cabot",
"15565":"cacao",
"15566":"cache",
"15611":"cacm",
"15612":"cacti",
"15613":"caddy",
"15614":"cadent",
"15615":"cadet",
"15616":"cadre",
"15621":"cady",
"15622":"cafe",
"15623":"cage",
"15624":"cagey",
"15625":"cahill",
"15626":"caiman",
"15631":"cain",
"15632":"caine",
"15633":"cairn",
"15634":"cairo",
"15635":"cake",
"15636":"cal",
"15641":"calder",
"15642":"caleb",
"15643":"calf",
"15644":"call",
"15645":"calla",
"15646":"callus",
"15651":"calm",
"15652":"calve",
"15653":"cam",
"15654":"camber",
"15655":"came",
"15656":"camel",
"15661":"cameo",
"15662":"camp",
"15663":"can",
"15664":"can't",
"15665":"canal",
"15666":"canary",
"16111":"cancer",
"16112":"candle",
"16113":"candy",
"16114":"cane",
"16115":"canis",
"16116":"canna",
"16121":"cannot",
"16122":"canny",
"16123":"canoe",
"16124":"canon",
"16125":"canopy",
"16126":"cant",
"16131":"canto",
"16132":"canton",
"16133":"cap",
"16134":"cape",
"16135":"caper",
"16136":"capo",
"16141":"car",
"16142":"carbon",
"16143":"card",
"16144":"care",
"16145":"caress",
"16146":"caret",
"16151":"carey",
"16152":"cargo",
"16153":"carib",
"16154":"carl",
"16155":"carla",
"16156":"carlo",
"16161":"carne",
"16162":"carob",
"16163":"carol",
"16164":"carp",
"16165":"carpet",
"16166":"carr",
"16211":"carrie",
"16212":"carry",
"16213":"carson",
"16214":"cart",
"16215":"carte",
"16216":"caruso",
"16221":"carve",
"16222":"case",
"16223":"casey",
"16224":"cash",
"16225":"cashew",
"16226":"cask",
"16231":"casket",
"16232":"cast",
"16233":"caste",
"16234":"cat",
"16235":"catch",
"16236":"cater",
"16241":"cathy",
"16242":"catkin",
"16243":"catsup",
"16244":"cauchy",
"16245":"caulk",
"16246":"cause",
"16251":"cave",
"16252":"cavern",
"16253":"cavil",
"16254":"cavort",
"16255":"caw",
"16256":"cayuga",
"16261":"cb",
"16262":"cbs",
"16263":"cc",
"16264":"ccc",
"16265":"cccc",
"16266":"cd",
"16311":"cdc",
"16312":"ce",
"16313":"cease",
"16314":"cecil",
"16315":"cedar",
"16316":"cede",
"16321":"ceil",
"16322":"celia",
"16323":"cell",
"16324":"census",
"16325":"cent",
"16326":"ceres",
"16331":"cern",
"16332":"cetera",
"16333":"cetus",
"16334":"cf",
"16335":"cg",
"16336":"ch",
"16341":"chad",
"16342":"chafe",
"16343":"chaff",
"16344":"chai",
"16345":"chain",
"16346":"chair",
"16351":"chalk",
"16352":"champ",
"16353":"chance",
"16354":"chang",
"16355":"chant",
"16356":"chao",
"16361":"chaos",
"16362":"chap",
"16363":"chapel",
"16364":"char",
"16365":"chard",
"16366":"charm",
"16411":"chart",
"16412":"chase",
"16413":"chasm",
"16414":"chaste",
"16415":"chat",
"16416":"chaw",
"16421":"cheap",
"16422":"cheat",
"16423":"check",
"16424":"cheek",
"16425":"cheeky",
"16426":"cheer",
"16431":"chef",
"16432":"chen",
"16433":"chert",
"16434":"cherub",
"16435":"chess",
"16436":"chest",
"16441":"chevy",
"16442":"chew",
"16443":"chi",
"16444":"chic",
"16445":"chick",
"16446":"chide",
"16451":"chief",
"16452":"child",
"16453":"chile",
"16454":"chili",
"16455":"chill",
"16456":"chilly",
"16461":"chime",
"16462":"chin",
"16463":"china",
"16464":"chine",
"16465":"chink",
"16466":"chip",
"16511":"chirp",
"16512":"chisel",
"16513":"chit",
"16514":"chive",
"16515":"chock",
"16516":"choir",
"16521":"choke",
"16522":"chomp",
"16523":"chop",
"16524":"chopin",
"16525":"choral",
"16526":"chord",
"16531":"chore",
"16532":"chose",
"16533":"chosen",
"16534":"chou",
"16535":"chow",
"16536":"chris",
"16541":"chub",
"16542":"chuck",
"16543":"chuff",
"16544":"chug",
"16545":"chum",
"16546":"chump",
"16551":"chunk",
"16552":"churn",
"16553":"chute",
"16554":"ci",
"16555":"cia",
"16556":"cicada",
"16561":"cider",
"16562":"cigar",
"16563":"cilia",
"16564":"cinch",
"16565":"cindy",
"16566":"cipher",
"16611":"circa",
"16612":"circe",
"16613":"cite",
"16614":"citrus",
"16615":"city",
"16616":"civet",
"16621":"civic",
"16622":"civil",
"16623":"cj",
"16624":"ck",
"16625":"cl",
"16626":"clad",
"16631":"claim",
"16632":"clam",
"16633":"clammy",
"16634":"clamp",
"16635":"clan",
"16636":"clang",
"16641":"clank",
"16642":"clap",
"16643":"clara",
"16644":"clare",
"16645":"clark",
"16646":"clarke",
"16651":"clash",
"16652":"clasp",
"16653":"class",
"16654":"claus",
"16655":"clause",
"16656":"claw",
"16661":"clay",
"16662":"clean",
"16663":"clear",
"16664":"cleat",
"16665":"cleft",
"16666":"clerk",
"21111":"cliche",
"21112":"click",
"21113":"cliff",
"21114":"climb",
"21115":"clime",
"21116":"cling",
"21121":"clink",
"21122":"clint",
"21123":"clio",
"21124":"clip",
"21125":"clive",
"21126":"cloak",
"21131":"clock",
"21132":"clod",
"21133":"clog",
"21134":"clomp",
"21135":"clone",
"21136":"close",
"21141":"closet",
"21142":"clot",
"21143":"cloth",
"21144":"cloud",
"21145":"clout",
"21146":"clove",
"21151":"clown",
"21152":"cloy",
"21153":"club",
"21154":"cluck",
"21155":"clue",
"21156":"cluj",
"21161":"clump",
"21162":"clumsy",
"21163":"clung",
"21164":"clyde",
"21165":"cm",
"21166":"cn",
"21211":"co",
"21212":"coach",
"21213":"coal",
"21214":"coast",
"21215":"coat",
"21216":"coax",
"21221":"cobb",
"21222":"cobble",
"21223":"cobol",
"21224":"cobra",
"21225":"coca",
"21226":"cock",
"21231":"cockle",
"21232":"cocky",
"21233":"coco",
"21234":"cocoa",
"21235":"cod",
"21236":"coda",
"21241":"coddle",
"21242":"code",
"21243":"codon",
"21244":"cody",
"21245":"coed",
"21246":"cog",
"21251":"cogent",
"21252":"cohen",
"21253":"cohn",
"21254":"coil",
"21255":"coin",
"21256":"coke",
"21261":"col",
"21262":"cola",
"21263":"colby",
"21264":"cold",
"21265":"cole",
"21266":"colon",
"21311":"colony",
"21312":"colt",
"21313":"colza",
"21314":"coma",
"21315":"comb",
"21316":"combat",
"21321":"come",
"21322":"comet",
"21323":"cometh",
"21324":"comic",
"21325":"comma",
"21326":"con",
"21331":"conch",
"21332":"cone",
"21333":"coney",
"21334":"congo",
"21335":"conic",
"21336":"conn",
"21341":"conner",
"21342":"conway",
"21343":"cony",
"21344":"coo",
"21345":"cook",
"21346":"cooke",
"21351":"cooky",
"21352":"cool",
"21353":"cooley",
"21354":"coon",
"21355":"coop",
"21356":"coors",
"21361":"coot",
"21362":"cop",
"21363":"cope",
"21364":"copra",
"21365":"copy",
"21366":"coral",
"21411":"corbel",
"21412":"cord",
"21413":"core",
"21414":"corey",
"21415":"cork",
"21416":"corn",
"21421":"corny",
"21422":"corp",
"21423":"corps",
"21424":"corvus",
"21425":"cos",
"21426":"cosec",
"21431":"coset",
"21432":"cosh",
"21433":"cost",
"21434":"costa",
"21435":"cosy",
"21436":"cot",
"21441":"cotta",
"21442":"cotty",
"21443":"couch",
"21444":"cough",
"21445":"could",
"21446":"count",
"21451":"coup",
"21452":"coupe",
"21453":"court",
"21454":"cousin",
"21455":"cove",
"21456":"coven",
"21461":"cover",
"21462":"covet",
"21463":"cow",
"21464":"cowan",
"21465":"cowl",
"21466":"cowman",
"21511":"cowry",
"21512":"cox",
"21513":"coy",
"21514":"coyote",
"21515":"coypu",
"21516":"cozen",
"21521":"cozy",
"21522":"cp",
"21523":"cpa",
"21524":"cq",
"21525":"cr",
"21526":"crab",
"21531":"crack",
"21532":"craft",
"21533":"crag",
"21534":"craig",
"21535":"cram",
"21536":"cramp",
"21541":"crane",
"21542":"crank",
"21543":"crap",
"21544":"crash",
"21545":"crass",
"21546":"crate",
"21551":"crater",
"21552":"crave",
"21553":"craw",
"21554":"crawl",
"21555":"craze",
"21556":"crazy",
"21561":"creak",
"21562":"cream",
"21563":"credit",
"21564":"credo",
"21565":"creed",
"21566":"creek",
"21611":"creep",
"21612":"creole",
"21613":"creon",
"21614":"crepe",
"21615":"crept",
"21616":"cress",
"21621":"crest",
"21622":"crete",
"21623":"crew",
"21624":"crib",
"21625":"cried",
"21626":"crime",
"21631":"crimp",
"21632":"crisp",
"21633":"criss",
"21634":"croak",
"21635":"crock",
"21636":"crocus",
"21641":"croft",
"21642":"croix",
"21643":"crone",
"21644":"crony",
"21645":"crook",
"21646":"croon",
"21651":"crop",
"21652":"cross",
"21653":"crow",
"21654":"crowd",
"21655":"crown",
"21656":"crt",
"21661":"crud",
"21662":"crude",
"21663":"cruel",
"21664":"crumb",
"21665":"crump",
"21666":"crush",
"22111":"crust",
"22112":"crux",
"22113":"cruz",
"22114":"cry",
"22115":"crypt",
"22116":"cs",
"22121":"ct",
"22122":"cu",
"22123":"cub",
"22124":"cuba",
"22125":"cube",
"22126":"cubic",
"22131":"cud",
"22132":"cuddle",
"22133":"cue",
"22134":"cuff",
"22135":"cull",
"22136":"culpa",
"22141":"cult",
"22142":"cumin",
"22143":"cuny",
"22144":"cup",
"22145":"cupful",
"22146":"cupid",
"22151":"cur",
"22152":"curb",
"22153":"curd",
"22154":"cure",
"22155":"curfew",
"22156":"curia",
"22161":"curie",
"22162":"curio",
"22163":"curl",
"22164":"curry",
"22165":"curse",
"22166":"curt",
"22211":"curve",
"22212":"cusp",
"22213":"cut",
"22214":"cute",
"22215":"cutlet",
"22216":"cv",
"22221":"cw",
"22222":"cx",
"22223":"cy",
"22224":"cycad",
"22225":"cycle",
"22226":"cynic",
"22231":"cyril",
"22232":"cyrus",
"22233":"cyst",
"22234":"cz",
"22235":"czar",
"22236":"czech",
"22241":"d",
"22242":"d'art",
"22243":"d's",
"22244":"da",
"22245":"dab",
"22246":"dacca",
"22251":"dactyl",
"22252":"dad",
"22253":"dada",
"22254":"daddy",
"22255":"dade",
"22256":"daffy",
"22261":"dahl",
"22262":"dahlia",
"22263":"dairy",
"22264":"dais",
"22265":"daisy",
"22266":"dakar",
"22311":"dale",
"22312":"daley",
"22313":"dally",
"22314":"daly",
"22315":"dam",
"22316":"dame",
"22321":"damn",
"22322":"damon",
"22323":"damp",
"22324":"damsel",
"22325":"dan",
"22326":"dana",
"22331":"dance",
"22332":"dandy",
"22333":"dane",
"22334":"dang",
"22335":"dank",
"22336":"danny",
"22341":"dante",
"22342":"dar",
"22343":"dare",
"22344":"dark",
"22345":"darken",
"22346":"darn",
"22351":"darry",
"22352":"dart",
"22353":"dash",
"22354":"data",
"22355":"date",
"22356":"dater",
"22361":"datum",
"22362":"daub",
"22363":"daunt",
"22364":"dave",
"22365":"david",
"22366":"davis",
"22411":"davit",
"22412":"davy",
"22413":"dawn",
"22414":"dawson",
"22415":"day",
"22416":"daze",
"22421":"db",
"22422":"dc",
"22423":"dd",
"22424":"ddd",
"22425":"dddd",
"22426":"de",
"22431":"deacon",
"22432":"dead",
"22433":"deaf",
"22434":"deal",
"22435":"dealt",
"22436":"dean",
"22441":"deane",
"22442":"dear",
"22443":"death",
"22444":"debar",
"22445":"debby",
"22446":"debit",
"22451":"debra",
"22452":"debris",
"22453":"debt",
"22454":"debug",
"22455":"debut",
"22456":"dec",
"22461":"decal",
"22462":"decay",
"22463":"decca",
"22464":"deck",
"22465":"decker",
"22466":"decor",
"22511":"decree",
"22512":"decry",
"22513":"dee",
"22514":"deed",
"22515":"deem",
"22516":"deep",
"22521":"deer",
"22522":"deere",
"22523":"def",
"22524":"defer",
"22525":"deform",
"22526":"deft",
"22531":"defy",
"22532":"degas",
"22533":"degum",
"22534":"deify",
"22535":"deign",
"22536":"deity",
"22541":"deja",
"22542":"del",
"22543":"delay",
"22544":"delft",
"22545":"delhi",
"22546":"delia",
"22551":"dell",
"22552":"della",
"22553":"delta",
"22554":"delve",
"22555":"demark",
"22556":"demit",
"22561":"demon",
"22562":"demur",
"22563":"den",
"22564":"deneb",
"22565":"denial",
"22566":"denny",
"22611":"dense",
"22612":"dent",
"22613":"denton",
"22614":"deny",
"22615":"depot",
"22616":"depth",
"22621":"depute",
"22622":"derby",
"22623":"derek",
"22624":"des",
"22625":"desist",
"22626":"desk",
"22631":"detach",
"22632":"deter",
"22633":"deuce",
"22634":"deus",
"22635":"devil",
"22636":"devoid",
"22641":"devon",
"22642":"dew",
"22643":"dewar",
"22644":"dewey",
"22645":"dewy",
"22646":"dey",
"22651":"df",
"22652":"dg",
"22653":"dh",
"22654":"dhabi",
"22655":"di",
"22656":"dial",
"22661":"diana",
"22662":"diane",
"22663":"diary",
"22664":"dibble",
"22665":"dice",
"22666":"dick",
"23111":"dicta",
"23112":"did",
"23113":"dido",
"23114":"die",
"23115":"died",
"23116":"diego",
"23121":"diem",
"23122":"diesel",
"23123":"diet",
"23124":"diety",
"23125":"dietz",
"23126":"dig",
"23131":"digit",
"23132":"dilate",
"23133":"dill",
"23134":"dim",
"23135":"dime",
"23136":"din",
"23141":"dinah",
"23142":"dine",
"23143":"ding",
"23144":"dingo",
"23145":"dingy",
"23146":"dint",
"23151":"diode",
"23152":"dip",
"23153":"dirac",
"23154":"dire",
"23155":"dirge",
"23156":"dirt",
"23161":"dirty",
"23162":"dis",
"23163":"disc",
"23164":"dish",
"23165":"disk",
"23166":"disney",
"23211":"ditch",
"23212":"ditto",
"23213":"ditty",
"23214":"diva",
"23215":"divan",
"23216":"dive",
"23221":"dixie",
"23222":"dixon",
"23223":"dizzy",
"23224":"dj",
"23225":"dk",
"23226":"dl",
"23231":"dm",
"23232":"dn",
"23233":"dna",
"23234":"do",
"23235":"dobbs",
"23236":"dobson",
"23241":"dock",
"23242":"docket",
"23243":"dod",
"23244":"dodd",
"23245":"dodge",
"23246":"dodo",
"23251":"doe",
"23252":"doff",
"23253":"dog",
"23254":"doge",
"23255":"dogma",
"23256":"dolan",
"23261":"dolce",
"23262":"dole",
"23263":"doll",
"23264":"dolly",
"23265":"dolt",
"23266":"dome",
"23311":"don",
"23312":"don't",
"23313":"done",
"23314":"doneck",
"23315":"donna",
"23316":"donor",
"23321":"doom",
"23322":"door",
"23323":"dope",
"23324":"dora",
"23325":"doria",
"23326":"doric",
"23331":"doris",
"23332":"dose",
"23333":"dot",
"23334":"dote",
"23335":"double",
"23336":"doubt",
"23341":"douce",
"23342":"doug",
"23343":"dough",
"23344":"dour",
"23345":"douse",
"23346":"dove",
"23351":"dow",
"23352":"dowel",
"23353":"down",
"23354":"downs",
"23355":"dowry",
"23356":"doyle",
"23361":"doze",
"23362":"dozen",
"23363":"dp",
"23364":"dq",
"23365":"dr",
"23366":"drab",
"23411":"draco",
"23412":"draft",
"23413":"drag",
"23414":"drain",
"23415":"drake",
"23416":"dram",
"23421":"drama",
"23422":"drank",
"23423":"drape",
"23424":"draw",
"23425":"drawl",
"23426":"drawn",
"23431":"dread",
"23432":"dream",
"23433":"dreamy",
"23434":"dreg",
"23435":"dress",
"23436":"dressy",
"23441":"drew",
"23442":"drib",
"23443":"dried",
"23444":"drier",
"23445":"drift",
"23446":"drill",
"23451":"drink",
"23452":"drip",
"23453":"drive",
"23454":"droll",
"23455":"drone",
"23456":"drool",
"23461":"droop",
"23462":"drop",
"23463":"dross",
"23464":"drove",
"23465":"drown",
"23466":"drub",
"23511":"drug",
"23512":"druid",
"23513":"drum",
"23514":"drunk",
"23515":"drury",
"23516":"dry",
"23521":"dryad",
"23522":"ds",
"23523":"dt",
"23524":"du",
"23525":"dual",
"23526":"duane",
"23531":"dub",
"23532":"dubhe",
"23533":"dublin",
"23534":"ducat",
"23535":"duck",
"23536":"duct",
"23541":"dud",
"23542":"due",
"23543":"duel",
"23544":"duet",
"23545":"duff",
"23546":"duffy",
"23551":"dug",
"23552":"dugan",
"23553":"duke",
"23554":"dull",
"23555":"dully",
"23556":"dulse",
"23561":"duly",
"23562":"duma",
"23563":"dumb",
"23564":"dummy",
"23565":"dump",
"23566":"dumpy",
"23611":"dun",
"23612":"dunce",
"23613":"dune",
"23614":"dung",
"23615":"dunham",
"23616":"dunk",
"23621":"dunlop",
"23622":"dunn",
"23623":"dupe",
"23624":"durer",
"23625":"dusk",
"23626":"dusky",
"23631":"dust",
"23632":"dusty",
"23633":"dutch",
"23634":"duty",
"23635":"dv",
"23636":"dw",
"23641":"dwarf",
"23642":"dwell",
"23643":"dwelt",
"23644":"dwight",
"23645":"dwyer",
"23646":"dx",
"23651":"dy",
"23652":"dyad",
"23653":"dye",
"23654":"dyer",
"23655":"dying",
"23656":"dyke",
"23661":"dylan",
"23662":"dyne",
"23663":"dz",
"23664":"e",
"23665":"e'er",
"23666":"e's",
"24111":"ea",
"24112":"each",
"24113":"eagan",
"24114":"eager",
"24115":"eagle",
"24116":"ear",
"24121":"earl",
"24122":"earn",
"24123":"earth",
"24124":"ease",
"24125":"easel",
"24126":"east",
"24131":"easy",
"24132":"eat",
"24133":"eaten",
"24134":"eater",
"24135":"eaton",
"24136":"eave",
"24141":"eb",
"24142":"ebb",
"24143":"eben",
"24144":"ebony",
"24145":"ec",
"24146":"echo",
"24151":"eclat",
"24152":"ecole",
"24153":"ed",
"24154":"eddie",
"24155":"eddy",
"24156":"eden",
"24161":"edgar",
"24162":"edge",
"24163":"edgy",
"24164":"edict",
"24165":"edify",
"24166":"edit",
"24211":"edith",
"24212":"editor",
"24213":"edna",
"24214":"edt",
"24215":"edwin",
"24216":"ee",
"24221":"eee",
"24222":"eeee",
"24223":"eel",
"24224":"eeoc",
"24225":"eerie",
"24226":"ef",
"24231":"efface",
"24232":"effie",
"24233":"efg",
"24234":"eft",
"24235":"eg",
"24236":"egan",
"24241":"egg",
"24242":"ego",
"24243":"egress",
"24244":"egret",
"24245":"egypt",
"24246":"eh",
"24251":"ei",
"24252":"eider",
"24253":"eight",
"24254":"eire",
"24255":"ej",
"24256":"eject",
"24261":"ek",
"24262":"eke",
"24263":"el",
"24264":"elan",
"24265":"elate",
"24266":"elba",
"24311":"elbow",
"24312":"elder",
"24313":"eldon",
"24314":"elect",
"24315":"elegy",
"24316":"elena",
"24321":"eleven",
"24322":"elfin",
"24323":"elgin",
"24324":"eli",
"24325":"elide",
"24326":"eliot",
"24331":"elite",
"24332":"elk",
"24333":"ell",
"24334":"ella",
"24335":"ellen",
"24336":"ellis",
"24341":"elm",
"24342":"elmer",
"24343":"elope",
"24344":"else",
"24345":"elsie",
"24346":"elton",
"24351":"elude",
"24352":"elute",
"24353":"elves",
"24354":"ely",
"24355":"em",
"24356":"embalm",
"24361":"embark",
"24362":"embed",
"24363":"ember",
"24364":"emcee",
"24365":"emery",
"24366":"emil",
"24411":"emile",
"24412":"emily",
"24413":"emit",
"24414":"emma",
"24415":"emory",
"24416":"empty",
"24421":"en",
"24422":"enact",
"24423":"enamel",
"24424":"end",
"24425":"endow",
"24426":"enemy",
"24431":"eng",
"24432":"engel",
"24433":"engle",
"24434":"engulf",
"24435":"enid",
"24436":"enjoy",
"24441":"enmity",
"24442":"enoch",
"24443":"enol",
"24444":"enos",
"24445":"enrico",
"24446":"ensue",
"24451":"enter",
"24452":"entrap",
"24453":"entry",
"24454":"envoy",
"24455":"envy",
"24456":"eo",
"24461":"ep",
"24462":"epa",
"24463":"epic",
"24464":"epoch",
"24465":"epoxy",
"24466":"epsom",
"24511":"eq",
"24512":"equal",
"24513":"equip",
"24514":"er",
"24515":"era",
"24516":"erase",
"24521":"erato",
"24522":"erda",
"24523":"ere",
"24524":"erect",
"24525":"erg",
"24526":"eric",
"24531":"erich",
"24532":"erie",
"24533":"erik",
"24534":"ernest",
"24535":"ernie",
"24536":"ernst",
"24541":"erode",
"24542":"eros",
"24543":"err",
"24544":"errand",
"24545":"errol",
"24546":"error",
"24551":"erupt",
"24552":"ervin",
"24553":"erwin",
"24554":"es",
"24555":"essay",
"24556":"essen",
"24561":"essex",
"24562":"est",
"24563":"ester",
"24564":"estes",
"24565":"estop",
"24566":"et",
"24611":"eta",
"24612":"etc",
"24613":"etch",
"24614":"ethan",
"24615":"ethel",
"24616":"ether",
"24621":"ethic",
"24622":"ethos",
"24623":"ethyl",
"24624":"etude",
"24625":"eu",
"24626":"eucre",
"24631":"euler",
"24632":"eureka",
"24633":"ev",
"24634":"eva",
"24635":"evade",
"24636":"evans",
"24641":"eve",
"24642":"even",
"24643":"event",
"24644":"every",
"24645":"evict",
"24646":"evil",
"24651":"evoke",
"24652":"evolve",
"24653":"ew",
"24654":"ewe",
"24655":"ewing",
"24656":"ex",
"24661":"exact",
"24662":"exalt",
"24663":"exam",
"24664":"excel",
"24665":"excess",
"24666":"exert",
"25111":"exile",
"25112":"exist",
"25113":"exit",
"25114":"exodus",
"25115":"expel",
"25116":"extant",
"25121":"extent",
"25122":"extol",
"25123":"extra",
"25124":"exude",
"25125":"exult",
"25126":"exxon",
"25131":"ey",
"25132":"eye",
"25133":"eyed",
"25134":"ez",
"25135":"ezra",
"25136":"f",
"25141":"f's",
"25142":"fa",
"25143":"faa",
"25144":"faber",
"25145":"fable",
"25146":"face",
"25151":"facet",
"25152":"facile",
"25153":"fact",
"25154":"facto",
"25155":"fad",
"25156":"fade",
"25161":"faery",
"25162":"fag",
"25163":"fahey",
"25164":"fail",
"25165":"fain",
"25166":"faint",
"25211":"fair",
"25212":"fairy",
"25213":"faith",
"25214":"fake",
"25215":"fall",
"25216":"false",
"25221":"fame",
"25222":"fan",
"25223":"fancy",
"25224":"fang",
"25225":"fanny",
"25226":"fanout",
"25231":"far",
"25232":"farad",
"25233":"farce",
"25234":"fare",
"25235":"fargo",
"25236":"farley",
"25241":"farm",
"25242":"faro",
"25243":"fast",
"25244":"fat",
"25245":"fatal",
"25246":"fate",
"25251":"fatty",
"25252":"fault",
"25253":"faun",
"25254":"fauna",
"25255":"faust",
"25256":"fawn",
"25261":"fay",
"25262":"faze",
"25263":"fb",
"25264":"fbi",
"25265":"fc",
"25266":"fcc",
"25311":"fd",
"25312":"fda",
"25313":"fe",
"25314":"fear",
"25315":"feast",
"25316":"feat",
"25321":"feb",
"25322":"fed",
"25323":"fee",
"25324":"feed",
"25325":"feel",
"25326":"feet",
"25331":"feign",
"25332":"feint",
"25333":"felice",
"25334":"felix",
"25335":"fell",
"25336":"felon",
"25341":"felt",
"25342":"femur",
"25343":"fence",
"25344":"fend",
"25345":"fermi",
"25346":"fern",
"25351":"ferric",
"25352":"ferry",
"25353":"fest",
"25354":"fetal",
"25355":"fetch",
"25356":"fete",
"25361":"fetid",
"25362":"fetus",
"25363":"feud",
"25364":"fever",
"25365":"few",
"25366":"ff",
"25411":"fff",
"25412":"ffff",
"25413":"fg",
"25414":"fgh",
"25415":"fh",
"25416":"fi",
"25421":"fiat",
"25422":"fib",
"25423":"fibrin",
"25424":"fiche",
"25425":"fide",
"25426":"fief",
"25431":"field",
"25432":"fiend",
"25433":"fiery",
"25434":"fife",
"25435":"fifo",
"25436":"fifth",
"25441":"fifty",
"25442":"fig",
"25443":"fight",
"25444":"filch",
"25445":"file",
"25446":"filet",
"25451":"fill",
"25452":"filler",
"25453":"filly",
"25454":"film",
"25455":"filmy",
"25456":"filth",
"25461":"fin",
"25462":"final",
"25463":"finale",
"25464":"finch",
"25465":"find",
"25466":"fine",
"25511":"finite",
"25512":"fink",
"25513":"finn",
"25514":"finny",
"25515":"fir",
"25516":"fire",
"25521":"firm",
"25522":"first",
"25523":"fish",
"25524":"fishy",
"25525":"fisk",
"25526":"fiske",
"25531":"fist",
"25532":"fit",
"25533":"fitch",
"25534":"five",
"25535":"fix",
"25536":"fj",
"25541":"fjord",
"25542":"fk",
"25543":"fl",
"25544":"flack",
"25545":"flag",
"25546":"flail",
"25551":"flair",
"25552":"flak",
"25553":"flake",
"25554":"flaky",
"25555":"flam",
"25556":"flame",
"25561":"flank",
"25562":"flap",
"25563":"flare",
"25564":"flash",
"25565":"flask",
"25566":"flat",
"25611":"flatus",
"25612":"flaw",
"25613":"flax",
"25614":"flea",
"25615":"fleck",
"25616":"fled",
"25621":"flee",
"25622":"fleet",
"25623":"flesh",
"25624":"flew",
"25625":"flex",
"25626":"flick",
"25631":"flier",
"25632":"flinch",
"25633":"fling",
"25634":"flint",
"25635":"flip",
"25636":"flirt",
"25641":"flit",
"25642":"flo",
"25643":"float",
"25644":"floc",
"25645":"flock",
"25646":"floe",
"25651":"flog",
"25652":"flood",
"25653":"floor",
"25654":"flop",
"25655":"floppy",
"25656":"flora",
"25661":"flour",
"25662":"flout",
"25663":"flow",
"25664":"flown",
"25665":"floyd",
"25666":"flu",
"26111":"flub",
"26112":"flue",
"26113":"fluff",
"26114":"fluid",
"26115":"fluke",
"26116":"flung",
"26121":"flush",
"26122":"flute",
"26123":"flux",
"26124":"fly",
"26125":"flyer",
"26126":"flynn",
"26131":"fm",
"26132":"fmc",
"26133":"fn",
"26134":"fo",
"26135":"foal",
"26136":"foam",
"26141":"foamy",
"26142":"fob",
"26143":"focal",
"26144":"foci",
"26145":"focus",
"26146":"fodder",
"26151":"foe",
"26152":"fog",
"26153":"foggy",
"26154":"fogy",
"26155":"foil",
"26156":"foist",
"26161":"fold",
"26162":"foley",
"26163":"folio",
"26164":"folk",
"26165":"folly",
"26166":"fond",
"26211":"font",
"26212":"food",
"26213":"fool",
"26214":"foot",
"26215":"foote",
"26216":"fop",
"26221":"for",
"26222":"foray",
"26223":"force",
"26224":"ford",
"26225":"fore",
"26226":"forge",
"26231":"forgot",
"26232":"fork",
"26233":"form",
"26234":"fort",
"26235":"forte",
"26236":"forth",
"26241":"forty",
"26242":"forum",
"26243":"foss",
"26244":"fossil",
"26245":"foul",
"26246":"found",
"26251":"fount",
"26252":"four",
"26253":"fovea",
"26254":"fowl",
"26255":"fox",
"26256":"foxy",
"26261":"foyer",
"26262":"fp",
"26263":"fpc",
"26264":"fq",
"26265":"fr",
"26266":"frail",
"26311":"frame",
"26312":"fran",
"26313":"franc",
"26314":"franca",
"26315":"frank",
"26316":"franz",
"26321":"frau",
"26322":"fraud",
"26323":"fray",
"26324":"freak",
"26325":"fred",
"26326":"free",
"26331":"freed",
"26332":"freer",
"26333":"frenzy",
"26334":"freon",
"26335":"fresh",
"26336":"fret",
"26341":"freud",
"26342":"frey",
"26343":"freya",
"26344":"friar",
"26345":"frick",
"26346":"fried",
"26351":"frill",
"26352":"frilly",
"26353":"frisky",
"26354":"fritz",
"26355":"fro",
"26356":"frock",
"26361":"frog",
"26362":"from",
"26363":"front",
"26364":"frost",
"26365":"froth",
"26366":"frown",
"26411":"froze",
"26412":"fruit",
"26413":"fry",
"26414":"frye",
"26415":"fs",
"26416":"ft",
"26421":"ftc",
"26422":"fu",
"26423":"fuchs",
"26424":"fudge",
"26425":"fuel",
"26426":"fugal",
"26431":"fugue",
"26432":"fuji",
"26433":"full",
"26434":"fully",
"26435":"fum",
"26436":"fume",
"26441":"fun",
"26442":"fund",
"26443":"fungal",
"26444":"fungi",
"26445":"funk",
"26446":"funny",
"26451":"fur",
"26452":"furl",
"26453":"furry",
"26454":"fury",
"26455":"furze",
"26456":"fuse",
"26461":"fuss",
"26462":"fussy",
"26463":"fusty",
"26464":"fuzz",
"26465":"fuzzy",
"26466":"fv",
"26511":"fw",
"26512":"fx",
"26513":"fy",
"26514":"fz",
"26515":"g",
"26516":"g's",
"26521":"ga",
"26522":"gab",
"26523":"gable",
"26524":"gabon",
"26525":"gad",
"26526":"gadget",
"26531":"gaff",
"26532":"gaffe",
"26533":"gag",
"26534":"gage",
"26535":"gail",
"26536":"gain",
"26541":"gait",
"26542":"gal",
"26543":"gala",
"26544":"galaxy",
"26545":"gale",
"26546":"galen",
"26551":"gall",
"26552":"gallop",
"26553":"galt",
"26554":"gam",
"26555":"game",
"26556":"gamin",
"26561":"gamma",
"26562":"gamut",
"26563":"gander",
"26564":"gang",
"26565":"gao",
"26566":"gap",
"26611":"gape",
"26612":"gar",
"26613":"garb",
"26614":"garish",
"26615":"garner",
"26616":"garry",
"26621":"garth",
"26622":"gary",
"26623":"gas",
"26624":"gash",
"26625":"gasp",
"26626":"gassy",
"26631":"gate",
"26632":"gates",
"26633":"gator",
"26634":"gauche",
"26635":"gaudy",
"26636":"gauge",
"26641":"gaul",
"26642":"gaunt",
"26643":"gaur",
"26644":"gauss",
"26645":"gauze",
"26646":"gave",
"26651":"gavel",
"26652":"gavin",
"26653":"gawk",
"26654":"gawky",
"26655":"gay",
"26656":"gaze",
"26661":"gb",
"26662":"gc",
"26663":"gd",
"26664":"ge",
"26665":"gear",
"26666":"gecko",
"31111":"gee",
"31112":"geese",
"31113":"geigy",
"31114":"gel",
"31115":"geld",
"31116":"gem",
"31121":"gemma",
"31122":"gene",
"31123":"genie",
"31124":"genii",
"31125":"genoa",
"31126":"genre",
"31131":"gent",
"31132":"gentry",
"31133":"genus",
"31134":"gerbil",
"31135":"germ",
"31136":"gerry",
"31141":"get",
"31142":"getty",
"31143":"gf",
"31144":"gg",
"31145":"ggg",
"31146":"gggg",
"31151":"gh",
"31152":"ghana",
"31153":"ghent",
"31154":"ghetto",
"31155":"ghi",
"31156":"ghost",
"31161":"ghoul",
"31162":"gi",
"31163":"giant",
"31164":"gibbs",
"31165":"gibby",
"31166":"gibe",
"31211":"giddy",
"31212":"gift",
"31213":"gig",
"31214":"gil",
"31215":"gila",
"31216":"gild",
"31221":"giles",
"31222":"gill",
"31223":"gilt",
"31224":"gimbal",
"31225":"gimpy",
"31226":"gin",
"31231":"gina",
"31232":"ginn",
"31233":"gino",
"31234":"gird",
"31235":"girl",
"31236":"girth",
"31241":"gist",
"31242":"give",
"31243":"given",
"31244":"gj",
"31245":"gk",
"31246":"gl",
"31251":"glad",
"31252":"gladdy",
"31253":"glade",
"31254":"glamor",
"31255":"gland",
"31256":"glans",
"31261":"glare",
"31262":"glass",
"31263":"glaze",
"31264":"gleam",
"31265":"glean",
"31266":"glee",
"31311":"glen",
"31312":"glenn",
"31313":"glib",
"31314":"glide",
"31315":"glint",
"31316":"gloat",
"31321":"glob",
"31322":"globe",
"31323":"glom",
"31324":"gloom",
"31325":"glory",
"31326":"gloss",
"31331":"glove",
"31332":"glow",
"31333":"glue",
"31334":"glued",
"31335":"gluey",
"31336":"gluing",
"31341":"glum",
"31342":"glut",
"31343":"glyph",
"31344":"gm",
"31345":"gmt",
"31346":"gn",
"31351":"gnarl",
"31352":"gnash",
"31353":"gnat",
"31354":"gnaw",
"31355":"gnome",
"31356":"gnp",
"31361":"gnu",
"31362":"go",
"31363":"goa",
"31364":"goad",
"31365":"goal",
"31366":"goat",
"31411":"gob",
"31412":"goer",
"31413":"goes",
"31414":"goff",
"31415":"gog",
"31416":"goggle",
"31421":"gogh",
"31422":"gogo",
"31423":"gold",
"31424":"golf",
"31425":"golly",
"31426":"gone",
"31431":"gong",
"31432":"goo",
"31433":"good",
"31434":"goode",
"31435":"goody",
"31436":"goof",
"31441":"goofy",
"31442":"goose",
"31443":"gop",
"31444":"gordon",
"31445":"gore",
"31446":"goren",
"31451":"gorge",
"31452":"gorky",
"31453":"gorse",
"31454":"gory",
"31455":"gosh",
"31456":"gospel",
"31461":"got",
"31462":"gouda",
"31463":"gouge",
"31464":"gould",
"31465":"gourd",
"31466":"gout",
"31511":"gown",
"31512":"gp",
"31513":"gpo",
"31514":"gq",
"31515":"gr",
"31516":"grab",
"31521":"grace",
"31522":"grad",
"31523":"grade",
"31524":"grady",
"31525":"graff",
"31526":"graft",
"31531":"grail",
"31532":"grain",
"31533":"grand",
"31534":"grant",
"31535":"grape",
"31536":"graph",
"31541":"grasp",
"31542":"grass",
"31543":"grata",
"31544":"grate",
"31545":"grater",
"31546":"grave",
"31551":"gravy",
"31552":"gray",
"31553":"graze",
"31554":"great",
"31555":"grebe",
"31556":"greed",
"31561":"greedy",
"31562":"greek",
"31563":"green",
"31564":"greer",
"31565":"greet",
"31566":"greg",
"31611":"gregg",
"31612":"greta",
"31613":"grew",
"31614":"grey",
"31615":"grid",
"31616":"grief",
"31621":"grieve",
"31622":"grill",
"31623":"grim",
"31624":"grime",
"31625":"grimm",
"31626":"grin",
"31631":"grind",
"31632":"grip",
"31633":"gripe",
"31634":"grist",
"31635":"grit",
"31636":"groan",
"31641":"groat",
"31642":"groin",
"31643":"groom",
"31644":"grope",
"31645":"gross",
"31646":"groton",
"31651":"group",
"31652":"grout",
"31653":"grove",
"31654":"grow",
"31655":"growl",
"31656":"grown",
"31661":"grub",
"31662":"gruff",
"31663":"grunt",
"31664":"gs",
"31665":"gsa",
"31666":"gt",
"32111":"gu",
"32112":"guam",
"32113":"guano",
"32114":"guard",
"32115":"guess",
"32116":"guest",
"32121":"guide",
"32122":"guild",
"32123":"guile",
"32124":"guilt",
"32125":"guise",
"32126":"guitar",
"32131":"gules",
"32132":"gulf",
"32133":"gull",
"32134":"gully",
"32135":"gulp",
"32136":"gum",
"32141":"gumbo",
"32142":"gummy",
"32143":"gun",
"32144":"gunk",
"32145":"gunky",
"32146":"gunny",
"32151":"gurgle",
"32152":"guru",
"32153":"gus",
"32154":"gush",
"32155":"gust",
"32156":"gusto",
"32161":"gusty",
"32162":"gut",
"32163":"gutsy",
"32164":"guy",
"32165":"guyana",
"32166":"gv",
"32211":"gw",
"32212":"gwen",
"32213":"gwyn",
"32214":"gx",
"32215":"gy",
"32216":"gym",
"32221":"gyp",
"32222":"gypsy",
"32223":"gyro",
"32224":"gz",
"32225":"h",
"32226":"h's",
"32231":"ha",
"32232":"haag",
"32233":"haas",
"32234":"habib",
"32235":"habit",
"32236":"hack",
"32241":"had",
"32242":"hades",
"32243":"hadron",
"32244":"hagen",
"32245":"hager",
"32246":"hague",
"32251":"hahn",
"32252":"haifa",
"32253":"haiku",
"32254":"hail",
"32255":"hair",
"32256":"hairy",
"32261":"haiti",
"32262":"hal",
"32263":"hale",
"32264":"haley",
"32265":"half",
"32266":"hall",
"32311":"halma",
"32312":"halo",
"32313":"halt",
"32314":"halvah",
"32315":"halve",
"32316":"ham",
"32321":"hamal",
"32322":"hamlin",
"32323":"han",
"32324":"hand",
"32325":"handy",
"32326":"haney",
"32331":"hang",
"32332":"hank",
"32333":"hanna",
"32334":"hanoi",
"32335":"hans",
"32336":"hansel",
"32341":"hap",
"32342":"happy",
"32343":"hard",
"32344":"hardy",
"32345":"hare",
"32346":"harem",
"32351":"hark",
"32352":"harley",
"32353":"harm",
"32354":"harp",
"32355":"harpy",
"32356":"harry",
"32361":"harsh",
"32362":"hart",
"32363":"harvey",
"32364":"hash",
"32365":"hasp",
"32366":"hast",
"32411":"haste",
"32412":"hasty",
"32413":"hat",
"32414":"hatch",
"32415":"hate",
"32416":"hater",
"32421":"hath",
"32422":"hatred",
"32423":"haul",
"32424":"haunt",
"32425":"have",
"32426":"haven",
"32431":"havoc",
"32432":"haw",
"32433":"hawk",
"32434":"hay",
"32435":"haydn",
"32436":"hayes",
"32441":"hays",
"32442":"hazard",
"32443":"haze",
"32444":"hazel",
"32445":"hazy",
"32446":"hb",
"32451":"hc",
"32452":"hd",
"32453":"he",
"32454":"he'd",
"32455":"he'll",
"32456":"head",
"32461":"heady",
"32462":"heal",
"32463":"healy",
"32464":"heap",
"32465":"hear",
"32466":"heard",
"32511":"heart",
"32512":"heat",
"32513":"heath",
"32514":"heave",
"32515":"heavy",
"32516":"hebe",
"32521":"hebrew",
"32522":"heck",
"32523":"heckle",
"32524":"hedge",
"32525":"heed",
"32526":"heel",
"32531":"heft",
"32532":"hefty",
"32533":"heigh",
"32534":"heine",
"32535":"heinz",
"32536":"heir",
"32541":"held",
"32542":"helen",
"32543":"helga",
"32544":"helix",
"32545":"hell",
"32546":"hello",
"32551":"helm",
"32552":"helmut",
"32553":"help",
"32554":"hem",
"32555":"hemp",
"32556":"hen",
"32561":"hence",
"32562":"henri",
"32563":"henry",
"32564":"her",
"32565":"hera",
"32566":"herb",
"32611":"herd",
"32612":"here",
"32613":"hero",
"32614":"heroic",
"32615":"heron",
"32616":"herr",
"32621":"hertz",
"32622":"hess",
"32623":"hesse",
"32624":"hettie",
"32625":"hetty",
"32626":"hew",
"32631":"hewitt",
"32632":"hewn",
"32633":"hex",
"32634":"hey",
"32635":"hf",
"32636":"hg",
"32641":"hh",
"32642":"hhh",
"32643":"hhhh",
"32644":"hi",
"32645":"hiatt",
"32646":"hick",
"32651":"hicks",
"32652":"hid",
"32653":"hide",
"32654":"high",
"32655":"hij",
"32656":"hike",
"32661":"hill",
"32662":"hilly",
"32663":"hilt",
"32664":"hilum",
"32665":"him",
"32666":"hind",
"33111":"hindu",
"33112":"hines",
"33113":"hinge",
"33114":"hint",
"33115":"hip",
"33116":"hippo",
"33121":"hippy",
"33122":"hiram",
"33123":"hire",
"33124":"hirsch",
"33125":"his",
"33126":"hiss",
"33131":"hit",
"33132":"hitch",
"33133":"hive",
"33134":"hj",
"33135":"hk",
"33136":"hl",
"33141":"hm",
"33142":"hn",
"33143":"ho",
"33144":"hoagy",
"33145":"hoar",
"33146":"hoard",
"33151":"hob",
"33152":"hobbs",
"33153":"hobby",
"33154":"hobo",
"33155":"hoc",
"33156":"hock",
"33161":"hodge",
"33162":"hodges",
"33163":"hoe",
"33164":"hoff",
"33165":"hog",
"33166":"hogan",
"33211":"hoi",
"33212":"hokan",
"33213":"hold",
"33214":"holdup",
"33215":"hole",
"33216":"holly",
"33221":"holm",
"33222":"holst",
"33223":"holt",
"33224":"home",
"33225":"homo",
"33226":"honda",
"33231":"hondo",
"33232":"hone",
"33233":"honey",
"33234":"hong",
"33235":"honk",
"33236":"hooch",
"33241":"hood",
"33242":"hoof",
"33243":"hook",
"33244":"hookup",
"33245":"hoop",
"33246":"hoot",
"33251":"hop",
"33252":"hope",
"33253":"horde",
"33254":"horn",
"33255":"horny",
"33256":"horse",
"33261":"horus",
"33262":"hose",
"33263":"host",
"33264":"hot",
"33265":"hotbox",
"33266":"hotel",
"33311":"hough",
"33312":"hound",
"33313":"hour",
"33314":"house",
"33315":"hove",
"33316":"hovel",
"33321":"hover",
"33322":"how",
"33323":"howdy",
"33324":"howe",
"33325":"howl",
"33326":"hoy",
"33331":"hoyt",
"33332":"hp",
"33333":"hq",
"33334":"hr",
"33335":"hs",
"33336":"ht",
"33341":"hu",
"33342":"hub",
"33343":"hubbub",
"33344":"hubby",
"33345":"huber",
"33346":"huck",
"33351":"hue",
"33352":"hued",
"33353":"huff",
"33354":"hug",
"33355":"huge",
"33356":"hugh",
"33361":"hughes",
"33362":"hugo",
"33363":"huh",
"33364":"hulk",
"33365":"hull",
"33366":"hum",
"33411":"human",
"33412":"humid",
"33413":"hump",
"33414":"humus",
"33415":"hun",
"33416":"hunch",
"33421":"hung",
"33422":"hunk",
"33423":"hunt",
"33424":"hurd",
"33425":"hurl",
"33426":"huron",
"33431":"hurrah",
"33432":"hurry",
"33433":"hurst",
"33434":"hurt",
"33435":"hurty",
"33436":"hush",
"33441":"husky",
"33442":"hut",
"33443":"hutch",
"33444":"hv",
"33445":"hw",
"33446":"hx",
"33451":"hy",
"33452":"hyde",
"33453":"hydra",
"33454":"hydro",
"33455":"hyena",
"33456":"hying",
"33461":"hyman",
"33462":"hymen",
"33463":"hymn",
"33464":"hymnal",
"33465":"hz",
"33466":"i",
"33511":"i'd",
"33512":"i'll",
"33513":"i'm",
"33514":"i's",
"33515":"i've",
"33516":"ia",
"33521":"iambic",
"33522":"ian",
"33523":"ib",
"33524":"ibex",
"33525":"ibid",
"33526":"ibis",
"33531":"ibm",
"33532":"ibn",
"33533":"ic",
"33534":"icc",
"33535":"ice",
"33536":"icing",
"33541":"icky",
"33542":"icon",
"33543":"icy",
"33544":"id",
"33545":"ida",
"33546":"idaho",
"33551":"idea",
"33552":"ideal",
"33553":"idiom",
"33554":"idiot",
"33555":"idle",
"33556":"idol",
"33561":"idyll",
"33562":"ie",
"33563":"ieee",
"33564":"if",
"33565":"iffy",
"33566":"ifni",
"33611":"ig",
"33612":"igloo",
"33613":"igor",
"33614":"ih",
"33615":"ii",
"33616":"iii",
"33621":"iiii",
"33622":"ij",
"33623":"ijk",
"33624":"ik",
"33625":"ike",
"33626":"il",
"33631":"ileum",
"33632":"iliac",
"33633":"iliad",
"33634":"ill",
"33635":"illume",
"33636":"ilona",
"33641":"im",
"33642":"image",
"33643":"imbue",
"33644":"imp",
"33645":"impel",
"33646":"import",
"33651":"impute",
"33652":"in",
"33653":"inane",
"33654":"inapt",
"33655":"inc",
"33656":"inca",
"33661":"incest",
"33662":"inch",
"33663":"incur",
"33664":"index",
"33665":"india",
"33666":"indies",
"34111":"indy",
"34112":"inept",
"34113":"inert",
"34114":"infect",
"34115":"infer",
"34116":"infima",
"34121":"infix",
"34122":"infra",
"34123":"ingot",
"34124":"inhere",
"34125":"injun",
"34126":"ink",
"34131":"inlay",
"34132":"inlet",
"34133":"inman",
"34134":"inn",
"34135":"inner",
"34136":"input",
"34141":"insect",
"34142":"inset",
"34143":"insult",
"34144":"intend",
"34145":"inter",
"34146":"into",
"34151":"inure",
"34152":"invoke",
"34153":"io",
"34154":"ion",
"34155":"ionic",
"34156":"iota",
"34161":"iowa",
"34162":"ip",
"34163":"ipso",
"34164":"iq",
"34165":"ir",
"34166":"ira",
"34211":"iran",
"34212":"iraq",
"34213":"irate",
"34214":"ire",
"34215":"irene",
"34216":"iris",
"34221":"irish",
"34222":"irk",
"34223":"irma",
"34224":"iron",
"34225":"irony",
"34226":"irs",
"34231":"irvin",
"34232":"irwin",
"34233":"is",
"34234":"isaac",
"34235":"isabel",
"34236":"ising",
"34241":"isis",
"34242":"islam",
"34243":"island",
"34244":"isle",
"34245":"isn't",
"34246":"israel",
"34251":"issue",
"34252":"it",
"34253":"it&t",
"34254":"it'd",
"34255":"it'll",
"34256":"italy",
"34261":"itch",
"34262":"item",
"34263":"ito",
"34264":"itt",
"34265":"iu",
"34266":"iv",
"34311":"ivan",
"34312":"ive",
"34313":"ivory",
"34314":"ivy",
"34315":"iw",
"34316":"ix",
"34321":"iy",
"34322":"iz",
"34323":"j",
"34324":"j's",
"34325":"ja",
"34326":"jab",
"34331":"jack",
"34332":"jacky",
"34333":"jacm",
"34334":"jacob",
"34335":"jacobi",
"34336":"jade",
"34341":"jag",
"34342":"jail",
"34343":"jaime",
"34344":"jake",
"34345":"jam",
"34346":"james",
"34351":"jan",
"34352":"jane",
"34353":"janet",
"34354":"janos",
"34355":"janus",
"34356":"japan",
"34361":"jar",
"34362":"jason",
"34363":"java",
"34364":"jaw",
"34365":"jay",
"34366":"jazz",
"34411":"jazzy",
"34412":"jb",
"34413":"jc",
"34414":"jd",
"34415":"je",
"34416":"jean",
"34421":"jed",
"34422":"jeep",
"34423":"jeff",
"34424":"jejune",
"34425":"jelly",
"34426":"jenny",
"34431":"jeres",
"34432":"jerk",
"34433":"jerky",
"34434":"jerry",
"34435":"jersey",
"34436":"jess",
"34441":"jesse",
"34442":"jest",
"34443":"jesus",
"34444":"jet",
"34445":"jew",
"34446":"jewel",
"34451":"jewett",
"34452":"jewish",
"34453":"jf",
"34454":"jg",
"34455":"jh",
"34456":"ji",
"34461":"jibe",
"34462":"jiffy",
"34463":"jig",
"34464":"jill",
"34465":"jilt",
"34466":"jim",
"34511":"jimmy",
"34512":"jinx",
"34513":"jive",
"34514":"jj",
"34515":"jjj",
"34516":"jjjj",
"34521":"jk",
"34522":"jkl",
"34523":"jl",
"34524":"jm",
"34525":"jn",
"34526":"jo",
"34531":"joan",
"34532":"job",
"34533":"jock",
"34534":"jockey",
"34535":"joe",
"34536":"joel",
"34541":"joey",
"34542":"jog",
"34543":"john",
"34544":"johns",
"34545":"join",
"34546":"joint",
"34551":"joke",
"34552":"jolla",
"34553":"jolly",
"34554":"jolt",
"34555":"jon",
"34556":"jonas",
"34561":"jones",
"34562":"jorge",
"34563":"jose",
"34564":"josef",
"34565":"joshua",
"34566":"joss",
"34611":"jostle",
"34612":"jot",
"34613":"joule",
"34614":"joust",
"34615":"jove",
"34616":"jowl",
"34621":"jowly",
"34622":"joy",
"34623":"joyce",
"34624":"jp",
"34625":"jq",
"34626":"jr",
"34631":"js",
"34632":"jt",
"34633":"ju",
"34634":"juan",
"34635":"judas",
"34636":"judd",
"34641":"jude",
"34642":"judge",
"34643":"judo",
"34644":"judy",
"34645":"jug",
"34646":"juggle",
"34651":"juice",
"34652":"juicy",
"34653":"juju",
"34654":"juke",
"34655":"jukes",
"34656":"julep",
"34661":"jules",
"34662":"julia",
"34663":"julie",
"34664":"julio",
"34665":"july",
"34666":"jumbo",
"35111":"jump",
"35112":"jumpy",
"35113":"junco",
"35114":"june",
"35115":"junk",
"35116":"junky",
"35121":"juno",
"35122":"junta",
"35123":"jura",
"35124":"jure",
"35125":"juror",
"35126":"jury",
"35131":"just",
"35132":"jut",
"35133":"jute",
"35134":"jv",
"35135":"jw",
"35136":"jx",
"35141":"jy",
"35142":"jz",
"35143":"k",
"35144":"k's",
"35145":"ka",
"35146":"kabul",
"35151":"kafka",
"35152":"kahn",
"35153":"kajar",
"35154":"kale",
"35155":"kalmia",
"35156":"kane",
"35161":"kant",
"35162":"kapok",
"35163":"kappa",
"35164":"karate",
"35165":"karen",
"35166":"karl",
"35211":"karma",
"35212":"karol",
"35213":"karp",
"35214":"kate",
"35215":"kathy",
"35216":"katie",
"35221":"katz",
"35222":"kava",
"35223":"kay",
"35224":"kayo",
"35225":"kazoo",
"35226":"kb",
"35231":"kc",
"35232":"kd",
"35233":"ke",
"35234":"keats",
"35235":"keel",
"35236":"keen",
"35241":"keep",
"35242":"keg",
"35243":"keith",
"35244":"keller",
"35245":"kelly",
"35246":"kelp",
"35251":"kemp",
"35252":"ken",
"35253":"keno",
"35254":"kent",
"35255":"kenya",
"35256":"kepler",
"35261":"kept",
"35262":"kern",
"35263":"kerr",
"35264":"kerry",
"35265":"ketch",
"35266":"kevin",
"35311":"key",
"35312":"keyed",
"35313":"keyes",
"35314":"keys",
"35315":"kf",
"35316":"kg",
"35321":"kh",
"35322":"khaki",
"35323":"khan",
"35324":"khmer",
"35325":"ki",
"35326":"kick",
"35331":"kid",
"35332":"kidde",
"35333":"kidney",
"35334":"kiev",
"35335":"kigali",
"35336":"kill",
"35341":"kim",
"35342":"kin",
"35343":"kind",
"35344":"king",
"35345":"kink",
"35346":"kinky",
"35351":"kiosk",
"35352":"kiowa",
"35353":"kirby",
"35354":"kirk",
"35355":"kirov",
"35356":"kiss",
"35361":"kit",
"35362":"kite",
"35363":"kitty",
"35364":"kiva",
"35365":"kivu",
"35366":"kiwi",
"35411":"kj",
"35412":"kk",
"35413":"kkk",
"35414":"kkkk",
"35415":"kl",
"35416":"klan",
"35421":"klaus",
"35422":"klein",
"35423":"kline",
"35424":"klm",
"35425":"klux",
"35426":"km",
"35431":"kn",
"35432":"knack",
"35433":"knapp",
"35434":"knauer",
"35435":"knead",
"35436":"knee",
"35441":"kneel",
"35442":"knelt",
"35443":"knew",
"35444":"knick",
"35445":"knife",
"35446":"knit",
"35451":"knob",
"35452":"knock",
"35453":"knoll",
"35454":"knot",
"35455":"knott",
"35456":"know",
"35461":"known",
"35462":"knox",
"35463":"knurl",
"35464":"ko",
"35465":"koala",
"35466":"koch",
"35511":"kodak",
"35512":"kola",
"35513":"kombu",
"35514":"kong",
"35515":"koran",
"35516":"korea",
"35521":"kp",
"35522":"kq",
"35523":"kr",
"35524":"kraft",
"35525":"krause",
"35526":"kraut",
"35531":"krebs",
"35532":"kruse",
"35533":"ks",
"35534":"kt",
"35535":"ku",
"35536":"kudo",
"35541":"kudzu",
"35542":"kuhn",
"35543":"kulak",
"35544":"kurd",
"35545":"kurt",
"35546":"kv",
"35551":"kw",
"35552":"kx",
"35553":"ky",
"35554":"kyle",
"35555":"kyoto",
"35556":"kz",
"35561":"l",
"35562":"l's",
"35563":"la",
"35564":"lab",
"35565":"laban",
"35566":"label",
"35611":"labia",
"35612":"labile",
"35613":"lac",
"35614":"lace",
"35615":"lack",
"35616":"lacy",
"35621":"lad",
"35622":"laden",
"35623":"ladle",
"35624":"lady",
"35625":"lag",
"35626":"lager",
"35631":"lagoon",
"35632":"lagos",
"35633":"laid",
"35634":"lain",
"35635":"lair",
"35636":"laity",
"35641":"lake",
"35642":"lam",
"35643":"lamar",
"35644":"lamb",
"35645":"lame",
"35646":"lamp",
"35651":"lana",
"35652":"lance",
"35653":"land",
"35654":"lane",
"35655":"lang",
"35656":"lange",
"35661":"lanka",
"35662":"lanky",
"35663":"lao",
"35664":"laos",
"35665":"lap",
"35666":"lapel",
"36111":"lapse",
"36112":"larch",
"36113":"lard",
"36114":"lares",
"36115":"large",
"36116":"lark",
"36121":"larkin",
"36122":"larry",
"36123":"lars",
"36124":"larva",
"36125":"lase",
"36126":"lash",
"36131":"lass",
"36132":"lasso",
"36133":"last",
"36134":"latch",
"36135":"late",
"36136":"later",
"36141":"latest",
"36142":"latex",
"36143":"lath",
"36144":"lathe",
"36145":"latin",
"36146":"latus",
"36151":"laud",
"36152":"laue",
"36153":"laugh",
"36154":"launch",
"36155":"laura",
"36156":"lava",
"36161":"law",
"36162":"lawn",
"36163":"lawson",
"36164":"lax",
"36165":"lay",
"36166":"layup",
"36211":"laze",
"36212":"lazy",
"36213":"lb",
"36214":"lc",
"36215":"ld",
"36216":"le",
"36221":"lea",
"36222":"leach",
"36223":"lead",
"36224":"leaf",
"36225":"leafy",
"36226":"leak",
"36231":"leaky",
"36232":"lean",
"36233":"leap",
"36234":"leapt",
"36235":"lear",
"36236":"learn",
"36241":"lease",
"36242":"leash",
"36243":"least",
"36244":"leave",
"36245":"led",
"36246":"ledge",
"36251":"lee",
"36252":"leech",
"36253":"leeds",
"36254":"leek",
"36255":"leer",
"36256":"leery",
"36261":"leeway",
"36262":"left",
"36263":"lefty",
"36264":"leg",
"36265":"legal",
"36266":"leggy",
"36311":"legion",
"36312":"leigh",
"36313":"leila",
"36314":"leland",
"36315":"lemma",
"36316":"lemon",
"36321":"len",
"36322":"lena",
"36323":"lend",
"36324":"lenin",
"36325":"lenny",
"36326":"lens",
"36331":"lent",
"36332":"leo",
"36333":"leon",
"36334":"leona",
"36335":"leone",
"36336":"leper",
"36341":"leroy",
"36342":"less",
"36343":"lessee",
"36344":"lest",
"36345":"let",
"36346":"lethe",
"36351":"lev",
"36352":"levee",
"36353":"level",
"36354":"lever",
"36355":"levi",
"36356":"levin",
"36361":"levis",
"36362":"levy",
"36363":"lew",
"36364":"lewd",
"36365":"lewis",
"36366":"leyden",
"36411":"lf",
"36412":"lg",
"36413":"lh",
"36414":"li",
"36415":"liar",
"36416":"libel",
"36421":"libido",
"36422":"libya",
"36423":"lice",
"36424":"lick",
"36425":"lid",
"36426":"lie",
"36431":"lied",
"36432":"lien",
"36433":"lieu",
"36434":"life",
"36435":"lifo",
"36436":"lift",
"36441":"light",
"36442":"like",
"36443":"liken",
"36444":"lila",
"36445":"lilac",
"36446":"lilly",
"36451":"lilt",
"36452":"lily",
"36453":"lima",
"36454":"limb",
"36455":"limbo",
"36456":"lime",
"36461":"limit",
"36462":"limp",
"36463":"lin",
"36464":"lind",
"36465":"linda",
"36466":"linden",
"36511":"line",
"36512":"linen",
"36513":"lingo",
"36514":"link",
"36515":"lint",
"36516":"linus",
"36521":"lion",
"36522":"lip",
"36523":"lipid",
"36524":"lisa",
"36525":"lise",
"36526":"lisle",
"36531":"lisp",
"36532":"list",
"36533":"listen",
"36534":"lit",
"36535":"lithe",
"36536":"litton",
"36541":"live",
"36542":"liven",
"36543":"livid",
"36544":"livre",
"36545":"liz",
"36546":"lizzie",
"36551":"lj",
"36552":"lk",
"36553":"ll",
"36554":"lll",
"36555":"llll",
"36556":"lloyd",
"36561":"lm",
"36562":"lmn",
"36563":"ln",
"36564":"lo",
"36565":"load",
"36566":"loaf",
"36611":"loam",
"36612":"loamy",
"36613":"loan",
"36614":"loath",
"36615":"lob",
"36616":"lobar",
"36621":"lobby",
"36622":"lobe",
"36623":"lobo",
"36624":"local",
"36625":"loci",
"36626":"lock",
"36631":"locke",
"36632":"locus",
"36633":"lodge",
"36634":"loeb",
"36635":"loess",
"36636":"loft",
"36641":"lofty",
"36642":"log",
"36643":"logan",
"36644":"loge",
"36645":"logic",
"36646":"loin",
"36651":"loire",
"36652":"lois",
"36653":"loiter",
"36654":"loki",
"36655":"lola",
"36656":"loll",
"36661":"lolly",
"36662":"lomb",
"36663":"lome",
"36664":"lone",
"36665":"long",
"36666":"look",
"41111":"loom",
"41112":"loon",
"41113":"loop",
"41114":"loose",
"41115":"loot",
"41116":"lop",
"41121":"lope",
"41122":"lopez",
"41123":"lord",
"41124":"lore",
"41125":"loren",
"41126":"los",
"41131":"lose",
"41132":"loss",
"41133":"lossy",
"41134":"lost",
"41135":"lot",
"41136":"lotte",
"41141":"lotus",
"41142":"lou",
"41143":"loud",
"41144":"louis",
"41145":"louise",
"41146":"louse",
"41151":"lousy",
"41152":"louver",
"41153":"love",
"41154":"low",
"41155":"lowe",
"41156":"lower",
"41161":"lowry",
"41162":"loy",
"41163":"loyal",
"41164":"lp",
"41165":"lq",
"41166":"lr",
"41211":"ls",
"41212":"lsi",
"41213":"lt",
"41214":"ltv",
"41215":"lu",
"41216":"lucas",
"41221":"lucia",
"41222":"lucid",
"41223":"luck",
"41224":"lucky",
"41225":"lucre",
"41226":"lucy",
"41231":"lug",
"41232":"luge",
"41233":"luger",
"41234":"luis",
"41235":"luke",
"41236":"lull",
"41241":"lulu",
"41242":"lumbar",
"41243":"lumen",
"41244":"lump",
"41245":"lumpy",
"41246":"lunar",
"41251":"lunch",
"41252":"lund",
"41253":"lung",
"41254":"lunge",
"41255":"lura",
"41256":"lurch",
"41261":"lure",
"41262":"lurid",
"41263":"lurk",
"41264":"lush",
"41265":"lust",
"41266":"lusty",
"41311":"lute",
"41312":"lutz",
"41313":"lux",
"41314":"luxe",
"41315":"luzon",
"41316":"lv",
"41321":"lw",
"41322":"lx",
"41323":"ly",
"41324":"lydia",
"41325":"lye",
"41326":"lying",
"41331":"lykes",
"41332":"lyle",
"41333":"lyman",
"41334":"lymph",
"41335":"lynch",
"41336":"lynn",
"41341":"lynx",
"41342":"lyon",
"41343":"lyons",
"41344":"lyra",
"41345":"lyric",
"41346":"lz",
"41351":"m",
"41352":"m&m",
"41353":"m's",
"41354":"ma",
"41355":"mabel",
"41356":"mac",
"41361":"mace",
"41362":"mach",
"41363":"macho",
"41364":"mack",
"41365":"mackey",
"41366":"macon",
"41411":"macro",
"41412":"mad",
"41413":"madam",
"41414":"made",
"41415":"madman",
"41416":"madsen",
"41421":"mae",
"41422":"magi",
"41423":"magic",
"41424":"magma",
"41425":"magna",
"41426":"magog",
"41431":"maid",
"41432":"maier",
"41433":"mail",
"41434":"maim",
"41435":"main",
"41436":"maine",
"41441":"major",
"41442":"make",
"41443":"malady",
"41444":"malay",
"41445":"male",
"41446":"mali",
"41451":"mall",
"41452":"malt",
"41453":"malta",
"41454":"mambo",
"41455":"mamma",
"41456":"mammal",
"41461":"man",
"41462":"mana",
"41463":"manama",
"41464":"mane",
"41465":"mange",
"41466":"mania",
"41511":"manic",
"41512":"mann",
"41513":"manna",
"41514":"manor",
"41515":"mans",
"41516":"manse",
"41521":"mantle",
"41522":"many",
"41523":"mao",
"41524":"maori",
"41525":"map",
"41526":"maple",
"41531":"mar",
"41532":"marc",
"41533":"march",
"41534":"marco",
"41535":"marcy",
"41536":"mardi",
"41541":"mare",
"41542":"margo",
"41543":"maria",
"41544":"marie",
"41545":"marin",
"41546":"marine",
"41551":"mario",
"41552":"mark",
"41553":"marks",
"41554":"marlin",
"41555":"marrow",
"41556":"marry",
"41561":"mars",
"41562":"marsh",
"41563":"mart",
"41564":"marty",
"41565":"marx",
"41566":"mary",
"41611":"maser",
"41612":"mash",
"41613":"mask",
"41614":"mason",
"41615":"masque",
"41616":"mass",
"41621":"mast",
"41622":"mat",
"41623":"match",
"41624":"mate",
"41625":"mateo",
"41626":"mater",
"41631":"math",
"41632":"matte",
"41633":"maul",
"41634":"mauve",
"41635":"mavis",
"41636":"maw",
"41641":"mawr",
"41642":"max",
"41643":"maxim",
"41644":"maxima",
"41645":"may",
"41646":"maya",
"41651":"maybe",
"41652":"mayer",
"41653":"mayhem",
"41654":"mayo",
"41655":"mayor",
"41656":"mayst",
"41661":"mazda",
"41662":"maze",
"41663":"mb",
"41664":"mba",
"41665":"mc",
"41666":"mccoy",
"42111":"mcgee",
"42112":"mckay",
"42113":"mckee",
"42114":"mcleod",
"42115":"md",
"42116":"me",
"42121":"mead",
"42122":"meal",
"42123":"mealy",
"42124":"mean",
"42125":"meant",
"42126":"meat",
"42131":"meaty",
"42132":"mecca",
"42133":"mecum",
"42134":"medal",
"42135":"medea",
"42136":"media",
"42141":"medic",
"42142":"medley",
"42143":"meek",
"42144":"meet",
"42145":"meg",
"42146":"mega",
"42151":"meier",
"42152":"meir",
"42153":"mel",
"42154":"meld",
"42155":"melee",
"42156":"mellow",
"42161":"melon",
"42162":"melt",
"42163":"memo",
"42164":"memoir",
"42165":"men",
"42166":"mend",
"42211":"menlo",
"42212":"menu",
"42213":"merck",
"42214":"mercy",
"42215":"mere",
"42216":"merge",
"42221":"merit",
"42222":"merle",
"42223":"merry",
"42224":"mesa",
"42225":"mescal",
"42226":"mesh",
"42231":"meson",
"42232":"mess",
"42233":"messy",
"42234":"met",
"42235":"metal",
"42236":"mete",
"42241":"meter",
"42242":"metro",
"42243":"mew",
"42244":"meyer",
"42245":"meyers",
"42246":"mezzo",
"42251":"mf",
"42252":"mg",
"42253":"mh",
"42254":"mi",
"42255":"miami",
"42256":"mica",
"42261":"mice",
"42262":"mickey",
"42263":"micky",
"42264":"micro",
"42265":"mid",
"42266":"midas",
"42311":"midge",
"42312":"midst",
"42313":"mien",
"42314":"miff",
"42315":"mig",
"42316":"might",
"42321":"mike",
"42322":"mila",
"42323":"milan",
"42324":"milch",
"42325":"mild",
"42326":"mildew",
"42331":"mile",
"42332":"miles",
"42333":"milk",
"42334":"milky",
"42335":"mill",
"42336":"mills",
"42341":"milt",
"42342":"mimi",
"42343":"mimic",
"42344":"mince",
"42345":"mind",
"42346":"mine",
"42351":"mini",
"42352":"minim",
"42353":"mink",
"42354":"minnow",
"42355":"minor",
"42356":"minos",
"42361":"minot",
"42362":"minsk",
"42363":"mint",
"42364":"minus",
"42365":"mira",
"42366":"mirage",
"42411":"mire",
"42412":"mirth",
"42413":"miser",
"42414":"misery",
"42415":"miss",
"42416":"missy",
"42421":"mist",
"42422":"misty",
"42423":"mit",
"42424":"mite",
"42425":"mitre",
"42426":"mitt",
"42431":"mix",
"42432":"mixup",
"42433":"mizar",
"42434":"mj",
"42435":"mk",
"42436":"ml",
"42441":"mm",
"42442":"mmm",
"42443":"mmmm",
"42444":"mn",
"42445":"mno",
"42446":"mo",
"42451":"moan",
"42452":"moat",
"42453":"mob",
"42454":"mobil",
"42455":"mock",
"42456":"modal",
"42461":"mode",
"42462":"model",
"42463":"modem",
"42464":"modish",
"42465":"moe",
"42466":"moen",
"42511":"mohr",
"42512":"moire",
"42513":"moist",
"42514":"molal",
"42515":"molar",
"42516":"mold",
"42521":"mole",
"42522":"moll",
"42523":"mollie",
"42524":"molly",
"42525":"molt",
"42526":"molten",
"42531":"mommy",
"42532":"mona",
"42533":"monad",
"42534":"mondo",
"42535":"monel",
"42536":"money",
"42541":"monic",
"42542":"monk",
"42543":"mont",
"42544":"monte",
"42545":"month",
"42546":"monty",
"42551":"moo",
"42552":"mood",
"42553":"moody",
"42554":"moon",
"42555":"moor",
"42556":"moore",
"42561":"moose",
"42562":"moot",
"42563":"mop",
"42564":"moral",
"42565":"morale",
"42566":"moran",
"42611":"more",
"42612":"morel",
"42613":"morn",
"42614":"moron",
"42615":"morse",
"42616":"morsel",
"42621":"mort",
"42622":"mosaic",
"42623":"moser",
"42624":"moses",
"42625":"moss",
"42626":"mossy",
"42631":"most",
"42632":"mot",
"42633":"motel",
"42634":"motet",
"42635":"moth",
"42636":"mother",
"42641":"motif",
"42642":"motor",
"42643":"motto",
"42644":"mould",
"42645":"mound",
"42646":"mount",
"42651":"mourn",
"42652":"mouse",
"42653":"mousy",
"42654":"mouth",
"42655":"move",
"42656":"movie",
"42661":"mow",
"42662":"moyer",
"42663":"mp",
"42664":"mph",
"42665":"mq",
"42666":"mr",
"43111":"mrs",
"43112":"ms",
"43113":"mt",
"43114":"mu",
"43115":"much",
"43116":"muck",
"43121":"mucus",
"43122":"mud",
"43123":"mudd",
"43124":"muddy",
"43125":"muff",
"43126":"muffin",
"43131":"mug",
"43132":"muggy",
"43133":"mugho",
"43134":"muir",
"43135":"mulch",
"43136":"mulct",
"43141":"mule",
"43142":"mull",
"43143":"multi",
"43144":"mum",
"43145":"mummy",
"43146":"munch",
"43151":"mung",
"43152":"munson",
"43153":"muon",
"43154":"muong",
"43155":"mural",
"43156":"muriel",
"43161":"murk",
"43162":"murky",
"43163":"murre",
"43164":"muse",
"43165":"mush",
"43166":"mushy",
"43211":"music",
"43212":"musk",
"43213":"muslim",
"43214":"must",
"43215":"musty",
"43216":"mute",
"43221":"mutt",
"43222":"muzak",
"43223":"muzo",
"43224":"mv",
"43225":"mw",
"43226":"mx",
"43231":"my",
"43232":"myel",
"43233":"myers",
"43234":"mylar",
"43235":"mynah",
"43236":"myopia",
"43241":"myra",
"43242":"myron",
"43243":"myrrh",
"43244":"myself",
"43245":"myth",
"43246":"mz",
"43251":"n",
"43252":"n's",
"43253":"na",
"43254":"naacp",
"43255":"nab",
"43256":"nadir",
"43261":"nag",
"43262":"nagoya",
"43263":"nagy",
"43264":"naiad",
"43265":"nail",
"43266":"nair",
"43311":"naive",
"43312":"naked",
"43313":"name",
"43314":"nan",
"43315":"nancy",
"43316":"naomi",
"43321":"nap",
"43322":"nary",
"43323":"nasa",
"43324":"nasal",
"43325":"nash",
"43326":"nasty",
"43331":"nat",
"43332":"natal",
"43333":"nate",
"43334":"nato",
"43335":"natty",
"43336":"nature",
"43341":"naval",
"43342":"nave",
"43343":"navel",
"43344":"navy",
"43345":"nay",
"43346":"nazi",
"43351":"nb",
"43352":"nbc",
"43353":"nbs",
"43354":"nc",
"43355":"ncaa",
"43356":"ncr",
"43361":"nd",
"43362":"ne",
"43363":"neal",
"43364":"near",
"43365":"neat",
"43366":"neath",
"43411":"neck",
"43412":"ned",
"43413":"nee",
"43414":"need",
"43415":"needy",
"43416":"neff",
"43421":"negate",
"43422":"negro",
"43423":"nehru",
"43424":"neil",
"43425":"nell",
"43426":"nelsen",
"43431":"neon",
"43432":"nepal",
"43433":"nero",
"43434":"nerve",
"43435":"ness",
"43436":"nest",
"43441":"net",
"43442":"neuron",
"43443":"neva",
"43444":"neve",
"43445":"new",
"43446":"newel",
"43451":"newt",
"43452":"next",
"43453":"nf",
"43454":"ng",
"43455":"nh",
"43456":"ni",
"43461":"nib",
"43462":"nibs",
"43463":"nice",
"43464":"nicety",
"43465":"niche",
"43466":"nick",
"43511":"niece",
"43512":"niger",
"43513":"nigh",
"43514":"night",
"43515":"nih",
"43516":"nikko",
"43521":"nil",
"43522":"nile",
"43523":"nimbus",
"43524":"nimh",
"43525":"nina",
"43526":"nine",
"43531":"ninth",
"43532":"niobe",
"43533":"nip",
"43534":"nit",
"43535":"nitric",
"43536":"nitty",
"43541":"nixon",
"43542":"nj",
"43543":"nk",
"43544":"nl",
"43545":"nm",
"43546":"nn",
"43551":"nnn",
"43552":"nnnn",
"43553":"no",
"43554":"noaa",
"43555":"noah",
"43556":"nob",
"43561":"nobel",
"43562":"noble",
"43563":"nod",
"43564":"nodal",
"43565":"node",
"43566":"noel",
"43611":"noise",
"43612":"noisy",
"43613":"nolan",
"43614":"noll",
"43615":"nolo",
"43616":"nomad",
"43621":"non",
"43622":"nonce",
"43623":"none",
"43624":"nook",
"43625":"noon",
"43626":"noose",
"43631":"nop",
"43632":"nor",
"43633":"nora",
"43634":"norm",
"43635":"norma",
"43636":"north",
"43641":"norway",
"43642":"nose",
"43643":"not",
"43644":"notch",
"43645":"note",
"43646":"notre",
"43651":"noun",
"43652":"nov",
"43653":"nova",
"43654":"novak",
"43655":"novel",
"43656":"novo",
"43661":"now",
"43662":"np",
"43663":"nq",
"43664":"nr",
"43665":"nrc",
"43666":"ns",
"44111":"nsf",
"44112":"nt",
"44113":"ntis",
"44114":"nu",
"44115":"nuance",
"44116":"nubia",
"44121":"nuclei",
"44122":"nude",
"44123":"nudge",
"44124":"null",
"44125":"numb",
"44126":"nun",
"44131":"nurse",
"44132":"nut",
"44133":"nv",
"44134":"nw",
"44135":"nx",
"44136":"ny",
"44141":"nyc",
"44142":"nylon",
"44143":"nymph",
"44144":"nyu",
"44145":"nz",
"44146":"o",
"44151":"o'er",
"44152":"o's",
"44153":"oa",
"44154":"oaf",
"44155":"oak",
"44156":"oaken",
"44161":"oakley",
"44162":"oar",
"44163":"oases",
"44164":"oasis",
"44165":"oat",
"44166":"oath",
"44211":"ob",
"44212":"obese",
"44213":"obey",
"44214":"objet",
"44215":"oboe",
"44216":"oc",
"44221":"occur",
"44222":"ocean",
"44223":"oct",
"44224":"octal",
"44225":"octave",
"44226":"octet",
"44231":"od",
"44232":"odd",
"44233":"ode",
"44234":"odin",
"44235":"odium",
"44236":"oe",
"44241":"of",
"44242":"off",
"44243":"offal",
"44244":"offend",
"44245":"offer",
"44246":"oft",
"44251":"often",
"44252":"og",
"44253":"ogden",
"44254":"ogle",
"44255":"ogre",
"44256":"oh",
"44261":"ohio",
"44262":"ohm",
"44263":"ohmic",
"44264":"oi",
"44265":"oil",
"44266":"oily",
"44311":"oint",
"44312":"oj",
"44313":"ok",
"44314":"okay",
"44315":"ol",
"44316":"olaf",
"44321":"olav",
"44322":"old",
"44323":"olden",
"44324":"oldy",
"44325":"olga",
"44326":"olin",
"44331":"olive",
"44332":"olsen",
"44333":"olson",
"44334":"om",
"44335":"omaha",
"44336":"oman",
"44341":"omega",
"44342":"omen",
"44343":"omit",
"44344":"on",
"44345":"once",
"44346":"one",
"44351":"onion",
"44352":"only",
"44353":"onset",
"44354":"onto",
"44355":"onus",
"44356":"onward",
"44361":"onyx",
"44362":"oo",
"44363":"ooo",
"44364":"oooo",
"44365":"ooze",
"44366":"op",
"44411":"opal",
"44412":"opec",
"44413":"opel",
"44414":"open",
"44415":"opera",
"44416":"opium",
"44421":"opt",
"44422":"optic",
"44423":"opus",
"44424":"oq",
"44425":"or",
"44426":"oral",
"44431":"orate",
"44432":"orb",
"44433":"orbit",
"44434":"orchid",
"44435":"ordain",
"44436":"order",
"44441":"ore",
"44442":"organ",
"44443":"orgy",
"44444":"orin",
"44445":"orion",
"44446":"ornery",
"44451":"orono",
"44452":"orr",
"44453":"os",
"44454":"osaka",
"44455":"oscar",
"44456":"osier",
"44461":"oslo",
"44462":"ot",
"44463":"other",
"44464":"otis",
"44465":"ott",
"44466":"otter",
"44511":"otto",
"44512":"ou",
"44513":"ouch",
"44514":"ought",
"44515":"ounce",
"44516":"our",
"44521":"oust",
"44522":"out",
"44523":"ouvre",
"44524":"ouzel",
"44525":"ouzo",
"44526":"ov",
"44531":"ova",
"44532":"oval",
"44533":"ovary",
"44534":"ovate",
"44535":"oven",
"44536":"over",
"44541":"overt",
"44542":"ovid",
"44543":"ow",
"44544":"owe",
"44545":"owens",
"44546":"owing",
"44551":"owl",
"44552":"owly",
"44553":"own",
"44554":"ox",
"44555":"oxen",
"44556":"oxeye",
"44561":"oxide",
"44562":"oxnard",
"44563":"oy",
"44564":"oz",
"44565":"ozark",
"44566":"ozone",
"44611":"p",
"44612":"p's",
"44613":"pa",
"44614":"pablo",
"44615":"pabst",
"44616":"pace",
"44621":"pack",
"44622":"packet",
"44623":"pact",
"44624":"pad",
"44625":"paddy",
"44626":"padre",
"44631":"paean",
"44632":"pagan",
"44633":"page",
"44634":"paid",
"44635":"pail",
"44636":"pain",
"44641":"paine",
"44642":"paint",
"44643":"pair",
"44644":"pal",
"44645":"pale",
"44646":"pall",
"44651":"palm",
"44652":"palo",
"44653":"palsy",
"44654":"pam",
"44655":"pampa",
"44656":"pan",
"44661":"panama",
"44662":"panda",
"44663":"pane",
"44664":"panel",
"44665":"pang",
"44666":"panic",
"45111":"pansy",
"45112":"pant",
"45113":"panty",
"45114":"paoli",
"45115":"pap",
"45116":"papa",
"45121":"papal",
"45122":"papaw",
"45123":"paper",
"45124":"pappy",
"45125":"papua",
"45126":"par",
"45131":"parch",
"45132":"pardon",
"45133":"pare",
"45134":"pareto",
"45135":"paris",
"45136":"park",
"45141":"parke",
"45142":"parks",
"45143":"parr",
"45144":"parry",
"45145":"parse",
"45146":"part",
"45151":"party",
"45152":"pascal",
"45153":"pasha",
"45154":"paso",
"45155":"pass",
"45156":"passe",
"45161":"past",
"45162":"paste",
"45163":"pasty",
"45164":"pat",
"45165":"patch",
"45166":"pate",
"45211":"pater",
"45212":"path",
"45213":"patio",
"45214":"patsy",
"45215":"patti",
"45216":"patton",
"45221":"patty",
"45222":"paul",
"45223":"paula",
"45224":"pauli",
"45225":"paulo",
"45226":"pause",
"45231":"pave",
"45232":"paw",
"45233":"pawn",
"45234":"pax",
"45235":"pay",
"45236":"payday",
"45241":"payne",
"45242":"paz",
"45243":"pb",
"45244":"pbs",
"45245":"pc",
"45246":"pd",
"45251":"pe",
"45252":"pea",
"45253":"peace",
"45254":"peach",
"45255":"peak",
"45256":"peaky",
"45261":"peal",
"45262":"peale",
"45263":"pear",
"45264":"pearl",
"45265":"pease",
"45266":"peat",
"45311":"pebble",
"45312":"pecan",
"45313":"peck",
"45314":"pecos",
"45315":"pedal",
"45316":"pedro",
"45321":"pee",
"45322":"peed",
"45323":"peek",
"45324":"peel",
"45325":"peep",
"45326":"peepy",
"45331":"peer",
"45332":"peg",
"45333":"peggy",
"45334":"pelt",
"45335":"pen",
"45336":"penal",
"45341":"pence",
"45342":"pencil",
"45343":"pend",
"45344":"penh",
"45345":"penn",
"45346":"penna",
"45351":"penny",
"45352":"pent",
"45353":"peony",
"45354":"pep",
"45355":"peppy",
"45356":"pepsi",
"45361":"per",
"45362":"perch",
"45363":"percy",
"45364":"perez",
"45365":"peril",
"45366":"perk",
"45411":"perky",
"45412":"perle",
"45413":"perry",
"45414":"persia",
"45415":"pert",
"45416":"perth",
"45421":"peru",
"45422":"peruse",
"45423":"pest",
"45424":"peste",
"45425":"pet",
"45426":"petal",
"45431":"pete",
"45432":"peter",
"45433":"petit",
"45434":"petri",
"45435":"petty",
"45436":"pew",
"45441":"pewee",
"45442":"pf",
"45443":"pg",
"45444":"ph",
"45445":"ph.d",
"45446":"phage",
"45451":"phase",
"45452":"phd",
"45453":"phenol",
"45454":"phi",
"45455":"phil",
"45456":"phlox",
"45461":"phon",
"45462":"phone",
"45463":"phony",
"45464":"photo",
"45465":"phyla",
"45466":"physic",
"45511":"pi",
"45512":"piano",
"45513":"pica",
"45514":"pick",
"45515":"pickup",
"45516":"picky",
"45521":"pie",
"45522":"piece",
"45523":"pier",
"45524":"pierce",
"45525":"piety",
"45526":"pig",
"45531":"piggy",
"45532":"pike",
"45533":"pile",
"45534":"pill",
"45535":"pilot",
"45536":"pimp",
"45541":"pin",
"45542":"pinch",
"45543":"pine",
"45544":"ping",
"45545":"pinion",
"45546":"pink",
"45551":"pint",
"45552":"pinto",
"45553":"pion",
"45554":"piotr",
"45555":"pious",
"45556":"pip",
"45561":"pipe",
"45562":"piper",
"45563":"pique",
"45564":"pit",
"45565":"pitch",
"45566":"pith",
"45611":"pithy",
"45612":"pitney",
"45613":"pitt",
"45614":"pity",
"45615":"pius",
"45616":"pivot",
"45621":"pixel",
"45622":"pixy",
"45623":"pizza",
"45624":"pj",
"45625":"pk",
"45626":"pl",
"45631":"place",
"45632":"plague",
"45633":"plaid",
"45634":"plain",
"45635":"plan",
"45636":"plane",
"45641":"plank",
"45642":"plant",
"45643":"plasm",
"45644":"plat",
"45645":"plate",
"45646":"plato",
"45651":"play",
"45652":"playa",
"45653":"plaza",
"45654":"plea",
"45655":"plead",
"45656":"pleat",
"45661":"pledge",
"45662":"pliny",
"45663":"plod",
"45664":"plop",
"45665":"plot",
"45666":"plow",
"46111":"pluck",
"46112":"plug",
"46113":"plum",
"46114":"plumb",
"46115":"plume",
"46116":"plump",
"46121":"plunk",
"46122":"plus",
"46123":"plush",
"46124":"plushy",
"46125":"pluto",
"46126":"ply",
"46131":"pm",
"46132":"pn",
"46133":"po",
"46134":"poach",
"46135":"pobox",
"46136":"pod",
"46141":"podge",
"46142":"podia",
"46143":"poe",
"46144":"poem",
"46145":"poesy",
"46146":"poet",
"46151":"poetry",
"46152":"pogo",
"46153":"poi",
"46154":"point",
"46155":"poise",
"46156":"poke",
"46161":"pol",
"46162":"polar",
"46163":"pole",
"46164":"police",
"46165":"polio",
"46166":"polis",
"46211":"polk",
"46212":"polka",
"46213":"poll",
"46214":"polo",
"46215":"pomona",
"46216":"pomp",
"46221":"ponce",
"46222":"pond",
"46223":"pong",
"46224":"pont",
"46225":"pony",
"46226":"pooch",
"46231":"pooh",
"46232":"pool",
"46233":"poole",
"46234":"poop",
"46235":"poor",
"46236":"pop",
"46241":"pope",
"46242":"poppy",
"46243":"porch",
"46244":"pore",
"46245":"pork",
"46246":"porous",
"46251":"port",
"46252":"porte",
"46253":"portia",
"46254":"porto",
"46255":"pose",
"46256":"posey",
"46261":"posh",
"46262":"posit",
"46263":"posse",
"46264":"post",
"46265":"posy",
"46266":"pot",
"46311":"potts",
"46312":"pouch",
"46313":"pound",
"46314":"pour",
"46315":"pout",
"46316":"pow",
"46321":"powder",
"46322":"power",
"46323":"pp",
"46324":"ppm",
"46325":"ppp",
"46326":"pppp",
"46331":"pq",
"46332":"pqr",
"46333":"pr",
"46334":"prado",
"46335":"pram",
"46336":"prank",
"46341":"pratt",
"46342":"pray",
"46343":"preen",
"46344":"prefix",
"46345":"prep",
"46346":"press",
"46351":"prexy",
"46352":"prey",
"46353":"priam",
"46354":"price",
"46355":"prick",
"46356":"pride",
"46361":"prig",
"46362":"prim",
"46363":"prima",
"46364":"prime",
"46365":"primp",
"46366":"prince",
"46411":"print",
"46412":"prior",
"46413":"prism",
"46414":"prissy",
"46415":"privy",
"46416":"prize",
"46421":"pro",
"46422":"probe",
"46423":"prod",
"46424":"prof",
"46425":"prom",
"46426":"prone",
"46431":"prong",
"46432":"proof",
"46433":"prop",
"46434":"propyl",
"46435":"prose",
"46436":"proud",
"46441":"prove",
"46442":"prow",
"46443":"prowl",
"46444":"proxy",
"46445":"prune",
"46446":"pry",
"46451":"ps",
"46452":"psalm",
"46453":"psi",
"46454":"psych",
"46455":"pt",
"46456":"pta",
"46461":"pu",
"46462":"pub",
"46463":"puck",
"46464":"puddly",
"46465":"puerto",
"46466":"puff",
"46511":"puffy",
"46512":"pug",
"46513":"pugh",
"46514":"puke",
"46515":"pull",
"46516":"pulp",
"46521":"pulse",
"46522":"puma",
"46523":"pump",
"46524":"pun",
"46525":"punch",
"46526":"punic",
"46531":"punish",
"46532":"punk",
"46533":"punky",
"46534":"punt",
"46535":"puny",
"46536":"pup",
"46541":"pupal",
"46542":"pupil",
"46543":"puppy",
"46544":"pure",
"46545":"purge",
"46546":"purl",
"46551":"purr",
"46552":"purse",
"46553":"pus",
"46554":"pusan",
"46555":"pusey",
"46556":"push",
"46561":"pussy",
"46562":"put",
"46563":"putt",
"46564":"putty",
"46565":"pv",
"46566":"pvc",
"46611":"pw",
"46612":"px",
"46613":"py",
"46614":"pygmy",
"46615":"pyle",
"46616":"pyre",
"46621":"pyrex",
"46622":"pyrite",
"46623":"pz",
"46624":"q",
"46625":"q's",
"46626":"qa",
"46631":"qatar",
"46632":"qb",
"46633":"qc",
"46634":"qd",
"46635":"qe",
"46636":"qed",
"46641":"qf",
"46642":"qg",
"46643":"qh",
"46644":"qi",
"46645":"qj",
"46646":"qk",
"46651":"ql",
"46652":"qm",
"46653":"qn",
"46654":"qo",
"46655":"qp",
"46656":"qq",
"46661":"qqq",
"46662":"qqqq",
"46663":"qr",
"46664":"qrs",
"46665":"qs",
"46666":"qt",
"51111":"qu",
"51112":"qua",
"51113":"quack",
"51114":"quad",
"51115":"quaff",
"51116":"quail",
"51121":"quake",
"51122":"qualm",
"51123":"quark",
"51124":"quarry",
"51125":"quart",
"51126":"quash",
"51131":"quasi",
"51132":"quay",
"51133":"queasy",
"51134":"queen",
"51135":"queer",
"51136":"quell",
"51141":"query",
"51142":"quest",
"51143":"queue",
"51144":"quick",
"51145":"quid",
"51146":"quiet",
"51151":"quill",
"51152":"quilt",
"51153":"quinn",
"51154":"quint",
"51155":"quip",
"51156":"quirk",
"51161":"quirt",
"51162":"quit",
"51163":"quite",
"51164":"quito",
"51165":"quiz",
"51166":"quo",
"51211":"quod",
"51212":"quota",
"51213":"quote",
"51214":"qv",
"51215":"qw",
"51216":"qx",
"51221":"qy",
"51222":"qz",
"51223":"r",
"51224":"r&d",
"51225":"r's",
"51226":"ra",
"51231":"rabat",
"51232":"rabbi",
"51233":"rabbit",
"51234":"rabid",
"51235":"rabin",
"51236":"race",
"51241":"rack",
"51242":"racy",
"51243":"radar",
"51244":"radii",
"51245":"radio",
"51246":"radium",
"51251":"radix",
"51252":"radon",
"51253":"rae",
"51254":"rafael",
"51255":"raft",
"51256":"rag",
"51261":"rage",
"51262":"raid",
"51263":"rail",
"51264":"rain",
"51265":"rainy",
"51266":"raise",
"51311":"raj",
"51312":"rajah",
"51313":"rake",
"51314":"rally",
"51315":"ralph",
"51316":"ram",
"51321":"raman",
"51322":"ramo",
"51323":"ramp",
"51324":"ramsey",
"51325":"ran",
"51326":"ranch",
"51331":"rand",
"51332":"randy",
"51333":"rang",
"51334":"range",
"51335":"rangy",
"51336":"rank",
"51341":"rant",
"51342":"raoul",
"51343":"rap",
"51344":"rape",
"51345":"rapid",
"51346":"rapt",
"51351":"rare",
"51352":"rasa",
"51353":"rascal",
"51354":"rash",
"51355":"rasp",
"51356":"rat",
"51361":"rata",
"51362":"rate",
"51363":"rater",
"51364":"ratio",
"51365":"rattle",
"51366":"raul",
"51411":"rave",
"51412":"ravel",
"51413":"raven",
"51414":"raw",
"51415":"ray",
"51416":"raze",
"51421":"razor",
"51422":"rb",
"51423":"rc",
"51424":"rca",
"51425":"rd",
"51426":"re",
"51431":"reach",
"51432":"read",
"51433":"ready",
"51434":"reagan",
"51435":"real",
"51436":"realm",
"51441":"ream",
"51442":"reap",
"51443":"rear",
"51444":"reave",
"51445":"reb",
"51446":"rebel",
"51451":"rebut",
"51452":"recipe",
"51453":"reck",
"51454":"recur",
"51455":"red",
"51456":"redeem",
"51461":"reduce",
"51462":"reed",
"51463":"reedy",
"51464":"reef",
"51465":"reek",
"51466":"reel",
"51511":"reese",
"51512":"reeve",
"51513":"refer",
"51514":"regal",
"51515":"regina",
"51516":"regis",
"51521":"reich",
"51522":"reid",
"51523":"reign",
"51524":"rein",
"51525":"relax",
"51526":"relay",
"51531":"relic",
"51532":"reman",
"51533":"remedy",
"51534":"remit",
"51535":"remus",
"51536":"rena",
"51541":"renal",
"51542":"rend",
"51543":"rene",
"51544":"renown",
"51545":"rent",
"51546":"rep",
"51551":"repel",
"51552":"repent",
"51553":"resin",
"51554":"resort",
"51555":"rest",
"51556":"ret",
"51561":"retch",
"51562":"return",
"51563":"reub",
"51564":"rev",
"51565":"reveal",
"51566":"revel",
"51611":"rever",
"51612":"revet",
"51613":"revved",
"51614":"rex",
"51615":"rf",
"51616":"rg",
"51621":"rh",
"51622":"rhea",
"51623":"rheum",
"51624":"rhine",
"51625":"rhino",
"51626":"rho",
"51631":"rhoda",
"51632":"rhode",
"51633":"rhyme",
"51634":"ri",
"51635":"rib",
"51636":"rica",
"51641":"rice",
"51642":"rich",
"51643":"rick",
"51644":"rico",
"51645":"rid",
"51646":"ride",
"51651":"ridge",
"51652":"rifle",
"51653":"rift",
"51654":"rig",
"51655":"riga",
"51656":"rigel",
"51661":"riggs",
"51662":"right",
"51663":"rigid",
"51664":"riley",
"51665":"rill",
"51666":"rilly",
"52111":"rim",
"52112":"rime",
"52113":"rimy",
"52114":"ring",
"52115":"rink",
"52116":"rinse",
"52121":"rio",
"52122":"riot",
"52123":"rip",
"52124":"ripe",
"52125":"ripen",
"52126":"ripley",
"52131":"rise",
"52132":"risen",
"52133":"risk",
"52134":"risky",
"52135":"rite",
"52136":"ritz",
"52141":"rival",
"52142":"riven",
"52143":"river",
"52144":"rivet",
"52145":"riyadh",
"52146":"rj",
"52151":"rk",
"52152":"rl",
"52153":"rm",
"52154":"rn",
"52155":"ro",
"52156":"roach",
"52161":"road",
"52162":"roam",
"52163":"roar",
"52164":"roast",
"52165":"rob",
"52166":"robe",
"52211":"robin",
"52212":"robot",
"52213":"rock",
"52214":"rocket",
"52215":"rocky",
"52216":"rod",
"52221":"rode",
"52222":"rodeo",
"52223":"roe",
"52224":"roger",
"52225":"rogue",
"52226":"roil",
"52231":"role",
"52232":"roll",
"52233":"roman",
"52234":"rome",
"52235":"romeo",
"52236":"romp",
"52241":"ron",
"52242":"rondo",
"52243":"rood",
"52244":"roof",
"52245":"rook",
"52246":"rookie",
"52251":"rooky",
"52252":"room",
"52253":"roomy",
"52254":"roost",
"52255":"root",
"52256":"rope",
"52261":"rosa",
"52262":"rose",
"52263":"rosen",
"52264":"ross",
"52265":"rosy",
"52266":"rot",
"52311":"rotc",
"52312":"roth",
"52313":"rotor",
"52314":"rouge",
"52315":"rough",
"52316":"round",
"52321":"rouse",
"52322":"rout",
"52323":"route",
"52324":"rove",
"52325":"row",
"52326":"rowdy",
"52331":"rowe",
"52332":"roy",
"52333":"royal",
"52334":"royce",
"52335":"rp",
"52336":"rpm",
"52341":"rq",
"52342":"rr",
"52343":"rrr",
"52344":"rrrr",
"52345":"rs",
"52346":"rst",
"52351":"rsvp",
"52352":"rt",
"52353":"ru",
"52354":"ruanda",
"52355":"rub",
"52356":"rube",
"52361":"ruben",
"52362":"rubin",
"52363":"rubric",
"52364":"ruby",
"52365":"ruddy",
"52366":"rude",
"52411":"rudy",
"52412":"rue",
"52413":"rufus",
"52414":"rug",
"52415":"ruin",
"52416":"rule",
"52421":"rum",
"52422":"rumen",
"52423":"rummy",
"52424":"rump",
"52425":"rumpus",
"52426":"run",
"52431":"rune",
"52432":"rung",
"52433":"runge",
"52434":"runic",
"52435":"runt",
"52436":"runty",
"52441":"rupee",
"52442":"rural",
"52443":"ruse",
"52444":"rush",
"52445":"rusk",
"52446":"russ",
"52451":"russo",
"52452":"rust",
"52453":"rusty",
"52454":"rut",
"52455":"ruth",
"52456":"rutty",
"52461":"rv",
"52462":"rw",
"52463":"rx",
"52464":"ry",
"52465":"ryan",
"52466":"ryder",
"52511":"rye",
"52512":"rz",
"52513":"s",
"52514":"s's",
"52515":"sa",
"52516":"sabine",
"52521":"sable",
"52522":"sabra",
"52523":"sac",
"52524":"sachs",
"52525":"sack",
"52526":"sad",
"52531":"saddle",
"52532":"sadie",
"52533":"safari",
"52534":"safe",
"52535":"sag",
"52536":"saga",
"52541":"sage",
"52542":"sago",
"52543":"said",
"52544":"sail",
"52545":"saint",
"52546":"sake",
"52551":"sal",
"52552":"salad",
"52553":"sale",
"52554":"salem",
"52555":"saline",
"52556":"salk",
"52561":"salle",
"52562":"sally",
"52563":"salon",
"52564":"salt",
"52565":"salty",
"52566":"salve",
"52611":"salvo",
"52612":"sam",
"52613":"samba",
"52614":"same",
"52615":"sammy",
"52616":"samoa",
"52621":"samuel",
"52622":"san",
"52623":"sana",
"52624":"sand",
"52625":"sandal",
"52626":"sandy",
"52631":"sane",
"52632":"sang",
"52633":"sank",
"52634":"sans",
"52635":"santa",
"52636":"santo",
"52641":"sao",
"52642":"sap",
"52643":"sappy",
"52644":"sara",
"52645":"sarah",
"52646":"saran",
"52651":"sari",
"52652":"sash",
"52653":"sat",
"52654":"satan",
"52655":"satin",
"52656":"satyr",
"52661":"sauce",
"52662":"saucy",
"52663":"saud",
"52664":"saudi",
"52665":"saul",
"52666":"sault",
"53111":"saute",
"53112":"save",
"53113":"savoy",
"53114":"savvy",
"53115":"saw",
"53116":"sawyer",
"53121":"sax",
"53122":"saxon",
"53123":"say",
"53124":"sb",
"53125":"sc",
"53126":"scab",
"53131":"scala",
"53132":"scald",
"53133":"scale",
"53134":"scalp",
"53135":"scam",
"53136":"scamp",
"53141":"scan",
"53142":"scant",
"53143":"scar",
"53144":"scare",
"53145":"scarf",
"53146":"scary",
"53151":"scat",
"53152":"scaup",
"53153":"scene",
"53154":"scent",
"53155":"school",
"53156":"scion",
"53161":"scm",
"53162":"scoff",
"53163":"scold",
"53164":"scoop",
"53165":"scoot",
"53166":"scope",
"53211":"scops",
"53212":"score",
"53213":"scoria",
"53214":"scorn",
"53215":"scot",
"53216":"scott",
"53221":"scour",
"53222":"scout",
"53223":"scowl",
"53224":"scram",
"53225":"scrap",
"53226":"scrape",
"53231":"screw",
"53232":"scrim",
"53233":"scrub",
"53234":"scuba",
"53235":"scud",
"53236":"scuff",
"53241":"scull",
"53242":"scum",
"53243":"scurry",
"53244":"sd",
"53245":"se",
"53246":"sea",
"53251":"seal",
"53252":"seam",
"53253":"seamy",
"53254":"sean",
"53255":"sear",
"53256":"sears",
"53261":"season",
"53262":"seat",
"53263":"sec",
"53264":"secant",
"53265":"sect",
"53266":"sedan",
"53311":"seder",
"53312":"sedge",
"53313":"see",
"53314":"seed",
"53315":"seedy",
"53316":"seek",
"53321":"seem",
"53322":"seen",
"53323":"seep",
"53324":"seethe",
"53325":"seize",
"53326":"self",
"53331":"sell",
"53332":"selma",
"53333":"semi",
"53334":"sen",
"53335":"send",
"53336":"seneca",
"53341":"senor",
"53342":"sense",
"53343":"sent",
"53344":"sentry",
"53345":"seoul",
"53346":"sepal",
"53351":"sepia",
"53352":"sepoy",
"53353":"sept",
"53354":"septa",
"53355":"sequin",
"53356":"sera",
"53361":"serf",
"53362":"serge",
"53363":"serif",
"53364":"serum",
"53365":"serve",
"53366":"servo",
"53411":"set",
"53412":"seth",
"53413":"seton",
"53414":"setup",
"53415":"seven",
"53416":"sever",
"53421":"severe",
"53422":"sew",
"53423":"sewn",
"53424":"sex",
"53425":"sexy",
"53426":"sf",
"53431":"sg",
"53432":"sh",
"53433":"shack",
"53434":"shad",
"53435":"shade",
"53436":"shady",
"53441":"shafer",
"53442":"shaft",
"53443":"shag",
"53444":"shah",
"53445":"shake",
"53446":"shaken",
"53451":"shako",
"53452":"shaky",
"53453":"shale",
"53454":"shall",
"53455":"sham",
"53456":"shame",
"53461":"shank",
"53462":"shape",
"53463":"shard",
"53464":"share",
"53465":"shari",
"53466":"shark",
"53511":"sharp",
"53512":"shave",
"53513":"shaw",
"53514":"shawl",
"53515":"shay",
"53516":"she",
"53521":"she'd",
"53522":"shea",
"53523":"sheaf",
"53524":"shear",
"53525":"sheath",
"53526":"shed",
"53531":"sheen",
"53532":"sheep",
"53533":"sheer",
"53534":"sheet",
"53535":"sheik",
"53536":"shelf",
"53541":"shell",
"53542":"shied",
"53543":"shift",
"53544":"shill",
"53545":"shim",
"53546":"shin",
"53551":"shine",
"53552":"shinto",
"53553":"shiny",
"53554":"ship",
"53555":"shire",
"53556":"shirk",
"53561":"shirt",
"53562":"shish",
"53563":"shiv",
"53564":"shoal",
"53565":"shock",
"53566":"shod",
"53611":"shoe",
"53612":"shoji",
"53613":"shone",
"53614":"shoo",
"53615":"shook",
"53616":"shoot",
"53621":"shop",
"53622":"shore",
"53623":"short",
"53624":"shot",
"53625":"shout",
"53626":"shove",
"53631":"show",
"53632":"shown",
"53633":"showy",
"53634":"shrank",
"53635":"shred",
"53636":"shrew",
"53641":"shrike",
"53642":"shrub",
"53643":"shrug",
"53644":"shu",
"53645":"shuck",
"53646":"shun",
"53651":"shunt",
"53652":"shut",
"53653":"shy",
"53654":"si",
"53655":"sial",
"53656":"siam",
"53661":"sian",
"53662":"sib",
"53663":"sibley",
"53664":"sibyl",
"53665":"sic",
"53666":"sick",
"54111":"side",
"54112":"sidle",
"54113":"siege",
"54114":"siena",
"54115":"sieve",
"54116":"sift",
"54121":"sigh",
"54122":"sight",
"54123":"sigma",
"54124":"sign",
"54125":"signal",
"54126":"signor",
"54131":"silas",
"54132":"silk",
"54133":"silky",
"54134":"sill",
"54135":"silly",
"54136":"silo",
"54141":"silt",
"54142":"silty",
"54143":"sima",
"54144":"simon",
"54145":"simons",
"54146":"sims",
"54151":"sin",
"54152":"sinai",
"54153":"since",
"54154":"sine",
"54155":"sinew",
"54156":"sing",
"54161":"singe",
"54162":"sinh",
"54163":"sink",
"54164":"sinus",
"54165":"sioux",
"54166":"sip",
"54211":"sir",
"54212":"sire",
"54213":"siren",
"54214":"sis",
"54215":"sisal",
"54216":"sit",
"54221":"site",
"54222":"situ",
"54223":"situs",
"54224":"siva",
"54225":"six",
"54226":"sixgun",
"54231":"sixth",
"54232":"sixty",
"54233":"size",
"54234":"sj",
"54235":"sk",
"54236":"skat",
"54241":"skate",
"54242":"skeet",
"54243":"skew",
"54244":"ski",
"54245":"skid",
"54246":"skied",
"54251":"skiff",
"54252":"skill",
"54253":"skim",
"54254":"skimp",
"54255":"skimpy",
"54256":"skin",
"54261":"skip",
"54262":"skirt",
"54263":"skit",
"54264":"skulk",
"54265":"skull",
"54266":"skunk",
"54311":"sky",
"54312":"skye",
"54313":"sl",
"54314":"slab",
"54315":"slack",
"54316":"slag",
"54321":"slain",
"54322":"slake",
"54323":"slam",
"54324":"slang",
"54325":"slant",
"54326":"slap",
"54331":"slash",
"54332":"slat",
"54333":"slate",
"54334":"slater",
"54335":"slav",
"54336":"slave",
"54341":"slay",
"54342":"sled",
"54343":"sleek",
"54344":"sleep",
"54345":"sleet",
"54346":"slept",
"54351":"slew",
"54352":"slice",
"54353":"slick",
"54354":"slid",
"54355":"slide",
"54356":"slim",
"54361":"slime",
"54362":"slimy",
"54363":"sling",
"54364":"slip",
"54365":"slit",
"54366":"sliver",
"54411":"sloan",
"54412":"slob",
"54413":"sloe",
"54414":"slog",
"54415":"sloop",
"54416":"slop",
"54421":"slope",
"54422":"slosh",
"54423":"slot",
"54424":"sloth",
"54425":"slow",
"54426":"slug",
"54431":"sluice",
"54432":"slum",
"54433":"slump",
"54434":"slung",
"54435":"slur",
"54436":"slurp",
"54441":"sly",
"54442":"sm",
"54443":"smack",
"54444":"small",
"54445":"smart",
"54446":"smash",
"54451":"smear",
"54452":"smell",
"54453":"smelt",
"54454":"smile",
"54455":"smirk",
"54456":"smith",
"54461":"smithy",
"54462":"smog",
"54463":"smoke",
"54464":"smoky",
"54465":"smug",
"54466":"smut",
"54511":"sn",
"54512":"snack",
"54513":"snafu",
"54514":"snag",
"54515":"snail",
"54516":"snake",
"54521":"snap",
"54522":"snare",
"54523":"snark",
"54524":"snarl",
"54525":"snatch",
"54526":"sneak",
"54531":"sneer",
"54532":"snell",
"54533":"snick",
"54534":"sniff",
"54535":"snip",
"54536":"snipe",
"54541":"snob",
"54542":"snook",
"54543":"snoop",
"54544":"snore",
"54545":"snort",
"54546":"snout",
"54551":"snow",
"54552":"snowy",
"54553":"snub",
"54554":"snuff",
"54555":"snug",
"54556":"so",
"54561":"soak",
"54562":"soap",
"54563":"soapy",
"54564":"soar",
"54565":"sob",
"54566":"sober",
"54611":"social",
"54612":"sock",
"54613":"sod",
"54614":"soda",
"54615":"sofa",
"54616":"sofia",
"54621":"soft",
"54622":"soften",
"54623":"soggy",
"54624":"soil",
"54625":"sol",
"54626":"solar",
"54631":"sold",
"54632":"sole",
"54633":"solemn",
"54634":"solid",
"54635":"solo",
"54636":"solon",
"54641":"solve",
"54642":"soma",
"54643":"somal",
"54644":"some",
"54645":"son",
"54646":"sonar",
"54651":"song",
"54652":"sonic",
"54653":"sonny",
"54654":"sonora",
"54655":"sony",
"54656":"soon",
"54661":"soot",
"54662":"sooth",
"54663":"sop",
"54664":"sora",
"54665":"sorb",
"54666":"sore",
"55111":"sorry",
"55112":"sort",
"55113":"sos",
"55114":"sou",
"55115":"sough",
"55116":"soul",
"55121":"sound",
"55122":"soup",
"55123":"sour",
"55124":"source",
"55125":"sousa",
"55126":"south",
"55131":"sow",
"55132":"sown",
"55133":"soy",
"55134":"soya",
"55135":"sp",
"55136":"spa",
"55141":"space",
"55142":"spade",
"55143":"spain",
"55144":"span",
"55145":"spar",
"55146":"spare",
"55151":"sparge",
"55152":"spark",
"55153":"spasm",
"55154":"spat",
"55155":"spate",
"55156":"spawn",
"55161":"spay",
"55162":"speak",
"55163":"spear",
"55164":"spec",
"55165":"speck",
"55166":"sped",
"55211":"speed",
"55212":"spell",
"55213":"spend",
"55214":"spent",
"55215":"sperm",
"55216":"sperry",
"55221":"spew",
"55222":"spica",
"55223":"spice",
"55224":"spicy",
"55225":"spike",
"55226":"spiky",
"55231":"spill",
"55232":"spilt",
"55233":"spin",
"55234":"spine",
"55235":"spiny",
"55236":"spire",
"55241":"spiro",
"55242":"spit",
"55243":"spite",
"55244":"spitz",
"55245":"splat",
"55246":"splay",
"55251":"spline",
"55252":"split",
"55253":"spoil",
"55254":"spoke",
"55255":"spoof",
"55256":"spook",
"55261":"spooky",
"55262":"spool",
"55263":"spoon",
"55264":"spore",
"55265":"sport",
"55266":"spot",
"55311":"spout",
"55312":"sprain",
"55313":"spray",
"55314":"spree",
"55315":"sprig",
"55316":"spruce",
"55321":"sprue",
"55322":"spud",
"55323":"spume",
"55324":"spun",
"55325":"spunk",
"55326":"spur",
"55331":"spurn",
"55332":"spurt",
"55333":"spy",
"55334":"sq",
"55335":"squad",
"55336":"squat",
"55341":"squaw",
"55342":"squibb",
"55343":"squid",
"55344":"squint",
"55345":"sr",
"55346":"sri",
"55351":"ss",
"55352":"sss",
"55353":"ssss",
"55354":"sst",
"55355":"st",
"55356":"st.",
"55361":"stab",
"55362":"stack",
"55363":"stacy",
"55364":"staff",
"55365":"stag",
"55366":"stage",
"55411":"stagy",
"55412":"stahl",
"55413":"staid",
"55414":"stain",
"55415":"stair",
"55416":"stake",
"55421":"stale",
"55422":"stalk",
"55423":"stall",
"55424":"stamp",
"55425":"stan",
"55426":"stance",
"55431":"stand",
"55432":"stank",
"55433":"staph",
"55434":"star",
"55435":"stare",
"55436":"stark",
"55441":"starr",
"55442":"start",
"55443":"stash",
"55444":"state",
"55445":"statue",
"55446":"stave",
"55451":"stay",
"55452":"stead",
"55453":"steak",
"55454":"steal",
"55455":"steam",
"55456":"steed",
"55461":"steel",
"55462":"steele",
"55463":"steen",
"55464":"steep",
"55465":"steer",
"55466":"stein",
"55511":"stella",
"55512":"stem",
"55513":"step",
"55514":"stern",
"55515":"steve",
"55516":"stew",
"55521":"stick",
"55522":"stiff",
"55523":"stile",
"55524":"still",
"55525":"stilt",
"55526":"sting",
"55531":"stingy",
"55532":"stink",
"55533":"stint",
"55534":"stir",
"55535":"stock",
"55536":"stoic",
"55541":"stoke",
"55542":"stole",
"55543":"stomp",
"55544":"stone",
"55545":"stony",
"55546":"stood",
"55551":"stool",
"55552":"stoop",
"55553":"stop",
"55554":"store",
"55555":"storey",
"55556":"stork",
"55561":"storm",
"55562":"story",
"55563":"stout",
"55564":"stove",
"55565":"stow",
"55566":"strafe",
"55611":"strap",
"55612":"straw",
"55613":"stray",
"55614":"strewn",
"55615":"strip",
"55616":"stroll",
"55621":"strom",
"55622":"strop",
"55623":"strum",
"55624":"strut",
"55625":"stu",
"55626":"stuart",
"55631":"stub",
"55632":"stuck",
"55633":"stud",
"55634":"study",
"55635":"stuff",
"55636":"stuffy",
"55641":"stump",
"55642":"stun",
"55643":"stung",
"55644":"stunk",
"55645":"stunt",
"55646":"sturm",
"55651":"style",
"55652":"styli",
"55653":"styx",
"55654":"su",
"55655":"suave",
"55656":"sub",
"55661":"subtly",
"55662":"such",
"55663":"suck",
"55664":"sud",
"55665":"sudan",
"55666":"suds",
"56111":"sue",
"56112":"suey",
"56113":"suez",
"56114":"sugar",
"56115":"suit",
"56116":"suite",
"56121":"sulfa",
"56122":"sulk",
"56123":"sulky",
"56124":"sully",
"56125":"sultry",
"56126":"sum",
"56131":"sumac",
"56132":"summon",
"56133":"sun",
"56134":"sung",
"56135":"sunk",
"56136":"sunny",
"56141":"sunset",
"56142":"suny",
"56143":"sup",
"56144":"super",
"56145":"supra",
"56146":"sure",
"56151":"surf",
"56152":"surge",
"56153":"sus",
"56154":"susan",
"56155":"sushi",
"56156":"susie",
"56161":"sutton",
"56162":"sv",
"56163":"sw",
"56164":"swab",
"56165":"swag",
"56166":"swain",
"56211":"swam",
"56212":"swami",
"56213":"swamp",
"56214":"swampy",
"56215":"swan",
"56216":"swank",
"56221":"swap",
"56222":"swarm",
"56223":"swart",
"56224":"swat",
"56225":"swath",
"56226":"sway",
"56231":"swear",
"56232":"sweat",
"56233":"sweaty",
"56234":"swede",
"56235":"sweep",
"56236":"sweet",
"56241":"swell",
"56242":"swelt",
"56243":"swept",
"56244":"swift",
"56245":"swig",
"56246":"swim",
"56251":"swine",
"56252":"swing",
"56253":"swipe",
"56254":"swirl",
"56255":"swish",
"56256":"swiss",
"56261":"swoop",
"56262":"sword",
"56263":"swore",
"56264":"sworn",
"56265":"swum",
"56266":"swung",
"56311":"sx",
"56312":"sy",
"56313":"sybil",
"56314":"sykes",
"56315":"sylow",
"56316":"sylvan",
"56321":"synge",
"56322":"synod",
"56323":"syria",
"56324":"syrup",
"56325":"sz",
"56326":"t",
"56331":"t's",
"56332":"ta",
"56333":"tab",
"56334":"table",
"56335":"taboo",
"56336":"tabu",
"56341":"tabula",
"56342":"tacit",
"56343":"tack",
"56344":"tacky",
"56345":"tacoma",
"56346":"tact",
"56351":"tad",
"56352":"taffy",
"56353":"taft",
"56354":"tag",
"56355":"tahoe",
"56356":"tail",
"56361":"taint",
"56362":"take",
"56363":"taken",
"56364":"talc",
"56365":"tale",
"56366":"talk",
"56411":"talky",
"56412":"tall",
"56413":"tallow",
"56414":"tally",
"56415":"talon",
"56416":"talus",
"56421":"tam",
"56422":"tame",
"56423":"tamp",
"56424":"tampa",
"56425":"tan",
"56426":"tang",
"56431":"tango",
"56432":"tangy",
"56433":"tanh",
"56434":"tank",
"56435":"tansy",
"56436":"tanya",
"56441":"tao",
"56442":"taos",
"56443":"tap",
"56444":"tapa",
"56445":"tape",
"56446":"taper",
"56451":"tapir",
"56452":"tapis",
"56453":"tappa",
"56454":"tar",
"56455":"tara",
"56456":"tardy",
"56461":"tariff",
"56462":"tarry",
"56463":"tart",
"56464":"task",
"56465":"tass",
"56466":"taste",
"56511":"tasty",
"56512":"tat",
"56513":"tate",
"56514":"tater",
"56515":"tattle",
"56516":"tatty",
"56521":"tau",
"56522":"taunt",
"56523":"taut",
"56524":"tavern",
"56525":"tawny",
"56526":"tax",
"56531":"taxi",
"56532":"tb",
"56533":"tc",
"56534":"td",
"56535":"te",
"56536":"tea",
"56541":"teach",
"56542":"teal",
"56543":"team",
"56544":"tear",
"56545":"tease",
"56546":"teat",
"56551":"tech",
"56552":"tecum",
"56553":"ted",
"56554":"teddy",
"56555":"tee",
"56556":"teem",
"56561":"teen",
"56562":"teensy",
"56563":"teet",
"56564":"teeth",
"56565":"telex",
"56566":"tell",
"56611":"tempo",
"56612":"tempt",
"56613":"ten",
"56614":"tend",
"56615":"tenet",
"56616":"tenney",
"56621":"tenon",
"56622":"tenor",
"56623":"tense",
"56624":"tensor",
"56625":"tent",
"56626":"tenth",
"56631":"tepee",
"56632":"tepid",
"56633":"term",
"56634":"tern",
"56635":"terra",
"56636":"terre",
"56641":"terry",
"56642":"terse",
"56643":"tess",
"56644":"test",
"56645":"testy",
"56646":"tete",
"56651":"texan",
"56652":"texas",
"56653":"text",
"56654":"tf",
"56655":"tg",
"56656":"th",
"56661":"thai",
"56662":"than",
"56663":"thank",
"56664":"that",
"56665":"thaw",
"56666":"the",
"61111":"thea",
"61112":"thee",
"61113":"theft",
"61114":"their",
"61115":"them",
"61116":"theme",
"61121":"then",
"61122":"there",
"61123":"these",
"61124":"theta",
"61125":"they",
"61126":"thick",
"61131":"thief",
"61132":"thigh",
"61133":"thin",
"61134":"thine",
"61135":"thing",
"61136":"think",
"61141":"third",
"61142":"this",
"61143":"thong",
"61144":"thor",
"61145":"thorn",
"61146":"thorny",
"61151":"those",
"61152":"thou",
"61153":"thread",
"61154":"three",
"61155":"threw",
"61156":"throb",
"61161":"throes",
"61162":"throw",
"61163":"thrum",
"61164":"thud",
"61165":"thug",
"61166":"thule",
"61211":"thumb",
"61212":"thump",
"61213":"thus",
"61214":"thy",
"61215":"thyme",
"61216":"ti",
"61221":"tiber",
"61222":"tibet",
"61223":"tibia",
"61224":"tic",
"61225":"tick",
"61226":"ticket",
"61231":"tid",
"61232":"tidal",
"61233":"tidbit",
"61234":"tide",
"61235":"tidy",
"61236":"tie",
"61241":"tied",
"61242":"tier",
"61243":"tift",
"61244":"tiger",
"61245":"tight",
"61246":"til",
"61251":"tilde",
"61252":"tile",
"61253":"till",
"61254":"tilt",
"61255":"tilth",
"61256":"tim",
"61261":"time",
"61262":"timex",
"61263":"timid",
"61264":"timon",
"61265":"tin",
"61266":"tina",
"61311":"tine",
"61312":"tinge",
"61313":"tint",
"61314":"tiny",
"61315":"tioga",
"61316":"tip",
"61321":"tipoff",
"61322":"tippy",
"61323":"tipsy",
"61324":"tire",
"61325":"tit",
"61326":"titan",
"61331":"tithe",
"61332":"title",
"61333":"titus",
"61334":"tj",
"61335":"tk",
"61336":"tl",
"61341":"tm",
"61342":"tn",
"61343":"tnt",
"61344":"to",
"61345":"toad",
"61346":"toady",
"61351":"toast",
"61352":"toby",
"61353":"today",
"61354":"todd",
"61355":"toe",
"61356":"tofu",
"61361":"tog",
"61362":"togo",
"61363":"togs",
"61364":"toil",
"61365":"toilet",
"61366":"token",
"61411":"tokyo",
"61412":"told",
"61413":"toll",
"61414":"tom",
"61415":"tomb",
"61416":"tome",
"61421":"tommy",
"61422":"ton",
"61423":"tonal",
"61424":"tone",
"61425":"tong",
"61426":"toni",
"61431":"tonic",
"61432":"tonk",
"61433":"tonsil",
"61434":"tony",
"61435":"too",
"61436":"took",
"61441":"tool",
"61442":"toot",
"61443":"tooth",
"61444":"top",
"61445":"topaz",
"61446":"topic",
"61451":"topple",
"61452":"topsy",
"61453":"tor",
"61454":"torah",
"61455":"torch",
"61456":"tore",
"61461":"tori",
"61462":"torn",
"61463":"torr",
"61464":"torso",
"61465":"tort",
"61466":"torus",
"61511":"tory",
"61512":"toss",
"61513":"tot",
"61514":"total",
"61515":"tote",
"61516":"totem",
"61521":"touch",
"61522":"tough",
"61523":"tour",
"61524":"tout",
"61525":"tow",
"61526":"towel",
"61531":"tower",
"61532":"town",
"61533":"toxic",
"61534":"toxin",
"61535":"toy",
"61536":"tp",
"61541":"tq",
"61542":"tr",
"61543":"trace",
"61544":"track",
"61545":"tract",
"61546":"tracy",
"61551":"trade",
"61552":"trag",
"61553":"trail",
"61554":"train",
"61555":"trait",
"61556":"tram",
"61561":"tramp",
"61562":"trap",
"61563":"trash",
"61564":"trawl",
"61565":"tray",
"61566":"tread",
"61611":"treat",
"61612":"treble",
"61613":"tree",
"61614":"trek",
"61615":"trench",
"61616":"trend",
"61621":"tress",
"61622":"triad",
"61623":"trial",
"61624":"tribe",
"61625":"trick",
"61626":"tried",
"61631":"trig",
"61632":"trill",
"61633":"trim",
"61634":"trio",
"61635":"trip",
"61636":"tripe",
"61641":"trite",
"61642":"triton",
"61643":"trod",
"61644":"troll",
"61645":"troop",
"61646":"trot",
"61651":"trout",
"61652":"troy",
"61653":"truce",
"61654":"truck",
"61655":"trudge",
"61656":"trudy",
"61661":"true",
"61662":"truly",
"61663":"trump",
"61664":"trunk",
"61665":"truss",
"61666":"trust",
"62111":"truth",
"62112":"trw",
"62113":"try",
"62114":"ts",
"62115":"tsar",
"62116":"tt",
"62121":"ttl",
"62122":"ttt",
"62123":"tttt",
"62124":"tty",
"62125":"tu",
"62126":"tub",
"62131":"tuba",
"62132":"tube",
"62133":"tuck",
"62134":"tudor",
"62135":"tuff",
"62136":"tuft",
"62141":"tug",
"62142":"tulane",
"62143":"tulip",
"62144":"tulle",
"62145":"tulsa",
"62146":"tum",
"62151":"tun",
"62152":"tuna",
"62153":"tune",
"62154":"tung",
"62155":"tunic",
"62156":"tunis",
"62161":"tunnel",
"62162":"tuple",
"62163":"turf",
"62164":"turin",
"62165":"turk",
"62166":"turn",
"62211":"turvy",
"62212":"tusk",
"62213":"tussle",
"62214":"tutor",
"62215":"tutu",
"62216":"tuv",
"62221":"tv",
"62222":"tva",
"62223":"tw",
"62224":"twa",
"62225":"twain",
"62226":"tweak",
"62231":"tweed",
"62232":"twice",
"62233":"twig",
"62234":"twill",
"62235":"twin",
"62236":"twine",
"62241":"twirl",
"62242":"twist",
"62243":"twisty",
"62244":"twit",
"62245":"two",
"62246":"twx",
"62251":"tx",
"62252":"ty",
"62253":"tyburn",
"62254":"tying",
"62255":"tyler",
"62256":"type",
"62261":"typic",
"62262":"typo",
"62263":"tyson",
"62264":"tz",
"62265":"u",
"62266":"u's",
"62311":"ua",
"62312":"ub",
"62313":"uc",
"62314":"ucla",
"62315":"ud",
"62316":"ue",
"62321":"uf",
"62322":"ug",
"62323":"ugh",
"62324":"ugly",
"62325":"uh",
"62326":"ui",
"62331":"uj",
"62332":"uk",
"62333":"ul",
"62334":"ulan",
"62335":"ulcer",
"62336":"ultra",
"62341":"um",
"62342":"umber",
"62343":"umbra",
"62344":"umpire",
"62345":"un",
"62346":"unary",
"62351":"uncle",
"62352":"under",
"62353":"unify",
"62354":"union",
"62355":"unit",
"62356":"unite",
"62361":"unity",
"62362":"unix",
"62363":"until",
"62364":"uo",
"62365":"up",
"62366":"upend",
"62411":"uphold",
"62412":"upon",
"62413":"upper",
"62414":"uproar",
"62415":"upset",
"62416":"uptake",
"62421":"upton",
"62422":"uq",
"62423":"ur",
"62424":"urban",
"62425":"urbane",
"62426":"urea",
"62431":"urge",
"62432":"uri",
"62433":"urine",
"62434":"uris",
"62435":"urn",
"62436":"ursa",
"62441":"us",
"62442":"usa",
"62443":"usaf",
"62444":"usage",
"62445":"usc",
"62446":"usda",
"62451":"use",
"62452":"useful",
"62453":"usgs",
"62454":"usher",
"62455":"usia",
"62456":"usn",
"62461":"usps",
"62462":"ussr",
"62463":"usual",
"62464":"usurp",
"62465":"usury",
"62466":"ut",
"62511":"utah",
"62512":"utica",
"62513":"utile",
"62514":"utmost",
"62515":"utter",
"62516":"uu",
"62521":"uuu",
"62522":"uuuu",
"62523":"uv",
"62524":"uvw",
"62525":"uw",
"62526":"ux",
"62531":"uy",
"62532":"uz",
"62533":"v",
"62534":"v's",
"62535":"va",
"62536":"vacua",
"62541":"vacuo",
"62542":"vade",
"62543":"vaduz",
"62544":"vague",
"62545":"vail",
"62546":"vain",
"62551":"vale",
"62552":"valet",
"62553":"valeur",
"62554":"valid",
"62555":"value",
"62556":"valve",
"62561":"vamp",
"62562":"van",
"62563":"vance",
"62564":"vane",
"62565":"vary",
"62566":"vase",
"62611":"vast",
"62612":"vat",
"62613":"vault",
"62614":"vb",
"62615":"vc",
"62616":"vd",
"62621":"ve",
"62622":"veal",
"62623":"veda",
"62624":"vee",
"62625":"veer",
"62626":"veery",
"62631":"vega",
"62632":"veil",
"62633":"vein",
"62634":"velar",
"62635":"veldt",
"62636":"vella",
"62641":"vellum",
"62642":"venal",
"62643":"vend",
"62644":"venial",
"62645":"venom",
"62646":"vent",
"62651":"venus",
"62652":"vera",
"62653":"verb",
"62654":"verde",
"62655":"verdi",
"62656":"verge",
"62661":"verity",
"62662":"verna",
"62663":"verne",
"62664":"versa",
"62665":"verse",
"62666":"verve",
"63111":"very",
"63112":"vessel",
"63113":"vest",
"63114":"vet",
"63115":"vetch",
"63116":"veto",
"63121":"vex",
"63122":"vf",
"63123":"vg",
"63124":"vh",
"63125":"vi",
"63126":"via",
"63131":"vial",
"63132":"vicar",
"63133":"vice",
"63134":"vichy",
"63135":"vicky",
"63136":"vida",
"63141":"video",
"63142":"vie",
"63143":"viet",
"63144":"view",
"63145":"vigil",
"63146":"vii",
"63151":"viii",
"63152":"vile",
"63153":"villa",
"63154":"vine",
"63155":"vinyl",
"63156":"viola",
"63161":"violet",
"63162":"virgil",
"63163":"virgo",
"63164":"virus",
"63165":"vis",
"63166":"visa",
"63211":"vise",
"63212":"visit",
"63213":"visor",
"63214":"vista",
"63215":"vita",
"63216":"vitae",
"63221":"vital",
"63222":"vito",
"63223":"vitro",
"63224":"viva",
"63225":"vivian",
"63226":"vivid",
"63231":"vivo",
"63232":"vixen",
"63233":"viz",
"63234":"vj",
"63235":"vk",
"63236":"vl",
"63241":"vm",
"63242":"vn",
"63243":"vo",
"63244":"vocal",
"63245":"vogel",
"63246":"vogue",
"63251":"voice",
"63252":"void",
"63253":"volt",
"63254":"volta",
"63255":"volvo",
"63256":"vomit",
"63261":"von",
"63262":"voss",
"63263":"vote",
"63264":"vouch",
"63265":"vow",
"63266":"vowel",
"63311":"vp",
"63312":"vq",
"63313":"vr",
"63314":"vs",
"63315":"vt",
"63316":"vu",
"63321":"vulcan",
"63322":"vv",
"63323":"vvv",
"63324":"vvvv",
"63325":"vw",
"63326":"vx",
"63331":"vy",
"63332":"vying",
"63333":"vz",
"63334":"w",
"63335":"w's",
"63336":"wa",
"63341":"waals",
"63342":"wac",
"63343":"wack",
"63344":"wacke",
"63345":"wacky",
"63346":"waco",
"63351":"wad",
"63352":"wade",
"63353":"wadi",
"63354":"wafer",
"63355":"wag",
"63356":"wage",
"63361":"waggle",
"63362":"wah",
"63363":"wahl",
"63364":"wail",
"63365":"waist",
"63366":"wait",
"63411":"waite",
"63412":"waive",
"63413":"wake",
"63414":"waken",
"63415":"waldo",
"63416":"wale",
"63421":"walk",
"63422":"walkie",
"63423":"wall",
"63424":"walls",
"63425":"wally",
"63426":"walsh",
"63431":"walt",
"63432":"walton",
"63433":"waltz",
"63434":"wan",
"63435":"wand",
"63436":"wane",
"63441":"wang",
"63442":"want",
"63443":"war",
"63444":"ward",
"63445":"ware",
"63446":"warm",
"63451":"warmth",
"63452":"warn",
"63453":"warp",
"63454":"warren",
"63455":"wart",
"63456":"warty",
"63461":"wary",
"63462":"was",
"63463":"wash",
"63464":"washy",
"63465":"wasp",
"63466":"wast",
"63511":"waste",
"63512":"watch",
"63513":"water",
"63514":"watt",
"63515":"watts",
"63516":"wave",
"63521":"wavy",
"63522":"wax",
"63523":"waxen",
"63524":"waxy",
"63525":"way",
"63526":"wayne",
"63531":"wb",
"63532":"wc",
"63533":"wd",
"63534":"we",
"63535":"we'd",
"63536":"we'll",
"63541":"we're",
"63542":"we've",
"63543":"weak",
"63544":"weal",
"63545":"wealth",
"63546":"wean",
"63551":"wear",
"63552":"weary",
"63553":"weave",
"63554":"web",
"63555":"webb",
"63556":"weber",
"63561":"weco",
"63562":"wed",
"63563":"wedge",
"63564":"wee",
"63565":"weed",
"63566":"weedy",
"63611":"week",
"63612":"weeks",
"63613":"weep",
"63614":"wehr",
"63615":"wei",
"63616":"weigh",
"63621":"weir",
"63622":"weird",
"63623":"weiss",
"63624":"welch",
"63625":"weld",
"63626":"well",
"63631":"wells",
"63632":"welsh",
"63633":"welt",
"63634":"wendy",
"63635":"went",
"63636":"wept",
"63641":"were",
"63642":"wert",
"63643":"west",
"63644":"wet",
"63645":"wf",
"63646":"wg",
"63651":"wh",
"63652":"whack",
"63653":"whale",
"63654":"wham",
"63655":"wharf",
"63656":"what",
"63661":"wheat",
"63662":"whee",
"63663":"wheel",
"63664":"whelk",
"63665":"whelm",
"63666":"whelp",
"64111":"when",
"64112":"where",
"64113":"whet",
"64114":"which",
"64115":"whiff",
"64116":"whig",
"64121":"while",
"64122":"whim",
"64123":"whine",
"64124":"whinny",
"64125":"whip",
"64126":"whir",
"64131":"whirl",
"64132":"whisk",
"64133":"whit",
"64134":"white",
"64135":"whiz",
"64136":"who",
"64141":"who'd",
"64142":"whoa",
"64143":"whole",
"64144":"whom",
"64145":"whoop",
"64146":"whoosh",
"64151":"whop",
"64152":"whose",
"64153":"whup",
"64154":"why",
"64155":"wi",
"64156":"wick",
"64161":"wide",
"64162":"widen",
"64163":"widow",
"64164":"width",
"64165":"wield",
"64166":"wier",
"64211":"wife",
"64212":"wig",
"64213":"wild",
"64214":"wile",
"64215":"wiley",
"64216":"wilkes",
"64221":"will",
"64222":"willa",
"64223":"wills",
"64224":"wilma",
"64225":"wilt",
"64226":"wily",
"64231":"win",
"64232":"wince",
"64233":"winch",
"64234":"wind",
"64235":"windy",
"64236":"wine",
"64241":"wing",
"64242":"wink",
"64243":"winnie",
"64244":"wino",
"64245":"winter",
"64246":"winy",
"64251":"wipe",
"64252":"wire",
"64253":"wiry",
"64254":"wise",
"64255":"wish",
"64256":"wishy",
"64261":"wisp",
"64262":"wispy",
"64263":"wit",
"64264":"witch",
"64265":"with",
"64266":"withe",
"64311":"withy",
"64312":"witt",
"64313":"witty",
"64314":"wive",
"64315":"wj",
"64316":"wk",
"64321":"wl",
"64322":"wm",
"64323":"wn",
"64324":"wo",
"64325":"woe",
"64326":"wok",
"64331":"woke",
"64332":"wold",
"64333":"wolf",
"64334":"wolfe",
"64335":"wolff",
"64336":"wolve",
"64341":"woman",
"64342":"womb",
"64343":"women",
"64344":"won",
"64345":"won't",
"64346":"wonder",
"64351":"wong",
"64352":"wont",
"64353":"woo",
"64354":"wood",
"64355":"woods",
"64356":"woody",
"64361":"wool",
"64362":"woozy",
"64363":"word",
"64364":"wordy",
"64365":"wore",
"64366":"work",
"64411":"world",
"64412":"worm",
"64413":"wormy",
"64414":"worn",
"64415":"worry",
"64416":"worse",
"64421":"worst",
"64422":"worth",
"64423":"wotan",
"64424":"would",
"64425":"wound",
"64426":"wove",
"64431":"woven",
"64432":"wow",
"64433":"wp",
"64434":"wq",
"64435":"wr",
"64436":"wrack",
"64441":"wrap",
"64442":"wrath",
"64443":"wreak",
"64444":"wreck",
"64445":"wrest",
"64446":"wring",
"64451":"wrist",
"64452":"writ",
"64453":"write",
"64454":"writhe",
"64455":"wrong",
"64456":"wrote",
"64461":"wry",
"64462":"ws",
"64463":"wt",
"64464":"wu",
"64465":"wuhan",
"64466":"wv",
"64511":"ww",
"64512":"www",
"64513":"wwww",
"64514":"wx",
"64515":"wxy",
"64516":"wy",
"64521":"wyatt",
"64522":"wyeth",
"64523":"wylie",
"64524":"wyman",
"64525":"wyner",
"64526":"wynn",
"64531":"wz",
"64532":"x",
"64533":"x's",
"64534":"xa",
"64535":"xb",
"64536":"xc",
"64541":"xd",
"64542":"xe",
"64543":"xenon",
"64544":"xerox",
"64545":"xf",
"64546":"xg",
"64551":"xh",
"64552":"xi",
"64553":"xj",
"64554":"xk",
"64555":"xl",
"64556":"xm",
"64561":"xn",
"64562":"xo",
"64563":"xp",
"64564":"xq",
"64565":"xr",
"64566":"xs",
"64611":"xt",
"64612":"xu",
"64613":"xv",
"64614":"xw",
"64615":"xx",
"64616":"xxx",
"64621":"xxxx",
"64622":"xy",
"64623":"xylem",
"64624":"xyz",
"64625":"xz",
"64626":"y",
"64631":"y's",
"64632":"ya",
"64633":"yacht",
"64634":"yah",
"64635":"yak",
"64636":"yale",
"64641":"yalta",
"64642":"yam",
"64643":"yamaha",
"64644":"yang",
"64645":"yank",
"64646":"yap",
"64651":"yaqui",
"64652":"yard",
"64653":"yarn",
"64654":"yates",
"64655":"yaw",
"64656":"yawl",
"64661":"yawn",
"64662":"yb",
"64663":"yc",
"64664":"yd",
"64665":"ye",
"64666":"yea",
"65111":"yeah",
"65112":"year",
"65113":"yearn",
"65114":"yeast",
"65115":"yeasty",
"65116":"yeats",
"65121":"yell",
"65122":"yelp",
"65123":"yemen",
"65124":"yen",
"65125":"yet",
"65126":"yf",
"65131":"yg",
"65132":"yh",
"65133":"yi",
"65134":"yield",
"65135":"yin",
"65136":"yip",
"65141":"yj",
"65142":"yk",
"65143":"yl",
"65144":"ym",
"65145":"ymca",
"65146":"yn",
"65151":"yo",
"65152":"yodel",
"65153":"yoder",
"65154":"yoga",
"65155":"yogi",
"65156":"yoke",
"65161":"yokel",
"65162":"yolk",
"65163":"yon",
"65164":"yond",
"65165":"yore",
"65166":"york",
"65211":"yost",
"65212":"you",
"65213":"you'd",
"65214":"young",
"65215":"your",
"65216":"youth",
"65221":"yow",
"65222":"yp",
"65223":"yq",
"65224":"yr",
"65225":"ys",
"65226":"yt",
"65231":"yu",
"65232":"yucca",
"65233":"yuck",
"65234":"yuh",
"65235":"yuki",
"65236":"yukon",
"65241":"yule",
"65242":"yv",
"65243":"yves",
"65244":"yw",
"65245":"ywca",
"65246":"yx",
"65251":"yy",
"65252":"yyy",
"65253":"yyyy",
"65254":"yz",
"65255":"z",
"65256":"z's",
"65261":"za",
"65262":"zag",
"65263":"zaire",
"65264":"zan",
"65265":"zap",
"65266":"zazen",
"65311":"zb",
"65312":"zc",
"65313":"zd",
"65314":"ze",
"65315":"zeal",
"65316":"zealot",
"65321":"zebra",
"65322":"zeiss",
"65323":"zen",
"65324":"zero",
"65325":"zest",
"65326":"zesty",
"65331":"zeta",
"65332":"zeus",
"65333":"zf",
"65334":"zg",
"65335":"zh",
"65336":"zi",
"65341":"zig",
"65342":"zilch",
"65343":"zinc",
"65344":"zing",
"65345":"zion",
"65346":"zip",
"65351":"zj",
"65352":"zk",
"65353":"zl",
"65354":"zloty",
"65355":"zm",
"65356":"zn",
"65361":"zo",
"65362":"zoe",
"65363":"zomba",
"65364":"zone",
"65365":"zoo",
"65366":"zoom",
"65411":"zorn",
"65412":"zp",
"65413":"zq",
"65414":"zr",
"65415":"zs",
"65416":"zt",
"65421":"zu",
"65422":"zurich",
"65423":"zv",
"65424":"zw",
"65425":"zx",
"65426":"zy",
"65431":"zz",
"65432":"zzz",
"65433":"zzzz",
"65434":"0",
"65435":"1",
"65436":"2",
"65441":"3",
"65442":"4",
"65443":"5",
"65444":"6",
"65445":"7",
"65446":"8",
"65451":"9",
"65452":"10",
"65453":"11",
"65454":"12",
"65455":"13",
"65456":"14",
"65461":"15",
"65462":"16",
"65463":"17",
"65464":"18",
"65465":"19",
"65466":"20",
"65511":"21",
"65512":"22",
"65513":"23",
"65514":"24",
"65515":"25",
"65516":"26",
"65521":"27",
"65522":"28",
"65523":"29",
"65524":"30",
"65525":"31",
"65526":"32",
"65531":"33",
"65532":"34",
"65533":"35",
"65534":"36",
"65535":"37",
"65536":"38",
"65541":"39",
"65542":"40",
"65543":"41",
"65544":"42",
"65545":"43",
"65546":"44",
"65551":"45",
"65552":"46",
"65553":"47",
"65554":"48",
"65555":"49",
"65556":"50",
"65561":"51",
"65562":"52",
"65563":"53",
"65564":"54",
"65565":"55",
"65566":"56",
"65611":"57",
"65612":"58",
"65613":"59",
"65614":"60",
"65615":"61",
"65616":"62",
"65621":"63",
"65622":"64",
"65623":"65",
"65624":"66",
"65625":"67",
"65626":"68",
"65631":"69",
"65632":"70",
"65633":"71",
"65634":"72",
"65635":"73",
"65636":"74",
"65641":"75",
"65642":"76",
"65643":"77",
"65644":"78",
"65645":"79",
"65646":"80",
"65651":"81",
"65652":"82",
"65653":"83",
"65654":"84",
"65655":"85",
"65656":"86",
"65661":"87",
"65662":"88",
"65663":"89",
"65664":"90",
"65665":"91",
"65666":"92",
"66111":"93",
"66112":"94",
"66113":"95",
"66114":"96",
"66115":"97",
"66116":"98",
"66121":"99",
"66122":"100",
"66123":"101",
"66124":"111",
"66125":"123",
"66126":"200",
"66131":"222",
"66132":"234",
"66133":"300",
"66134":"333",
"66135":"345",
"66136":"400",
"66141":"444",
"66142":"456",
"66143":"500",
"66144":"555",
"66145":"567",
"66146":"600",
"66151":"666",
"66152":"678",
"66153":"700",
"66154":"777",
"66155":"789",
"66156":"800",
"66161":"888",
"66162":"900",
"66163":"999",
"66164":"1000",
"66165":"1111",
"66166":"1234",
"66211":"1492",
"66212":"1500",
"66213":"1600",
"66214":"1700",
"66215":"1776",
"66216":"1800",
"66221":"1812",
"66222":"1900",
"66223":"1910",
"66224":"1920",
"66225":"1925",
"66226":"1930",
"66231":"1935",
"66232":"1940",
"66233":"1945",
"66234":"1950",
"66235":"1955",
"66236":"1960",
"66241":"1965",
"66242":"1970",
"66243":"1975",
"66244":"1980",
"66245":"1985",
"66246":"1990",
"66251":"1991",
"66252":"1992",
"66253":"1993",
"66254":"1994",
"66255":"1995",
"66256":"1996",
"66261":"1997",
"66262":"2000",
"66263":"2001",
"66264":"2020",
"66265":"2222",
"66266":"2345",
"66311":"2468",
"66312":"3000",
"66313":"3333",
"66314":"3456",
"66315":"4000",
"66316":"4321",
"66321":"4444",
"66322":"4567",
"66323":"5000",
"66324":"5555",
"66325":"5678",
"66326":"6000",
"66331":"6666",
"66332":"6789",
"66333":"7000",
"66334":"7777",
"66335":"8000",
"66336":"8888",
"66341":"9000",
"66342":"9876",
"66343":"9999",
"66344":"100th",
"66345":"101st",
"66346":"10th",
"66351":"11th",
"66352":"12th",
"66353":"13th",
"66354":"14th",
"66355":"15th",
"66356":"16th",
"66361":"17th",
"66362":"18th",
"66363":"19th",
"66364":"1st",
"66365":"20th",
"66366":"21st",
"66411":"22nd",
"66412":"23rd",
"66413":"24th",
"66414":"25th",
"66415":"26th",
"66416":"27th",
"66421":"28th",
"66422":"29th",
"66423":"2nd",
"66424":"30th",
"66425":"31st",
"66426":"32nd",
"66431":"33rd",
"66432":"34th",
"66433":"35th",
"66434":"36th",
"66435":"37th",
"66436":"38th",
"66441":"39th",
"66442":"3rd",
"66443":"40th",
"66444":"41st",
"66445":"42nd",
"66446":"43rd",
"66451":"44th",
"66452":"45th",
"66453":"46th",
"66454":"47th",
"66455":"48th",
"66456":"49th",
"66461":"4th",
"66462":"50th",
"66463":"51st",
"66464":"52nd",
"66465":"53rd",
"66466":"54th",
"66511":"55th",
"66512":"56th",
"66513":"57th",
"66514":"58th",
"66515":"59th",
"66516":"5th",
"66521":"60th",
"66522":"61st",
"66523":"62nd",
"66524":"63rd",
"66525":"65th",
"66526":"66th",
"66531":"67th",
"66532":"68th",
"66533":"69th",
"66534":"6th",
"66535":"70th",
"66536":"71st",
"66541":"72nd",
"66542":"73rd",
"66543":"74th",
"66544":"75th",
"66545":"76th",
"66546":"77th",
"66551":"78th",
"66552":"79th",
"66553":"7th",
"66554":"80th",
"66555":"81st",
"66556":"82nd",
"66561":"83rd",
"66562":"84th",
"66563":"85th",
"66564":"86th",
"66565":"87th",
"66566":"88th",
"66611":"89th",
"66612":"8th",
"66613":"90th",
"66614":"91st",
"66615":"92nd",
"66616":"93rd",
"66621":"94th",
"66622":"95th",
"66623":"96th",
"66624":"97th",
"66625":"98th",
"66626":"99th",
"66631":"9th",
"66632":"!",
"66633":"!!",
"66634":"\"",
"66635":"#",
"66636":"##",
"66641":"$",
"66642":"$$",
"66643":"%",
"66644":"%%",
"66645":"&",
"66646":"(",
"66651":"()",
"66652":")",
"66653":"*",
"66654":"**",
"66655":"+",
"66656":"-",
"66661":":",
"66662":";",
"66663":"=",
"66664":"?",
"66665":"??",
"66666":"@"}
# The dicewords table above maps five-digit keys -- five rolls of a
# six-sided die, digits 1-6, giving 6**5 = 7776 possible keys -- to words,
# so each word contributes roughly log2(7776) ~= 12.9 bits of entropy.
from random import SystemRandom
import sys


def generate_word_index(rng):
    """Simulate five rolls of a six-sided die, concatenated into a lookup key."""
    key = ""
    for _ in range(5):
        key += str(rng.randint(1, 6))
    return key


def generate_passphrase(words, verbose):
    """Print a passphrase built from `words` randomly selected diceware words."""
    rng = SystemRandom()  # OS-provided CSPRNG, suitable for security use
    passphrase = ""
    if verbose:
        print("Info : RNG is random.SystemRandom()")
    for i in range(words):
        word_index = generate_word_index(rng)
        if verbose:
            print("{} => {}".format(word_index, dicewords[word_index]))
        passphrase += dicewords[word_index]
        if i < words - 1:
            passphrase += " "
    print(passphrase)


def usage():
    print("diceware WORDCOUNT [options]")
    print("")
    print(" --hurt-me-plenty  Overrides security warning for generating a passphrase with fewer than six dicewords.")
    print("                   It beats passw0rd1 I guess...")
    print(" -v, --verbose     Prints the RNG in use and each key/word pair as it is drawn.")


def integer_as_string(num):
    """Spell out small word counts; only called for counts below six."""
    names = {1: "one", 2: "two", 3: "three", 4: "four", 5: "five"}
    return names.get(num)


if __name__ == "__main__":
    if len(sys.argv) == 1:
        usage()
    else:
        try:
            words = int(sys.argv[1])
        except ValueError:
            print("Error : WORDCOUNT must be an integer.")
            sys.exit(1)
        if words < 1:
            print("Error : WORDCOUNT must be at least one.")
            sys.exit(1)
        hurt_me_plenty = False
        verbose = False
        for argument in sys.argv[2:]:
            if argument == "--hurt-me-plenty":
                hurt_me_plenty = True
            if argument in ("-v", "--verbose"):
                verbose = True
        if words < 6 and not hurt_me_plenty:
            print("Warning: We recommend at least six diceware words. Append option --hurt-me-plenty to generate anyway.")
            print("         See http://arstechnica.com/information-technology/2014/03/diceware-passwords-now-need-six-random-words-to-thwart-hackers/")
        elif words < 6:
            print("Info : We're seriously generating a {}-word passphrase right past a warning.".format(integer_as_string(words)))
            generate_passphrase(words, verbose)
        else:
            generate_passphrase(words, verbose)
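
# Example invocation (illustrative only; the six words below are just one
# possible random draw from the table above):
#   $ python diceware.py 6
#   pitch synod wok trout vigil rumpus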
|
AdrianCohea/DicewareGen
|
diceware.py
|
Python
|
gpl-2.0
| 128,334
|
[
"Amber",
"BLAST",
"Brian",
"DIRAC",
"Elk",
"GULP",
"Galaxy",
"MOE",
"MOOSE",
"NEURON",
"VisIt"
] |
ff635a36bdc6c4d445f6beae2571808edbeefb9c886798604518b7bd44ea3431
|
"""Interface to Sailthru API."""
import hashlib
import itertools
import socket
import urllib
import urllib2

try:
    # Prefer simplejson as it's usually faster
    import simplejson as json
except ImportError:
    import json

try:
    import settings
except ImportError:
    class settings:
        class services:
            sailthru = {
                'server': 'api.sailthru.com',
                'key': 'YOUR-KEY-HERE',
                'secret': 'YOUR-SECRET-HERE',
            }

TIMEOUT = 10


class Error(Exception):
    """A Sailthru error."""

    def __init__(self, error_code=0, error_msg='None'):
        self.args = (error_code, error_msg)
        self.error_code = error_code
        self.error_msg = error_msg


class UrlMethodRequest(urllib2.Request):
    """Subclass Request so we can override get_method() to allow non-GET/POST methods."""

    def __init__(self, method, *args, **kwargs):
        self._method = method
        urllib2.Request.__init__(self, *args, **kwargs)

    def get_method(self):
        return self._method


def safestr(obj):
    r"""Convert given object to utf-8 encoded string (from web.py).

    >>> safestr('hello')
    'hello'
    >>> safestr(u'\u1234')
    '\xe1\x88\xb4'
    >>> safestr(2)
    '2'
    """
    if isinstance(obj, unicode):
        return obj.encode('utf-8')
    elif isinstance(obj, str):
        return obj
    elif hasattr(obj, 'next') and hasattr(obj, '__iter__'):  # iterator
        return itertools.imap(safestr, obj)
    else:
        return str(obj)


def _flatten(dictionary, base_key='', output=None):
    r"""Return flattened version of given dictionary. Values in nested dictionaries are
    placed in the output with keys "k0[k1]", where k0 is the key of the dict in the top
    dict and k1 is the key of the value in the inner dict.

    >>> _flatten({})
    {}
    >>> sorted(_flatten({'k': 'v'}).items())
    [('k', 'v')]
    >>> sorted(_flatten({'k1': 'v1', 'k2': {'k3': {'k4': 'v2'}, 'k5': 'v3'}}).items())
    [('k1', 'v1'), (u'k2[k3][k4]', 'v2'), (u'k2[k5]', 'v3')]
    >>> sorted(_flatten({'k%c': 'v%c'}).items())
    [('k%c', 'v%c')]
    >>> sorted(_flatten({'k1%c': {'k2%c': 'v1%c'}}).items())
    [(u'k1%c[k2%c]', 'v1%c')]
    >>> sorted(_flatten({'k1': {u'o\u2019kane': u'o\u2019hare'}}).items())
    [(u'k1[o\u2019kane]', u'o\u2019hare')]
    """
    if output is None:
        output = {}
    for key, value in dictionary.iteritems():
        if base_key:
            inner_key = u'{0}[{1}]'.format(base_key, key)
        else:
            inner_key = key
        if hasattr(value, 'iteritems'):
            _flatten(value, inner_key, output)
        else:
            output[inner_key] = value
    return output


def _sailthru_request(action, method, kw):
    """
    @raise: sailthru.Error
    """
    assert method in ('GET', 'POST', 'DELETE')
    kw['api_key'] = settings.services.sailthru['key']
    kw['format'] = 'json'
    if action == 'send' and 'vars' in kw:
        kw['vars'] = json.dumps(kw['vars'])
    kw = _flatten(kw)
    # Ensure keys and values are encoded as UTF-8
    kw = dict((safestr(k), safestr(v)) for k, v in kw.iteritems())
    values = sorted(kw.itervalues())
    kw['sig'] = hashlib.md5(settings.services.sailthru['secret'] + ''.join(values)).hexdigest()
    query = urllib.urlencode(kw)
    url = 'http://' + settings.services.sailthru['server'] + '/' + action
    data = None
    headers = {}
    if method == 'POST':
        data = query
    else:
        url += '?' + query
    http_error = None
    try:
        request = UrlMethodRequest(method, url, data=data, headers=headers)
        response = urllib2.urlopen(request, timeout=TIMEOUT)
        response = response.read()
    except urllib2.HTTPError as e:
        response = e.read()
        http_error = e
    except (urllib2.URLError, socket.error) as e:
        raise Error(-1, 'No Connection: ' + str(e))
    try:
        json_response = json.loads(response)
    except (TypeError, ValueError) as e:
        error_message = "{0}Malformed JSON, couldn't parse: {1} - {2!r}".format(
            str(http_error) + ' - ' if http_error else '',
            e, response[:100])
        raise Error(-2, error_message)
    if 'error' in json_response:
        raise Error(json_response['error'], json_response['errormsg'])
    return json_response
def send_mail(template, to_address, bcc=None, **kw):
""" Send an email. If a single email address is given (and optionally a bcc address),
return the Sailthru send_id of the email. If multiple, comma-separated email
addresses are given, return a dictionary of {email: send_id} pairs.
@param vars: dict of replacement variables for this particular email
Special variables:
name - Name to put on the "To" line like "Joe Example" <joe@example.com>
from_email - Sets the from email address, it must already be an approved sender address
@param options: dict with
replyto - override the Reply-To header
test - Set to 1 for a test email. 'TEST:' will be put on subject line,
and it will not count towards stats
@raise: sailthru.Error
"""
kw['template'] = template
kw['email'] = to_address
if bcc:
# Not a real bcc, but with Sailthru this acts as a bcc
kw['email'] += ',' + bcc
response = _sailthru_request('send', 'POST', kw)
if 'send_ids' in response:
if bcc:
if to_address in response['send_ids']:
return response['send_ids'][to_address]
else:
# Didn't go through. Because we're bcc'ing, we don't get error
# information, so use our best guess
raise Error(34, 'Email may not be emailed')
else:
return response['send_ids']
elif 'send_id' in response:
return response['send_id']
else:
raise Error(-2, 'Malformed JSON: no send_id(s)')
def cancel_mail(send_id):
"""Cancel email with given send_id that was previously scheduled to be sent.
@raise: sailthru.Error
"""
return _sailthru_request('send', 'DELETE', {'send_id': send_id})
def update_blast(blast_id, **kw):
"""Update Sailthru blast with given keyword parameters. See also:
http://docs.sailthru.com/api/blast
"""
kw['blast_id'] = blast_id
return _sailthru_request('blast', 'POST', kw)
def send_blast(name, list_name, from_name, from_email, subject, html, text='',
schedule_time='now', reply_to=None, link_tracking=True,
google_analytics=True, public=True, ehash=True, utm_content=True, **kw):
"""Send or schedule a mass mail blast and return the blast ID. For full list of
optional kw parameters, see http://docs.sailthru.com/api/blast
@raise: sailthru.Error - if Sailthru error occurs or error talking to Sailthru
"""
kw['name'] = name
kw['list'] = list_name
kw['from_name'] = from_name
kw['from_email'] = from_email
kw['subject'] = subject
kw['content_html'] = html
kw['content_text'] = text
if schedule_time is not None:
kw['schedule_time'] = schedule_time
if reply_to is not None:
kw['replyto'] = reply_to
if link_tracking is not None:
kw['is_link_tracking'] = '1' if link_tracking else '0'
if google_analytics is not None:
kw['is_google_analytics'] = '1' if google_analytics else '0'
if public is not None:
kw['is_public'] = '1' if public else '0'
link_params = {}
if ehash:
link_params['_ehash'] = "{md5(email)}"
if utm_content:
link_params['utm_content'] = "{source}"
kw['link_params'] = json.dumps(link_params)
response = _sailthru_request('blast', 'POST', kw)
if 'blast_id' not in response:
raise Error(-2, 'Malformed JSON: blast_id not in response')
return response['blast_id']
def get_user_blasts(email_address, num_blasts):
"""Get the last x blasts sent to a user
@raise: sailthru.Error
"""
blasts = []
user = get_user_properties(email_address, recent_blasts=num_blasts)
if user['recent_blasts']:
for blast in user['recent_blasts']:
blast.update(get_blast_properties(blast['blast_id']))
blasts.append(blast)
return blasts
def get_blast_properties(blast_id):
""" Gets information about a campaign mail
@raise: sailthru.Error
"""
return _sailthru_request('blast', 'GET', {'blast_id': blast_id})
def get_email_properties(send_id):
""" Gets information about a sent email
@raise: sailthru.Error
"""
return _sailthru_request('send', 'GET', {'send_id': send_id})
def get_user_properties(email_address, **kw):
""" Get information about an email address
@param recent_sends: Get last x transactional mails sent to user
@param recent_blasts: Get last x blast mails sent to user
@raise: sailthru.Error
"""
properties = { 'verified': 0, # Has a user confirmed their email address
'optout': 0 } # Has a user opted-out of Oyster emails
vars = {'email': email_address}
vars.update(kw)
response = _sailthru_request('email', 'GET', vars)
properties.update(response)
return properties
def set_user_properties(email, **kw):
""" Set properties on a user
@raise: sailthru.Error
"""
kw['email'] = email
return _sailthru_request('email', 'POST', kw)
def get_template_properties(template):
""" Get information about a template
    As far as I can tell, 'html' is the only useful field of the result
@raise: sailthru.Error
"""
return _sailthru_request('template', 'GET', {'template': template})
def set_template_properties(template, **kw):
""" Set template properties
@raise: sailthru.Error
"""
kw['template'] = template
return _sailthru_request('template', 'POST', kw)
def set_user_lists(email, lists, add=True):
""" Add or remove a user from some lists
@param email: Email address of user
@param lists: A string or a list of strings that are the list names
@raise: sailthru.Error
"""
if isinstance(lists, basestring):
lists = [lists]
if add:
list_value = 1
else:
list_value = 0
kw = {}
kw['email'] = email
kw['lists'] = {}
    for lst in lists:
        kw['lists'][lst] = list_value
return _sailthru_request('email', 'POST', kw)
def add_users_to_list(list_name, emails, report_email=None):
"""Add list of emails to given list. Optionally send an email to report_email when finished.
@raise: sailthru.Error
"""
params = {}
if report_email:
params['report_email'] = report_email
params['job'] = 'import'
params['list'] = list_name
params['emails'] = ','.join(emails)
return _sailthru_request('job', 'POST', params)
def set_vars(url, report_email=None):
"""Set large number of per-user vars using given CSV data feed URL.
@raise: sailthru.Error
"""
kw = {'url': url}
if report_email:
kw['report_email'] = report_email
return _sailthru_request('vars', 'POST', kw)
if __name__ == '__main__':
import doctest
doctest.testmod()
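# A minimal usage sketch (added for illustration, not part of the original
# module). The template name and addresses are hypothetical, and
# settings.services.sailthru must hold real credentials for any call to work:
#
#     send_id = send_mail('welcome-template', 'joe@example.com',
#                         vars={'name': 'Joe Example'},
#                         options={'test': 1})
#     cancel_mail(send_id)  # only meaningful for scheduled sends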
|
oysterhotels/sailthru
|
sailthru.py
|
Python
|
mit
| 11,270
|
[
"BLAST"
] |
79ef941c85f4c04bc9e85a44abf5663bae29c0a1deb1089de823b5c5d0a770b9
|
# coding=utf-8
"""
Color Scheme information
"""
__author__ = 'Matt Eland'
class ColorScheme(object):
"""
A color scheme
:type background: tuple RGB values indicating the background
:type foreground: tuple RGB values for most text and buttons
:type highlight: tuple RGB values for highlighted text
"""
# Color Constants - Shared values that aren't necessarily themed. Used for maps
white = (200, 200, 200)
red = (200, 0, 0)
yellow = (200, 200, 0)
blue = (0, 0, 200)
slight_blue = (150, 200, 200)
green = (0, 200, 0)
purple = (100, 0, 200)
gray = (128, 128, 128)
brown = (150, 86, 64)
salmon = (200, 100, 100)
greenish = (0, 150, 64)
blueish = (0, 100, 200)
pink = (200, 0, 200)
khaki = (189, 183, 107)
# Keyed Colors
background = (0, 0, 0)
foreground = (0, 255, 0)
disabled = (0, 120, 0)
    detail = (128, 128, 128)
highlight = (255, 255, 255)
# Status Colors
caution = (230, 230, 0)
caution_bg = (42, 42, 0)
critical = (230, 0, 0)
critical_bg = (30, 0, 0)
# Map Colors
map_commercial = slight_blue
map_automotive = blueish
map_water = blueish
map_private = yellow
map_service = khaki
map_infrastructure = khaki
map_residential = greenish
map_recreation = greenish
map_vegetation = green
map_unknown = pink
map_emergency = red
map_health = red
map_public = salmon
map_major_road = (85, 251, 167)
map_government = purple
map_pedestrian = brown
map_structural = gray
def __init__(self, name, background=(0, 0, 0), foreground=(0, 255, 0), highlight=(255, 255, 255),
detail=(128, 128, 128), disabled=(0, 120, 0)):
self.background = background
self.foreground = foreground
self.disabled = disabled
self.highlight = highlight
self.detail = detail
self.name = name
def __str__(self):
if self.name:
return self.name
return super(ColorScheme, self).__str__()
def clone_to(self, target):
"""
Clones values in this object to other objects
:param target: The object to receive the values
        :return: The target with its adjusted values.
"""
target.background = self.background
target.foreground = self.foreground
target.detail = self.detail
target.highlight = self.highlight
target.disabled = self.disabled
target.name = self.name
return target
name = None
def get_focus_color(self, is_focused):
"""
Gets the color to use for rendering a foreground depending on if the control is focused or not
:param is_focused: Whether the control is focused
:return: The color to use. This will be highlight for focused and foreground for unfocused
"""
if is_focused:
return self.highlight
else:
return self.foreground
class ColorSchemes(object):
"""
A collection of available color schemes.
"""
@staticmethod
def get_green_color_scheme():
"""
Gets a green-based color scheme
        :return: A green-based color scheme
"""
return ColorScheme(name='Green',
background=(0, 42, 0),
foreground=(0, 210, 0),
disabled=(0, 100, 0),
detail=(85, 251, 167),
highlight=(230, 230, 230))
@staticmethod
def get_avionics_color_scheme():
"""
Gets a green-based color scheme resembling military avionics displays
:return: A green-based color scheme resembling military avionics displays
"""
        return ColorScheme(name='Avionics',
background=(13, 36, 11),
foreground=(0, 194, 0),
disabled=(0, 90, 0),
detail=(0, 123, 0),
highlight=(0, 250, 0))
@staticmethod
def get_terminal_color_scheme():
"""
Gets a green-based color scheme resembling a terminal display
        :return: A green-based color scheme resembling a terminal display
"""
return ColorScheme(name='Terminal',
background=(1, 19, 1),
foreground=(0, 150, 66),
disabled=(0, 75, 33),
detail=(0, 69, 20),
highlight=(182, 179, 174))
@staticmethod
def get_cyan_color_scheme():
"""
Gets a cyan-based color scheme
:return: A cyan-based color scheme
"""
return ColorScheme(name='Cyan',
background=(0, 32, 32),
foreground=(0, 170, 170),
disabled=(0, 80, 80),
detail=(128, 128, 128),
highlight=(0, 255, 255))
@staticmethod
def get_tactical_color_scheme():
"""
Gets a tactical blue / cyan color scheme
:return: A tactical blue / cyan color scheme
"""
return ColorScheme(name='Tactical',
background=(16, 29, 44),
foreground=(104, 147, 152),
disabled=(32, 60, 68),
detail=(51, 98, 106),
highlight=(203, 203, 191))
@staticmethod
def get_blue_color_scheme():
"""
Gets an ice-blue-based color scheme
:return: An ice-blue-based color scheme
"""
return ColorScheme(name='Blue',
background=(0, 0, 32),
foreground=(0, 128, 255),
disabled=(0, 60, 120),
detail=(128, 128, 128),
highlight=(255, 255, 255))
@staticmethod
def get_white_color_scheme():
"""
Gets a white / monochrome-based color scheme
:return: A white / monochrome-based color scheme
"""
return ColorScheme(name='White',
background=(0, 0, 0),
foreground=(150, 150, 150),
disabled=(70, 70, 70),
detail=(128, 128, 128),
highlight=(255, 255, 255))
@staticmethod
def get_red_color_scheme():
"""
Gets a red-based color scheme
:return: A red-based color scheme
"""
return ColorScheme(name='Red',
background=(32, 0, 0),
foreground=(170, 0, 0),
disabled=(80, 0, 0),
detail=(128, 128, 128),
highlight=(255, 0, 0))
@staticmethod
def get_amber_color_scheme():
"""
Gets an amber-based color scheme
        :return: An amber-based color scheme
"""
return ColorScheme(name='Amber',
background=(39, 38, 35),
foreground=(247, 241, 158),
disabled=(128, 116, 39),
detail=(164, 156, 71),
highlight=(249, 245, 250))
@staticmethod
def get_gold_color_scheme():
"""
        Gets a gold-based color scheme
        :return: A gold-based color scheme
"""
return ColorScheme(name='Gold',
background=(30, 26, 4),
foreground=(231, 176, 75),
disabled=(110, 90, 35),
detail=(128, 128, 128),
highlight=(250, 255, 51))
@classmethod
def get_color_schemes(cls):
"""
:rtype : list
"""
schemes = [cls.get_green_color_scheme(),
cls.get_avionics_color_scheme(),
cls.get_terminal_color_scheme(),
cls.get_blue_color_scheme(),
cls.get_amber_color_scheme(),
cls.get_gold_color_scheme(),
cls.get_cyan_color_scheme(),
cls.get_tactical_color_scheme(),
cls.get_red_color_scheme(),
cls.get_white_color_scheme()]
return schemes
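if __name__ == '__main__':
    # A brief usage sketch (added for illustration, not part of the original
    # module), using only the API defined above: pick a scheme and resolve
    # the color with which to draw a focused control.
    scheme = ColorSchemes.get_green_color_scheme()
    print('{}: {}'.format(scheme, scheme.get_focus_color(is_focused=True)))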
|
IntegerMan/Pi-MFD
|
PiMFD/UI/ColorScheme.py
|
Python
|
gpl-2.0
| 8,532
|
[
"Amber"
] |
3c8fc462af03601e5bc9ccb09b3210236decd80eedce687e49f3797e29254f86
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
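# Illustrative example (not in the original script): the partition is stable
# within each sublist and returns (matches, non-matches), e.g.
#   partition(lambda x: x % 2 == 0, [1, 2, 3, 4])  ->  ([2, 4], [1, 3])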
class texttospeechCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'list_voices': ('language_code', ),
'synthesize_speech': ('input', 'voice', 'audio_config', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
            lambda a: a.keyword.value not in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=texttospeechCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the texttospeech client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
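# Example invocation (added for illustration; the directory names are
# hypothetical):
#
#     python fixup_keywords.py --input-directory ./old_samples \
#         --output-directory ./fixed_samples
#
# A positional call such as client.synthesize_speech(input, voice, config)
# in the input tree would be rewritten to
# client.synthesize_speech(request={'input': input, 'voice': voice,
# 'audio_config': config}), per METHOD_TO_PARAMS above.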
|
googleapis/python-texttospeech
|
scripts/fixup_keywords.py
|
Python
|
apache-2.0
| 6,017
|
[
"VisIt"
] |
179fce54595e6ab268b492cf7bd9c84ddbe41ac8a3fc54ebaed540fef668d577
|
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import hashlib
from commoncode.codec import bin_to_num
from commoncode.codec import urlsafe_b64encode
from commoncode import filetype
"""
Hashes and checksums.
Low level hash functions using standard crypto hashes used to construct hashes
of various lengths. Hashes that are smaller than 128 bits are based on a
truncated md5. Other length use SHA hashes.
Checksums are operating on files.
"""
def _hash_mod(bitsize, hmodule):
"""
Return a hashing class returning hashes with a `bitsize` bit length. The
interface of this class is similar to the hash module API.
"""
class hasher(object):
def __init__(self, msg=None):
self.digest_size = bitsize // 8
self.h = msg and hmodule(msg).digest()[:self.digest_size] or None
def digest(self):
return self.h
def hexdigest(self):
return self.h and self.h.encode('hex')
def b64digest(self):
return self.h and urlsafe_b64encode(self.h)
def intdigest(self):
return self.h and bin_to_num(self.h)
return hasher
# Base hashers for each bit size
_hashmodules_by_bitsize = {
# md5-based
32: _hash_mod(32, hashlib.md5),
64: _hash_mod(64, hashlib.md5),
128: _hash_mod(128, hashlib.md5),
# sha-based
160: _hash_mod(160, hashlib.sha1),
256: _hash_mod(256, hashlib.sha256),
384: _hash_mod(384, hashlib.sha384),
512: _hash_mod(512, hashlib.sha512)
}
def get_hasher(bitsize):
"""
Return a hasher for a given size in bits of the resulting hash.
"""
return _hashmodules_by_bitsize[bitsize]
_hashmodules_by_name = {
'md5': get_hasher(128),
'sha1': get_hasher(160),
'sha256': get_hasher(256),
'sha384': get_hasher(384),
'sha512': get_hasher(512)
}
def checksum(location, bitsize, base64=False):
"""
Return a checksum of `bitsize` length from the content of the file at
    `location`. The checksum is a hexdigest, or base64-encoded if `base64` is
    True.
"""
if not filetype.is_file(location):
return
hasher = get_hasher(bitsize)
# fixme: we should read in chunks
with open(location, 'rb') as f:
hashable = f.read()
hashed = hasher(hashable)
if base64:
return hashed.b64digest()
return hashed.hexdigest()
def md5(location):
return checksum(location, bitsize=128, base64=False)
def sha1(location):
return checksum(location, bitsize=160, base64=False)
def b64sha1(location):
return checksum(location, bitsize=160, base64=True)
def sha256(location):
return checksum(location, bitsize=256, base64=False)
def sha512(location):
return checksum(location, bitsize=512, base64=False)
def multi_checksums(location, checksum_names=('md5', 'sha1', 'sha256', 'sha512')):
"""
Return a mapping of hexdigest checksums keyed by checksum name from the content
of the file at `location`. Use the `checksum_names` list of checksum names.
    The mapping is guaranteed to contain all the requested names as keys.
If the location is not a file, the values are None.
"""
results = OrderedDict([(name, None) for name in checksum_names])
if not filetype.is_file(location):
return results
# fixme: we should read in chunks
with open(location, 'rb') as f:
hashable = f.read()
for name in checksum_names:
results[name] = _hashmodules_by_name[name](hashable).hexdigest()
return results
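# A short usage sketch (added for illustration; the path is hypothetical):
#
#     sha1('/tmp/example.bin')   # 40-char hex digest, or None if not a file
#     multi_checksums('/tmp/example.bin', checksum_names=('md5', 'sha256'))
#     # -> OrderedDict([('md5', '...'), ('sha256', '...')])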
|
yasharmaster/scancode-toolkit
|
src/commoncode/hash.py
|
Python
|
apache-2.0
| 4,976
|
[
"VisIt"
] |
b1aa3cba0f9c7c1ceed2c0a962159cbc8c90bb798bd00beac7b576fc5d0eaa6a
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RRvcheck(RPackage):
"""Check latest release version of R and R package (both in 'CRAN',
'Bioconductor' or 'Github')."""
homepage = "https://cran.r-project.org/package=rvcheck"
url = "https://cran.rstudio.com/src/contrib/rvcheck_0.0.9.tar.gz"
list_url = "https://cran.rstudio.com/src/contrib/Archive/rvcheck"
version('0.0.9', '7e9821de754577f94fdcbf7b02a20edc')
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/r-rvcheck/package.py
|
Python
|
lgpl-2.1
| 1,655
|
[
"Bioconductor"
] |
ce499ef7f51de8f4fa626734409f30416c4edd9730ad18522982253ce1d54113
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, unicode_literals
# FIXME: snapcraft targets the '16' series, hardcode it until more choices
# become available server side -- vila 2016-04-22
DEFAULT_SERIES = "16"
SCAN_STATUS_POLL_DELAY = 5
SCAN_STATUS_POLL_RETRIES = 5
SNAP_STORE_DASHBOARD_ROOT_URL = "https://dashboard.snapcraft.io/"
UBUNTU_SSO_API_ROOT_URL = "https://login.ubuntu.com/api/v2/"
UBUNTU_STORE_API_ROOT_URL = SNAP_STORE_DASHBOARD_ROOT_URL + "dev/api/"
UBUNTU_STORE_SEARCH_ROOT_URL = "https://api.snapcraft.io/"
UBUNTU_STORE_UPLOAD_ROOT_URL = "https://upload.apps.ubuntu.com/"
UBUNTU_STORE_TOS_URL = "https://dashboard.snapcraft.io/dev/tos/"
UBUNTU_STORE_ACCOUNT_URL = "https://dashboard.snapcraft.io/dev/account/"
# Messages and warnings.
MISSING_AGREEMENT = "Developer has not signed agreement."
MISSING_NAMESPACE = "Developer profile is missing short namespace."
AGREEMENT_ERROR = (
"You must agree to the developer terms and conditions to upload snaps."
)
NAMESPACE_ERROR = (
"You need to set a username. It will appear in the developer field "
"alongside the other details for your snap. Please visit {} and login "
"again."
)
AGREEMENT_INPUT_MSG = "Do you agree to the developer terms and conditions. ({})?"
AGREEMENT_SIGN_ERROR = (
"Unexpected error encountered during signing the developer terms and "
"conditions. Please visit {} and agree to the terms and conditions before "
"continuing."
)
TWO_FACTOR_WARNING = (
"We strongly recommend enabling multi-factor authentication: "
"https://help.ubuntu.com/community/SSO/FAQs/2FA"
)
INVALID_CREDENTIALS = "Invalid credentials supplied."
AUTHENTICATION_ERROR = "Problems encountered when authenticating your credentials."
ACCOUNT_INFORMATION_ERROR = "Unexpected error when obtaining your account information."
|
ubuntu-core/snapcraft
|
snapcraft/storeapi/constants.py
|
Python
|
gpl-3.0
| 2,476
|
[
"VisIt"
] |
c820e6514f32eb91fd660fdfbd43faaa4bfaa04eb831ddb1ae3ce7f7fc92d23b
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
import os
import random
import time
from absl import flags
from REDACTED.minigo import coords
from REDACTED.minigo import go
from REDACTED.minigo import mcts
from REDACTED.minigo import sgf_wrapper
from REDACTED.minigo.player_interface import MCTSPlayerInterface
from REDACTED.minigo.utils import dbg
flags.DEFINE_integer(
'softpick_move_cutoff', (go.N * go.N // 12) // 2 * 2,
'The move number (<) up to which moves are softpicked from MCTS visits.')
# Ensure that both white and black have an equal number of softpicked moves.
flags.register_validator('softpick_move_cutoff', lambda x: x % 2 == 0)
flags.DEFINE_float('resign_threshold', -0.9,
                   'The post-search Q evaluation at which resign should happen. '
'A threshold of -1 implies resign is disabled.')
flags.register_validator('resign_threshold', lambda x: -1 <= x < 0)
flags.DEFINE_integer(
'num_readouts', 800 if go.N == 19 else 200,
'Number of searches to add to the MCTS search tree before playing a move.')
flags.register_validator('num_readouts', lambda x: x > 0)
flags.DEFINE_integer(
'parallel_readouts', 8,
    'Number of searches to execute in parallel. This is also the batch size '
'for neural network evaluation.')
# this should be called "verbosity" but flag name conflicts with absl.logging.
# Should fix this by overhauling this logging system with appropriate
# logging.info/debug.
flags.DEFINE_integer('verbose', 1, 'How much debug info to print.')
FLAGS = flags.FLAGS
# pylint: disable=g-doc-args, g-short-docstring-punctuation, missing-super-argument, g-doc-return-or-yield
def time_recommendation(move_num, seconds_per_move=5, time_limit=15 * 60,
decay_factor=0.98):
"""Given the current move number and the 'desired' seconds per move, return
how much time should actually be used. This is intended specifically for
CGOS time controls, which has an absolute 15-minute time limit.
The strategy is to spend the maximum possible moves using seconds_per_move,
and then switch to an exponentially decaying time usage, calibrated so that
we have enough time for an infinite number of moves.
"""
# Divide by two since you only play half the moves in a game.
player_move_num = move_num / 2
# Sum of geometric series maxes out at endgame_time seconds.
endgame_time = seconds_per_move / (1 - decay_factor)
if endgame_time > time_limit:
# There is so little main time that we're already in 'endgame' mode.
base_time = time_limit * (1 - decay_factor)
core_moves = 0
else:
# Leave over endgame_time seconds for the end, and play at
# seconds_per_move for as long as possible.
base_time = seconds_per_move
core_moves = (time_limit - endgame_time) / seconds_per_move
return base_time * decay_factor**max(player_move_num - core_moves, 0)
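# A worked check of the schedule above (added for illustration, not part of
# the original module), using the defaults seconds_per_move=5, time_limit=900,
# decay_factor=0.98: the geometric tail 5 / (1 - 0.98) reserves 250s for the
# endgame, leaving (900 - 250) / 5 = 130 player moves at full speed before
# the 0.98**k decay kicks in.
assert time_recommendation(0) == 5
assert time_recommendation(2 * 130) == 5   # last full-speed player move
assert time_recommendation(2 * 131) < 5    # decay has started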
class MCTSPlayer(MCTSPlayerInterface):
def __init__(self,
network,
seconds_per_move=5,
num_readouts=0,
resign_threshold=None,
two_player_mode=False,
timed_match=False):
self.network = network
self.seconds_per_move = seconds_per_move
self.num_readouts = num_readouts or FLAGS.num_readouts
self.verbosity = FLAGS.verbose
self.two_player_mode = two_player_mode
if two_player_mode:
self.temp_threshold = -1
else:
self.temp_threshold = FLAGS.softpick_move_cutoff
self.initialize_game()
self.root = None
self.resign_threshold = resign_threshold or FLAGS.resign_threshold
self.timed_match = timed_match
assert (self.timed_match and
self.seconds_per_move > 0) or self.num_readouts > 0
super().__init__()
def get_position(self):
return self.root.position if self.root else None
def get_root(self):
return self.root
def get_result_string(self):
return self.result_string
def initialize_game(self, position=None):
if position is None:
position = go.Position()
self.root = mcts.MCTSNode(position)
self.result = 0
self.result_string = None
self.comments = []
self.searches_pi = []
# pylint: disable=g-doc-return-or-yield
def suggest_move(self, position):
"""Used for playing a single game.
For parallel play, use initialize_move, select_leaf,
incorporate_results, and pick_move
"""
start = time.time()
if self.timed_match:
while time.time() - start < self.seconds_per_move:
self.tree_search()
else:
current_readouts = self.root.N
while self.root.N < current_readouts + self.num_readouts:
self.tree_search()
if self.verbosity > 0:
dbg('%d: Searched %d times in %.2f seconds\n\n' %
(position.n, self.num_readouts, time.time() - start))
# print some stats on moves considered.
if self.verbosity > 2:
dbg(self.root.describe())
dbg('\n\n')
if self.verbosity > 3:
dbg(self.root.position)
return self.pick_move()
def play_move(self, c):
"""Notable side effects:
- finalizes the probability distribution according to
this roots visit counts into the class' running tally, `searches_pi`
- Makes the node associated with this move the root, for future
`inject_noise` calls.
"""
if not self.two_player_mode:
self.searches_pi.append(
self.root.children_as_pi(self.root.position.n < self.temp_threshold))
self.comments.append(self.root.describe())
try:
self.root = self.root.maybe_add_child(coords.to_flat(c))
except go.IllegalMove:
dbg('Illegal move')
if not self.two_player_mode:
self.searches_pi.pop()
self.comments.pop()
raise
self.position = self.root.position # for showboard
del self.root.parent.children
return True # GTP requires positive result.
def pick_move(self):
"""Picks a move to play, based on MCTS readout statistics.
Highest N is most robust indicator. In the early stage of the game, pick
a move weighted by visit count; later on, pick the absolute max.
"""
if self.root.position.n >= self.temp_threshold:
fcoord = self.root.best_child()
else:
cdf = self.root.children_as_pi(squash=True).cumsum()
cdf /= cdf[-2] # Prevents passing via softpick.
selection = random.random()
fcoord = cdf.searchsorted(selection)
assert self.root.child_N[fcoord] != 0
return coords.from_flat(fcoord)
def tree_search(self, parallel_readouts=None):
if parallel_readouts is None:
parallel_readouts = min(FLAGS.parallel_readouts, self.num_readouts)
leaves = []
failsafe = 0
while len(leaves) < parallel_readouts and failsafe < parallel_readouts * 2:
failsafe += 1
leaf = self.root.select_leaf()
if self.verbosity >= 4:
dbg(self.show_path_to_root(leaf))
# if game is over, override the value estimate with the true score
if leaf.is_done():
value = 1 if leaf.position.score() > 0 else -1
leaf.backup_value(value, up_to=self.root)
continue
leaf.add_virtual_loss(up_to=self.root)
leaves.append(leaf)
if leaves:
move_probs, values = self.network.run_many(
[leaf.position for leaf in leaves])
for leaf, move_prob, value in zip(leaves, move_probs, values):
leaf.revert_virtual_loss(up_to=self.root)
leaf.incorporate_results(move_prob, value, up_to=self.root)
return leaves
def show_path_to_root(self, node):
pos = node.position
diff = node.position.n - self.root.position.n
if len(pos.recent) == 0: # pylint: disable=g-explicit-length-test
return
def fmt(move):
return '{}-{}'.format('b' if move.color == go.BLACK else 'w',
coords.to_gtp(move.move))
path = ' '.join(fmt(move) for move in pos.recent[-diff:])
if node.position.n >= FLAGS.max_game_length:
path += ' (depth cutoff reached) %0.1f' % node.position.score()
elif node.position.is_game_over():
path += ' (game over) %0.1f' % node.position.score()
return path
def is_done(self):
return self.result != 0 or self.root.is_done()
def should_resign(self):
"""Returns true if the player resigned.
No further moves should be played.
"""
return self.root.Q_perspective < self.resign_threshold
def set_result(self, winner, was_resign):
self.result = winner
if was_resign:
string = 'B+R' if winner == go.BLACK else 'W+R'
else:
string = self.root.position.result_string()
self.result_string = string
def to_sgf(self, use_comments=True):
assert self.result_string is not None
pos = self.root.position
if use_comments:
comments = self.comments or ['No comments.']
comments[0] = ('Resign Threshold: %0.3f\n' %
self.resign_threshold) + comments[0]
else:
comments = []
return sgf_wrapper.make_sgf(
pos.recent,
self.result_string,
white_name=os.path.basename(self.network.save_file) or 'Unknown',
black_name=os.path.basename(self.network.save_file) or 'Unknown',
comments=comments)
def extract_data(self):
assert len(self.searches_pi) == self.root.position.n
assert self.result != 0
for pwc, pi in zip(
go.replay_position(self.root.position, self.result), self.searches_pi):
yield pwc.position, pi, pwc.result
def get_num_readouts(self):
return self.num_readouts
def set_num_readouts(self, readouts):
self.num_readouts = readouts
class CGOSPlayer(MCTSPlayer):
def suggest_move(self, position):
self.seconds_per_move = time_recommendation(position.n)
return super().suggest_move(position)
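# A minimal play-loop sketch (added for illustration; `network` stands for a
# hypothetical policy/value network exposing run_many(), as assumed by
# tree_search above):
#
#     player = MCTSPlayer(network)
#     player.initialize_game()
#     while not player.is_done():
#         move = player.suggest_move(player.get_position())
#         player.play_move(move)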
|
mlperf/training_results_v0.7
|
Google/benchmarks/minigo/implementations/minigo-research-TF-tpu-v4-128/strategies.py
|
Python
|
apache-2.0
| 10,398
|
[
"VisIt"
] |
97dc7224eaff22a9c61983de1744089f09fab46adbf4ab77c2c0694d5dea2f50
|
#import printStatWithName
from AZutilities import dataUtilities
from AZutilities import paramOptUtilities
from trainingMethods import AZorngRF
from trainingMethods import AZorngCvSVM
import Orange
import orange
import math
import copy
import string
"""
Module for calculation of non conformity scores and the corresponding p-values and
conformal predictions for binary classifiers.
getPvalue
|
|
getScore
|
|
{Methods to calculate the non-conf score}
"""
def meanStd(data):
""" Calculate mean and standard deviation of data data[]: """
length, mean, std = len(data), 0, 0
for elem in data:
mean = mean + elem
mean = mean / float(length)
for elem in data:
std = std + (elem - mean) ** 2
std = math.sqrt(std / float(length))
mean = round(mean, 3)
std = round(std, 3)
return mean, std
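# Example (added for illustration): meanStd([1, 2, 3]) returns (2.0, 0.816),
# i.e. the population (not sample) standard deviation, rounded to 3 decimals.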
def getScore(idx, extTrain, SVMparam, method = "minNN", maxDistRatio = None, measure = None):
"""
Calculates non-conformity score for the example with index idx in the data set extTrain
method:
1) minNN - Get relative (all ex with diff labels) min distance in feature space from ex with idx in extTrain to the rest of extTrain with the same label as idx
2) avgNN - average distance to 10 NN of the two diff classes
"""
if method == "minNN":
        alpha = minNN(idx, extTrain, measure=measure)
elif method == "avgNN":
alpha = avgNN(idx, extTrain, measure)
elif method == "scaledMinNN":
print "There is some problem with the scaling"
alpha = minNN(idx, extTrain, maxDistRatio, measure)
elif method == "kNNratio":
alpha = kNNratio(idx, extTrain, measure)
elif method == "kNNratioStruct":
alpha = kNNratioStruct(idx, extTrain, measure)
elif method == "probPred":
alpha, SVMparam = probPred(idx, extTrain, SVMparam)
elif method == "LLOO":
alpha = LLOO(idx, extTrain, measure)
elif method == "LLOOprob":
alpha = LLOOprob(idx, extTrain, measure)
elif method == "LLOOprob_b":
alpha = LLOOprob_b(idx, extTrain, measure)
else:
alpha = None
print "Method not implemented"
return alpha, SVMparam
def descRange(idx, extTrain):
"""
Use the fraction of descriptors in the train set range.
    Not possible to use. Alpha must reflect the non-conformity with the rest of the train set with a given label.
    Inside or outside the range is not predictive of which class the example belongs to.
"""
# Deselect example idx in extTrain
idxList = range(0,idx)
idxList.extend(range(idx+1,len(extTrain)))
train = extTrain.get_items(idxList)
# Get the idx example
idxEx = extTrain.get_items([idx])
# Loop over att attributes to see if the idxEx values are within the range of train
outRangeCount = 0
stat = Orange.statistics.basic.Domain(train)
#print "%20s %5s %5s %5s" % ("feature", "min", "max", "avg")
for a in stat:
if a:
#print "%20s %5.3f %5.3f %5.3f" % (a.variable.name, a.min, a.max, a.avg)
#print idxEx[0][a.variable.name]
idxValue = idxEx[0][a.variable.name]
trainMin = a.min
trainMax = a.max
try:
if idxValue < trainMin:
outRangeCount = outRangeCount + 1
elif idxValue > trainMax:
outRangeCount = outRangeCount + 1
except: pass
alpha = float(outRangeCount)/len(extTrain.domain.attributes)
return alpha
def trainSVMOptParam(train, SVMparam):
# Optimize parameters
#SVMparam = [1.0, 0.05]
if not SVMparam:
trainDataFile = "/scratch/trainDataTmp.tab"
train.save(trainDataFile)
learner = AZorngCvSVM.CvSVMLearner()
param = paramOptUtilities.getOptParam(learner, trainDataFile, paramList = None, useGrid = False, verbose = 1, queueType = "NoSGE", runPath = None, nExtFolds = None, nFolds = 10, logFile = "", getTunedPars = True, fixedParams = {})
optC = float(param[1]["C"])
optGamma = float(param[1]["gamma"])
SVMparam = [optC, optGamma]
else:
optC = SVMparam[0]
optGamma = SVMparam[1]
#print "Optimal SVM parameters ", optC, optGamma
model = AZorngCvSVM.CvSVMLearner(train, C = optC, gamma = optGamma)
return model, SVMparam
def probPred(idx, extTrain, SVMparam):
"""
Use the RF prediction probability to set the non-conf score
"""
attrList = ["SMILES_1"]
extTrain = dataUtilities.attributeDeselectionData(extTrain, attrList)
# Deselect example idx in extTrain
idxList = range(0,idx)
idxList.extend(range(idx+1,len(extTrain)))
train = extTrain.get_items(idxList)
# Train a model
model = AZorngRF.RFLearner(train)
#model, SVMparam = trainSVMOptParam(train, SVMparam)
# Predict example idx
predList = model(extTrain[idx], returnDFV = True)
pred = predList[0].value
prob = predList[1]
actual = extTrain[idx].get_class().value
#print pred, actual, prob
# More non conforming if prediction is different from actual label
if pred != actual:
alpha = 1.0 + abs(prob)
else:
alpha = 1.0 - abs(prob)
#print alpha
return alpha, SVMparam
def minNN(idx, extTrain, maxDistRatio = None, measure = None):
"""
Use the ratio between the distance to the nearest neighbor of the same and of the other class
Two versions exist, with and without scaling with the max distance ratio within the train set.
"""
attrList = ["SMILES_1"]
extTrain = dataUtilities.attributeDeselectionData(extTrain, attrList)
distListSame = []
distListDiff = []
#measure = Orange.distance.Euclidean(extTrain)
if not measure:
measure = orange.ExamplesDistanceConstructor_Euclidean(extTrain)
for runIdx in range(len(extTrain)):
if runIdx != idx:
dist = measure(extTrain[idx], extTrain[runIdx])
if extTrain[idx].get_class().value == extTrain[runIdx].get_class().value:
distListSame.append(dist)
else:
distListDiff.append(dist)
minDistSame = min(distListSame)
minDistDiff = min(distListDiff)
if minDistDiff == 0:
if maxDistRatio:
alpha = 1.0
else:
alpha = max(distListDiff)
else:
if maxDistRatio:
alpha = minDistSame/(float(minDistDiff)*maxDistRatio)
else:
alpha = minDistSame/float(minDistDiff)
#fid = open("tempFile.txt", "a")
#fid.write(str(minDistSame)+"\t"+str(minDistDiff)+"\t"+str(maxDistRatio)+"\t"+str(alpha)+"\n")
#fid.close()
return alpha
def avgNN(idx, extTrain, measure = None):
"""
Use the ratio between the distance to the kNN of the same and of the other class
"""
attrList = ["SMILES_1"]
extTrain = dataUtilities.attributeDeselectionData(extTrain, attrList)
distListSame = []
distListDiff = []
#measure = Orange.distance.Euclidean(extTrain)
if not measure:
measure = orange.ExamplesDistanceConstructor_Euclidean(extTrain)
for runIdx in range(len(extTrain)):
if runIdx != idx:
dist = measure(extTrain[idx], extTrain[runIdx])
if extTrain[idx].get_class().value == extTrain[runIdx].get_class().value:
distListSame.append(dist)
else:
distListDiff.append(dist)
distListSame.sort()
avgSame = sum(distListSame[0:10])/10.0
distListDiff.sort()
avgDiff = sum(distListDiff[0:10])/10.0
if avgDiff == 0:
alpha = max(distListDiff)
else:
alpha = avgSame/float(avgDiff)
return alpha
def kNNratio(idx, extTrain, measure = None):
"""
Use the fraction of kNN with the same response.
"""
attrList = ["SMILES_1"]
extTrain = dataUtilities.attributeDeselectionData(extTrain, attrList)
distList = []
if not measure:
#measure = instances.MahalanobisConstructor(extTrain)
measure = orange.ExamplesDistanceConstructor_Euclidean(extTrain)
for runIdx in range(len(extTrain)):
if runIdx != idx:
dist = measure(extTrain[idx], extTrain[runIdx])
distList.append(dist)
# Get the distance of the 10th NN
distList.sort()
thresDist = distList[9]
# Find the labels of the 10 NN
sameCount = 0
for runIdx in range(len(extTrain)):
if runIdx != idx:
dist = measure(extTrain[idx], extTrain[runIdx])
if dist <= thresDist:
if extTrain[idx].get_class().value == extTrain[runIdx].get_class().value:
sameCount = sameCount + 1
alpha = 1.00 - float(sameCount)/10.0
return alpha
def kNNratioInd(train, calSet, measure = None):
"""
Use the fraction of kNN with the same response.
"""
if not measure:
#measure = instances.MahalanobisConstructor(extTrain)
measure = orange.ExamplesDistanceConstructor_Euclidean(train)
alphaList = []
for predEx in calSet:
distList = []
for runIdx in range(len(train)):
dist = measure(predEx, train[runIdx])
distList.append(dist)
# Get the distance of the 10th NN
distList.sort()
thresDist = distList[9]
# Find the labels of the 10 NN
sameCount = 0
for runIdx in range(len(train)):
dist = measure(predEx, train[runIdx])
if dist <= thresDist:
if predEx.get_class().value == train[runIdx].get_class().value:
sameCount = sameCount + 1
alpha = 1.00 - float(sameCount)/10.0
alphaList.append(alpha)
return alphaList, train
def kNNratioStruct(idx, extTrain, measure = None):
"""
Use the fraction of kNN with the same response.
"""
from rdkit import Chem
from rdkit.Chem.Fingerprints import FingerprintMols
from rdkit import DataStructs
# Daylight like fp
smiles = extTrain[idx]["SMILES_1"].value
mol = Chem.MolFromSmiles(smiles)
fp = FingerprintMols.FingerprintMol(mol)
simList = []
for runIdx in range(len(extTrain)):
if runIdx != idx:
smiles0 = extTrain[runIdx]["SMILES_1"].value
mol0 = Chem.MolFromSmiles(smiles0)
fp0 = FingerprintMols.FingerprintMol(mol0)
tanSim = DataStructs.FingerprintSimilarity(fp,fp0)
simList.append(tanSim)
# Get the distance of the 10th NN
simList.sort(reverse = True)
thresDist = simList[9]
# Find the labels of the 10 NN
sameCount = 0
for runIdx in range(len(extTrain)):
if runIdx != idx:
smiles0 = extTrain[runIdx]["SMILES_1"].value
mol0 = Chem.MolFromSmiles(smiles0)
fp0 = FingerprintMols.FingerprintMol(mol0)
tanSim = DataStructs.FingerprintSimilarity(fp,fp0)
if tanSim >= thresDist:
if extTrain[idx].get_class().value == extTrain[runIdx].get_class().value:
sameCount = sameCount + 1
alpha = 1.00 - float(sameCount)/10.0
return alpha
def LLOO(idx, extTrain, measure = None):
"""
Use the fraction of kNN correctly predicted by a local model
Hard coded to 20 NN.
    Modeling method: RF or Tree?
"""
attrList = ["SMILES_1"]
extTrain = dataUtilities.attributeDeselectionData(extTrain, attrList)
distList = []
if not measure:
measure = orange.ExamplesDistanceConstructor_Euclidean(extTrain)
for runIdx in range(len(extTrain)):
if runIdx != idx:
dist = measure(extTrain[idx], extTrain[runIdx])
distList.append(dist)
# Get the distance of the 20th NN
distList.sort()
thresDist = distList[19]
# Find the labels of the 20 NN
kNN = []
for runIdx in range(len(extTrain)):
dist = measure(extTrain[idx], extTrain[runIdx])
if dist <= thresDist:
kNN.append(extTrain[runIdx])
kNNtrain = dataUtilities.DataTable(kNN)
# Find the fraction of correctly predicted ex in a LOO over kNN
corrPred = 0
for idx in range(len(kNNtrain)):
# Deselect example idx in extTrain
idxList = range(0,idx)
idxList.extend(range(idx+1,len(kNNtrain)))
train = kNNtrain.get_items(idxList)
# Train a model
model = AZorngRF.RFLearner(train)
#model = Orange.classification.tree.TreeLearner(train)
pred = model(kNNtrain[idx]).value
actual = kNNtrain[idx].get_class().value
if pred == actual:
corrPred = corrPred + 1
alpha = 1.0 - float(corrPred)/len(kNNtrain)
return alpha
def LLOOprob(idx, extTrain, measure = None):
"""
Use the fraction of kNN correctly predicted by a local model
    Hard coded to 50 NN (smaller neighborhood sizes do not work with returnDFV).
    Modeling method: RF or Tree?
"""
distList = []
if not measure:
measure = orange.ExamplesDistanceConstructor_Euclidean(extTrain)
for runIdx in range(len(extTrain)):
if runIdx != idx:
dist = measure(extTrain[idx], extTrain[runIdx])
distList.append(dist)
    # Get the distance of the 50th NN
distList.sort()
thresDist = distList[50] # Smaller number of NN does not work with returnDFV
    # Find the predEx and the 50 NN
kNN = []
for runIdx in range(len(extTrain)):
dist = measure(extTrain[idx], extTrain[runIdx])
if dist <= thresDist:
kNN.append(extTrain[runIdx])
kNNtrain = dataUtilities.DataTable(kNN)
# Find the fraction of correctly predicted ex in a LOO over kNN
alphaList = []
for iidx in range(len(kNNtrain)):
# Deselect example idx in extTrain
idxList = range(0,iidx)
idxList.extend(range(iidx+1,len(kNNtrain)))
train = kNNtrain.get_items(idxList)
# Get prediction and pred probability
model = AZorngRF.RFLearner(train)
predList = model(kNNtrain[iidx], returnDFV = True)
pred = predList[0].value
prob = predList[1]
actual = kNNtrain[iidx].get_class().value
# alpha should be greater the less certain the model
try:
if pred != actual:
alpha = 1.0 + abs(prob)
else:
alpha = 1.0 - abs(prob)
alphaList.append(alpha)
except: pass
alpha = sum(alphaList)/float(len(alphaList))
return alpha
def LLOOprob_b(idx, extTrain, measure = None):
"""
Use the fraction of kNN correctly predicted by a local model
Hard coded to 50 NN.
Modeling method. RF of Tree?
"""
distList = []
if not measure:
measure = orange.ExamplesDistanceConstructor_Euclidean(extTrain)
for runIdx in range(len(extTrain)):
if runIdx != idx:
dist = measure(extTrain[idx], extTrain[runIdx])
distList.append(dist)
# Get the distance of the 50th NN
distList.sort()
thresDist = distList[50] # Smaller number of NN does not work with returnDFV
    # Find the predEx and the 50 NN
kNN = []
for runIdx in range(len(extTrain)):
dist = measure(extTrain[idx], extTrain[runIdx])
if dist <= thresDist:
kNN.append(extTrain[runIdx])
kNNtrain = dataUtilities.DataTable(kNN)
# Find the fraction of correctly predicted ex in a LOO over kNN
alphaList = []
alphaEx = 0
for iidx in range(len(kNNtrain)):
# Deselect example idx in extTrain
idxList = range(0,iidx)
idxList.extend(range(iidx+1,len(kNNtrain)))
train = kNNtrain.get_items(idxList)
# Get prediction and pred probability
model = AZorngRF.RFLearner(train)
predList = model(kNNtrain[iidx], returnDFV = True)
pred = predList[0].value
prob = predList[1]
actual = kNNtrain[iidx].get_class().value
# The prob of the predEx is more important
dist = measure(extTrain[idx], kNNtrain[iidx])
# alpha should be greater the less certain the model
try:
if pred != actual:
alpha = 1.0 + abs(prob)
if dist < 0.001:
alphaEx = alpha
else:
alpha = 1.0 - abs(prob)
if dist < 0.001:
alphaEx = alpha
alphaList.append(alpha)
except: pass
alpha = alphaEx + sum(alphaList)/float(len(alphaList))
return alpha
def getMeanStd(extTrain):
# Get the min dist for all ex in the data set
minSame = []
minDiff = []
measure = orange.ExamplesDistanceConstructor_Euclidean(extTrain)
for idx in range(len(extTrain)):
distListSame = []
distListDiff = []
for iidx in range(len(extTrain)):
if idx != iidx:
dist = measure(extTrain[idx], extTrain[iidx])
if extTrain[idx].get_class().value == extTrain[iidx].get_class().value:
distListSame.append(dist)
else:
distListDiff.append(dist)
minSame.append(min(distListSame))
minDiff.append(min(distListDiff))
# Calculate mean and std of all the min distances
meanSame, stdSame = meanStd(minSame)
meanDiff, stdDiff = meanStd(minDiff)
return meanSame, stdSame, meanDiff, stdDiff
def getMinDistRatio(train):
"""
Calculate the minDistSame and minDistDiff ratio for all ex in the data set and select the greatest quotient.
Used to scale the minDist ratios in the non-conf score.
"""
# Get the min dist for all ex in the data set
minSame = []
minDiff = []
minRatio = []
    measure = orange.ExamplesDistanceConstructor_Euclidean(train)
for idx in range(len(train)):
distListSame = []
distListDiff = []
for iidx in range(len(train)):
if idx != iidx:
dist = measure(train[idx], train[iidx])
if train[idx].get_class().value == train[iidx].get_class().value:
distListSame.append(dist)
else:
distListDiff.append(dist)
minSame.append(min(distListSame))
minDiff.append(min(distListDiff))
if min(distListDiff) == 0:
alpha = max(distListDiff)
else:
minRatio.append(min(distListSame)/float(min(distListDiff)))
# Calculate min, mean and std of all the min distances
meanSame, stdSame = meanStd(minSame)
meanDiff, stdDiff = meanStd(minDiff)
maxDistRatio = max(minRatio)
return maxDistRatio
def getPvalueFromList(nonConfList):
    trainList = nonConfList[:-1]
    alphaPredEx = nonConfList[-1]
moreNonConfList = []
for score in trainList:
if score > alphaPredEx:
moreNonConfList.append(score)
pvalue = len(moreNonConfList)/float(len(trainList))
return pvalue
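# Worked example (added for illustration): with calibration scores
# [0.2, 0.5, 0.9, 1.4] and the candidate's score 0.8 appended last, two of
# the four calibration scores exceed 0.8, so
# getPvalueFromList([0.2, 0.5, 0.9, 1.4, 0.8]) == 0.5.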
def printResults(pvalues, labels, actualLabel, method, resultsFile, name):
confLevel = 0.95
#print "OBS! Assuming two labels!!!"
#print "Confidence level in predicting label ", labels[0]
conf1 = round(1-pvalues[1], 3)
#print "Confidence level in predicting label ", labels[1]
conf2 = round(1-pvalues[0], 3)
#print "Requiering 95% confidence gives "
if conf1 > confLevel and conf2 < confLevel:
# print "Label ", labels[0], "is predicted with at least 95% conf"
prediction = labels[0]
elif conf1 < confLevel and conf2 > confLevel:
# print "Label ", labels[1], "is predicted with at least 95% conf"
prediction = labels[1]
elif conf1 <= confLevel and conf2 <= confLevel:
# print "No prediction can be given at 95% confidence. Both?"
prediction = "Both"
else: # if conf1 > confLevel and conf2 > confLevel:
# print "Predicting both labels. Empty?"
prediction = "Empty"
fid = open(resultsFile, "a")
fid.write(str(name)+"\t"+actualLabel+"\t"+labels[0]+"\t"+labels[1]+"\t"+str(pvalues[0])+"\t"+str(pvalues[1])+"\t"+str(conf1)+"\t"+str(conf2)+"\t"+prediction+"\n")
fid.close()
return prediction
def printStat(resDict, labels):
# Print statistics
T0 = 0
T1 = 0
F0 = 0
F1 = 0
Both = 0
Empty = 0
for key, values in resDict.iteritems():
if values["actualLabel"] == labels[0]:
if values["actualLabel"] == values["prediction"]:
T0 = T0 + 1
elif values["prediction"] == "Both":
Both = Both + 1
elif values["prediction"] == "Empty":
Empty = Empty + 1
elif values["prediction"] == labels[1]:
F1 = F1 + 1
if values["actualLabel"] == labels[1]:
if values["actualLabel"] == values["prediction"]:
T1 = T1 + 1
elif values["prediction"] == "Both":
Both = Both + 1
elif values["prediction"] == "Empty":
Empty = Empty + 1
elif values["prediction"] == labels[0]:
F0 = F0 + 1
print "True ", labels[0], ": ", T0
print "True ", labels[1], ": ", T1
print "False ", labels[0], ": ", F0
print "False ", labels[1], ": ", F1
print "Both: ", Both
print "Empty: ", Empty
def getRFAcc(train, work):
model = AZorngRF.RFLearner(train)
TP = 0
TN = 0
FP = 0
FN = 0
for ex in work:
pred = model(ex).value
actual = ex.get_class().value
if actual == "POS":
if pred == "POS":
TP = TP + 1
else:
FN = FN + 1
elif actual == "NEG":
if pred == "NEG":
TN = TN + 1
else:
FP = FP + 1
print "TP\tTN\tFP\tFN\n"
print str(TP)+"\t"+str(TN)+"\t"+str(FP)+"\t"+str(FN)+"\n"
fid = open("RFresults.txt", "a")
fid.write(str(TP)+"\t"+str(TN)+"\t"+str(FP)+"\t"+str(FN)+"\n")
fid.close()
def getRFprobAcc(train, work, probThres):
model = AZorngRF.RFLearner(train)
TP = 0
TN = 0
FP = 0
FN = 0
noPred = 0
for ex in work:
actual = ex.get_class().value
predList = model(ex, returnDFV = True)
pred = predList[0].value
prob = predList[1]
if abs(prob) > probThres:
if actual == "POS":
if pred == "POS":
TP = TP + 1
else:
FN = FN + 1
elif actual == "NEG":
if pred == "NEG":
TN = TN + 1
else:
FP = FP + 1
else:
noPred = noPred + 1
print "TP\tTN\tFP\tFN\tnoPred\n"
print str(TP)+"\t"+str(TN)+"\t"+str(FP)+"\t"+str(FN)+"\t"+str(noPred)+"\n"
fid = open("RFprob"+str(probThres)+"Results.txt", "a")
fid.write(str(TP)+"\t"+str(TN)+"\t"+str(FP)+"\t"+str(FN)+"\t"+str(noPred)+"\n")
fid.close()
def getNCS(ex, redTrain, SVMparam):
"""
Use the prediction probability to set the non-conf score
"""
# Train a model
model = AZorngRF.RFLearner(redTrain)
#model, SVMparam = trainSVMOptParam(redTrain, SVMparam)
# Predict example
predList = model(ex, returnDFV = True)
pred = predList[0].value
prob = predList[1]
actual = ex.get_class().value
# More non conforming if prediction is different from actual label
if pred != actual:
alpha = 1.0 + abs(prob)
else:
alpha = 1.0 - abs(prob)
#print alpha
return alpha, SVMparam
def getNCSprecalc(model, ex):
"""
Use the prediction probability to set the non-conf score
"""
# Predict example
predList = model(ex, returnDFV = True)
pred = predList[0].value
prob = predList[1]
actual = ex.get_class().value
# More non conforming if prediction is different from actual label
if pred != actual:
alpha = 1.0 + abs(prob)
else:
alpha = 1.0 - abs(prob)
#print alpha
return alpha
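# Worked example (illustrative): if the model predicts "POS" with
# decision value prob = 0.8 while the actual label is "NEG",
# alpha = 1.0 + 0.8 = 1.8; if the actual label is also "POS",
# alpha = 1.0 - 0.8 = 0.2. Scores above 1.0 therefore always correspond
# to misclassified examples.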
def getNCStrain(train, SVMparam):
labels = train.domain.classVar.values
NCSdict = {}
for label in labels:
NCSdict[label] = []
for idx in range(len(train)):
# Deselect example idx in extTrain
idxList = range(0,idx)
idxList.extend(range(idx+1,len(train)))
redTrain = train.get_items(idxList)
# Get the idx example
predEx = train[idx]
alpha, SVMparam = getNCS(predEx, redTrain, SVMparam)
label = predEx.get_class().value
NCSdict[label].append(alpha)
return NCSdict
def getPvalue(model, NCSdict, predEx, label, method = "probPred", measure = None):
"""
method; avgNN, scaledMinNN, minNN, kNNratio
"""
# Set label to class of predEx
newPredEx = Orange.data.Table(predEx.domain, [predEx])
newPredEx[0][newPredEx.domain.classVar] = label
    # Copy so repeated calls do not grow the cached score lists in NCSdict
    nonConfListMondrian = list(NCSdict[label])
alpha = getNCSprecalc(model, newPredEx[0])
nonConfListMondrian.append(alpha)
#nonConfListSorted = copy.deepcopy(nonConfList)
#nonConfListSorted.sort()
#nonConfListMondrianSorted = copy.deepcopy(nonConfListMondrian)
#nonConfListMondrianSorted.sort()
#fid = open("NonConf.txt", "w")
#for ex in nonConfListSorted:
# fid.write(str(ex)+"\n")
#fid.close()
# The last non-conf score is that of predEx
# The p-value is the fraction of ex with alpha gt that of predEx
pvalueMondrian = getPvalueFromList(nonConfListMondrian)
return pvalueMondrian
def getLOOCP(train, work, method, SVMparam, resultsFile = "CPresults.txt", verbose = False):
"""
method - non-conformity score method
"""
# Get NCS of train set
NCSdict = getNCStrain(train, SVMparam)
# Train a model
print "Please note, model alg needs to be changed in two places"
model = AZorngRF.RFLearner(train)
#model, SVMparam = trainSVMOptParam(train, SVMparam)
# Get conformal predictions
resDict = {}
idx = 0
for predEx in work:
labels = train.domain.classVar.values
pvaluesMondrian = []
for label in labels:
pvalueMondrian = getPvalue(model, NCSdict, predEx, label, method)
pvaluesMondrian.append(pvalueMondrian)
print pvaluesMondrian
actualLabel = predEx.get_class().value
name = None
predictionMondrian = printResults(pvaluesMondrian, labels, actualLabel, method, resultsFile, name)
idx = idx + 1
resDict[idx] = {"actualLabel": actualLabel, "prediction": predictionMondrian}
if verbose:
printStat(resDict, labels)
return SVMparam, resDict
if __name__ == "__main__":
"""
Assumptions;
Binary classification
This main will test the implemented CP methods in a 10 fold CV
"""
data = dataUtilities.DataTable('clusterTrain_bulk.txt')
attrList = ['"HLM_XEN025;Mean;CLint (uL/min/mg);(Num)"', 'Structure', 'MV Number', "Class List"]
data = dataUtilities.attributeDeselectionData(data, attrList)
method = "probPred"
SVMparam = []
resultsFile = "CPresultst.txt"
fid = open(resultsFile, "w")
fid.write("Name\tActualLabel\tLabel1\tLabel2\tPvalue1\tPvalue2\tConf1\tConf2\tPrediction\n")
fid.close()
# Run a 10 fold CV
nFolds = 10
ind = Orange.data.sample.SubsetIndicesCV(data, nFolds)
for idx in range(nFolds):
work = data.select(ind, idx)
train = None
for iidx in range(nFolds):
if iidx != idx:
if not train:
train = data.select(ind, iidx)
else:
train.extend(data.select(ind, iidx))
print "Length of train ", len(train)
print "Length of work ", len(work)
# Create results file and get the conformal predictions
SVMparam, resDict = getLOOCP(train, work, method, SVMparam, resultsFile, True)
|
JonnaStalring/AZOrange
|
azorange/AZutilities/LOOCPMondrian.py
|
Python
|
lgpl-3.0
| 27,986
|
[
"RDKit"
] |
e26d668c685d9fcee3009acac51e566f429f5c9d40540a1b359aa70fa51933a7
|
"""Contains the parent class for the DMD MPFDisplay module as well as some
module-level functions related to using DMD files."""
# dmd.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import os
import struct
import pygame # todo make it so this doesn't crash if pygame is not available
import logging
from mpf.media_controller.core.display import MPFDisplay
def load_dmd_file(file_name, palette=None, alpha_color=None,
alpha_pixels=False):
"""Loads a .DMD file from disk and returns a Pygame surface compatible with
MPF's DMD display system.
Args:
file_name: A string of the file name (with path) of the file to load.
palette: Optional Python list in the Pygame palette format.
alpha_color: Whether one of the colors of this rendered DMD should be
transparent. Default is None, which means this DMD file will have
no transparencies.
        alpha_pixels: Boolean which controls whether this DMD file should be
            created with pixel-level alpha channels. Default is False.
    Returns: A list of Pygame surfaces. Single-frame DMD files (i.e. static
        images) result in a single-element list. Multi-frame DMD files (i.e.
        animations) result in one list element (a Pygame surface) for each
        frame.
This .DMD format is open source DMD file format originally created for
Pyprocgame by Gerry Stellenberg and Adam Preble. Support for it in MPF is
included with permission.
Details of the file format are here:
# http://pyprocgame.pindev.org/ref/dmd.html?highlight=dmd#dmd-format
"""
surface_list = list()
width = 0
height = 0
frame_count = 0
# This code to read DMD files is based on the following:
# https://github.com/preble/pyprocgame/blob/master/procgame/dmd/animation.py#L267-L280
try:
with open(file_name, 'rb') as f:
f.seek(0, os.SEEK_END) # Go to the end of the file to get its length
file_length = f.tell()
f.seek(4) # Skip over the 4 byte DMD header.
frame_count = struct.unpack("I", f.read(4))[0]
width = struct.unpack("I", f.read(4))[0]
height = struct.unpack("I", f.read(4))[0]
if file_length != 16 + width * height * frame_count:
print "File size inconsistent with header information."
for frame_index in range(frame_count):
frame_string = f.read(width * height)
surface = pygame.image.fromstring(frame_string,
(width, height), 'P')
if palette:
surface.set_palette(palette)
if alpha_color is not None:
surface.set_colorkey((alpha_color, 0, 0))
surface_list.append(surface)
return surface_list
    except IOError:
        raise IOError("Could not load DMD file: %s" % file_name)
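# Usage sketch (not part of the original module; 'attract.dmd' is a
# hypothetical file name):
#
#   palette = create_palette(bright_color=(255, 0, 0), steps=16)
#   frames = load_dmd_file('attract.dmd', palette=palette)
#   # frames is a list with one 8-bit Pygame surface per animation frame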
def surface_to_dmd(surface, shades=16, alpha_color=None,
weights=(.299, .587, .114)):
"""Converts a 24-bit RGB Pygame surface to surface that's compatible with
DMD displays in MPF.
Args:
surface: The 24-bit Pygame surface you're converting
shades: How many shades (brightness levels) you want in the new DMD
surface. Default is 16.
alpha_color: The pixel value that should be used as an alpha value. (In
other words, pixels of this color will be transparent.) Default is
None.
weights: A tuple of the relative weights of the R, G, and B channels
that will be used to convert the 24-bit surface to the new surface.
Default is (.299, .587, .114)
Returns: An 8-bit Pygame surface ready to display on the DMD.
DMDs in pinball machines are single color with (usually) 16 different
shades. So essentially what this method does is convert a 24-bit surface
with millions of colors to a grayscale surface with 16 shades of gray.
    Since humans perceive different hues to be different intensities, this
    formula uses relative weights to ensure that the conversion is as
    accurate as possible.
More information on this conversion process, and the reason we chose the
default weights we did, is here:
http://en.wikipedia.org/wiki/Grayscale#Luma_coding_in_video_systems
"""
width, height = surface.get_size()
pa = pygame.PixelArray(surface)
new_surface = pygame.Surface((width, height), depth=8)
# todo add support for alpha channel (per pixel), and specifying the
# alpha color before the conversion versus after
palette = [
(0, 0, 0),
(1, 0, 0),
(2, 0, 0),
(3, 0, 0),
(4, 0, 0),
(5, 0, 0),
(6, 0, 0),
(7, 0, 0),
(8, 0, 0),
(9, 0, 0),
(10, 0, 0),
(11, 0, 0),
(12, 0, 0),
(13, 0, 0),
(14, 0, 0),
(15, 0, 0)] * 16
new_surface.set_palette(palette)
if alpha_color is not None:
new_surface.set_colorkey((alpha_color, 0, 0))
new_pa = pygame.PixelArray(new_surface)
for x in range(width):
for y in range(height):
pixel_color = surface.unmap_rgb(pa[x, y])
pixel_weight = ((pixel_color[0] * weights[0]) +
(pixel_color[1] * weights[1]) +
(pixel_color[2] * weights[2])) / 255.0
new_pa[x, y] = int(round(pixel_weight * (shades - 1)))
return new_pa.surface
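# Usage sketch (illustrative only; 'logo.png' is a hypothetical file):
#
#   rgb_surface = pygame.image.load('logo.png').convert(24)
#   dmd_surface = surface_to_dmd(rgb_surface, shades=16)
#   # dmd_surface is an 8-bit surface whose pixel values are 0-15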
def create_palette(bright_color=(255, 0, 0), dark_color=(0, 0, 0),
steps=16):
"""Creates a Pygame palette based on the colors passed to it. This method
is typically used to generate the "on screen" color representations for a
DMD.
Args:
bright_color: A list or tuple of three integers (0-255 each) which
represents the RGB values of a fully bright (full "on") color of a
pixel. Default is (255, 0, 0) (red).
dark_color: A list or tuple of three integers (0-255 each) which
represents the RGB values of the dark (or "off") color of a pixel.
Default is (0, 0, 0) (black).
steps: An integer which is the number of steps (or shades) in the
palette. Typical values are 2 (1-bit color), 4 (2-bit color), or 16
(4-bit color). Default is 16.
Returns: A Pygame palette which is a list of three-item lists. The first
item will always be the dark_color, and the last item will always
be the bright_color. The values in between are the steps.
"""
palette = []
step_size = [(bright_color[0] - dark_color[0]) / (steps - 1.0),
(bright_color[1] - dark_color[1]) / (steps - 1.0),
(bright_color[2] - dark_color[2]) / (steps - 1.0)
]
    # Work on a mutable copy so the documented tuple defaults work and the
    # caller's dark_color is not modified in place
    current_color = list(dark_color)
# manually add the first entry to ensure it's exactly as entered
palette.append((int(current_color[0]),
int(current_color[1]),
int(current_color[2])))
# calculate all the middle values (all except the dark and bright)
for i in range(steps-2):
current_color[0] += step_size[0]
current_color[1] += step_size[1]
current_color[2] += step_size[2]
palette.append((int(current_color[0]),
int(current_color[1]),
int(current_color[2])))
# manually add the last entry to ensure it's exactly as entered
palette.append(bright_color)
return palette
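# Worked example (illustrative): a 4-step fade from black to green.
#
#   create_palette(bright_color=(0, 255, 0), dark_color=(0, 0, 0), steps=4)
#   # -> [(0, 0, 0), (0, 85, 0), (0, 170, 0), (0, 255, 0)]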
def is_used(config):
"""Checks a config dictionary to see if this display module should be used.
Args:
config: A python dictionary
Returns: Boolean as to whether the sections the DMD class needs are present.
"""
# todo change to try
if 'dmd' in config:
return True
else:
return False
class DMD(MPFDisplay):
"""Base class for a traditional dot matrix display (DMD) in a pinball
machine. This class is used to control a physical DMD connected via the
14-pin header on the controller.
Note that if you want to control a "color DMD", that is done via the Window
display, not this DMD class. However if you would like to display a
rendering of a traditional DMD in your on screen window, then you use this
class to create the DMD display object.
Args:
machine: A reference to the main machine controller object.
"""
def __init__(self, machine):
if 'dmd' in machine.config:
self.config = machine.config['dmd']
else:
self.config = dict()
self.log = logging.getLogger('dmd')
MPFDisplay.__init__(self, machine, self.config)
self.use_physical = False
self.depth = 8
self.color_dmd = False
# Due to the way Pygame handles blits with 8-bit surfaces, we have to
# have a standard palette that's the same for all of them and that's
# 'known' so we can render fonts to known palette locations. This
# palette is somewhat arbitrary but guarantees that everything we
# render to this display will use the same palette.
# todo change this to just use the screen DMD one, and set a default
# in mpfconfig? That way we can do a blit instead of a PA when we render
# it to a window? If we do that then we'll also have to change the
# 1-bit display elements so they use the proper palette location.
self.palette = [
(0, 0, 0),
(1, 0, 0),
(2, 0, 0),
(3, 0, 0),
(4, 0, 0),
(5, 0, 0),
(6, 0, 0),
(7, 0, 0),
(8, 0, 0),
(9, 0, 0),
(10, 0, 0),
(11, 0, 0),
(12, 0, 0),
(13, 0, 0),
(14, 0, 0),
(15, 0, 0)] * 16
self.name = 'dmd'
if 'shades' not in self.config:
self.config['shades'] = 16
if 'physical' in self.config:
self.use_physical = self.config['physical']
else:
self.use_physical = False
if 'type' in self.config and self.config['type'] == 'color':
self.color_dmd = True
self.depth = 24
def _initialize(self):
        # Internal method which initializes the DMD. This is separate from
        # __init__ because we have to wait until Pygame has been initialized.
super(DMD, self)._initialize()
self.machine.display.default_display = self
def update(self):
"""Automatically called based on a timer when the display should update.
"""
super(DMD, self).update()
if self.use_physical and self.depth == 8:
try:
self.machine.send_dmd_frame(
pygame.image.tostring(self.current_slide.surface, 'P'))
except TypeError:
return False
elif self.use_physical and self.depth == 24:
try:
self.machine.send_dmd_frame(
pygame.image.tostring(self.current_slide.surface, 'RGB'))
except TypeError:
return False
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
spierepf/mpf
|
mpf/media_controller/display_modules/dmd.py
|
Python
|
mit
| 12,533
|
[
"Brian"
] |
36a59b6156da63d5154671a695856fd430ebc2278b8e27fe485268044a7ba2dd
|
#!/usr/bin/env pvbatch
from paraview.simple import *
import pviz
import sys
oviz = pviz.viz(sys.argv) # instantiate viz object (and load data)
part = pviz.makeContour(varName='T_s',isoValueArray=[0.01],ColorArrayName='u_s')
ResetCamera() # auto-adapt camera to part extent
oviz.writeImage('isoT_0p01') # save image
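# Run under ParaView's batch interpreter, e.g. (illustrative invocation):
#   pvbatch isosurface.py <pviz data arguments>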
|
ruizanthony/pviz
|
examples/simple/isosurface.py
|
Python
|
lgpl-3.0
| 333
|
[
"ParaView"
] |
6056c7c22820c2e7c9670d952898ef353911cdd43c0de6149e655dbd20c16bbd
|
"""This module contains implementation of bolfire."""
__all__ = ['BOLFIRE']
import logging
import numpy as np
import elfi.methods.mcmc as mcmc
from elfi.classifiers.classifier import Classifier, LogisticRegression
from elfi.loader import get_sub_seed
from elfi.methods.bo.acquisition import LCBSC, AcquisitionBase
from elfi.methods.bo.gpy_regression import GPyRegression
from elfi.methods.bo.utils import CostFunction
from elfi.methods.inference.parameter_inference import ParameterInference
from elfi.methods.posteriors import BOLFIREPosterior
from elfi.methods.results import BOLFIRESample
from elfi.methods.utils import arr2d_to_batch, batch_to_arr2d, resolve_sigmas
from elfi.model.elfi_model import ElfiModel, Summary
from elfi.model.extensions import ModelPrior
logger = logging.getLogger(__name__)
class BOLFIRE(ParameterInference):
"""Bayesian Optimization and Classification in Likelihood-Free Inference (BOLFIRE)."""
def __init__(self,
model,
n_training_data=10,
marginal=None,
seed_marginal=None,
classifier=None,
bounds=None,
n_initial_evidence=0,
acq_noise_var=0,
exploration_rate=10,
update_interval=1,
target_model=None,
acquisition_method=None,
*args, **kwargs):
"""Initialize the BOLFIRE method.
Parameters
----------
model: ElfiModel
Elfi graph used by the algorithm.
n_training_data: int, optional
Size of training data.
        marginal: np.ndarray, optional
Marginal data.
seed_marginal: int, optional
Seed for marginal data generation.
classifier: str, optional
Classifier to be used. Default LogisticRegression.
bounds: dict, optional
The region where to estimate the posterior for each parameter in
model.parameters: dict('parameter_name': (lower, upper), ... ). Not used if
custom target_model is given.
n_initial_evidence: int, optional
Number of initial evidence.
acq_noise_var : float or dict, optional
Variance(s) of the noise added in the default LCBSC acquisition method.
If a dictionary, values should be float specifying the variance for each dimension.
exploration_rate: float, optional
Exploration rate of the acquisition method.
update_interval : int, optional
How often to update the GP hyperparameters of the target_model.
target_model: GPyRegression, optional
A surrogate model to be used.
acquisition_method: Acquisition, optional
Method of acquiring evidence points. Default LCBSC.
"""
# Resolve model and initialize
model = self._resolve_model(model)
super(BOLFIRE, self).__init__(model, output_names=None, *args, **kwargs)
# Initialize attributes
self.n_training_data = self._resolve_n_training_data(n_training_data)
self.summary_names = self._get_summary_names(self.model)
self.marginal = self._resolve_marginal(marginal, seed_marginal)
self.classifier = self._resolve_classifier(classifier)
self.observed = self._get_observed_summary_values(self.model, self.summary_names)
self.prior = ModelPrior(self.model)
# TODO: write resolvers for the attributes below
self.bounds = bounds
self.acq_noise_var = acq_noise_var
self.exploration_rate = exploration_rate
self.update_interval = update_interval
# Initialize GP regression
self.target_model = self._resolve_target_model(target_model)
# Define acquisition cost
self.cost = CostFunction(self.prior.logpdf, self.prior.gradient_logpdf, scale=-1)
# Initialize BO
self.n_initial_evidence = self._resolve_n_initial_evidence(n_initial_evidence)
self.acquisition_method = self._resolve_acquisition_method(acquisition_method)
# Initialize state dictionary
self.state['n_evidence'] = 0
self.state['last_GP_update'] = self.n_initial_evidence
# Initialize classifier attributes list
self.classifier_attributes = []
@property
def n_evidence(self):
"""Return the number of acquired evidence points."""
return self.state['n_evidence']
def set_objective(self, n_evidence):
"""Set an objective for inference. You can continue BO by giving a larger n_evidence.
Parameters
----------
n_evidence: int
Number of total evidence for the GP fitting. This includes any initial evidence.
"""
if n_evidence < self.n_evidence:
logger.warning('Requesting less evidence than there already exists.')
self.objective['n_sim'] = n_evidence
def extract_result(self):
"""Extract the results from the current state."""
return BOLFIREPosterior(self.parameter_names,
self.target_model,
self.prior,
self.classifier_attributes)
def update(self, batch, batch_index):
"""Update the GP regression model of the target node with a new batch.
Parameters
----------
batch : dict
dict with `self.outputs` as keys and the corresponding outputs for the batch
as values
batch_index : int
Index of batch.
"""
# Update the inference state
self.state['n_batches'] += 1
self.state['n_sim'] += self.batch_size * self.n_training_data
# Predict log-ratio
likelihood = self._generate_likelihood(self._get_parameter_values(batch))
X, y = self._generate_training_data(likelihood, self.marginal)
negative_log_ratio_value = -1 * self.predict_log_ratio(X, y, self.observed)
# Update classifier attributes list
self.classifier_attributes += [self.classifier.attributes]
# BO part
self.state['n_evidence'] += self.batch_size
parameter_values = batch_to_arr2d(batch, self.parameter_names)
optimize = self._should_optimize()
self.target_model.update(parameter_values, negative_log_ratio_value, optimize)
if optimize:
self.state['last_GP_update'] = self.target_model.n_evidence
def prepare_new_batch(self, batch_index):
"""Prepare values for a new batch.
Parameters
----------
batch_index: int
Returns
-------
batch: dict
"""
t = batch_index - self.n_initial_evidence
if t < 0: # Sample parameter values from the model priors
return
# Acquire parameter values from the acquisition function
acquisition = self.acquisition_method.acquire(self.batch_size, t)
return arr2d_to_batch(acquisition, self.parameter_names)
def predict_log_ratio(self, X, y, X_obs):
"""Predict the log-ratio, i.e, logarithm of likelihood / marginal.
Parameters
----------
X: np.ndarray
Training data features.
y: np.ndarray
Training data labels.
X_obs: np.ndarray
Observed data.
Returns
-------
np.ndarray
"""
self.classifier.fit(X, y)
return self.classifier.predict_log_likelihood_ratio(X_obs)
def fit(self, n_evidence, bar=True):
"""Fit the surrogate model.
That is, generate a regression model for the negative posterior value given the parameters.
        Currently only GP regression is supported as a surrogate model.
Parameters
----------
n_evidence: int
Number of evidence for fitting.
bar: bool, optional
Flag to show or hide the progress bar during fit.
Returns
-------
BOLFIREPosterior
"""
logger.info('BOLFIRE: Fitting the surrogate model...')
if isinstance(n_evidence, int) and n_evidence > 0:
return self.infer(n_evidence, bar=bar)
raise TypeError('n_evidence must be a positive integer.')
def sample(self,
n_samples,
warmup=None,
n_chains=4,
initials=None,
algorithm='nuts',
sigma_proposals=None,
n_evidence=None,
*args, **kwargs):
"""Sample from the posterior distribution of BOLFIRE.
Sampling is performed with an MCMC sampler.
Parameters
----------
n_samples: int
Number of requested samples from the posterior for each chain. This includes warmup,
and note that the effective sample size is usually considerably smaller.
warmup: int, optional
Length of warmup sequence in MCMC sampling.
n_chains: int, optional
Number of independent chains.
initials: np.ndarray (n_chains, n_params), optional
Initial values for the sampled parameters for each chain.
algorithm: str, optional
Sampling algorithm to use.
sigma_proposals: np.ndarray
Standard deviations for Gaussian proposals of each parameter for Metropolis-Hastings.
n_evidence: int, optional
If the surrogate model is not fitted yet, specify the amount of evidence.
Returns
-------
BOLFIRESample
"""
# Fit posterior in case not done
if self.state['n_batches'] == 0:
self.fit(n_evidence)
# Check algorithm
if algorithm not in ['nuts', 'metropolis']:
raise ValueError('The given algorithm is not supported.')
# Check standard deviations of Gaussian proposals when using Metropolis-Hastings
if algorithm == 'metropolis':
sigma_proposals = resolve_sigmas(self.target_model.parameter_names,
sigma_proposals,
self.target_model.bounds)
posterior = self.extract_result()
warmup = warmup or n_samples // 2
# Unless given, select the evidence points with best likelihood ratio
if initials is not None:
if np.asarray(initials).shape != (n_chains, self.target_model.input_dim):
raise ValueError('The shape of initials must be (n_chains, n_params).')
else:
inds = np.argsort(self.target_model.Y[:, 0])
initials = np.asarray(self.target_model.X[inds])
# Enable caching for default RBF kernel
self.target_model.is_sampling = True
tasks_ids = []
ii_initial = 0
for ii in range(n_chains):
seed = get_sub_seed(self.seed, ii)
# Discard bad initialization points
while np.isinf(posterior.logpdf(initials[ii_initial])):
ii_initial += 1
                if ii_initial == len(initials):
raise ValueError('BOLFIRE.sample: Cannot find enough acceptable '
'initialization points!')
if algorithm == 'nuts':
tasks_ids.append(
self.client.apply(mcmc.nuts,
n_samples,
initials[ii_initial],
posterior.logpdf,
posterior.gradient_logpdf,
n_adapt=warmup,
seed=seed,
**kwargs))
elif algorithm == 'metropolis':
tasks_ids.append(
self.client.apply(mcmc.metropolis,
n_samples,
initials[ii_initial],
posterior.logpdf,
sigma_proposals,
warmup,
seed=seed,
**kwargs))
ii_initial += 1
# Get results from completed tasks or run sampling (client-specific)
chains = []
for id in tasks_ids:
chains.append(self.client.get_result(id))
chains = np.asarray(chains)
logger.info(f'{n_chains} chains of {n_samples} iterations acquired. '
'Effective sample size and Rhat for each parameter:')
for ii, node in enumerate(self.parameter_names):
logger.info(f'{node} {mcmc.eff_sample_size(chains[:, :, ii])} '
f'{mcmc.gelman_rubin_statistic(chains[:, :, ii])}')
self.target_model.is_sampling = False
return BOLFIRESample(method_name='BOLFIRE',
chains=chains,
parameter_names=self.parameter_names,
warmup=warmup,
n_sim=self.state['n_sim'],
seed=self.seed,
*args, **kwargs)
def _resolve_model(self, model):
"""Resolve a given elfi model."""
if not isinstance(model, ElfiModel):
raise ValueError('model must be an ElfiModel.')
if len(self._get_summary_names(model)) == 0:
raise NotImplementedError('model must have at least one Summary node.')
return model
def _resolve_n_training_data(self, n_training_data):
"""Resolve the size of training data to be used."""
if isinstance(n_training_data, int) and n_training_data > 0:
return n_training_data
raise TypeError('n_training_data must be a positive int.')
def _get_summary_names(self, model):
"""Return the names of summary statistics."""
return [node for node in model.nodes if isinstance(model[node], Summary)
and not node.startswith('_')]
def _resolve_marginal(self, marginal, seed_marginal=None):
"""Resolve marginal data."""
if marginal is None:
marginal = self._generate_marginal(seed_marginal)
x, y = marginal.shape
logger.info(f'New marginal data ({x} x {y}) are generated.')
return marginal
if isinstance(marginal, np.ndarray) and len(marginal.shape) == 2:
return marginal
raise TypeError('marginal must be 2d numpy array.')
def _generate_marginal(self, seed_marginal=None):
"""Generate marginal data."""
batch = self.model.generate(self.n_training_data,
outputs=self.summary_names,
seed=seed_marginal)
return np.column_stack([batch[summary_name] for summary_name in self.summary_names])
def _generate_likelihood(self, parameter_values):
"""Generate likelihood data."""
batch = self.model.generate(self.n_training_data,
outputs=self.summary_names,
with_values=parameter_values)
return np.column_stack([batch[summary_name] for summary_name in self.summary_names])
def _generate_training_data(self, likelihood, marginal):
"""Generate training data."""
X = np.vstack((likelihood, marginal))
y = np.concatenate((np.ones(likelihood.shape[0]), -1 * np.ones(marginal.shape[0])))
return X, y
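    # Sketch (added for illustration, not part of ELFI): with 3 likelihood
    # rows and 3 marginal rows of 2 summaries each, X stacks to shape (6, 2)
    # and y becomes [1, 1, 1, -1, -1, -1]; the classifier then learns to
    # separate likelihood data (label 1) from marginal data (label -1).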
def _resolve_classifier(self, classifier):
"""Resolve classifier."""
if classifier is None:
return LogisticRegression()
if isinstance(classifier, Classifier):
return classifier
raise ValueError('classifier must be an instance of Classifier.')
def _get_observed_summary_values(self, model, summary_names):
"""Return observed values for summary statistics."""
return np.column_stack([model[summary_name].observed for summary_name in summary_names])
def _get_parameter_values(self, batch):
"""Return parameter values from a given batch."""
return {parameter_name: batch[parameter_name] for parameter_name
in self.model.parameter_names}
def _resolve_n_initial_evidence(self, n_initial_evidence):
"""Resolve number of initial evidence."""
if isinstance(n_initial_evidence, int) and n_initial_evidence >= 0:
return n_initial_evidence
raise ValueError('n_initial_evidence must be a non-negative integer.')
def _resolve_target_model(self, target_model):
"""Resolve target model."""
if target_model is None:
return GPyRegression(self.model.parameter_names, self.bounds)
if isinstance(target_model, GPyRegression):
return target_model
raise TypeError('target_model must be an instance of GPyRegression.')
def _resolve_acquisition_method(self, acquisition_method):
"""Resolve acquisition method."""
if acquisition_method is None:
return LCBSC(model=self.target_model,
prior=self.prior,
noise_var=self.acq_noise_var,
exploration_rate=self.exploration_rate,
seed=self.seed,
additive_cost=self.cost)
if isinstance(acquisition_method, AcquisitionBase):
return acquisition_method
raise TypeError('acquisition_method must be an instance of AcquisitionBase.')
def _should_optimize(self):
"""Check whether GP hyperparameters should be optimized."""
current = self.target_model.n_evidence + self.batch_size
next_update = self.state['last_GP_update'] + self.update_interval
return current >= self.n_initial_evidence and current >= next_update
|
elfi-dev/elfi
|
elfi/methods/inference/bolfire.py
|
Python
|
bsd-3-clause
| 18,033
|
[
"Gaussian"
] |
741f4deffcfdb0f68ce644fb1519eb1cf97448b9d6b5a7ae4012e79bb4a7beeb
|
from __future__ import unicode_literals
from django.test import TestCase
from django.utils import six
from .models import (Building, Child, Device, Port, Item, Country, Connection,
ClientStatus, State, Client, SpecialClient, TUser, Person, Student,
Organizer, Class, Enrollment, Hen, Chick, Base, A, B, C)
class SelectRelatedRegressTests(TestCase):
def test_regression_7110(self):
"""
Regression test for bug #7110.
When using select_related(), we must query the
Device and Building tables using two different aliases (each) in order to
differentiate the start and end Connection fields. The net result is that
both the "connections = ..." queries here should give the same results
without pulling in more than the absolute minimum number of tables
(history has shown that it's easy to make a mistake in the implementation
and include some unnecessary bonus joins).
"""
b = Building.objects.create(name='101')
dev1 = Device.objects.create(name="router", building=b)
dev2 = Device.objects.create(name="switch", building=b)
dev3 = Device.objects.create(name="server", building=b)
port1 = Port.objects.create(port_number='4', device=dev1)
port2 = Port.objects.create(port_number='7', device=dev2)
port3 = Port.objects.create(port_number='1', device=dev3)
c1 = Connection.objects.create(start=port1, end=port2)
c2 = Connection.objects.create(start=port2, end=port3)
connections = Connection.objects.filter(start__device__building=b, end__device__building=b).order_by('id')
self.assertEqual([(c.id, six.text_type(c.start), six.text_type(c.end)) for c in connections],
[(c1.id, 'router/4', 'switch/7'), (c2.id, 'switch/7', 'server/1')])
connections = Connection.objects.filter(start__device__building=b, end__device__building=b).select_related().order_by('id')
self.assertEqual([(c.id, six.text_type(c.start), six.text_type(c.end)) for c in connections],
[(c1.id, 'router/4', 'switch/7'), (c2.id, 'switch/7', 'server/1')])
# This final query should only have seven tables (port, device and building
# twice each, plus connection once). Thus, 6 joins plus the FROM table.
self.assertEqual(str(connections.query).count(" JOIN "), 6)
def test_regression_8106(self):
"""
Regression test for bug #8106.
Same sort of problem as the previous test, but this time there are
more extra tables to pull in as part of the select_related() and some
of them could potentially clash (so need to be kept separate).
"""
us = TUser.objects.create(name="std")
usp = Person.objects.create(user=us)
uo = TUser.objects.create(name="org")
uop = Person.objects.create(user=uo)
        s = Student.objects.create(person=usp)
        o = Organizer.objects.create(person=uop)
c = Class.objects.create(org=o)
Enrollment.objects.create(std=s, cls=c)
e_related = Enrollment.objects.all().select_related()[0]
self.assertEqual(e_related.std.person.user.name, "std")
self.assertEqual(e_related.cls.org.person.user.name, "org")
def test_regression_8036(self):
"""
Regression test for bug #8036
the first related model in the tests below
("state") is empty and we try to select the more remotely related
state__country. The regression here was not skipping the empty column results
for country before getting status.
"""
Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
client = Client.objects.create(name='client', status=active)
self.assertEqual(client.status, active)
self.assertEqual(Client.objects.select_related()[0].status, active)
self.assertEqual(Client.objects.select_related('state')[0].status, active)
self.assertEqual(Client.objects.select_related('state', 'status')[0].status, active)
self.assertEqual(Client.objects.select_related('state__country')[0].status, active)
self.assertEqual(Client.objects.select_related('state__country', 'status')[0].status, active)
self.assertEqual(Client.objects.select_related('status')[0].status, active)
def test_multi_table_inheritance(self):
""" Exercising select_related() with multi-table model inheritance. """
c1 = Child.objects.create(name="child1", value=42)
Item.objects.create(name="item1", child=c1)
Item.objects.create(name="item2")
self.assertQuerysetEqual(
Item.objects.select_related("child").order_by("name"),
["<Item: item1>", "<Item: item2>"]
)
def test_regression_12851(self):
"""
Regression for #12851
Deferred fields are used correctly if you select_related a subset
of fields.
"""
australia = Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
wa = State.objects.create(name="Western Australia", country=australia)
Client.objects.create(name='Brian Burke', state=wa, status=active)
burke = Client.objects.select_related('state').defer('state__name').get(name='Brian Burke')
self.assertEqual(burke.name, 'Brian Burke')
self.assertEqual(burke.state.name, 'Western Australia')
# Still works if we're dealing with an inherited class
SpecialClient.objects.create(name='Troy Buswell', state=wa, status=active, value=42)
troy = SpecialClient.objects.select_related('state').defer('state__name').get(name='Troy Buswell')
self.assertEqual(troy.name, 'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, 'Western Australia')
# Still works if we defer an attribute on the inherited class
troy = SpecialClient.objects.select_related('state').defer('value', 'state__name').get(name='Troy Buswell')
self.assertEqual(troy.name, 'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, 'Western Australia')
# Also works if you use only, rather than defer
troy = SpecialClient.objects.select_related('state').only('name', 'state').get(name='Troy Buswell')
self.assertEqual(troy.name, 'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, 'Western Australia')
def test_null_join_promotion(self):
australia = Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
wa = State.objects.create(name="Western Australia", country=australia)
bob = Client.objects.create(name='Bob', status=active)
jack = Client.objects.create(name='Jack', status=active, state=wa)
qs = Client.objects.filter(state=wa).select_related('state')
with self.assertNumQueries(1):
self.assertEqual(list(qs), [jack])
self.assertEqual(qs[0].state, wa)
# The select_related join wasn't promoted as there was already an
# existing (even if trimmed) inner join to state.
self.assertFalse('LEFT OUTER' in str(qs.query))
qs = Client.objects.select_related('state').order_by('name')
with self.assertNumQueries(1):
self.assertEqual(list(qs), [bob, jack])
self.assertIs(qs[0].state, None)
self.assertEqual(qs[1].state, wa)
# The select_related join was promoted as there is already an
# existing join.
self.assertTrue('LEFT OUTER' in str(qs.query))
def test_regression_19870(self):
"""
Regression for #19870
"""
hen = Hen.objects.create(name='Hen')
Chick.objects.create(name='Chick', mother=hen)
self.assertEqual(Chick.objects.all()[0].mother.name, 'Hen')
self.assertEqual(Chick.objects.select_related()[0].mother.name, 'Hen')
def test_ticket_10733(self):
a = A.objects.create(name='a', lots_of_text='lots_of_text_a', a_field='a_field')
b = B.objects.create(name='b', lots_of_text='lots_of_text_b', b_field='b_field')
c = C.objects.create(name='c', lots_of_text='lots_of_text_c', is_published=True,
c_a=a, c_b=b)
results = C.objects.all().only('name', 'lots_of_text', 'c_a', 'c_b', 'c_b__lots_of_text',
'c_a__name', 'c_b__name').select_related()
self.assertQuerysetEqual(results, [c], lambda x: x)
with self.assertNumQueries(0):
qs_c = results[0]
self.assertEqual(qs_c.name, 'c')
self.assertEqual(qs_c.lots_of_text, 'lots_of_text_c')
self.assertEqual(qs_c.c_b.lots_of_text, 'lots_of_text_b')
self.assertEqual(qs_c.c_a.name, 'a')
self.assertEqual(qs_c.c_b.name, 'b')
|
ericholscher/django
|
tests/select_related_regress/tests.py
|
Python
|
bsd-3-clause
| 9,055
|
[
"Brian"
] |
e21f8ce8f792348cc847e7acfcd0cf38c33fb1cb5e9997d6bac61cc8b297cee3
|
# $Id$
#
# Copyright (C) 2000-2008 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" command line utility for building composite models
#DOC
**Usage**
BuildComposite [optional args] filename
Unless indicated otherwise (via command line arguments), _filename_ is
a QDAT file.
**Command Line Arguments**
- -o *filename*: name of the output file for the pickled composite
- -n *num*: number of separate models to add to the composite
- -p *tablename*: store persistence data in the database
in table *tablename*
- -N *note*: attach some arbitrary text to the persistence data
- -b *filename*: name of the text file to hold examples from the
holdout set which are misclassified
- -s: split the data into training and hold-out sets before building
the composite
- -f *frac*: the fraction of data to use in the training set when the
data is split
- -r: randomize the activities (for testing purposes). This ignores
the initial distribution of activity values and produces each
    possible activity value with equal likelihood.
- -S: shuffle the activities (for testing purposes) This produces
a permutation of the input activity values.
- -l: locks the random number generator to give consistent sets
of training and hold-out data. This is primarily intended
for testing purposes.
- -B: use a so-called Bayesian composite model.
- -d *database name*: instead of reading the data from a QDAT file,
pull it from a database. In this case, the _filename_ argument
provides the name of the database table containing the data set.
- -D: show a detailed breakdown of the composite model performance
across the training and, when appropriate, hold-out sets.
- -P *pickle file name*: write out the pickled data set to the file
- -F *filter frac*: filters the data before training to change the
distribution of activity values in the training set. *filter
frac* is the fraction of the training set that should have the
target value. **See note below on data filtering.**
- -v *filter value*: filters the data before training to change the
distribution of activity values in the training set. *filter
value* is the target value to use in filtering. **See note below
on data filtering.**
- --modelFiltFrac *model filter frac*: Similar to filter frac above,
in this case the data is filtered for each model in the composite
rather than a single overall filter for a composite. *model
filter frac* is the fraction of the training set for each model
that should have the target value (*model filter value*).
- --modelFiltVal *model filter value*: target value to use for
filtering data before training each model in the composite.
- -t *threshold value*: use high-confidence predictions for the
final analysis of the hold-out data.
- -Q *list string*: the values of quantization bounds for the
activity value. See the _-q_ argument for the format of *list
string*.
- --nRuns *count*: build *count* composite models
- --prune: prune any models built
- -h: print a usage message and exit.
- -V: print the version number and exit
*-*-*-*-*-*-*-*- Tree-Related Options -*-*-*-*-*-*-*-*
- -g: be less greedy when training the models.
- -G *number*: force trees to be rooted at descriptor *number*.
- -L *limit*: provide an (integer) limit on individual model
complexity
- -q *list string*: Add QuantTrees to the composite and use the list
specified in *list string* as the number of target quantization
bounds for each descriptor. Don't forget to include 0's at the
beginning and end of *list string* for the name and value fields.
For example, if there are 4 descriptors and you want 2 quant
bounds apiece, you would use _-q "[0,2,2,2,2,0]"_.
Two special cases:
1) If you would like to ignore a descriptor in the model
building, use '-1' for its number of quant bounds.
2) If you have integer valued data that should not be quantized
further, enter 0 for that descriptor.
- --recycle: allow descriptors to be used more than once in a tree
- --randomDescriptors=val: toggles growing random forests with val
randomly-selected descriptors available at each node.
*-*-*-*-*-*-*-*- KNN-Related Options -*-*-*-*-*-*-*-*
- --doKnn: use K-Nearest Neighbors models
- --knnK=*value*: the value of K to use in the KNN models
- --knnTanimoto: use the Tanimoto metric in KNN models
- --knnEuclid: use a Euclidean metric in KNN models
*-*-*-*-*-*-*- Naive Bayes Classifier Options -*-*-*-*-*-*-*-*
- --doNaiveBayes : use Naive Bayes classifiers
- --mEstimateVal : the value to be used in the m-estimate formula
If this is greater than 0.0, we use it to compute the conditional
probabilities by the m-estimate
*-*-*-*-*-*-*-*- SVM-Related Options -*-*-*-*-*-*-*-*
**** NOTE: THESE ARE DISABLED ****
## - --doSVM: use Support-vector machines
## - --svmKernel=*kernel*: choose the type of kernel to be used for
## the SVMs. Options are:
## The default is:
## - --svmType=*type*: choose the type of support-vector machine
## to be used. Options are:
## The default is:
## - --svmGamma=*gamma*: provide the gamma value for the SVMs. If this
## is not provided, a grid search will be carried out to determine an
## optimal *gamma* value for each SVM.
## - --svmCost=*cost*: provide the cost value for the SVMs. If this is
## not provided, a grid search will be carried out to determine an
## optimal *cost* value for each SVM.
## - --svmWeights=*weights*: provide the weight values for the
## activities. If provided this should be a sequence of (label,
## weight) 2-tuples *nActs* long. If not provided, a weight of 1
## will be used for each activity.
## - --svmEps=*epsilon*: provide the epsilon value used to determine
## when the SVM has converged. Defaults to 0.001
## - --svmDegree=*degree*: provide the degree of the kernel (when
## sensible) Defaults to 3
## - --svmCoeff=*coeff*: provide the coefficient for the kernel (when
## sensible) Defaults to 0
## - --svmNu=*nu*: provide the nu value for the kernel (when sensible)
## Defaults to 0.5
## - --svmDataType=*float*: if the data contains only 1s and 0s, specify
##   binary. Defaults to float
## - --svmCache=*cache*: provide the size of the memory cache (in MB)
## to be used while building the SVM. Defaults to 40
**Notes**
- *Data filtering*: When there is a large disparity between the
numbers of points with various activity levels present in the
training set it is sometimes desirable to train on a more
homogeneous data set. This can be accomplished using filtering.
The filtering process works by selecting a particular target
fraction and target value. For example, in a case where 95% of
    the original training set has activity 0 and only 5% activity 1, we
could filter (by randomly removing points with activity 0) so that
30% of the data set used to build the composite has activity 1.
"""
from __future__ import print_function
import sys,time
import math
import numpy
from rdkit.six.moves import cPickle
from rdkit import RDConfig
from rdkit.utils import listutils
from rdkit.ML.Composite import Composite,BayesComposite
#from ML.SVM import SVMClassificationModel as SVM
from rdkit.ML.Data import DataUtils,SplitData
from rdkit.ML import ScreenComposite
from rdkit.Dbase import DbModule
from rdkit.Dbase.DbConnection import DbConnect
from rdkit.ML import CompositeRun
from rdkit import DataStructs
_runDetails = CompositeRun.CompositeRun()
__VERSION_STRING="3.2.3"
_verbose = 1
def message(msg):
""" emits messages to _sys.stdout_
override this in modules which import this one to redirect output
**Arguments**
- msg: the string to be displayed
"""
if _verbose: sys.stdout.write('%s\n'%(msg))
def testall(composite,examples,badExamples=[]):
""" screens a number of examples past a composite
**Arguments**
- composite: a composite model
- examples: a list of examples (with results) to be screened
- badExamples: a list to which misclassified examples are appended
**Returns**
a list of 2-tuples containing:
1) a vote
2) a confidence
these are the votes and confidence levels for **misclassified** examples
"""
wrong = []
for example in examples:
if composite.GetActivityQuantBounds():
answer = composite.QuantizeActivity(example)[-1]
else:
answer = example[-1]
res,conf = composite.ClassifyExample(example)
if res != answer:
wrong.append((res,conf))
badExamples.append(example)
return wrong
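# Usage sketch (illustrative): screening a hold-out set.
#
#   badExamples = []
#   wrong = testall(composite, holdOutExamples, badExamples)
#   errRate = float(len(wrong)) / len(holdOutExamples)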
def GetCommandLine(details):
""" #DOC
"""
args = ['BuildComposite']
args.append('-n %d'%(details.nModels))
if details.filterFrac != 0.0: args.append('-F %.3f -v %d'%(details.filterFrac,details.filterVal))
if details.modelFilterFrac != 0.0: args.append('--modelFiltFrac=%.3f --modelFiltVal=%d'%(details.modelFilterFrac,
details.modelFilterVal))
if details.splitRun: args.append('-s -f %.3f'%(details.splitFrac))
if details.shuffleActivities: args.append('-S')
if details.randomActivities: args.append('-r')
if details.threshold > 0.0: args.append('-t %.3f'%(details.threshold))
if details.activityBounds: args.append('-Q "%s"'%(details.activityBoundsVals))
if details.dbName: args.append('-d %s'%(details.dbName))
if details.detailedRes: args.append('-D')
if hasattr(details,'noScreen') and details.noScreen: args.append('--noScreen')
if details.persistTblName and details.dbName:
args.append('-p %s'%(details.persistTblName))
if details.note:
args.append('-N %s'%(details.note))
if details.useTrees:
if details.limitDepth>0: args.append('-L %d'%(details.limitDepth))
if details.lessGreedy: args.append('-g')
if details.qBounds:
shortBounds = listutils.CompactListRepr(details.qBounds)
if details.qBounds: args.append('-q "%s"'%(shortBounds))
else:
if details.qBounds: args.append('-q "%s"'%(details.qBoundCount))
if details.pruneIt: args.append('--prune')
if details.startAt: args.append('-G %d'%details.startAt)
if details.recycleVars: args.append('--recycle')
if details.randomDescriptors: args.append('--randomDescriptors=%d'%details.randomDescriptors)
if details.useSigTrees:
args.append('--doSigTree')
if details.limitDepth>0: args.append('-L %d'%(details.limitDepth))
if details.randomDescriptors:
args.append('--randomDescriptors=%d'%details.randomDescriptors)
if details.useKNN:
args.append('--doKnn --knnK %d'%(details.knnNeighs))
if details.knnDistFunc=='Tanimoto':
args.append('--knnTanimoto')
else:
args.append('--knnEuclid')
if details.useNaiveBayes:
args.append('--doNaiveBayes')
if details.mEstimateVal >= 0.0 :
args.append('--mEstimateVal=%.3f'%details.mEstimateVal)
## if details.useSVM:
## args.append('--doSVM')
## if details.svmKernel:
## for k in SVM.kernels.keys():
## if SVM.kernels[k]==details.svmKernel:
## args.append('--svmKernel=%s'%k)
## break
## if details.svmType:
## for k in SVM.machineTypes.keys():
## if SVM.machineTypes[k]==details.svmType:
## args.append('--svmType=%s'%k)
## break
## if details.svmGamma:
## args.append('--svmGamma=%f'%details.svmGamma)
## if details.svmCost:
## args.append('--svmCost=%f'%details.svmCost)
## if details.svmWeights:
## args.append("--svmWeights='%s'"%str(details.svmWeights))
## if details.svmDegree:
## args.append('--svmDegree=%d'%details.svmDegree)
## if details.svmCoeff:
## args.append('--svmCoeff=%d'%details.svmCoeff)
## if details.svmEps:
## args.append('--svmEps=%f'%details.svmEps)
## if details.svmNu:
## args.append('--svmNu=%f'%details.svmNu)
## if details.svmCache:
## args.append('--svmCache=%d'%details.svmCache)
## if detail.svmDataType:
## args.append('--svmDataType=%s'%details.svmDataType)
## if not details.svmShrink:
## args.append('--svmShrink')
if details.replacementSelection: args.append('--replacementSelection')
# this should always be last:
if details.tableName: args.append(details.tableName)
return ' '.join(args)
def RunOnData(details,data,progressCallback=None,saveIt=1,setDescNames=0):
nExamples = data.GetNPts()
if details.lockRandom:
seed = details.randomSeed
else:
import random
seed = (random.randint(0,1e6),random.randint(0,1e6))
DataUtils.InitRandomNumbers(seed)
testExamples = []
if details.shuffleActivities == 1:
DataUtils.RandomizeActivities(data,shuffle=1,runDetails=details)
elif details.randomActivities == 1:
DataUtils.RandomizeActivities(data,shuffle=0,runDetails=details)
namedExamples = data.GetNamedData()
if details.splitRun == 1:
trainIdx,testIdx = SplitData.SplitIndices(len(namedExamples),details.splitFrac,
silent=not _verbose)
trainExamples = [namedExamples[x] for x in trainIdx]
testExamples = [namedExamples[x] for x in testIdx]
else:
testExamples = []
testIdx = []
trainIdx = range(len(namedExamples))
trainExamples = namedExamples
if details.filterFrac != 0.0:
# if we're doing quantization on the fly, we need to handle that here:
if hasattr(details,'activityBounds') and details.activityBounds:
tExamples = []
bounds = details.activityBounds
for pt in trainExamples:
pt = pt[:]
act = pt[-1]
placed=0
bound=0
while not placed and bound < len(bounds):
if act < bounds[bound]:
pt[-1] = bound
placed = 1
else:
bound += 1
if not placed:
pt[-1] = bound
tExamples.append(pt)
else:
bounds = None
tExamples = trainExamples
trainIdx,temp = DataUtils.FilterData(tExamples,details.filterVal,
details.filterFrac,-1,
indicesOnly=1)
tmp = [trainExamples[x] for x in trainIdx]
testExamples += [trainExamples[x] for x in temp]
trainExamples = tmp
counts = DataUtils.CountResults(trainExamples,bounds=bounds)
ks = counts.keys()
ks.sort()
message('Result Counts in training set:')
for k in ks:
message(str((k, counts[k])))
counts = DataUtils.CountResults(testExamples,bounds=bounds)
ks = counts.keys()
ks.sort()
message('Result Counts in test set:')
for k in ks:
message(str((k, counts[k])))
nExamples = len(trainExamples)
message('Training with %d examples'%(nExamples))
nVars = data.GetNVars()
attrs = range(1,nVars+1)
nPossibleVals = data.GetNPossibleVals()
for i in range(1,len(nPossibleVals)):
if nPossibleVals[i-1] == -1:
attrs.remove(i)
if details.pickleDataFileName != '':
pickleDataFile = open(details.pickleDataFileName,'wb+')
cPickle.dump(trainExamples,pickleDataFile)
cPickle.dump(testExamples,pickleDataFile)
pickleDataFile.close()
if details.bayesModel:
composite = BayesComposite.BayesComposite()
else:
composite = Composite.Composite()
composite._randomSeed = seed
composite._splitFrac = details.splitFrac
composite._shuffleActivities = details.shuffleActivities
composite._randomizeActivities = details.randomActivities
if hasattr(details,'filterFrac'):
composite._filterFrac = details.filterFrac
if hasattr(details,'filterVal'):
composite._filterVal = details.filterVal
composite.SetModelFilterData(details.modelFilterFrac, details.modelFilterVal)
composite.SetActivityQuantBounds(details.activityBounds)
nPossibleVals = data.GetNPossibleVals()
if details.activityBounds:
nPossibleVals[-1] = len(details.activityBounds)+1
if setDescNames:
composite.SetInputOrder(data.GetVarNames())
composite.SetDescriptorNames(details._descNames)
else:
composite.SetDescriptorNames(data.GetVarNames())
composite.SetActivityQuantBounds(details.activityBounds)
if details.nModels==1:
details.internalHoldoutFrac=0.0
if details.useTrees:
from rdkit.ML.DecTree import CrossValidate,PruneTree
if details.qBounds != []:
from rdkit.ML.DecTree import BuildQuantTree
builder = BuildQuantTree.QuantTreeBoot
else:
from rdkit.ML.DecTree import ID3
builder = ID3.ID3Boot
driver = CrossValidate.CrossValidationDriver
pruner = PruneTree.PruneTree
composite.SetQuantBounds(details.qBounds)
nPossibleVals = data.GetNPossibleVals()
if details.activityBounds:
nPossibleVals[-1] = len(details.activityBounds)+1
composite.Grow(trainExamples,attrs,nPossibleVals=[0]+nPossibleVals,
buildDriver=driver,
pruner=pruner,
nTries=details.nModels,pruneIt=details.pruneIt,
lessGreedy=details.lessGreedy,needsQuantization=0,
treeBuilder=builder,nQuantBounds=details.qBounds,
startAt=details.startAt,
maxDepth=details.limitDepth,
progressCallback=progressCallback,
holdOutFrac=details.internalHoldoutFrac,
replacementSelection=details.replacementSelection,
recycleVars=details.recycleVars,
randomDescriptors=details.randomDescriptors,
silent=not _verbose)
elif details.useSigTrees:
from rdkit.ML.DecTree import CrossValidate
from rdkit.ML.DecTree import BuildSigTree
builder = BuildSigTree.SigTreeBuilder
driver = CrossValidate.CrossValidationDriver
nPossibleVals = data.GetNPossibleVals()
if details.activityBounds:
nPossibleVals[-1] = len(details.activityBounds)+1
if hasattr(details,'sigTreeBiasList'):
biasList = details.sigTreeBiasList
else:
biasList=None
if hasattr(details,'useCMIM'):
useCMIM=details.useCMIM
else:
useCMIM=0
if hasattr(details,'allowCollections'):
allowCollections = details.allowCollections
else:
allowCollections=False
composite.Grow(trainExamples,attrs,nPossibleVals=[0]+nPossibleVals,
buildDriver=driver,
nTries=details.nModels,
needsQuantization=0,
treeBuilder=builder,
maxDepth=details.limitDepth,
progressCallback=progressCallback,
holdOutFrac=details.internalHoldoutFrac,
replacementSelection=details.replacementSelection,
recycleVars=details.recycleVars,
randomDescriptors=details.randomDescriptors,
biasList=biasList,
useCMIM=useCMIM,
allowCollection=allowCollections,
silent=not _verbose)
elif details.useKNN:
from rdkit.ML.KNN import CrossValidate
from rdkit.ML.KNN import DistFunctions
driver = CrossValidate.CrossValidationDriver
dfunc = ''
if (details.knnDistFunc == "Euclidean") :
dfunc = DistFunctions.EuclideanDist
elif (details.knnDistFunc == "Tanimoto"):
dfunc = DistFunctions.TanimotoDist
else:
assert 0,"Bad KNN distance metric value"
composite.Grow(trainExamples, attrs, nPossibleVals=[0]+nPossibleVals,
buildDriver=driver, nTries=details.nModels,
needsQuantization=0,
numNeigh=details.knnNeighs,
holdOutFrac=details.internalHoldoutFrac,
distFunc=dfunc)
elif details.useNaiveBayes or details.useSigBayes:
from rdkit.ML.NaiveBayes import CrossValidate
driver = CrossValidate.CrossValidationDriver
if not (hasattr(details,'useSigBayes') and details.useSigBayes):
composite.Grow(trainExamples, attrs, nPossibleVals=[0]+nPossibleVals,
buildDriver=driver, nTries=details.nModels,
needsQuantization=0, nQuantBounds=details.qBounds,
holdOutFrac=details.internalHoldoutFrac,
replacementSelection=details.replacementSelection,
mEstimateVal=details.mEstimateVal,
silent=not _verbose)
else:
if hasattr(details,'useCMIM'):
useCMIM=details.useCMIM
else:
useCMIM=0
composite.Grow(trainExamples, attrs, nPossibleVals=[0]+nPossibleVals,
buildDriver=driver, nTries=details.nModels,
needsQuantization=0, nQuantBounds=details.qBounds,
mEstimateVal=details.mEstimateVal,
useSigs=True,useCMIM=useCMIM,
holdOutFrac=details.internalHoldoutFrac,
replacementSelection=details.replacementSelection,
silent=not _verbose)
## elif details.useSVM:
## from rdkit.ML.SVM import CrossValidate
## driver = CrossValidate.CrossValidationDriver
## composite.Grow(trainExamples, attrs, nPossibleVals=[0]+nPossibleVals,
## buildDriver=driver, nTries=details.nModels,
## needsQuantization=0,
## cost=details.svmCost,gamma=details.svmGamma,
## weights=details.svmWeights,degree=details.svmDegree,
## type=details.svmType,kernelType=details.svmKernel,
## coef0=details.svmCoeff,eps=details.svmEps,nu=details.svmNu,
## cache_size=details.svmCache,shrinking=details.svmShrink,
## dataType=details.svmDataType,
## holdOutFrac=details.internalHoldoutFrac,
## replacementSelection=details.replacementSelection,
## silent=not _verbose)
else:
from rdkit.ML.Neural import CrossValidate
driver = CrossValidate.CrossValidationDriver
composite.Grow(trainExamples,attrs,[0]+nPossibleVals,nTries=details.nModels,
buildDriver=driver,needsQuantization=0)
composite.AverageErrors()
composite.SortModels()
modelList,counts,avgErrs = composite.GetAllData()
counts = numpy.array(counts)
avgErrs = numpy.array(avgErrs)
composite._varNames = data.GetVarNames()
for i in range(len(modelList)):
modelList[i].NameModel(composite._varNames)
# do final statistics
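  # The statistics below are count-weighted across the component models:
  # averageErr is the weighted mean error and avgDev the weighted mean
  # absolute deviation (numpy.sqrt(devs*devs) is simply abs(devs)).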
weightedErrs = counts*avgErrs
averageErr = sum(weightedErrs)/sum(counts)
devs = (avgErrs - averageErr)
devs = devs * counts
devs = numpy.sqrt(devs*devs)
avgDev = sum(devs)/sum(counts)
message('# Overall Average Error: %%% 5.2f, Average Deviation: %%% 6.2f'%(100.*averageErr,100.*avgDev))
if details.bayesModel:
composite.Train(trainExamples,verbose=0)
# blow out the saved examples and then save the composite:
composite.ClearModelExamples()
if saveIt:
composite.Pickle(details.outName)
details.model = DbModule.binaryHolder(cPickle.dumps(composite))
badExamples = []
if not details.detailedRes and (not hasattr(details,'noScreen') or not details.noScreen):
if details.splitRun:
message('Testing all hold-out examples')
wrong = testall(composite,testExamples,badExamples)
message('%d examples (%% %5.2f) were misclassified'%(len(wrong),
100.*float(len(wrong))/float(len(testExamples))))
_runDetails.holdout_error = float(len(wrong))/len(testExamples)
else:
message('Testing all examples')
wrong = testall(composite,namedExamples,badExamples)
message('%d examples (%% %5.2f) were misclassified'%(len(wrong),
100.*float(len(wrong))/float(len(namedExamples))))
_runDetails.overall_error = float(len(wrong))/len(namedExamples)
if details.detailedRes:
message('\nEntire data set:')
resTup = ScreenComposite.ShowVoteResults(range(data.GetNPts()),data,composite,
nPossibleVals[-1],details.threshold)
nGood,nBad,nSkip,avgGood,avgBad,avgSkip,voteTab = resTup
nPts = len(namedExamples)
nClass = nGood+nBad
_runDetails.overall_error = float(nBad) / nClass
_runDetails.overall_correct_conf = avgGood
_runDetails.overall_incorrect_conf = avgBad
_runDetails.overall_result_matrix = repr(voteTab)
nRej = nClass-nPts
if nRej > 0:
_runDetails.overall_fraction_dropped = float(nRej)/nPts
if details.splitRun:
message('\nHold-out data:')
resTup = ScreenComposite.ShowVoteResults(range(len(testExamples)),testExamples,
composite,
nPossibleVals[-1],details.threshold)
nGood,nBad,nSkip,avgGood,avgBad,avgSkip,voteTab = resTup
nPts = len(testExamples)
nClass = nGood+nBad
_runDetails.holdout_error = float(nBad) / nClass
_runDetails.holdout_correct_conf = avgGood
_runDetails.holdout_incorrect_conf = avgBad
_runDetails.holdout_result_matrix = repr(voteTab)
nRej = nClass-nPts
if nRej > 0:
_runDetails.holdout_fraction_dropped = float(nRej)/nPts
if details.persistTblName and details.dbName:
message('Updating results table %s:%s'%(details.dbName,details.persistTblName))
details.Store(db=details.dbName,table=details.persistTblName)
if details.badName != '':
badFile = open(details.badName,'w+')
for i in range(len(badExamples)):
ex = badExamples[i]
vote = wrong[i]
outStr = '%s\t%s\n'%(ex,vote)
badFile.write(outStr)
badFile.close()
composite.ClearModelExamples()
return composite
def RunIt(details,progressCallback=None,saveIt=1,setDescNames=0):
""" does the actual work of building a composite model
**Arguments**
- details: a _CompositeRun.CompositeRun_ object containing details
(options, parameters, etc.) about the run
- progressCallback: (optional) a function which is called with a single
argument (the number of models built so far) after each model is built.
- saveIt: (optional) if this is nonzero, the resulting model will be pickled
and dumped to the filename specified in _details.outName_
    - setDescNames: (optional) if nonzero, the composite's _SetInputOrder()_ method
      will be called using the results of the data set's _GetVarNames()_ method;
      it is assumed that the details object has a _descNames attribute, which
      is passed to the composite's _SetDescriptorNames()_ method. Otherwise
      (the default), _SetDescriptorNames()_ gets the results of _GetVarNames()_.
**Returns**
the composite model constructed
"""
details.rundate = time.asctime()
fName = details.tableName.strip()
if details.outName == '':
details.outName = fName + '.pkl'
if not details.dbName:
if details.qBounds != []:
data = DataUtils.TextFileToData(fName)
else:
data = DataUtils.BuildQuantDataSet(fName)
elif details.useSigTrees or details.useSigBayes:
details.tableName = fName
data = details.GetDataSet(pickleCol=0,pickleClass=DataStructs.ExplicitBitVect)
elif details.qBounds != [] or not details.useTrees:
details.tableName = fName
data = details.GetDataSet()
else:
data = DataUtils.DBToQuantData(details.dbName,fName,quantName=details.qTableName,
user=details.dbUser,password=details.dbPassword)
composite = RunOnData(details,data,progressCallback=progressCallback,
saveIt=saveIt,setDescNames=setDescNames)
return composite
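# Illustrative sketch (not part of the original module): one way to drive
# RunIt() programmatically rather than through the command line. The file
# name is hypothetical, the attributes mirror those set by ParseArgs() below,
# and the CompositeRun import at the top of this module is assumed.
def _exampleRunIt():
  details = SetDefaults(CompositeRun.CompositeRun())
  details.tableName = 'my_training_data.dat'  # hypothetical data file
  details.nModels = 10  # build a composite of 10 models
  details.splitRun = 1  # hold out part of the data for testing...
  details.splitFrac = 0.7  # ...training on a random 70% split
  return RunIt(details, saveIt=0)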
def ShowVersion(includeArgs=0):
""" prints the version number
"""
print('This is BuildComposite.py version %s' % (__VERSION_STRING))
if includeArgs:
import sys
print('command line was:')
print(' '.join(sys.argv))
def Usage():
""" provides a list of arguments for when this is used from the command line
"""
import sys
print(__doc__)
sys.exit(-1)
def SetDefaults(runDetails=None):
""" initializes a details object with default values
**Arguments**
- details: (optional) a _CompositeRun.CompositeRun_ object.
If this is not provided, the global _runDetails will be used.
**Returns**
the initialized _CompositeRun_ object.
"""
  if runDetails is None:
    runDetails = _runDetails
return CompositeRun.SetDefaults(runDetails)
def ParseArgs(runDetails):
""" parses command line arguments and updates _runDetails_
**Arguments**
- runDetails: a _CompositeRun.CompositeRun_ object.
"""
import getopt
args,extra = getopt.getopt(sys.argv[1:],'P:o:n:p:b:sf:F:v:hlgd:rSTt:BQ:q:DVG:N:L:',
['nRuns=','prune','profile',
'seed=','noScreen',
'modelFiltFrac=', 'modelFiltVal=',
'recycle','randomDescriptors=',
'doKnn','knnK=','knnTanimoto','knnEuclid',
'doSigTree','allowCollections',
'doNaiveBayes', 'mEstimateVal=',
'doSigBayes',
## 'doSVM','svmKernel=','svmType=','svmGamma=',
## 'svmCost=','svmWeights=','svmDegree=',
## 'svmCoeff=','svmEps=','svmNu=','svmCache=',
## 'svmShrink','svmDataType=',
'replacementSelection',
])
runDetails.profileIt=0
for arg,val in args:
if arg == '-n':
runDetails.nModels = int(val)
elif arg == '-N':
runDetails.note=val
elif arg == '-o':
runDetails.outName = val
elif arg == '-Q':
qBounds = eval(val)
assert type(qBounds) in [type([]),type(())],'bad argument type for -Q, specify a list as a string'
runDetails.activityBounds=qBounds
runDetails.activityBoundsVals=val
elif arg == '-p':
runDetails.persistTblName=val
elif arg == '-P':
runDetails.pickleDataFileName= val
elif arg == '-r':
runDetails.randomActivities = 1
elif arg == '-S':
runDetails.shuffleActivities = 1
elif arg == '-b':
runDetails.badName = val
elif arg == '-B':
runDetails.bayesModels=1
elif arg == '-s':
runDetails.splitRun = 1
elif arg == '-f':
runDetails.splitFrac=float(val)
elif arg == '-F':
runDetails.filterFrac=float(val)
elif arg == '-v':
runDetails.filterVal=float(val)
elif arg == '-l':
runDetails.lockRandom = 1
elif arg == '-g':
runDetails.lessGreedy=1
elif arg == '-G':
runDetails.startAt = int(val)
elif arg == '-d':
runDetails.dbName=val
elif arg == '-T':
runDetails.useTrees = 0
elif arg == '-t':
runDetails.threshold=float(val)
elif arg == '-D':
runDetails.detailedRes = 1
elif arg == '-L':
runDetails.limitDepth = int(val)
elif arg == '-q':
qBounds = eval(val)
assert type(qBounds) in [type([]),type(())],'bad argument type for -q, specify a list as a string'
runDetails.qBoundCount=val
runDetails.qBounds = qBounds
elif arg == '-V':
ShowVersion()
sys.exit(0)
elif arg == '--nRuns':
runDetails.nRuns = int(val)
elif arg == '--modelFiltFrac':
runDetails.modelFilterFrac=float(val)
elif arg == '--modelFiltVal':
runDetails.modelFilterVal=float(val)
elif arg == '--prune':
runDetails.pruneIt=1
elif arg == '--profile':
runDetails.profileIt=1
elif arg == '--recycle':
runDetails.recycleVars=1
elif arg == '--randomDescriptors':
runDetails.randomDescriptors=int(val)
elif arg == '--doKnn':
runDetails.useKNN=1
runDetails.useTrees=0
## runDetails.useSVM=0
runDetails.useNaiveBayes=0
elif arg == '--knnK':
runDetails.knnNeighs = int(val)
elif arg == '--knnTanimoto':
runDetails.knnDistFunc="Tanimoto"
elif arg == '--knnEuclid':
runDetails.knnDistFunc="Euclidean"
elif arg == '--doSigTree':
## runDetails.useSVM=0
runDetails.useKNN=0
runDetails.useTrees=0
runDetails.useNaiveBayes=0
runDetails.useSigTrees=1
elif arg == '--allowCollections':
runDetails.allowCollections=True
elif arg == '--doNaiveBayes':
runDetails.useNaiveBayes=1
## runDetails.useSVM=0
runDetails.useKNN=0
runDetails.useTrees=0
runDetails.useSigBayes=0
elif arg == '--doSigBayes':
runDetails.useSigBayes=1
runDetails.useNaiveBayes=0
## runDetails.useSVM=0
runDetails.useKNN=0
runDetails.useTrees=0
elif arg == '--mEstimateVal':
runDetails.mEstimateVal=float(val)
## elif arg == '--doSVM':
## runDetails.useSVM=1
## runDetails.useKNN=0
## runDetails.useTrees=0
## runDetails.useNaiveBayes=0
## elif arg == '--svmKernel':
## if val not in SVM.kernels.keys():
## message('kernel %s not in list of available kernels:\n%s\n'%(val,SVM.kernels.keys()))
## sys.exit(-1)
## else:
## runDetails.svmKernel=SVM.kernels[val]
## elif arg == '--svmType':
## if val not in SVM.machineTypes.keys():
## message('type %s not in list of available machines:\n%s\n'%(val,SVM.machineTypes.keys()))
## sys.exit(-1)
## else:
## runDetails.svmType=SVM.machineTypes[val]
## elif arg == '--svmGamma':
## runDetails.svmGamma = float(val)
## elif arg == '--svmCost':
## runDetails.svmCost = float(val)
## elif arg == '--svmWeights':
## # FIX: this is dangerous
## runDetails.svmWeights = eval(val)
## elif arg == '--svmDegree':
## runDetails.svmDegree = int(val)
## elif arg == '--svmCoeff':
## runDetails.svmCoeff = float(val)
## elif arg == '--svmEps':
## runDetails.svmEps = float(val)
## elif arg == '--svmNu':
## runDetails.svmNu = float(val)
## elif arg == '--svmCache':
## runDetails.svmCache = int(val)
## elif arg == '--svmShrink':
## runDetails.svmShrink = 0
## elif arg == '--svmDataType':
## runDetails.svmDataType=val
elif arg== '--seed':
# FIX: dangerous
runDetails.randomSeed = eval(val)
elif arg== '--noScreen':
runDetails.noScreen=1
elif arg== '--replacementSelection':
runDetails.replacementSelection = 1
elif arg == '-h':
Usage()
else:
Usage()
runDetails.tableName=extra[0]
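# Example invocation (illustrative; the file names are hypothetical):
#   python BuildComposite.py -n 10 -s -f 0.7 -o models.pkl my_data.csv
# builds a composite of 10 models on a random 70/30 train/holdout split of
# my_data.csv and pickles the result to models.pkl.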
if __name__ == '__main__':
if len(sys.argv) < 2:
Usage()
_runDetails.cmd = ' '.join(sys.argv)
SetDefaults(_runDetails)
ParseArgs(_runDetails)
ShowVersion(includeArgs=1)
if _runDetails.nRuns > 1:
for i in range(_runDetails.nRuns):
sys.stderr.write('---------------------------------\n\tDoing %d of %d\n---------------------------------\n'%(i+1,_runDetails.nRuns))
RunIt(_runDetails)
else:
if _runDetails.profileIt:
import hotshot,hotshot.stats
prof=hotshot.Profile('prof.dat')
prof.runcall(RunIt,_runDetails)
stats = hotshot.stats.load('prof.dat')
stats.strip_dirs()
stats.sort_stats('time','calls')
stats.print_stats(30)
else:
RunIt(_runDetails)
|
adalke/rdkit
|
rdkit/ML/BuildComposite.py
|
Python
|
bsd-3-clause
| 36,064
|
[
"RDKit"
] |
8fa5f18c4d6ed88b6bc2d5f5b007eeef7108ce2f1ca37739239437014faa57a3
|
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
from os.path import abspath
from spdx.checksum import Algorithm
from spdx.creationinfo import Tool
from spdx.document import Document
from spdx.document import License
from spdx.document import ExtractedLicense
from spdx.file import File
from spdx.package import Package
from spdx.utils import NoAssert
from spdx.utils import SPDXNone
from spdx.version import Version
from plugincode.output import scan_output_writer
"""
Output plugins to write scan results in SPDX format.
"""
@scan_output_writer
def write_spdx_tag_value(files_count, version, notice, scanned_files, input, output_file, *args, **kwargs):
"""
Write scan output formatted as SPDX Tag/Value.
"""
write_spdx(version, notice, scanned_files, input, output_file, as_tagvalue=True)
@scan_output_writer
def write_spdx_rdf(files_count, version, notice, scanned_files, input, output_file, *args, **kwargs):
"""
Write scan output formatted as SPDX RDF.
"""
write_spdx(version, notice, scanned_files, input, output_file, as_tagvalue=False)
def write_spdx(version, notice, scanned_files, input, output_file, as_tagvalue=True):
"""
Write scan output formatted as SPDX Tag/value or RDF.
"""
absinput = abspath(input)
if os.path.isdir(absinput):
input_path = absinput
else:
input_path = os.path.dirname(absinput)
doc = Document(Version(2, 1), License.from_identifier('CC0-1.0'))
doc.comment = notice
doc.creation_info.add_creator(Tool('ScanCode ' + version))
doc.creation_info.set_created_now()
doc.package = Package(os.path.basename(input_path), NoAssert())
# Use a set of unique copyrights for the package.
doc.package.cr_text = set()
all_files_have_no_license = True
all_files_have_no_copyright = True
for file_data in scanned_files:
# Construct the absolute path in case we need to access the file
# to calculate its SHA1.
file_entry = File(os.path.join(input_path, file_data.get('path')))
file_sha1 = file_data.get('sha1')
if not file_sha1:
if os.path.isfile(file_entry.name):
# Calculate the SHA1 in case it is missing, e.g. for empty files.
file_sha1 = file_entry.calc_chksum()
else:
# Skip directories.
continue
# Restore the relative file name as that is what we want in
# SPDX output (with explicit leading './').
file_entry.name = './' + file_data.get('path')
file_entry.chk_sum = Algorithm('SHA1', file_sha1)
file_licenses = file_data.get('licenses')
if file_licenses:
all_files_have_no_license = False
for file_license in file_licenses:
spdx_id = file_license.get('spdx_license_key')
if spdx_id:
spdx_license = License.from_identifier(spdx_id)
else:
license_key = file_license.get('key')
# FIXME: we should prefix this with ScanCode-
licenseref_id = 'LicenseRef-' + license_key
spdx_license = ExtractedLicense(licenseref_id)
spdx_license.name = file_license.get('short_name')
comment = 'See details at https://github.com/nexB/scancode-toolkit/blob/develop/src/licensedcode/data/licenses/%s.yml\n' % license_key
spdx_license.comment = comment
text = file_license.get('matched_text')
# always set some text, even if we did not extract the matched text
if not text:
text = comment
spdx_license.text = text
doc.add_extr_lic(spdx_license)
# Add licenses in the order they appear in the file. Maintaining the order
# might be useful for provenance purposes.
file_entry.add_lics(spdx_license)
doc.package.add_lics_from_file(spdx_license)
elif file_licenses is None:
all_files_have_no_license = False
file_entry.add_lics(NoAssert())
else:
file_entry.add_lics(SPDXNone())
file_entry.conc_lics = NoAssert()
file_copyrights = file_data.get('copyrights')
if file_copyrights:
all_files_have_no_copyright = False
file_entry.copyright = []
for file_copyright in file_copyrights:
file_entry.copyright.extend(file_copyright.get('statements'))
doc.package.cr_text.update(file_entry.copyright)
# Create a text of copyright statements in the order they appear in the file.
# Maintaining the order might be useful for provenance purposes.
file_entry.copyright = '\n'.join(file_entry.copyright) + '\n'
elif file_copyrights is None:
all_files_have_no_copyright = False
file_entry.copyright = NoAssert()
else:
file_entry.copyright = SPDXNone()
doc.package.add_file(file_entry)
if len(doc.package.files) == 0:
if as_tagvalue:
output_file.write("# No results for package '{}'.\n".format(doc.package.name))
else:
output_file.write("<!-- No results for package '{}'. -->\n".format(doc.package.name))
# Remove duplicate licenses from the list for the package.
unique_licenses = set(doc.package.licenses_from_files)
if not len(doc.package.licenses_from_files):
if all_files_have_no_license:
doc.package.licenses_from_files = [SPDXNone()]
else:
doc.package.licenses_from_files = [NoAssert()]
else:
# List license identifiers alphabetically for the package.
doc.package.licenses_from_files = sorted(unique_licenses, key=lambda x: x.identifier)
if len(doc.package.cr_text) == 0:
if all_files_have_no_copyright:
doc.package.cr_text = SPDXNone()
else:
doc.package.cr_text = NoAssert()
else:
# Create a text of alphabetically sorted copyright
# statements for the package.
doc.package.cr_text = '\n'.join(sorted(doc.package.cr_text)) + '\n'
doc.package.verif_code = doc.package.calc_verif_code()
doc.package.license_declared = NoAssert()
doc.package.conc_lics = NoAssert()
if as_tagvalue:
from spdx.writers.tagvalue import write_document
else:
from spdx.writers.rdf import write_document
# As the spdx-tools package can only write the document to a
# "str" file but ScanCode provides a "unicode" file, write to a
# "str" buffer first and then manually write the value to a
# "unicode" file.
from StringIO import StringIO
str_buffer = StringIO()
write_document(doc, str_buffer, validate=True)
as_unicode = str_buffer.getvalue().decode('utf-8')
output_file.write(as_unicode)
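# Illustrative sketch (not part of the original plugin): the minimal shape of
# the scanned_files records consumed by write_spdx() above. Only the keys this
# module actually reads are shown; the path, checksum, license and copyright
# values are hypothetical.
def _example_write_spdx(output_file):
    scanned_files = [{
        'path': 'src/foo.py',
        'sha1': 'da39a3ee5e6b4b0d3255bfef95601890afd80709',
        'licenses': [{
            'spdx_license_key': 'Apache-2.0',
            'key': 'apache-2.0',
            'short_name': 'Apache 2.0',
            'matched_text': 'Licensed under the Apache License',
        }],
        'copyrights': [{'statements': ['Copyright (c) 2017 nexB Inc.']}],
    }]
    write_spdx('2.2.1', 'generated with ScanCode', scanned_files,
               'src', output_file, as_tagvalue=True)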
|
yashdsaraf/scancode-toolkit
|
src/formattedcode/format_spdx.py
|
Python
|
apache-2.0
| 8,459
|
[
"VisIt"
] |
0cc4c94776e345bd075593eb81b6b08c5b0f6fbf7b9d8f6d04d625839dc6d855
|
from __future__ import annotations
class ParameterReporter:
"""
Keeps a record of all the ModelParameterisations and
ScanVaryingModelParameterisations present and provides access to their
Parameters and ScanVaryingParameterSets for reporting purposes.
It is assumed that the provided model parameterisations will be one of five
types:
* Detector parameterisation
* Beam parameterisation
* Crystal orientation parameterisation
* Crystal unit cell parameterisation
* Goniometer setting parameterisation
"""
def __init__(
self,
detector_parameterisations=None,
beam_parameterisations=None,
xl_orientation_parameterisations=None,
xl_unit_cell_parameterisations=None,
goniometer_parameterisations=None,
):
if detector_parameterisations is None:
detector_parameterisations = []
if beam_parameterisations is None:
beam_parameterisations = []
if xl_orientation_parameterisations is None:
xl_orientation_parameterisations = []
if xl_unit_cell_parameterisations is None:
xl_unit_cell_parameterisations = []
if goniometer_parameterisations is None:
goniometer_parameterisations = []
# Keep references to all parameterised models
self._detector_parameterisations = detector_parameterisations
self._beam_parameterisations = beam_parameterisations
self._xl_orientation_parameterisations = xl_orientation_parameterisations
self._xl_unit_cell_parameterisations = xl_unit_cell_parameterisations
self._goniometer_parameterisations = goniometer_parameterisations
self._length = self._len()
def _len(self):
length = 0
for model in self._detector_parameterisations:
length += model.num_free()
for model in self._beam_parameterisations:
length += model.num_free()
for model in self._xl_orientation_parameterisations:
length += model.num_free()
for model in self._xl_unit_cell_parameterisations:
length += model.num_free()
for model in self._goniometer_parameterisations:
length += model.num_free()
return length
def __len__(self):
return self._length
def _indent(self, string):
return "\n".join(" " + e for e in str(string).split("\n"))
def __str__(self):
s = "Parameter Report:\n"
if self._detector_parameterisations:
s += "Detector parameters:\n"
det_plists = [x.get_params() for x in self._detector_parameterisations]
params = [x for l in det_plists for x in l]
for p in params:
tmp = self._indent(p)
s += tmp + "\n"
if self._beam_parameterisations:
s += "Beam parameters:\n"
beam_plists = [x.get_params() for x in self._beam_parameterisations]
params = [x for l in beam_plists for x in l]
for p in params:
tmp = self._indent(p)
s += tmp + "\n"
if self._xl_orientation_parameterisations:
s += "Crystal orientation parameters:\n"
xlo_plists = [
x.get_params() for x in self._xl_orientation_parameterisations
]
params = [x for l in xlo_plists for x in l]
for p in params:
tmp = self._indent(p)
s += tmp + "\n"
if self._xl_unit_cell_parameterisations:
s += "Crystal unit cell parameters:\n"
xluc_plists = [x.get_params() for x in self._xl_unit_cell_parameterisations]
params = [x for l in xluc_plists for x in l]
for p in params:
tmp = self._indent(p)
s += tmp + "\n"
if self._goniometer_parameterisations:
s += "Goniometer parameters:\n"
gon_plists = [x.get_params() for x in self._goniometer_parameterisations]
params = [x for l in gon_plists for x in l]
for p in params:
tmp = self._indent(p)
s += tmp + "\n"
return s
def varying_params_vs_image_number(self, image_range):
"""Returns a string which is a table of scan-varying parameter values vs
image number, if scan-varying parameters are present. Otherwise returns
None"""
image_numbers = list(range(image_range[0], image_range[1] + 1))
columns = [TableColumn("Image", image_numbers)]
for parameterisation in (
self._detector_parameterisations
+ self._beam_parameterisations
+ self._xl_orientation_parameterisations
+ self._xl_unit_cell_parameterisations
+ self._goniometer_parameterisations
):
for p in parameterisation.get_params():
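                # Only scan-varying parameterisations provide
                # get_smoothed_parameter_value(); static parameterisations
                # raise AttributeError here and are skipped.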
try:
vals = [
parameterisation.get_smoothed_parameter_value(i, p)
for i in image_numbers
]
columns.append(TableColumn(p.name_stem, vals))
except AttributeError:
continue
if len(columns) > 1:
header = "\t".join([e.title for e in columns])
text = header + "\n"
for i in range(len(columns[0])):
vals = "\t".join(["%.6f" % e.values[i] for e in columns])
text += vals + "\n"
return text
else:
return None
def get_params(self, only_free=True):
"""return a concatenated list of parameters from each of the components
in the global model"""
global_p_list = []
for parameterisation in (
self._detector_parameterisations
+ self._beam_parameterisations
+ self._xl_orientation_parameterisations
+ self._xl_unit_cell_parameterisations
+ self._goniometer_parameterisations
):
global_p_list.extend(parameterisation.get_params(only_free))
return global_p_list
class TableColumn:
"""Bucket to store data to be used for constructing tables to print."""
def __init__(self, title, values):
self._title = title
self._values = values
def __len__(self):
return len(self._values)
@property
def title(self):
return self._title
@property
def values(self):
return self._values
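# Illustrative sketch (not part of the original module): TableColumn is just a
# title/values bucket; varying_params_vs_image_number() above assembles such
# columns into a tab-separated table, one row per image. The column names and
# values here are hypothetical.
def _example_table():
    columns = [
        TableColumn("Image", [1, 2, 3]),
        TableColumn("Dist1", [100.01, 100.02, 100.03]),
    ]
    header = "\t".join(c.title for c in columns)
    rows = (
        "\t".join("%.6f" % c.values[i] for c in columns)
        for i in range(len(columns[0]))
    )
    return "\n".join([header] + list(rows))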
|
dials/dials
|
algorithms/refinement/parameterisation/parameter_report.py
|
Python
|
bsd-3-clause
| 6,541
|
[
"CRYSTAL"
] |
d4823aa3c9627b99a4512e34544d7c1262450b4e4dec0ae7605311f168867d8a
|
# GeneaCrystal Copyright (C) 2012-2013
# Christian Jaeckel, <christian.doe@gmail.com>
# Frederic Kerber, <fkerber@gmail.com>
# Pascal Lessel, <maverickthe6@gmail.com>
# Michael Mauderer, <mail@michaelmauderer.de>
#
# GeneaCrystal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GeneaCrystal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GeneaCrystal. If not, see <http://www.gnu.org/licenses/>.
from geneacrystal import gameElements, physic, util
import pymunk
from libavg import avg
from geneacrystal.gameElements import items, GameElementBase
import math
from geneacrystal.nodes import StaticOverlayNode
class PlayerBase(GameElementBase):
def __init__(self, crystalGenerator, hitPoints=None, timeoutDuration=None,
showExitButton=True, infoManager=None, *args, **kwargs):
self._crystalGenerator = crystalGenerator
self._maxHitPoints = hitPoints
self._hitPoints = self._maxHitPoints
GameElementBase.__init__(self, *args, **kwargs)
self._timeoutCallback = None
self._timeoutTimer = None
self.timeoutDuration = timeoutDuration
self.destructionCallback = None
self._spawnPos = self._getSpawnPos()
self._infoManager = infoManager
self._scoreDisplay = None
self._exitButton = None
if self.owner is not None:
self.owner.setCannon(self)
if infoManager is not None:
button, box = self._makeInfoElements()
self._placeInfoElements(button, box)
self._scoreDisplay = self._makeScoreCounter()
if self._scoreDisplay is not None:
self._alignScore(self._scoreDisplay)
self.owner.setOnScoreChangeCallback(self._setScore)
if showExitButton:
self._makeExitButton()
self.startCrystalSpawn()
def _setScore(self, value):
if self._scoreDisplay is not None:
self._scoreDisplay.text = str(value)
self._alignScore(self._scoreDisplay)
def _alignScore(self, scoreNode ):
self._scorePosition = (-self.size[0]*0.18+self.size[0]*0.01,
-self.size[1]*0.18-self.size[1]*0.01)
util.centerNodeOnPosition(scoreNode, self._scorePosition)
scoreNode.angle = math.pi/4
@property
def timeOutCB(self):
return self._timeoutCallback
@timeOutCB.setter
def timeOutCB(self, value):
self._timeoutCallback = value
self._activateTimeOut()
def _activateTimeOut(self):
if self._timeoutTimer is not None:
avg.Player.get().clearInterval(self._timeoutTimer)
if self._timeoutCallback is not None and self.timeoutDuration is not None:
self._timeoutTimer = avg.Player.get().setTimeout(self.timeoutDuration,
self._timeoutCallback)
def _resetTimeout(self):
self._activateTimeOut()
def _getSpawnPos(self):
raise NotImplementedError
    def _placeInfoElements(self, button, box):
raise NotImplementedError
def _getCreateCrystalIfEmpty(self):
if self.getItemOnLauncher() is not None:
return
self._resetTimeout()
color, crystalClass = self._crystalGenerator.getNextItemOrCrystalConstructor()
if color is None:
crystal = crystalClass(space = self._space,
parent=self._root.getParent(),
position = self._spawnPos,
owner=self.owner,
helpSystem=self._helpSystem
)
else:
crystal = crystalClass(color=color,
space = self._space,
parent=self._root.getParent(),
position = self._spawnPos,
owner=self.owner,
helpSystem=self._helpSystem
)
crystal.rotationSpeed = 2
crystal.rotatioNenabled = True
def getItemOnLauncher(self):
shape = self.getShapesOnLauncher()
if shape is None or not isinstance(shape.body.gameElement, items.Item):
return None
else:
crystal = shape.body.gameElement
return crystal
def getShapesOnLauncher(self):
intersectionShapes = self._space.shape_query(self._sensorShape)
intersectionShapes = filter(lambda shape: isinstance(shape.body,physic.BaseBody),
intersectionShapes)
intersectionCircles = filter(lambda shape: isinstance(shape.body.gameElement,
items.Item),
intersectionShapes)
if len(intersectionCircles) == 0:
return None
else:
return intersectionCircles[0]
def _initPhysic(self, position, angle):
self._body = physic.BaseBody(self, None,None)
self._body.position = position
self._body.angle = angle
points = [(0,0), (self.size[0],0), self.size, (0,self.size[1])]
points = map(lambda p: util.vectorSub(p, util.vectorMult(self.size, 0.5)),
points)
shape = pymunk.Poly(self._body, points)
shape.collision_type = physic.CannonCollisionType
shape.elasticity = 1
self._addShape(shape)
sensorPoints = map(lambda p: util.vectorMult(p, 0.9), points)
self._sensorShape = pymunk.Poly(self._body, sensorPoints)
self._sensorShape.sensor = True
self._addShape(self._sensorShape)
self._addLaunchAreaDivider()
def _setBackground(self):
pass
def _initLibavg(self, root):
self._setBackground()
if self._hitPoints is not None:
self._makeLifebars()
def onCrystalCollision(self, other, dX, dY):
return dX > self.size[0]/2 or dY > self.size[1]/2
def delete(self):
if not self.alive:
return
self.stopCrystalSpawn()
if self.destructionCallback is not None:
self.destructionCallback()
self.destructionCallback = None
if self.owner is not None:
self.owner.setCannon(None)
self.owner.scoreChangeCallbacks = []
crystal = self.getItemOnLauncher()
if crystal is not None:
crystal.delete()
if self._exitButton is not None:
self._exitButton.unlink(True)
self._exitButton = None
if self._scoreDisplay is not None:
self._scoreDisplay.unlink(True)
self._scoreDisplay = None
if self._timeoutTimer is not None:
avg.Player.get().clearInterval(self._timeoutTimer)
self._timeoutCallback = None
gameElements.GameElementBase.delete(self)
def applyDamage(self, value):
if self._hitPoints is None or not self.alive:
return
self._hitPoints -= value
if self._hitPoints <=0:
self.delete()
return
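        # Shrink the green bar to the fraction of hit points remaining; the
        # red bar fills the remainder of the fixed-length gauge below it.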
greenSizeY = self._lifeBarLength * self._hitPoints/self._maxHitPoints
redSizeY = self._lifeBarLength - greenSizeY
redPosY = self._lifeBarLength - redSizeY
self._lifebarGreen.size = self._lifebarGreen.size[0], greenSizeY
self._lifebarRed.pos = self._lifebarRed.pos[0], redPosY
self._lifebarRed.size = self._lifebarRed.size[0], redSizeY
def _makeLifebars(self):
lifebarDiv = avg.DivNode(parent=self._root)
lifebarDiv.angle = -math.pi/4
lifebarDiv.pos = -self.size[1]*0.10,-self.size[1]*0.44
self._lifeBarLength = math.sqrt(self.size[0]**2+self.size[1]**2)*0.5
self._lifebarGreen = avg.RectNode(pos=(0,0),
parent=lifebarDiv,
fillcolor="00FF00",
color="000000",
fillopacity=0.5,
size=(self.size[0]/20, self._lifeBarLength),
)
self._lifebarRed = avg.RectNode(pos =(0,self._lifeBarLength),
parent=lifebarDiv,
fillcolor="FF0000",
color="000000",
fillopacity=0.5,
size=(self.size[0]/20, 0),
)
def hide(self):
GameElementBase.hide(self)
crystal = self.getItemOnLauncher()
if crystal is not None:
crystal.delete()
self.stopCrystalSpawn()
def show(self):
GameElementBase.show(self)
crystal = self.getItemOnLauncher()
if crystal is not None:
crystal.delete()
self.startCrystalSpawn()
def onStructureCollision(self, other):
self.delete()
def stopCrystalSpawn(self):
avg.Player.get().clearInterval(self._spawnTimer)
def startCrystalSpawn(self):
player = avg.Player.get()
self._spawnTimer = player.setInterval(500, self._getCreateCrystalIfEmpty)
def _makeInfoElements(self):
minSize = min(self._parent.size)*0.85
infoButtonSize = self.size[0]/3,self.size[1]/3
infoButton = self._infoManager.getInfoButton(parent=self._root, size=infoButtonSize)
util.centerNodeOnPosition(infoButton, (0,0))
infoBox = self._infoManager.getInfoBox(parent=self._root, size =(minSize/3, minSize/4) )
util.centerNodeOnPosition(infoBox, (0,0))
return infoButton, infoBox
def _makeExitButton(self):
pos = self.size[0]*0.1, -self.size[1] *0
self._exitButton = StaticOverlayNode(self.delete, parent=self._root,
size=util.vectorMult(self.size, 0.25),
pos=pos)
def _makeScoreCounter(self):
pass
class DiagonalPlayerBase3Room(PlayerBase):
infoKey = "breakable_base"
def _getSpawnPos(self):
spawnPos = util.vectorMult((self.size[0], -self.size[1]), 0.5)
spawnPos = util.vectorSub(spawnPos, (util.CRYSTAL_SIZE,-util.CRYSTAL_SIZE))
spawnPos = self._root.getAbsPos(spawnPos)
return spawnPos
def _placeInfoElements(self, button, box):
button.angle = math.pi/4
box.angle = math.pi/4
angle = -math.pi/4
baseDisplacement = math.sqrt(2*(self.size[0]**2)) /2.0
        infoButtonAdjustment = util.getVectotInDirection(angle,
                -baseDisplacement + math.sqrt(2*(button.size[1]**2))/2)
        infoBoxAdjustment = util.getVectotInDirection(angle,
                baseDisplacement + box.size[1]/2)
        util.centerNodeOnPosition(button, infoButtonAdjustment)
        util.centerNodeOnPosition(box, infoBoxAdjustment)
def _setBackground(self):
self._theme.BaseDiagonal3Room(parent=self._root, size=self.size)
def _addLaunchAreaDivider(self):
a = -self.size[0]*0.15, -self.size[1]/2
b=self.size[0]/2, self.size[1]*0.15
divider = pymunk.Segment(self._body, b, a, 1)
divider.elasticity = 1
self._addShape(divider)
def _makeExitButton(self):
pos = self.size[0]*0.065, self.size[1] *0.065
self._exitButton = StaticOverlayNode(self.delete, parent=self._root,
size=(50,50), pos=pos, angle=math.pi/4)
def _makeScoreCounter(self):
scoreCounter = avg.WordsNode( color="FFFFFF", fontsize=25,
parent = self._root, text = "0",
sensitive=False,
#alignment="center",
)
return scoreCounter
class DiagonalPlayerBase2Room(DiagonalPlayerBase3Room):
infoKey = "simple_base"
def _placeInfoElements(self, button, box):
button.angle = math.pi/4
box.angle = math.pi/4
angle = -math.pi/4
baseDisplacement = math.sqrt(2*(self.size[0]**2)) /2.0
        infoBoxAdjustment = util.getVectotInDirection(angle,
                baseDisplacement + box.size[1]/2)
        util.centerNodeOnPosition(button, (self.size[0]*0.17, self.size[1]*0.2))
        util.centerNodeOnPosition(box, infoBoxAdjustment)
def _setBackground(self):
self._theme.BaseDiagonal2Room(parent=self._root, size=self.size)
def _makeExitButton(self):
pos = -self.size[0]*0.29, -self.size[1] *0.29
self._exitButton = StaticOverlayNode(self.delete, parent=self._root,
size=util.vectorMult(self.size, 0.25),
pos=pos,
angle=math.pi/4)
def _makeScoreCounter(self):
return None
class AlignedPlayerBase(PlayerBase):
infoKey = "score_base"
def _getSpawnPos(self):
spawnPos = 0, -self.size[1]/2
spawnPos = util.vectorAdd(spawnPos, (0,util.CRYSTAL_SIZE))
spawnPos = self._root.getAbsPos(spawnPos)
return spawnPos
def _placeInfoElements(self, button, box):
angle = math.pi/2
baseDisplacement = math.sqrt(2*(self.size[0]**2)) /2.0
        infoBoxAdjustment = util.getVectotInDirection(angle,
                -baseDisplacement - box.size[1]/2)
        buttonPos = (-self.size[0]*0.23, self.size[1]*0.27)
        button.size = util.vectorMult(self.size, 0.20)
        util.centerNodeOnPosition(button, buttonPos)
        util.centerNodeOnPosition(box, infoBoxAdjustment)
def _setBackground(self):
self._theme.BaseAligned(parent=self._root, size=self.size)
def _makeScoreCounter(self):
scoreCounter = avg.WordsNode( color="FFFFFF", fontsize=20,
parent = self._root, text = "0",
sensitive=False,
)
return scoreCounter
def _alignScore(self, scoreNode):
scorePos = (self.size[0]*0, -self.size[1]*0.015)
util.centerNodeOnPosition(scoreNode, scorePos)
def _addLaunchAreaDivider(self):
a = -self.size[0]/2, -self.size[1]*0.1
b=self.size[0]/2, -self.size[1]*0.1
divider = pymunk.Segment(self._body, b, a, 0)
divider.elasticity = 1
self._addShape(divider)
def _makeExitButton(self):
pos = self.size[0]*0.125, self.size[1] *0.165
self._exitButton = StaticOverlayNode(self.delete, parent=self._root,
size=util.vectorMult(self.size, 0.2),
pos=pos)
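# Illustrative sketch (not part of the original module): PlayerBase acts as a
# template class, and the concrete bases above override a small set of hooks.
# A hypothetical minimal base supplies the four hooks that _initPhysic(),
# _initLibavg() and the crystal-spawn logic rely on:
class _MinimalPlayerBase(PlayerBase):
    infoKey = "minimal_base"  # hypothetical info key

    def _getSpawnPos(self):
        # Spawn new crystals just above the centre of the base.
        return self._root.getAbsPos((0, -self.size[1] / 2))

    def _placeInfoElements(self, button, box):
        util.centerNodeOnPosition(button, (0, 0))
        util.centerNodeOnPosition(box, (0, 0))

    def _setBackground(self):
        pass  # no themed background in this sketch

    def _addLaunchAreaDivider(self):
        pass  # no physics divider between launch area and play field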
|
MichaelMauderer/GeneaCrystal
|
geneacrystal/gameElements/playerBases.py
|
Python
|
gpl-3.0
| 16,824
|
[
"CRYSTAL"
] |
4e1cc7a3868f6a0ca0fc6f90e6833d8f6570d594bbeab756f7e3be1f0f78a567
|
# -*- coding: utf-8 -*-
import logging
from collections import OrderedDict
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
from CIP.logic.SlicerUtil import SlicerUtil
from CIP.logic import Util
from CIP.ui import CaseReportsWidget
#
# CIP_PAARatio
#
class CIP_PAARatio(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "PAA Ratio"
self.parent.categories = SlicerUtil.CIP_ModulesCategory
self.parent.dependencies = [SlicerUtil.CIP_ModuleName]
self.parent.contributors = ["Jorge Onieva (jonieva@bwh.harvard.edu)", "Applied Chest Imaging Laboratory", "Brigham and Women's Hospital"]
        self.parent.helpText = """Calculate the ratio between the diameters of the pulmonary artery and the aorta.<br>
            A quick tutorial for the module can be found <a href='https://chestimagingplatform.org/files/chestimagingplatform/files/paa_ratio.pdf'>here</a>.<br><br>
            The PAA ratio biomarker has been shown to predict acute exacerbations of COPD (Wells, J. M., Washko, G. R.,
            Han, M. K., Abbas, N., Nath, H., Mamary, A. J., Dransfield, M. T. (2012).
            Pulmonary Arterial Enlargement and Acute Exacerbations of COPD. New England Journal of Medicine, 367(10), 913-921).
            For more information refer to: <a href='http://www.nejm.org/doi/full/10.1056/NEJMoa1203830'>http://www.nejm.org/doi/full/10.1056/NEJMoa1203830</a>"""
self.parent.acknowledgementText = SlicerUtil.ACIL_AcknowledgementText
#
# CIP_PAARatioWidget
#
class CIP_PAARatioWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
@property
def currentVolumeId(self):
return self.volumeSelector.currentNodeID
def __init__(self, parent):
ScriptedLoadableModuleWidget.__init__(self, parent)
self.moduleName = "CIP_PAARatio"
from functools import partial
def __onNodeAddedObserver__(self, caller, eventId, callData):
"""Node added to the Slicer scene"""
if callData.GetClassName() == 'vtkMRMLScalarVolumeNode' \
and slicer.util.mainWindow().moduleSelector().selectedModule == self.moduleName: # Current module visible
self.volumeSelector.setCurrentNode(callData)
SlicerUtil.changeContrastWindow(350, 40)
self.__onNodeAddedObserver__ = partial(__onNodeAddedObserver__, self)
self.__onNodeAddedObserver__.CallDataType = vtk.VTK_OBJECT
def setup(self):
"""This is called one time when the module GUI is initialized
"""
ScriptedLoadableModuleWidget.setup(self)
# Create objects that can be used anywhere in the module. Example: in most cases there should be just one
# object of the logic class
self.logic = CIP_PAARatioLogic()
#
# Create all the widgets. Example Area
mainAreaCollapsibleButton = ctk.ctkCollapsibleButton()
mainAreaCollapsibleButton.text = "Main parameters"
self.layout.addWidget(mainAreaCollapsibleButton)
self.mainAreaLayout = qt.QGridLayout(mainAreaCollapsibleButton)
self.label = qt.QLabel("Select the volume")
self.label.setStyleSheet("margin:10px 0 20px 7px")
self.mainAreaLayout.addWidget(self.label, 0, 0)
self.volumeSelector = slicer.qMRMLNodeComboBox()
self.volumeSelector.nodeTypes = ( "vtkMRMLScalarVolumeNode", "" )
self.volumeSelector.name = "paa_volumeSelector"
self.volumeSelector.selectNodeUponCreation = True
self.volumeSelector.autoFillBackground = True
self.volumeSelector.addEnabled = True
self.volumeSelector.noneEnabled = False
self.volumeSelector.removeEnabled = False
self.volumeSelector.showHidden = False
self.volumeSelector.showChildNodeTypes = False
self.volumeSelector.setMRMLScene( slicer.mrmlScene )
self.volumeSelector.setStyleSheet("margin:0px 0 0px 0; padding:2px 0 2px 5px")
self.mainAreaLayout.addWidget(self.volumeSelector, 0, 1)
self.jumptToTemptativeSliceButton = ctk.ctkPushButton()
self.jumptToTemptativeSliceButton.name = "jumptToTemptativeSliceButton"
        self.jumptToTemptativeSliceButton.text = "Jump to tentative slice"
self.jumptToTemptativeSliceButton.toolTip = "Jump to the best estimated slice to place the rulers"
self.jumptToTemptativeSliceButton.setIcon(qt.QIcon("{0}/ruler.png".format(SlicerUtil.CIP_ICON_DIR)))
self.jumptToTemptativeSliceButton.setIconSize(qt.QSize(20, 20))
self.jumptToTemptativeSliceButton.setStyleSheet("font-weight: bold;")
# self.jumptToTemptativeSliceButton.setFixedWidth(140)
self.mainAreaLayout.addWidget(self.jumptToTemptativeSliceButton, 1, 1)
### Structure Selector
self.structuresGroupbox = qt.QGroupBox("Select the structure")
self.groupboxLayout = qt.QVBoxLayout()
self.structuresGroupbox.setLayout(self.groupboxLayout)
self.mainAreaLayout.addWidget(self.structuresGroupbox, 2, 0)
self.structuresButtonGroup=qt.QButtonGroup()
# btn = qt.QRadioButton("None")
# btn.visible = False
# self.structuresButtonGroup.addButton(btn)
# self.groupboxLayout.addWidget(btn)
btn = qt.QRadioButton("Both")
btn.name = "paaButton"
btn.checked = True
self.structuresButtonGroup.addButton(btn, 0)
self.groupboxLayout.addWidget(btn)
btn = qt.QRadioButton("Pulmonary Arterial")
btn.name = "paRadioButton"
self.structuresButtonGroup.addButton(btn, 1)
self.groupboxLayout.addWidget(btn)
btn = qt.QRadioButton("Aorta")
btn.name = "aortaRadioButton"
self.structuresButtonGroup.addButton(btn, 2)
self.groupboxLayout.addWidget(btn)
### Buttons toolbox
self.buttonsToolboxFrame = qt.QFrame()
self.buttonsToolboxLayout = qt.QGridLayout()
self.buttonsToolboxFrame.setLayout(self.buttonsToolboxLayout)
self.mainAreaLayout.addWidget(self.buttonsToolboxFrame, 2, 1)
self.placeRulersButton = ctk.ctkPushButton()
self.placeRulersButton.text = "Place ruler/s"
self.placeRulersButton.name = "placeRulersButton"
self.placeRulersButton.toolTip = "Place the ruler/s for the selected structure/s in the current slice"
self.placeRulersButton.setIcon(qt.QIcon("{0}/ruler.png".format(SlicerUtil.CIP_ICON_DIR)))
self.placeRulersButton.setIconSize(qt.QSize(20,20))
self.placeRulersButton.setFixedWidth(105)
self.placeRulersButton.setStyleSheet("font-weight:bold")
self.buttonsToolboxLayout.addWidget(self.placeRulersButton, 0, 0)
self.moveUpButton = ctk.ctkPushButton()
self.moveUpButton.text = "Move up"
self.moveUpButton.toolTip = "Move the selected ruler/s one slice up"
self.moveUpButton.setIcon(qt.QIcon("{0}/move_up.png".format(SlicerUtil.CIP_ICON_DIR)))
self.moveUpButton.setIconSize(qt.QSize(20,20))
self.moveUpButton.setFixedWidth(95)
self.buttonsToolboxLayout.addWidget(self.moveUpButton, 0, 1)
self.moveDownButton = ctk.ctkPushButton()
self.moveDownButton.text = "Move down"
self.moveDownButton.toolTip = "Move the selected ruler/s one slice down"
self.moveDownButton.setIcon(qt.QIcon("{0}/move_down.png".format(SlicerUtil.CIP_ICON_DIR)))
self.moveDownButton.setIconSize(qt.QSize(20,20))
self.moveDownButton.setFixedWidth(95)
self.buttonsToolboxLayout.addWidget(self.moveDownButton, 0, 2)
self.removeButton = ctk.ctkPushButton()
self.removeButton.text = "Remove ALL rulers"
self.removeButton.toolTip = "Remove all the rulers for this volume"
self.removeButton.setIcon(qt.QIcon("{0}/delete.png".format(SlicerUtil.CIP_ICON_DIR)))
self.removeButton.setIconSize(qt.QSize(20,20))
self.buttonsToolboxLayout.addWidget(self.removeButton, 1, 1, 1, 2, 2)
### Textboxes
self.textboxesFrame = qt.QFrame()
self.textboxesLayout = qt.QFormLayout()
self.textboxesFrame.setLayout(self.textboxesLayout)
self.textboxesFrame.setFixedWidth(190)
self.mainAreaLayout.addWidget(self.textboxesFrame, 3, 0)
self.paTextBox = qt.QLineEdit()
self.paTextBox.setReadOnly(True)
self.textboxesLayout.addRow("PA (mm): ", self.paTextBox)
self.aortaTextBox = qt.QLineEdit()
self.aortaTextBox.setReadOnly(True)
self.textboxesLayout.addRow("Aorta (mm): ", self.aortaTextBox)
self.ratioTextBox = qt.QLineEdit()
self.ratioTextBox.name = "ratioTextBox"
self.ratioTextBox.setReadOnly(True)
self.textboxesLayout.addRow("Ratio PA/A: ", self.ratioTextBox)
# Save case data
self.reportsCollapsibleButton = ctk.ctkCollapsibleButton()
self.reportsCollapsibleButton.text = "Reporting"
self.layout.addWidget(self.reportsCollapsibleButton)
self.reportsLayout = qt.QHBoxLayout(self.reportsCollapsibleButton)
self.storedColumnNames = ["caseId", "paDiameterMm", "aortaDiameterMm",
"pa1r", "pa1a", "pa1s", "pa2r", "pa2a", "pa2s",
"a1r", "a1a", "a1s", "a2r", "a2a", "a2s"]
columns = CaseReportsWidget.getColumnKeysNormalizedDictionary(self.storedColumnNames)
self.reportsWidget = CaseReportsWidget(self.moduleName, columns, parentWidget=self.reportsCollapsibleButton)
self.reportsWidget.setup()
# Init state
self.resetModuleState()
self.preventSavingState = False
self.saveStateBeforeEnteringModule()
self.preventSavingState = True
self.switchToRedView()
#####
# Case navigator
if SlicerUtil.isSlicerACILLoaded():
caseNavigatorAreaCollapsibleButton = ctk.ctkCollapsibleButton()
caseNavigatorAreaCollapsibleButton.text = "Case navigator"
self.layout.addWidget(caseNavigatorAreaCollapsibleButton, 0x0020)
# caseNavigatorLayout = qt.QVBoxLayout(caseNavigatorAreaCollapsibleButton)
# Add a case list navigator
from ACIL.ui import CaseNavigatorWidget
self.caseNavigatorWidget = CaseNavigatorWidget(self.moduleName, caseNavigatorAreaCollapsibleButton)
self.caseNavigatorWidget.setup()
self.layout.addStretch()
# Connections
self.observers = []
self.volumeSelector.connect('currentNodeChanged(vtkMRMLNode*)', self.onVolumeSelectorChanged)
self.jumptToTemptativeSliceButton.connect('clicked()', self.onJumpToTemptativeSliceButtonClicked)
self.placeRulersButton.connect('clicked()', self.onPlaceRulersClicked)
self.moveUpButton.connect('clicked()', self.onMoveUpRulerClicked)
self.moveDownButton.connect('clicked()', self.onMoveDownRulerClicked)
self.removeButton.connect('clicked()', self.onRemoveRulerClicked)
self.reportsWidget.addObservable(self.reportsWidget.EVENT_SAVE_BUTTON_CLICKED, self.onSaveReport)
def enter(self):
"""This is invoked every time that we select this module as the active module in Slicer (not only the first time)"""
# activeVolumeId = SlicerUtil.getActiveVolumeIdInRedSlice()
# if activeVolumeId is not None:
# self.volumeSelector.setCurrentNodeID(activeVolumeId)
# if activeVolumeId not in self.logic.currentVolumesLoaded:
# self.placeDefaultRulers(activeVolumeId)
# Save state
self.saveStateBeforeEnteringModule()
# Start listening again to scene events
self.__addSceneObservables__()
volumeId = self.volumeSelector.currentNodeID
if volumeId:
SlicerUtil.displayBackgroundVolume(volumeId)
# Show the current rulers (if existing)
self.logic.rulersVisible(volumeId, visible=True)
# This module always works in Axial
SlicerUtil.changeLayoutToAxial()
self.changeToDefaultContrastLevel()
def exit(self):
"""This is invoked every time that we switch to another module (not only when Slicer is closed)."""
# Stop listening to Scene events
self.__removeSceneObservables()
# Hide rulers
if self.currentVolumeId:
self.logic.rulersVisible(self.currentVolumeId, False)
# Load previous state
self.restoreStateBeforeExitingModule()
def cleanup(self):
"""This is invoked as a destructor of the GUI when the module is no longer going to be used"""
self.__removeSceneObservables()
self.reportsWidget.cleanup()
self.reportsWidget = None
def saveStateBeforeEnteringModule(self):
"""Save the state of the module regarding labelmap, etc. This state will be saved/loaded when
exiting/entering the module
"""
if self.preventSavingState:
# Avoid that the first time that the module loads, the state is saved twice
self.preventSavingState = False
return
# Save existing layout
self.savedLayout = None
if slicer.app.layoutManager() is not None:
self.savedLayout = slicer.app.layoutManager().layout
# Get the active volume (it it exists)
activeVolumeId = SlicerUtil.getFirstActiveVolumeId()
if activeVolumeId is None:
# Reset state
self.resetModuleState()
else:
# There is a Volume loaded. Save state
try:
self.savedVolumeID = activeVolumeId
displayNode = SlicerUtil.getNode(activeVolumeId).GetDisplayNode()
self.savedContrastLevel = (displayNode.GetWindow(), displayNode.GetLevel())
# activeLabelmapId = SlicerUtil.getFirstActiveLabelmapId()
# self.savedLabelmapID = activeLabelmapId
# if activeLabelmapId is None:
# self.savedLabelmapOpacity = None
# else:
# self.savedLabelmapOpacity = SlicerUtil.getLabelmapOpacity()
# # Hide any labelmap
# SlicerUtil.displayLabelmapVolume(None)
except:
Util.print_last_exception()
# Not action really needed
pass
def restoreStateBeforeExitingModule(self):
"""Load the last state of the module when the user exited (labelmap, opacity, contrast window, etc.)
"""
try:
if self.savedVolumeID:
# There is a previously saved valid state.
SlicerUtil.setActiveVolumeIds(self.savedVolumeID)
SlicerUtil.changeContrastWindow(self.savedContrastLevel[0], self.savedContrastLevel[1])
# if self.savedLabelmapID:
# print "Restoring active labelmap: " + self.savedLabelmapID
# # There was a valid labelmap. Restore it
# SlicerUtil.displayLabelmapVolume(self.savedLabelmapID)
# # Restore previous opacity
# SlicerUtil.changeLabelmapOpacity(self.savedLabelmapOpacity)
# else:
# # Hide labelmap
# print "No labelmap saved. Hide all"
# SlicerUtil.displayLabelmapVolume(None)
# else:
# # Hide labelmap
# print "No volume saved. Hide labelmap"
# SlicerUtil.displayLabelmapVolume(None)
# Restore layout
SlicerUtil.changeLayout(self.savedLayout)
except:
Util.print_last_exception()
pass
def resetModuleState(self):
""" Reset all the module state variables
"""
self.savedVolumeID = None # Active grayscale volume ID
self.savedLabelmapID = None # Active labelmap node ID
self.savedLabelmapOpacity = None # Labelmap opacity
self.savedContrastLevel = (None, None) # Contrast window/level that the user had when entering the module
SlicerUtil.changeContrastWindow(350, 40)
def changeToDefaultContrastLevel(self):
# Preferred contrast
SlicerUtil.changeContrastWindow(1000, 200)
def jumpToTemptativeSlice(self, volumeId):
""" Jump the red window to a predefined slice based on the size of the volume
:param volumeId:
"""
# Get the default coordinates of the ruler
aorta1, aorta2, pa1, pa2 = self.logic.getDefaultCoords(volumeId)
# Set the display in the right slice
self.moveRedWindowToSlice(aorta1[2])
redSliceNode = slicer.util.getFirstNodeByClassByName("vtkMRMLSliceNode", "Red")
factor = 0.5
newFOVx = redSliceNode.GetFieldOfView()[0] * factor
newFOVy = redSliceNode.GetFieldOfView()[1] * factor
newFOVz = redSliceNode.GetFieldOfView()[2]
# Move the camera up to fix the view
redSliceNode.SetXYZOrigin(0, 50, 0)
# Update the FOV (zoom in)
redSliceNode.SetFieldOfView(newFOVx, newFOVy, newFOVz)
# Refresh the data in the viewer
redSliceNode.UpdateMatrices()
def placeDefaultRulers(self, volumeId):
""" Set the Aorta and PA rulers to a default estimated position and jump to that slice
:param volumeId:
"""
if not volumeId:
return
# Hide all the actual ruler nodes
self.logic.hideAllRulers()
# Remove the current rulers for this volume
self.logic.removeRulers(volumeId)
# Create the default rulers
self.logic.createDefaultRulers(volumeId, self.onRulerUpdated)
# Activate both structures
self.structuresButtonGroup.buttons()[0].setChecked(True)
# Jump to the slice where the rulers are
self.jumpToTemptativeSlice(volumeId)
# Place the rulers in the current slice
self.placeRuler()
# Add the current volume to the list of loaded volumes
#self.logic.currentVolumesLoaded.add(volumeId)
# Modify the zoom of the Red slice
redSliceNode = slicer.util.getFirstNodeByClassByName("vtkMRMLSliceNode", "Red")
factor = 0.5
newFOVx = redSliceNode.GetFieldOfView()[0] * factor
newFOVy = redSliceNode.GetFieldOfView()[1] * factor
newFOVz = redSliceNode.GetFieldOfView()[2]
redSliceNode.SetFieldOfView( newFOVx, newFOVy, newFOVz )
# Move the camera up to fix the view
redSliceNode.SetXYZOrigin(0, 50, 0)
# Refresh the data in the viewer
redSliceNode.UpdateMatrices()
def placeRuler(self):
""" Place one or the two rulers in the current visible slice in Red node
"""
volumeId = self.volumeSelector.currentNodeID
if volumeId == '':
self.showUnselectedVolumeWarningMessage()
return
selectedStructure = self.getCurrentSelectedStructure()
if selectedStructure == self.logic.NONE:
qt.QMessageBox.warning(slicer.util.mainWindow(), 'Review structure',
'Please select Pulmonary Arterial, Aorta or both to place the right ruler/s')
return
# Get the current slice
currentSlice = self.getCurrentRedWindowSlice()
if selectedStructure == self.logic.BOTH:
structures = [self.logic.PA, self.logic.AORTA]
else:
structures = [selectedStructure]
for structure in structures:
self.logic.placeRulerInSlice(volumeId, structure, currentSlice, self.onRulerUpdated)
self.refreshTextboxes()
def getCurrentSelectedStructure(self):
""" Get the current selected structure id
:return: self.logic.AORTA or self.logic.PA
"""
selectedStructureText = self.structuresButtonGroup.checkedButton().text
if selectedStructureText == "Aorta": return self.logic.AORTA
elif selectedStructureText == "Pulmonary Arterial": return self.logic.PA
elif selectedStructureText == "Both": return self.logic.BOTH
return self.logic.NONE
def stepSlice(self, offset):
""" Move the selected structure one slice up or down
:param offset: +1 or -1
:return:
"""
volumeId = self.volumeSelector.currentNodeID
if volumeId == '':
self.showUnselectedVolumeWarningMessage()
return
selectedStructure = self.getCurrentSelectedStructure()
if selectedStructure == self.logic.NONE:
self.showUnselectedStructureWarningMessage()
return
if selectedStructure == self.logic.BOTH:
# Move both rulers
self.logic.stepSlice(volumeId, self.logic.AORTA, offset)
newSlice = self.logic.stepSlice(volumeId, self.logic.PA, offset)
else:
newSlice = self.logic.stepSlice(volumeId, selectedStructure, offset)
self.moveRedWindowToSlice(newSlice)
def removeRulers(self):
""" Remove all the rulers related to the current volume node
:return:
"""
self.logic.removeRulers(self.volumeSelector.currentNodeID)
self.refreshTextboxes(reset=True)
def getCurrentRedWindowSlice(self):
""" Get the current slice (in RAS) of the Red window
:return:
"""
redNodeSliceNode = slicer.app.layoutManager().sliceWidget('Red').sliceLogic().GetSliceNode()
return redNodeSliceNode.GetSliceOffset()
def moveRedWindowToSlice(self, newSlice):
""" Moves the red display to the specified RAS slice
:param newSlice: slice to jump (RAS format)
:return:
"""
redNodeSliceNode = slicer.app.layoutManager().sliceWidget('Red').sliceLogic().GetSliceNode()
redNodeSliceNode.JumpSlice(0,0,newSlice)
def refreshTextboxes(self, reset=False):
""" Update the information of the textboxes that give information about the measurements
"""
self.aortaTextBox.setText("0")
self.paTextBox.setText("0")
self.ratioTextBox.setText("0")
self.ratioTextBox.setStyleSheet(" QLineEdit { background-color: white; color: black}");
volumeId = self.volumeSelector.currentNodeID
# if volumeId not in self.logic.currentVolumesLoaded:
# return
if volumeId:
self.logic.changeActiveRulersColor(volumeId, self.logic.defaultColor)
aorta = None
pa = None
if not reset:
rulerAorta, newAorta = self.logic.getRulerNodeForVolumeAndStructure(self.volumeSelector.currentNodeID,
self.logic.AORTA, createIfNotExist=False)
rulerPA, newPA = self.logic.getRulerNodeForVolumeAndStructure(self.volumeSelector.currentNodeID,
self.logic.PA, createIfNotExist=False)
if rulerAorta:
aorta = rulerAorta.GetDistanceMeasurement()
self.aortaTextBox.setText(str(aorta))
if rulerPA:
pa = rulerPA.GetDistanceMeasurement()
self.paTextBox.setText(str(pa))
if pa is not None and aorta is not None and aorta != 0:
try:
ratio = pa / aorta
self.ratioTextBox.setText(str(ratio))
if ratio > 1.0:
# Switch colors ("alarm")
st = " QLineEdit {{ background-color: rgb({0}, {1}, {2}); color: white }}". \
format(int(self.logic.defaultWarningColor[0]*255),
int(self.logic.defaultWarningColor[1]*255),
int(self.logic.defaultWarningColor[2]*255))
self.ratioTextBox.setStyleSheet(st)
self.logic.changeActiveRulersColor(volumeId, self.logic.defaultWarningColor)
except Exception:
Util.print_last_exception()
def showUnselectedVolumeWarningMessage(self):
qt.QMessageBox.warning(slicer.util.mainWindow(), 'Select a volume',
'Please select a volume')
def showUnselectedStructureWarningMessage(self):
qt.QMessageBox.warning(slicer.util.mainWindow(), 'Review structure',
'Please select Aorta, Pulmonary Arterial or Both to place the right ruler/s')
def switchToRedView(self):
""" Switch the layout to Red slice only
:return:
"""
layoutManager = slicer.app.layoutManager()
        # Check that the layout manager is not None; the module may be
        # initialized without a main window (e.g. in automated tests)
if layoutManager is not None:
layoutManager.setLayout(6)
def __addSceneObservables__(self):
self.observers.append(slicer.mrmlScene.AddObserver(slicer.vtkMRMLScene.NodeAddedEvent, self.__onNodeAddedObserver__))
self.observers.append(slicer.mrmlScene.AddObserver(slicer.vtkMRMLScene.EndCloseEvent, self.__onSceneClosed__))
    def __removeSceneObservables(self):
        for observer in self.observers:
            slicer.mrmlScene.RemoveObserver(observer)
        # Clear the list afterwards; removing items while iterating would
        # skip every other observer
        self.observers = []
#########
# EVENTS
def onVolumeSelectorChanged(self, node):
#if node is not None and node.GetID() not in self.currentVolumesLoaded:
# if node is not None:
# # New node. Load default rulers
# if node.GetID() not in self.logic.currentVolumesLoaded:
# self.placeDefaultRulers(node.GetID())
logging.info("Volume selector node changed: {0}".format(
'(None)' if node is None else node.GetName()
))
# Preferred contrast (TODO: set right level)
SlicerUtil.changeContrastWindow(1144, 447)
self.refreshTextboxes()
def onStructureClicked(self, button):
fiducialsNode = self.getFiducialsNode(self.volumeSelector.currentNodeID)
if fiducialsNode is not None:
self.__addRuler__(button.text, self.volumeSelector.currentNodeID)
markupsLogic = slicer.modules.markups.logic()
markupsLogic.SetActiveListID(fiducialsNode)
applicationLogic = slicer.app.applicationLogic()
selectionNode = applicationLogic.GetSelectionNode()
selectionNode.SetReferenceActivePlaceNodeClassName("vtkMRMLAnnotationRulerNode")
interactionNode = applicationLogic.GetInteractionNode()
interactionNode.SwitchToSinglePlaceMode()
def onJumpToTemptativeSliceButtonClicked(self):
volumeId = self.volumeSelector.currentNodeID
if volumeId == '':
self.showUnselectedVolumeWarningMessage()
return
#self.placeDefaultRulers(volumeId)
self.jumpToTemptativeSlice(volumeId)
def onRulerUpdated(self, node, event):
self.refreshTextboxes()
def onPlaceRulersClicked(self):
self.placeRuler()
def onMoveUpRulerClicked(self):
self.stepSlice(1)
def onMoveDownRulerClicked(self):
self.stepSlice(-1)
def onRemoveRulerClicked(self):
if (qt.QMessageBox.question(slicer.util.mainWindow(), 'Remove rulers',
'Are you sure you want to remove all the rulers from this volume?',
qt.QMessageBox.Yes|qt.QMessageBox.No)) == qt.QMessageBox.Yes:
self.logic.removeRulers(self.volumeSelector.currentNodeID)
self.refreshTextboxes()
def onSaveReport(self):
""" Save the current values in a persistent csv file
:return:
"""
volumeId = self.volumeSelector.currentNodeID
if volumeId:
caseName = slicer.mrmlScene.GetNodeByID(volumeId).GetName()
coords = [0, 0, 0, 0]
pa1 = pa2 = a1 = a2 = None
# PA
rulerNode, newNode = self.logic.getRulerNodeForVolumeAndStructure(volumeId, self.logic.PA, createIfNotExist=False)
if rulerNode:
# Get current RAS coords
rulerNode.GetPositionWorldCoordinates1(coords)
pa1 = list(coords)
rulerNode.GetPositionWorldCoordinates2(coords)
pa2 = list(coords)
# AORTA
rulerNode, newNode = self.logic.getRulerNodeForVolumeAndStructure(volumeId, self.logic.AORTA, createIfNotExist=False)
if rulerNode:
rulerNode.GetPositionWorldCoordinates1(coords)
a1 = list(coords)
rulerNode.GetPositionWorldCoordinates2(coords)
a2 = list(coords)
self.reportsWidget.insertRow(
caseId=caseName,
paDiameterMm=self.paTextBox.text,
aortaDiameterMm=self.aortaTextBox.text,
pa1r = pa1[0] if pa1 is not None else '',
pa1a = pa1[1] if pa1 is not None else '',
pa1s = pa1[2] if pa1 is not None else '',
pa2r = pa2[0] if pa2 is not None else '',
pa2a = pa2[1] if pa2 is not None else '',
pa2s = pa2[2] if pa2 is not None else '',
a1r = a1[0] if a1 is not None else '',
a1a = a1[1] if a1 is not None else '',
a1s = a1[2] if a1 is not None else '',
a2r = a2[0] if a2 is not None else '',
a2a = a2[1] if a2 is not None else '',
a2s = a2[2] if a2 is not None else ''
)
qt.QMessageBox.information(slicer.util.mainWindow(), 'Data saved', 'The data were saved successfully')
def __onSceneClosed__(self, arg1, arg2):
""" Scene closed. Reset currently loaded volumes
:param arg1:
:param arg2:
:return:
"""
#self.logic.currentVolumesLoaded.clear()
self.logic.currentActiveVolumeId = None
# CIP_PAARatioLogic
#
class CIP_PAARatioLogic(ScriptedLoadableModuleLogic):
"""This class should implement all the actual
computation done by your module. The interface
should be such that other python code can import
this class and make use of the functionality without
requiring an instance of the Widget.
Uses ScriptedLoadableModuleLogic base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
NONE = 0
AORTA = 1
PA = 2
BOTH = 3
SLICEFACTOR = 0.6
# Default XY coordinates for Aorta and PA (the Z will be estimated depending on the number of slices)
defaultAorta1 = [220, 170, 0]
defaultAorta2 = [275, 175, 0]
defaultPA1 = [280, 175, 0]
defaultPA2 = [320, 190, 0]
defaultColor = [0.5, 0.5, 1.0]
defaultWarningColor = [1.0, 0.0, 0.0]
def __init__(self):
self.currentActiveVolumeId = None
# self.currentVolumesLoaded = set()
def getRootAnnotationsNode(self):
""" Get the root annotations node global to the scene, creating it if necessary
:return: "All Annotations" vtkMRMLAnnotationHierarchyNode
"""
return SlicerUtil.getRootAnnotationsNode()
def getRulersListNode(self, volumeId, createIfNotExist=True):
""" Get the rulers node for this volume, creating it if it doesn't exist yet
:param volumeId:
:return: "volumeId_paaRulersNode" vtkMRMLAnnotationHierarchyNode
"""
# Search for the current volume hierarchy node (each volume has its own hierarchy)
nodeName = volumeId + '_paaRulersNode'
rulersNode = SlicerUtil.getNode(nodeName)
if rulersNode is None and createIfNotExist:
# Create the node
annotationsLogic = slicer.modules.annotations.logic()
rootHierarchyNode = self.getRootAnnotationsNode()
annotationsLogic.SetActiveHierarchyNodeID(rootHierarchyNode.GetID())
annotationsLogic.AddHierarchy()
n = rootHierarchyNode.GetNumberOfChildrenNodes()
rulersNode = rootHierarchyNode.GetNthChildNode(n-1)
# Rename the node
rulersNode.SetName(nodeName)
logging.debug("Created node " + nodeName + " (general rulers node for this volume")
# Return the node
return rulersNode
def getRulerNodeForVolumeAndStructure(self, volumeId, structureId, createIfNotExist=True, callbackWhenRulerModified=None):
""" Search for the right ruler node to be created based on the volume and the selected
structure (Aorta or PA).
It also creates the necessary node hierarchy if it doesn't exist.
:param volumeId:
:param structureId: Aorta (1), PA (2)
:param createIfNotExist: create the ruler node if it doesn't exist yet
:param callbackWhenRulerModified: function to call when the ruler node is modified
:return: node and a boolean indicating if the node has been created now
"""
isNewNode = False
        if structureId == self.NONE:  # no structure selected
return None, isNewNode
if structureId == self.AORTA: # Aorta
#nodeName = volumeId + '_paaRulers_aorta'
nodeName = "A"
elif structureId == self.PA: # 'Pulmonary Arterial':
# nodeName = volumeId + '_paaRulers_pa'
nodeName = "PA"
# Get the node that contains all the rulers for this volume
rulersListNode = self.getRulersListNode(volumeId, createIfNotExist=createIfNotExist)
node = None
if rulersListNode:
# Search for the node
for i in range(rulersListNode.GetNumberOfChildrenNodes()):
nodeWrapper = rulersListNode.GetNthChildNode(i)
# nodeWrapper is also a HierarchyNode. We need to look for its only child that will be the rulerNode
col = vtk.vtkCollection()
nodeWrapper.GetChildrenDisplayableNodes(col)
rulerNode = col.GetItemAsObject(0)
if rulerNode.GetName() == nodeName:
node = rulerNode
break
if node is None and createIfNotExist:
# Create the node
# Set the active node, so that the new ruler is a child node
annotationsLogic = slicer.modules.annotations.logic()
annotationsLogic.SetActiveHierarchyNodeID(rulersListNode.GetID())
node = slicer.mrmlScene.CreateNodeByClass('vtkMRMLAnnotationRulerNode')
node.SetName(nodeName)
self.__changeColor__(node, self.defaultColor)
slicer.mrmlScene.AddNode(node)
isNewNode = True
            if callbackWhenRulerModified is not None:
                node.AddObserver(vtk.vtkCommand.ModifiedEvent, callbackWhenRulerModified)
logging.debug("Created node " + nodeName + " for volume " + volumeId)
return node, isNewNode
def hideAllRulers(self):
"""
Hide all the current rulers in the scene
:return:
"""
nodes = slicer.mrmlScene.GetNodesByClass("vtkMRMLAnnotationRulerNode")
for i in range(nodes.GetNumberOfItems()):
nodes.GetItemAsObject(i).SetDisplayVisibility(False)
def rulersVisible(self, volumeId, visible):
""" Show or hide all the ruler nodes
"""
if volumeId is not None:
rulersListNode = self.getRulersListNode(volumeId, False)
if rulersListNode:
for i in range(rulersListNode.GetNumberOfChildrenNodes()):
nodeWrapper = rulersListNode.GetNthChildNode(i)
# nodeWrapper is also a HierarchyNode. We need to look for its only child that will be the rulerNode
col = vtk.vtkCollection()
nodeWrapper.GetChildrenDisplayableNodes(col)
rulerNode = col.GetItemAsObject(0)
rulerNode.SetDisplayVisibility(visible)
def __changeColor__(self, node, color):
for i in range(3):
n = node.GetNthDisplayNode(i)
if n:
n.SetColor(color)
layoutManager = slicer.app.layoutManager()
        # Check that the layout manager is not None; the module may be
        # initialized without a main window (e.g. in automated tests)
if layoutManager is not None:
# Refresh UI to repaint both rulers. Is this the best way? Who knows...
layoutManager.sliceWidget("Red").sliceView().mrmlSliceNode().Modified()
def changeActiveRulersColor(self, volumeId, color):
""" Change the color for all the rulers in this volume
:param volumeId:
:param color:
:return:
"""
for structureId in [self.PA, self.AORTA]:
node, new = self.getRulerNodeForVolumeAndStructure(volumeId, structureId, createIfNotExist=False)
if node:
self.__changeColor__(node, color)
def createDefaultRulers(self, volumeId, callbackWhenRulerModified):
""" Set the Aorta and PA rulers to their default position.
The X and Y will be configured in "defaultAorta1, defaultAorta2, defaultPA1, defaultPA2" properties
The Z will be estimated based on the number of slices of the volume
:param volumeId: volume id
:param callbackWhenRulerModified: function to invoke when the ruler is modified
:return: a tuple of 4 vales. For each node, return the node and a boolean indicating if the node was
created now
"""
aorta1, aorta2, pa1, pa2 = self.getDefaultCoords(volumeId)
rulerNodeAorta, newNodeAorta = self.getRulerNodeForVolumeAndStructure(volumeId, self.AORTA,
createIfNotExist=True, callbackWhenRulerModified=callbackWhenRulerModified)
rulerNodeAorta.SetPositionWorldCoordinates1(aorta1)
rulerNodeAorta.SetPositionWorldCoordinates2(aorta2)
rulerNodePA, newNodePA = self.getRulerNodeForVolumeAndStructure(volumeId, self.PA,
createIfNotExist=True, callbackWhenRulerModified=callbackWhenRulerModified)
rulerNodePA.SetPositionWorldCoordinates1(pa1)
rulerNodePA.SetPositionWorldCoordinates2(pa2)
return rulerNodeAorta, newNodeAorta, rulerNodePA, newNodePA
def stepSlice(self, volumeId, structureId, sliceStep):
""" Move the selected ruler up or down one slice.
:param volumeId:
:param structureId:
:param sliceStep: +1 or -1
        :return: new slice in RAS format, or False if the ruler has not been created yet
"""
# Calculate the RAS coords of the slice where we should jump to
rulerNode, newNode = self.getRulerNodeForVolumeAndStructure(volumeId, structureId, createIfNotExist=False)
if not rulerNode:
            # The ruler has not been created yet, so there is nothing to move
return False
coords = [0, 0, 0, 0]
# Get current RAS coords
rulerNode.GetPositionWorldCoordinates1(coords)
# Get the transformation matrixes
rastoijk=vtk.vtkMatrix4x4()
ijktoras=vtk.vtkMatrix4x4()
scalarVolumeNode = slicer.mrmlScene.GetNodeByID(volumeId)
scalarVolumeNode.GetRASToIJKMatrix(rastoijk)
scalarVolumeNode.GetIJKToRASMatrix(ijktoras)
# Get the current slice (Z). It will be the same in both positions
ijkCoords = list(rastoijk.MultiplyPoint(coords))
# Add/substract the offset to Z
ijkCoords[2] += sliceStep
# Convert back to RAS, just replacing the Z
newSlice = ijktoras.MultiplyPoint(ijkCoords)[2]
self._placeRulerInSlice_(rulerNode, structureId, volumeId, newSlice)
return newSlice
def placeRulerInSlice(self, volumeId, structureId, newSlice, callbackWhenUpdated=None):
""" Move the ruler to the specified slice (in RAS format)
:param volumeId:
:param structureId:
:param newSlice: slice in RAS format
:return: tuple with ruler node and a boolean indicating if the node was just created
"""
# Get the correct ruler
rulerNode, newNode = self.getRulerNodeForVolumeAndStructure(volumeId, structureId,
createIfNotExist=True, callbackWhenRulerModified=callbackWhenUpdated)
# Add the volume to the list of volumes that have some ruler
# self.currentVolumesLoaded.add(volumeId)
# Move the ruler
self._placeRulerInSlice_(rulerNode, structureId, volumeId, newSlice)
#return rulerNode, newNode
def _placeRulerInSlice_(self, rulerNode, structureId, volumeId, newSlice):
""" Move the ruler to the specified slice (in RAS format)
:param rulerNode: node of type vtkMRMLAnnotationRulerNode
:param newSlice: slice in RAS format
        :return: True if the operation was successful
"""
coords1 = [0, 0, 0, 0]
coords2 = [0, 0, 0, 0]
# Get RAS coords of the ruler node
rulerNode.GetPositionWorldCoordinates1(coords1)
rulerNode.GetPositionWorldCoordinates2(coords2)
# Set the slice of the coordinate
coords1[2] = coords2[2] = newSlice
if coords1[0] == 0 and coords1[1] == 0:
# New node, get default coordinates depending on the structure
defaultCoords = self.getDefaultCoords(volumeId)
if structureId == self.AORTA:
coords1[0] = defaultCoords[0][0]
coords1[1] = defaultCoords[0][1]
coords2[0] = defaultCoords[1][0]
coords2[1] = defaultCoords[1][1]
elif structureId == self.PA:
coords1[0] = defaultCoords[2][0]
coords1[1] = defaultCoords[2][1]
coords2[0] = defaultCoords[3][0]
coords2[1] = defaultCoords[3][1]
rulerNode.SetPositionWorldCoordinates1(coords1)
rulerNode.SetPositionWorldCoordinates2(coords2)
def getDefaultCoords(self, volumeId):
""" Get the default coords for aorta and PA in this volume (RAS format)
:param volumeId:
:return: (aorta1, aorta2, pa1, pa2). All of them lists of 3 positions in RAS format
"""
volume = slicer.mrmlScene.GetNodeByID(volumeId)
rasBounds = [0,0,0,0,0,0]
volume.GetRASBounds(rasBounds)
# Get the slice (Z)
ijk = self.RAStoIJK(volume, [0, 0, rasBounds[5]])
        sliceZ = int(ijk[2] * self.SLICEFACTOR)  # empirical estimate
        # Get the default coords, converting from IJK to RAS
        aorta1 = list(self.defaultAorta1)
        aorta1[2] = sliceZ
        aorta1 = self.IJKtoRAS(volume, aorta1)
        aorta2 = list(self.defaultAorta2)
        aorta2[2] = sliceZ
        aorta2 = self.IJKtoRAS(volume, aorta2)
        pa1 = list(self.defaultPA1)
        pa1[2] = sliceZ
        pa1 = self.IJKtoRAS(volume, pa1)
        pa2 = list(self.defaultPA2)
        pa2[2] = sliceZ
        pa2 = self.IJKtoRAS(volume, pa2)
return aorta1, aorta2, pa1, pa2
def removeRulers(self, volumeId):
""" Remove all the rulers for the selected volume
:param volumeId:
"""
#rulerNode, newNode = self.getRulerNodeForVolumeAndStructure(volumeId, structureId)
rulersListNode = self.getRulersListNode(volumeId, createIfNotExist=False)
if rulersListNode:
rulersListNode.RemoveAllChildrenNodes()
slicer.mrmlScene.RemoveNode(rulersListNode)
    def RAStoIJK(self, volumeNode, rasCoords):
        """ Transform a list of RAS coords into IJK for a volume
        :return: list of IJK coordinates
        """
        rastoijk = vtk.vtkMatrix4x4()
        volumeNode.GetRASToIJKMatrix(rastoijk)
        # Append the homogeneous coordinate to a copy so the caller's list is not mutated
        return list(rastoijk.MultiplyPoint(list(rasCoords) + [1]))
    def IJKtoRAS(self, volumeNode, ijkCoords):
        """ Transform a list of IJK coords into RAS for a volume
        :return: list of RAS coordinates
        """
        ijktoras = vtk.vtkMatrix4x4()
        volumeNode.GetIJKToRASMatrix(ijktoras)
        # Append the homogeneous coordinate to a copy so the caller's list is not mutated
        return list(ijktoras.MultiplyPoint(list(ijkCoords) + [1]))
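# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the RAStoIJK /
# IJKtoRAS helpers above are thin wrappers around homogeneous 4x4 transforms.
# The demo below shows the same round trip with plain VTK, with no Slicer
# scene required; the voxel spacing values are made up for the example.
def _ras_ijk_roundtrip_demo():
    import vtk
    ijktoras = vtk.vtkMatrix4x4()  # identity by default
    for axis, spacing in enumerate((0.7, 0.7, 2.5)):  # hypothetical spacing
        ijktoras.SetElement(axis, axis, spacing)
    rastoijk = vtk.vtkMatrix4x4()
    rastoijk.DeepCopy(ijktoras)
    rastoijk.Invert()
    ijk = [220, 170, 40, 1]             # homogeneous IJK coordinate
    ras = ijktoras.MultiplyPoint(ijk)   # IJK -> RAS
    back = rastoijk.MultiplyPoint(ras)  # RAS -> IJK (round trip)
    return ras, back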
class CIP_PAARatioTest(ScriptedLoadableModuleTest):
@classmethod
def setUpClass(cls):
""" Executed once for all the tests """
slicer.util.selectModule('CIP_PAARatio')
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.mrmlScene.Clear(0)
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.setUp()
self.test_CIP_PAARatio()
def test_CIP_PAARatio(self):
self.assertIsNotNone(slicer.modules.cip_paaratio)
# Get the widget
widget = slicer.modules.cip_paaratio.widgetRepresentation()
volume = SlicerUtil.downloadVolumeForTests(widget=widget)
        self.assertIsNotNone(volume)
# Get the logic
logging.info("Getting logic...")
logic = widget.self().logic
# Actions
# Make sure that the right volume is selected
volumeSelector = SlicerUtil.findChildren(widget=widget, name='paa_volumeSelector')[0]
volumeSelector.setCurrentNode(volume)
button = SlicerUtil.findChildren(widget=widget, name='jumptToTemptativeSliceButton')[0]
# Place default rulers
button.click()
logging.info("Default rulers placed...OK")
# Get rulers
aorta = logic.getRulerNodeForVolumeAndStructure(volume.GetID(), logic.AORTA, createIfNotExist=False)[0]
pa = logic.getRulerNodeForVolumeAndStructure(volume.GetID(), logic.PA, createIfNotExist=False)[0]
# Make sure that rulers are in default color
color = aorta.GetNthDisplayNode(0).GetColor()
for i in range(3):
self.assertEqual(color[i], logic.defaultColor[i])
logging.info("Default color...OK")
# Check that the rulers are properly positioned
coordsAorta1 = [0,0,0]
coordsPa1 = [0,0,0]
aorta.GetPosition1(coordsAorta1)
pa.GetPosition1(coordsPa1)
        # Aorta ruler should be on the left
        self.assertGreater(coordsAorta1[0], coordsPa1[0])
        # Aorta and PA should be in the same slice
        self.assertEqual(coordsAorta1[2], coordsPa1[2])
logging.info("Default position...OK")
# Change Slice of the Aorta ruler
layoutManager = slicer.app.layoutManager()
redWidget = layoutManager.sliceWidget('Red')
style = redWidget.interactorStyle()
style.MoveSlice(1)
# Click in the radio button
button = SlicerUtil.findChildren(widget=widget, name='aortaRadioButton')[0]
button.click()
# click in the place ruler button
button = SlicerUtil.findChildren(widget=widget, name='placeRulersButton')[0]
button.click()
# Make sure that the slice of the ruler has changed
aorta.GetPosition1(coordsAorta1)
        self.assertNotEqual(coordsAorta1[2], coordsPa1[2])
logging.info("Position changed...OK")
# Force PAA ratio > 1
coordsAorta2 = [0,0,0]
coordsPa2 = [0,0,0]
aorta.GetPosition2(coordsAorta2)
pa.GetPosition2(coordsPa2)
currentRatio = pa.GetDistanceMeasurement() / aorta.GetDistanceMeasurement()
# Calculate how much do we have to increase the position of the pa marker
delta = 1 - currentRatio + 0.2
pa.SetPosition2(coordsPa2[0] + coordsPa2[0]*delta, coordsPa2[1], coordsPa2[2])
# Make sure that rulers are red now
color = aorta.GetNthDisplayNode(0).GetColor()
for i in range(3):
self.assertEqual(color[i], logic.defaultWarningColor[i])
logging.info("Red color...OK")
self.delayDisplay('Test passed!')
|
acil-bwh/SlicerCIP
|
Scripted/CIP_PAARatio/CIP_PAARatio.py
|
Python
|
bsd-3-clause
| 48,544
|
[
"VTK"
] |
7b8839119e661fd36cf8386eeafb7b2abd9942707bb65a56fc732cd987cdd5bf
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This tests the scafacos p2nfft dipolar calculations by matching against
# reference data from direct summation. In 2d, reference data from the mdlc
# test case is used
import numpy as np
import unittest as ut
import unittest_decorators as utx
import espressomd
import espressomd.magnetostatics as magnetostatics
from tests_common import abspath
@utx.skipIfMissingFeatures(["SCAFACOS_DIPOLES"])
class Scafacos1d2d(ut.TestCase):
def test_scafacos(self):
rho = 0.3
# This is only for box size calculation. The actual particle number is
# lower, because particles are removed from the mdlc gap region
n_particle = 100
particle_radius = 0.5
#################################################
box_l = pow(((4 * n_particle * np.pi) / (3 * rho)),
1.0 / 3.0) * particle_radius
skin = 0.5
s = espressomd.System(box_l=[1.0, 1.0, 1.0])
        # give ESPResSo some parameters
s.time_step = 0.01
s.cell_system.skin = skin
s.box_l = 3 * [box_l]
for dim in 2, 1:
print("Dimension", dim)
# Read reference data
if dim == 2:
file_prefix = "data/mdlc"
s.periodicity = [1, 1, 0]
else:
s.periodicity = [1, 0, 0]
file_prefix = "data/scafacos_dipoles_1d"
with open(abspath(file_prefix + "_reference_data_energy.dat")) as f:
ref_E = float(f.readline())
# Particles
data = np.genfromtxt(abspath(
file_prefix + "_reference_data_forces_torques.dat"))
for p in data[:, :]:
s.part.add(
id=int(p[0]), pos=p[1:4], dip=p[4:7], rotation=(1, 1, 1))
if dim == 2:
scafacos = magnetostatics.Scafacos(
prefactor=1,
method_name="p2nfft",
method_params={
"p2nfft_verbose_tuning": 0,
"pnfft_N": "80,80,160",
"pnfft_window_name": "bspline",
"pnfft_m": "4",
"p2nfft_ignore_tolerance": "1",
"pnfft_diff_ik": "0",
"p2nfft_r_cut": "6",
"p2nfft_alpha": "0.8",
"p2nfft_epsB": "0.05"})
s.actors.add(scafacos)
# change box geometry in x,y direction to ensure that
# scafacos survives it
s.box_l = np.array((1, 1, 1.3)) * box_l
else:
if dim == 1:
# 1d periodic in x
scafacos = magnetostatics.Scafacos(
prefactor=1,
method_name="p2nfft",
method_params={
"p2nfft_verbose_tuning": 1,
"pnfft_N": "32,128,128",
"pnfft_direct": 0,
"p2nfft_r_cut": 2.855,
"p2nfft_alpha": "1.5",
"p2nfft_intpol_order": "-1",
"p2nfft_reg_kernel_name": "ewald",
"p2nfft_p": "16",
"p2nfft_ignore_tolerance": "1",
"pnfft_window_name": "bspline",
"pnfft_m": "8",
"pnfft_diff_ik": "1",
"p2nfft_epsB": "0.125"})
s.box_l = np.array((1, 1, 1)) * box_l
s.actors.add(scafacos)
else:
raise Exception("This shouldn't happen.")
s.thermostat.turn_off()
s.integrator.run(0)
# Calculate errors
err_f = np.sum(np.linalg.norm(
s.part[:].f - data[:, 7:10], axis=1)) / np.sqrt(data.shape[0])
err_t = np.sum(np.linalg.norm(
s.part[:].torque_lab - data[:, 10:13], axis=1)) / np.sqrt(data.shape[0])
err_e = s.analysis.energy()["dipolar"] - ref_E
print("Energy difference", err_e)
print("Force difference", err_f)
print("Torque difference", err_t)
tol_f = 2E-3
tol_t = 2E-3
tol_e = 1E-3
self.assertLessEqual(
abs(err_e), tol_e, "Energy difference too large")
self.assertLessEqual(
abs(err_t), tol_t, "Torque difference too large")
self.assertLessEqual(
abs(err_f), tol_f, "Force difference too large")
s.part.clear()
del s.actors[0]
if __name__ == "__main__":
ut.main()
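# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test): the pass/fail metric
# above is the summed per-particle deviation norm scaled by 1/sqrt(N), not a
# plain mean. A standalone rendition for two (N, 3) arrays, reusing the
# module-level numpy import:
def _deviation_metric(computed, reference):
    diff_norms = np.linalg.norm(computed - reference, axis=1)
    return np.sum(diff_norms) / np.sqrt(reference.shape[0])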
|
KaiSzuttor/espresso
|
testsuite/python/scafacos_dipoles_1d_2d.py
|
Python
|
gpl-3.0
| 5,495
|
[
"ESPResSo"
] |
eed3ed3699ca758473d3d938d2c8ca989c3d4897a34c5f97bd885e3c3fef80e5
|
"""Generate Java code from an ASDL description."""
# TO DO
# handle fields that have a type but no name
import os, sys, traceback
import asdl
TABSIZE = 4
MAX_COL = 100
def reflow_lines(s, depth):
"""Reflow the line s indented depth tabs.
Return a sequence of lines where no line extends beyond MAX_COL
when properly indented. The first line is properly indented based
exclusively on depth * TABSIZE. All following lines -- these are
the reflowed lines generated by this function -- start at the same
column as the first character beyond the opening { in the first
line.
"""
size = MAX_COL - depth * TABSIZE
if len(s) < size:
return [s]
lines = []
cur = s
padding = ""
while len(cur) > size:
i = cur.rfind(' ', 0, size)
assert i != -1, "Impossible line to reflow: %s" % `s`
lines.append(padding + cur[:i])
if len(lines) == 1:
# find new size based on brace
j = cur.find('{', 0, i)
if j >= 0:
j += 2 # account for the brace and the space after it
size -= j
padding = " " * j
cur = cur[i + 1:]
else:
lines.append(padding + cur)
return lines
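# Illustrative sketch (not part of the original generator): a line longer than
# MAX_COL at depth 1 is wrapped, and continuation lines are padded to line up
# two columns after the first '{' found in the head of the line.
def _reflow_demo():
    line = ("public Attribute(exprType value, NameTokType attr, int ctx) { "
            "this.value = value; this.attr = attr; this.ctx = ctx; }")
    return reflow_lines(line, 1)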
class EmitVisitor(asdl.VisitorBase):
"""Visit that emits lines"""
def __init__(self):
super(EmitVisitor, self).__init__()
def open(self, name, refersToSimpleNode=1, useDataOutput=0):
self.file = open("%s.java" % name, "wb")
self.file.write("// Autogenerated AST node\n")
self.file.write('package org.python.pydev.parser.jython.ast;\n')
self.emit("", 0)
if refersToSimpleNode:
self.file.write('import org.python.pydev.parser.jython.SimpleNode;\n')
self.file.write('import java.util.Arrays;\n')
# if useDataOutput:
# print >> self.file, 'import java.io.DataOutputStream;'
# print >> self.file, 'import java.io.IOException;'
self.file.write('\n')
def close(self):
self.file.close()
def emit(self, s, depth):
# XXX reflow long lines?
lines = reflow_lines(s, depth)
for line in lines:
line = (" " * TABSIZE * depth) + line + "\n"
self.file.write(line)
# This step will add a 'simple' boolean attribute to all Sum and Product
# nodes and add a 'typedef' link to each Field node that points to the
# Sum or Product node that defines the field.
class AnalyzeVisitor(EmitVisitor):
index = 0
def makeIndex(self):
self.index += 1
return self.index
def visitModule(self, mod):
self.types = {}
for dfn in mod.dfns:
self.types[str(dfn.name)] = dfn.value
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type, depth=0):
self.visit(type.value, type.name, depth)
def visitSum(self, sum, name, depth):
sum.simple = 1
for t in sum.types:
if t.fields:
sum.simple = 0
break
for t in sum.types:
if not sum.simple:
t.index = self.makeIndex()
self.visit(t, name, depth)
def visitProduct(self, product, name, depth):
product.simple = 0
product.index = self.makeIndex()
for f in product.fields:
self.visit(f, depth + 1)
def visitConstructor(self, cons, name, depth):
for f in cons.fields:
self.visit(f, depth + 1)
def visitField(self, field, depth):
field.typedef = self.types.get(str(field.type))
# The code generator itself.
#
class JavaVisitor(EmitVisitor):
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type, depth=0):
self.visit(type.value, type.name, depth)
def visitSum(self, sum, name, depth):
if sum.simple:
self.simple_sum(sum, name, depth)
else:
self.sum_with_constructor(sum, name, depth)
def simple_sum(self, sum, name, depth):
self.open("%sType" % name, refersToSimpleNode=0)
self.emit("public interface %(name)sType {" % locals(), depth)
for i in range(len(sum.types)):
type = sum.types[i]
self.emit("public static final int %s = %d;" % (type.name, i + 1),
depth + 1)
self.emit("", 0)
self.emit("public static final String[] %sTypeNames = new String[] {" %
name, depth + 1)
self.emit('"<undef>",', depth + 2)
for type in sum.types:
self.emit('"%s",' % type.name, depth + 2)
self.emit("};", depth + 1)
self.emit("}", depth)
self.close()
def sum_with_constructor(self, sum, name, depth):
self.open("%sType" % name)
self.emit("public abstract class %(name)sType extends SimpleNode {" %
locals(), depth)
#fabioz: HACK WARNING: Moved the suite body to suiteType!
if str(name) == 'suite':
self.emit("public stmtType[] body;", depth + 1)
#HACK WARNING: Moved the suite body to suiteType!
self.emit("}", depth)
self.close()
for t in sum.types:
self.visit(t, name, depth)
def visitProduct(self, product, name, depth):
self.open("%sType" % name, useDataOutput=1)
self.emit("public final class %(name)sType extends SimpleNode {" % locals(), depth)
for f in product.fields:
self.visit(f, depth + 1)
self.emit("", depth)
self.javaMethods(product, name, "%sType" % name, product.fields,
depth + 1)
self.emit("}", depth)
self.close()
def visitConstructor(self, cons, name, depth):
self.open(cons.name, useDataOutput=1)
enums = []
for f in cons.fields:
if f.typedef and f.typedef.simple:
enums.append("%sType" % f.type)
if enums:
s = "implements %s " % ", ".join(enums)
else:
s = ""
self.emit("public final class %s extends %sType %s{" %
(cons.name, name, s), depth)
#fabioz: HACK WARNING: Moved the suite body to suiteType!
if str(name) != 'suite':
for f in cons.fields:
self.visit(f, depth + 1)
#HACK WARNING: Moved the suite body to suiteType!
self.emit("", depth)
self.javaMethods(cons, cons.name, cons.name, cons.fields, depth + 1)
self.emit("}", depth)
self.close()
def javaMethods(self, type, clsname, ctorname, fields, depth):
# The java ctors
fpargs = ", ".join([self.fieldDef(f) for f in fields])
self.emit("public %s(%s) {" % (ctorname, fpargs), depth)
for f in fields:
self.emit("this.%s = %s;" % (f.name, f.name), depth + 1)
if str(ctorname) == 'Suite':
self.emit("if(body != null && body.length > 0){", depth + 1)
self.emit("beginColumn = body[0].beginColumn;", depth + 2)
self.emit("beginLine = body[0].beginLine;", depth + 2)
self.emit("}", depth + 1)
if str(ctorname) == 'Expr':
self.emit("if(value != null){", depth + 1)
self.emit("beginColumn = value.beginColumn;", depth + 2)
self.emit("beginLine = value.beginLine;", depth + 2)
self.emit("}", depth + 1)
self.emit("}", depth)
self.emit("", 0)
if fpargs:
fpargs += ", "
        #fabioz: Removed the constructor with the parent that set the beginLine/Col, as this wasn't used and added some
        #confusion because the parent wasn't properly set -- if a parent is actually set, it's set later in the parsing (because
        #the parent is resolved after the child).
# Creates something as:
# public Attribute(exprType value, NameTokType attr, int ctx, SimpleNode
# parent) {
# this(value, attr, ctx);
# this.beginLine = parent.beginLine;
# this.beginColumn = parent.beginColumn;
# }
# self.emit("public %s(%sSimpleNode parent) {" % (ctorname, fpargs), depth)
# self.emit("this(%s);" %
# ", ".join([str(f.name) for f in fields]), depth+1)
# self.emit("this.beginLine = parent.beginLine;", depth+1);
# self.emit("this.beginColumn = parent.beginColumn;", depth+1);
# self.emit("}", depth)
self.emit("@Override", depth)
self.emit("public int hashCode() {", depth)
self.emit("final int prime = 31;", depth + 1)
self.emit("int result = 1;", depth + 1)
for f in fields:
jType = self.jType(f)
if f.seq:
self.emit("result = prime * result + Arrays.hashCode(%s);" % (f.name,), depth + 1)
elif jType == 'int':
self.emit("result = prime * result + %s;" % (f.name,), depth + 1)
elif jType == 'boolean':
self.emit("result = prime * result + (%s ? 17 : 137);" % (f.name,), depth + 1)
else:
self.emit("result = prime * result + ((%s == null) ? 0 : %s.hashCode());" % (f.name, f.name), depth + 1)
self.emit("return result;", depth+1)
self.emit("}", depth)
#equals()
self.emit("", 0)
self.emit("@Override", depth)
self.emit("public boolean equals(Object obj) {", depth)
self.emit("if (this == obj) return true;", depth + 1)
self.emit("if (obj == null) return false;", depth + 1)
self.emit("if (getClass() != obj.getClass()) return false;", depth + 1)
self.emit("%s other = (%s) obj;" % (ctorname, ctorname,), depth + 1)
for f in fields:
jType = self.jType(f)
if f.seq:
self.emit('if (!Arrays.equals(%s, other.%s)) return false;' % (f.name, f.name,), depth + 1)
elif jType in ('int', 'boolean'):
self.emit('if(this.%s != other.%s) return false;' % (f.name, f.name,), depth + 1)
else:
self.emit('if (%s == null) { if (other.%s != null) return false;}' % (f.name, f.name,), depth + 1)
self.emit('else if (!%s.equals(other.%s)) return false;' % (f.name, f.name,), depth + 1)
self.emit("return true;", depth + 1)
self.emit("}", depth)
#createCopy()
self.emit("@Override", depth)
self.emit("public %s createCopy() {" % (ctorname,), depth)
self.emit("return createCopy(true);", depth + 1)
self.emit("}", depth)
self.emit("@Override", depth)
self.emit("public %s createCopy(boolean copyComments) {" % (ctorname,), depth)
params = []
copy_i = 0
for f in fields:
jType = self.jType(f)
if jType in ('int', 'boolean', 'String', 'Object'):
if f.seq:
self.emit('%s[] new%s;' % (jType, copy_i), depth + 1)
self.emit('if(this.%s != null){' % (f.name,), depth + 1)
#int[] new0 = new int[this.ops.length];
#System.arraycopy(this.ops, 0, new0, 0, this.ops.length);
self.emit('new%s = new %s[this.%s.length];' % (copy_i, jType, f.name), depth + 2)
self.emit('System.arraycopy(this.%s, 0, new%s, 0, this.%s.length);' % (f.name, copy_i, f.name), depth + 2)
self.emit('}else{', depth + 1)
self.emit('new%s = this.%s;' % (copy_i, f.name), depth + 2)
self.emit('}', depth + 1)
params.append('new%s' % (copy_i,))
copy_i += 1
else:
params.append(str(f.name))
else:
if f.seq:
#comprehensionType[] new0 = new comprehensionType[this.generators.length];
#for(int i=0;i<this.generators.length;i++){
# new0[i] = (comprehensionType) this.generators[i] != null?this.generators[i].createCopy():null;
#}
self.emit('%s[] new%s;' % (jType, copy_i), depth + 1)
self.emit('if(this.%s != null){' % (f.name,), depth + 1)
self.emit('new%s = new %s[this.%s.length];' % (copy_i, jType, f.name), depth + 1)
self.emit('for(int i=0;i<this.%s.length;i++){' % (f.name), depth + 1)
self.emit('new%s[i] = (%s) (this.%s[i] != null? this.%s[i].createCopy(copyComments):null);' % (copy_i, jType, f.name, f.name), depth + 2)
self.emit('}', depth + 1)
self.emit('}else{', depth + 1)
self.emit('new%s = this.%s;' % (copy_i, f.name), depth + 2)
self.emit('}', depth + 1)
params.append('new%s' % (copy_i,))
copy_i += 1
else:
params.append('%s!=null?(%s)%s.createCopy(copyComments):null' % (f.name, jType, f.name))
params = ", ".join(params)
self.emit("%s temp = new %s(%s);" %
(ctorname, ctorname, params), depth + 1)
self.emit("temp.beginLine = this.beginLine;", depth + 1);
self.emit("temp.beginColumn = this.beginColumn;", depth + 1);
def EmitSpecials(s):
self.emit('if(this.specials%s != null && copyComments){' % s, depth + 1)
self.emit(' for(Object o:this.specials%s){' % s, depth + 1)
self.emit(' if(o instanceof commentType){', depth + 1)
self.emit(' commentType commentType = (commentType) o;', depth + 1)
self.emit(' temp.getSpecials%s().add(commentType.createCopy(copyComments));' % s, depth + 1)
self.emit(' }', depth + 1)
self.emit(' }', depth + 1)
self.emit('}', depth + 1)
EmitSpecials('Before')
EmitSpecials('After')
self.emit("return temp;", depth + 1);
self.emit("}", depth)
self.emit("", 0)
# The toString() method
self.emit("@Override", depth)
self.emit("public String toString() {", depth)
self.emit('StringBuffer sb = new StringBuffer("%s[");' % clsname,
depth + 1)
for f in fields:
self.emit('sb.append("%s=");' % f.name, depth + 1)
if not self.bltinnames.has_key(str(f.type)) and f.typedef.simple:
self.emit("sb.append(dumpThis(this.%s, %sType.%sTypeNames));" %
(f.name, f.type, f.type), depth + 1)
else:
self.emit("sb.append(dumpThis(this.%s));" % f.name, depth + 1)
if f != fields[-1]:
self.emit('sb.append(", ");', depth + 1)
self.emit('sb.append("]");', depth + 1)
self.emit("return sb.toString();", depth + 1)
self.emit("}", depth)
self.emit("", 0)
# # The pickle() method -- commented out, as it's not used within Pydev
# self.emit("public void pickle(DataOutputStream ostream) throws IOException {", depth)
# self.emit("pickleThis(%s, ostream);" % type.index, depth+1);
# for f in fields:
# self.emit("pickleThis(this.%s, ostream);" % f.name, depth+1)
# self.emit("}", depth)
# self.emit("", 0)
# The accept() method
self.emit("@Override", depth)
self.emit("public Object accept(VisitorIF visitor) throws Exception {", depth)
if clsname == ctorname:
self.emit('return visitor.visit%s(this);' % clsname, depth + 1)
else:
self.emit('if (visitor instanceof VisitorBase) {', depth + 1)
self.emit('((VisitorBase) visitor).traverse(this);', depth + 2)
            self.emit('} else {', depth + 1)
self.emit('traverse(visitor);', depth + 2)
self.emit('}', depth + 1)
            self.emit('return null;', depth + 1)
self.emit("}", depth)
self.emit("", 0)
# The visitChildren() method
self.emit("@Override", depth)
self.emit("public void traverse(VisitorIF visitor) throws Exception {", depth)
for f in fields:
if self.bltinnames.has_key(str(f.type)):
continue
if f.typedef.simple:
continue
if f.seq:
self.emit('if (%s != null) {' % f.name, depth + 1)
self.emit('for (int i = 0; i < %s.length; i++) {' % f.name,
depth + 2)
self.emit('if (%s[i] != null) {' % f.name, depth + 3)
self.emit('%s[i].accept(visitor);' % f.name, depth + 4)
                self.emit('}', depth + 3)
self.emit('}', depth + 2)
self.emit('}', depth + 1)
else:
self.emit('if (%s != null) {' % f.name, depth + 1)
self.emit('%s.accept(visitor);' % f.name, depth + 2)
                self.emit('}', depth + 1)
self.emit('}', depth)
self.emit("", 0)
def visitField(self, field, depth):
self.emit("public %s;" % self.fieldDef(field), depth)
bltinnames = {
'bool' : 'boolean',
'int' : 'int',
'identifier' : 'String',
'string' : 'String',
'object' : 'Object', # was PyObject
}
def jType(self, field):
jtype = str(field.type)
if field.typedef and field.typedef.simple:
jtype = 'int'
else:
jtype = self.bltinnames.get(jtype, jtype + 'Type')
return jtype
def fieldDef(self, field):
jtype = self.jType(field)
name = field.name
seq = field.seq and "[]" or ""
return "%(jtype)s%(seq)s %(name)s" % locals()
class VisitorVisitor(EmitVisitor):
def __init__(self):
EmitVisitor.__init__(self)
self.ctors = []
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
self.open("VisitorIF", refersToSimpleNode=0)
self.emit('public interface VisitorIF {', 0)
for ctor in self.ctors:
self.emit("public Object visit%s(%s node) throws Exception;" %
(ctor, ctor), 1)
self.emit('}', 0)
self.close()
self.open("ISimpleNodeSwitch", refersToSimpleNode=0)
self.emit('public interface ISimpleNodeSwitch {', 0)
for ctor in self.ctors:
self.emit("public void visit(%s node);" %
(ctor,), 1)
self.emit('}', 0)
self.close()
self.open("VisitorBase")
self.emit('public abstract class VisitorBase implements VisitorIF {', 0)
for ctor in self.ctors:
self.emit("public Object visit%s(%s node) throws Exception {" %
(ctor, ctor), 1)
self.emit("Object ret = unhandled_node(node);", 2)
self.emit("traverse(node);", 2)
self.emit("return ret;", 2)
self.emit('}', 1)
self.emit('', 0)
self.emit("abstract protected Object unhandled_node(SimpleNode node) throws Exception;", 1)
self.emit("abstract public void traverse(SimpleNode node) throws Exception;", 1)
self.emit('}', 0)
self.close()
def visitType(self, type, depth=1):
self.visit(type.value, type.name, depth)
def visitSum(self, sum, name, depth):
if not sum.simple:
for t in sum.types:
self.visit(t, name, depth)
def visitProduct(self, product, name, depth):
pass
def visitConstructor(self, cons, name, depth):
self.ctors.append(cons.name)
class ChainOfVisitors:
def __init__(self, *visitors):
self.visitors = visitors
def visit(self, object):
for v in self.visitors:
v.visit(object)
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.argv.append('Python.asdl')
mod = asdl.parse(sys.argv[1])
if not asdl.check(mod):
sys.exit(1)
c = ChainOfVisitors(AnalyzeVisitor(),
JavaVisitor(),
VisitorVisitor())
c.visit(mod)
|
fabioz/Pydev
|
plugins/org.python.pydev.parser/src/org/python/pydev/parser/jython/ast/asdl_java.py
|
Python
|
epl-1.0
| 20,265
|
[
"VisIt"
] |
d4dcf3974f6c89304178839cfb74ed2e5558d322436bb6cde676f9bfc64533a2
|
#!/usr/bin/python
#
# Script to create the database structure
#
# By: Alisson Menezes
#
#
from pymongo import MongoClient
print "[+] Connecting to the database"
client = MongoClient('localhost', 27017)
db = client["octopus"]
print "[+] Connected ..."
print "[+] Creating the database structure"
db.nodes.update({ "_id":"default", "feet":[], "nodes":[]},{ "_id":"default", "feet":[], "nodes":[]}, upsert=True)
print "[+] Structure created!"
|
AlissonMMenezes/Octopus
|
inicia_db.py
|
Python
|
gpl-2.0
| 436
|
[
"Octopus"
] |
2ede7ec334148029dd24c91b9212d1f0335dadd5697de5424a21f957d46ba0d5
|
r"""
I/O Registry (:mod:`skbio.io.registry`)
=======================================
.. currentmodule:: skbio.io.registry
Classes
-------
.. autosummary::
:toctree: generated/
IORegistry
Format
Functions
---------
.. autosummary::
:toctree: generated/
create_format
Exceptions
----------
.. autosummary::
:toctree: generated/
DuplicateRegistrationError
InvalidRegistrationError
Creating a new format for scikit-bio
------------------------------------
scikit-bio makes it simple to add new file formats to its I/O registry.
scikit-bio maintains a singleton of the :class:`IORegistry` class called
`io_registry`. This is where all scikit-bio file formats are registered. One
could also instantiate their own :class:`IORegistry`, but that is not the focus
of this tutorial.
The first step to creating a new format is to add a submodule in
`skbio/io/format/` named after the file format you are implementing.
For example, if the format you are implementing is called `myformat` then you
would create a file called `skbio/io/format/myformat.py`.
The next step is to import the :func:`create_format` factory from
:mod:`skbio.io`. This will allow you to create a new :class:`Format` object
that `io_registry` will know about.
Ideally you should name the result of :func:`create_format` as your file name.
For example:
.. code-block:: python
from skbio.io import create_format
myformat = create_format('myformat')
The `myformat` object is what we will use to register our new functionality.
At this point you should evaluate whether your format is binary or text.
If your format is binary, your :func:`create_format` call should look like
this:
.. code-block:: python
myformat = create_format('myformat', encoding='binary')
Alternatively if your format is text and has a specific encoding or newline
handling you can also specify that:
.. code-block:: python
myformat = create_format('myformat', encoding='ascii', newline='\n')
This will ensure that our registry will open files with a default encoding of
`'ascii'` for `'myformat'` and expect all newlines to be `'\n'` characters.
Having worked out these details, we are ready to register the actual
functionality of our format (e.g., sniffer, readers, and writers).
To create a sniffer simply decorate the following onto your sniffer function:
.. code-block:: python
@myformat.sniffer()
def _myformat_sniffer(fh):
# do something with `fh` to determine the membership of the file
For further details on sniffer functions see :func:`Format.sniffer`.
Creating a reader is very similar, but has one difference:
.. code-block:: python
@myformat.reader(SomeSkbioClass)
def _myformat_to_some_skbio_class(fh, kwarg1='default', extra=FileSentinel):
# parse `fh` and return a SomeSkbioClass instance here
# `extra` will also be an open filehandle if provided else None
Here we bound a function to a specific class. We also demonstrated using
our FileSentinel object to indicate to the registry that this reader can take
auxiliary files that should be handled in the same way as the primary file.
For further details on reader functions see :func:`Format.reader`.
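Once such a reader is registered, a usage sketch could look like the
following (the class, format, and file names here are illustrative):
.. code-block:: python
   obj = skbio.io.read('data.myformat', format='myformat',
                       into=SomeSkbioClass, extra='auxiliary_data.txt')
The registry opens `extra` the same way it opens the primary file and hands
the reader an open filehandle (or None when the argument is omitted).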
Creating a writer is about the same:
.. code-block:: python
@myformat.writer(SomeSkbioClass)
def _some_skbio_class_to_myformat(obj, fh, kwarg1='whatever',
extra=FileSentinel):
# write the contents of `obj` into `fh` and whatever else into `extra`
# do not return anything, it will be ignored
This is exactly the same as the `reader` above just in reverse, we also
receive the object we are writing as the first parameter instead of the file
(which is the second one). For further details on writer functions see
:func:`Format.writer`.
.. note:: When raising errors in readers and writers, the error should be a
subclass of ``FileFormatError`` specific to your new format.
Once you are satisfied with the functionality, you will need to ensure that
`skbio/io/__init__.py` contains an import of your new submodule so the
decorators are executed. Add the function
``import_module('skbio.io.format.myformat')`` with your module name to the
existing list.
.. note:: Because scikit-bio handles all of the I/O boilerplate, you only need
to unit-test the actual business logic of your `readers`, `writers`, and
`sniffers`.
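As an end-to-end sketch (all names illustrative), a registered format then
behaves like any built-in one:
.. code-block:: python
   import skbio.io
   obj = skbio.io.read('input.myformat', format='myformat',
                       into=SomeSkbioClass)
   skbio.io.write(obj, format='myformat', into='output.myformat')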
Reserved Keyword Arguments
--------------------------
The following keyword args may not be used when defining new `readers` or
`writers` as they already have special meaning to the registry system:
- `format`
- `into`
- `verify`
- `mode`
- `encoding`
- `errors`
- `newline`
- `compression`
- `compresslevel`
The following are not yet used but should be avoided as well:
- `auth`
- `user`
- `password`
- `buffering`
- `buffer_size`
- `closefd`
- `exclusive`
- `append`
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from warnings import warn
import types
import traceback
import itertools
import inspect
from functools import wraps
from future.builtins import zip
from ._exception import DuplicateRegistrationError, InvalidRegistrationError
from . import (UnrecognizedFormatError, ArgumentOverrideWarning,
FormatIdentificationWarning)
from .util import _resolve_file, open_file, open_files, _d as _open_kwargs
from skbio.util._misc import make_sentinel, find_sentinels
from skbio.util._decorator import stable
FileSentinel = make_sentinel("FileSentinel")
class IORegistry(object):
"""Create a registry of formats and implementations which map to classes.
"""
@stable(as_of="0.4.0")
def __init__(self):
        # This separation of binary and text formats is useful because there
        # are many situations where we may have received a text file. When this
# happens, the binary data fundamentally does not exist. We could
# assume encoding should be interpreted in reverse, however this misses
# the bigger point: why would the user ever want text to be treated as
# binary? They already went through the effort to hand us text.
# Therefore, during format resolution, we should skip the binary
# formats if they are irrelevant. (They are incompatible with such a
# filehandle anyways.)
self._binary_formats = {}
self._text_formats = {}
self._lookups = (self._binary_formats, self._text_formats)
@stable(as_of="0.4.0")
def create_format(self, *args, **kwargs):
"""A simple factory for creating new file formats.
        This will automatically register the format with this registry.
All arguments are passed through to the Format constructor.
Returns
-------
Format
A new format that is registered with the registry.
"""
format = Format(*args, **kwargs)
self.add_format(format)
return format
@stable(as_of="0.4.0")
def add_format(self, format_object):
"""Add a format to the registry.
Parameters
----------
format_object : Format
The format to add to the registry.
"""
# See comment in the constructor for an explanation for why this split
# occurs.
name = format_object.name
if name in self._binary_formats or name in self._text_formats:
raise DuplicateRegistrationError("A format already exists with"
" that name: %s" % name)
if format_object.is_binary_format:
self._binary_formats[name] = format_object
else:
self._text_formats[name] = format_object
@stable(as_of="0.4.0")
def get_sniffer(self, format_name):
"""Locate the sniffer for a format.
Parameters
----------
format_name : str
The name of the format to lookup.
Returns
-------
function or None
The sniffer associated with `format_name`
"""
for lookup in self._lookups:
if format_name in lookup:
return lookup[format_name].sniffer_function
return None
@stable(as_of="0.4.0")
def get_reader(self, format_name, cls):
"""Locate the reader for a format and class.
Parameters
----------
format_name : str
The name of the format to lookup.
cls : type or None
The class which the reader will return an instance of. If `cls` is
None, the reader will return a generator.
Default is None.
Returns
-------
function or None
The reader associated with `format_name` and `cls`
"""
return self._get_rw(format_name, cls, 'readers')
@stable(as_of="0.4.0")
def get_writer(self, format_name, cls):
"""Locate the writer for a format and class.
Parameters
----------
format_name : str
The name of the format to lookup.
cls : type or None
The class which the writer will expect an instance of. If `cls` is
None, the writer will expect a generator.
Default is None.
Returns
-------
function or None
The writer associated with `format_name` and `cls`
"""
return self._get_rw(format_name, cls, 'writers')
def _get_rw(self, format_name, cls, lookup_name):
for lookup in self._lookups:
if format_name in lookup:
format_lookup = getattr(lookup[format_name], lookup_name)
if cls in format_lookup:
return format_lookup[cls]
return None
@stable(as_of="0.4.0")
def list_read_formats(self, cls):
"""Return a list of available read formats for a given `cls` type.
Parameters
----------
cls : type
The class which will be used to determine what read formats exist
for an instance of `cls`.
Returns
-------
list
A list of available read formats for an instance of `cls`. List may
be empty.
"""
return list(self._iter_rw_formats(cls, 'readers'))
@stable(as_of="0.4.0")
def list_write_formats(self, cls):
"""Return a list of available write formats for a given `cls` type.
Parameters
----------
cls : type
The class which will be used to determine what write formats exist
for an instance of `cls`.
Returns
-------
list
A list of available write formats for an instance of `cls`. List
may be empty.
"""
return list(self._iter_rw_formats(cls, 'writers'))
def _iter_rw_formats(self, cls, lookup_name):
for lookup in self._lookups:
for format in lookup.values():
if cls in getattr(format, lookup_name):
yield format.name
@stable(as_of="0.4.0")
def sniff(self, file, **kwargs):
"""Detect the format of a given `file` and suggest kwargs for reading.
Parameters
----------
file : openable (filepath, URL, filehandle, etc.)
The file to sniff. Something that is understood by `skbio.io.open`.
kwargs : dict, optional
Keyword arguments will be passed to `skbio.io.open`.
Returns
-------
(str, dict)
The name of the format of the file and any suggested kwargs for
use with the corresponding reader.
Raises
------
UnrecognizedFormatError
This occurs when the format is not 'claimed' by any registered
sniffer or when the format is ambiguous and has been 'claimed' by
more than one sniffer.
"""
        # By resolving the input here, we have the opportunity to reuse the
# file (which is potentially ephemeral). Each sniffer will also resolve
# the file, but that call will short-circuit and won't claim
# responsibility for closing the file. This means that the file
# should only close after leaving this context. This is also the reason
# that we have to use SaneTextIOWrapper because each sniffer will
# wrap the file to produce an appropriate default encoding for their
# format (if unspecified). This results in the SaneTextIOWrapper being
# garbage collected (using io.TextIOBase results in close being called
# on our buffer by the deconstructor which we wanted to share with the
# next sniffer)
with _resolve_file(file, mode='r', **kwargs) as (fh, _,
is_binary_file):
# tell may fail noisily if the user provided a TextIOBase or
# BufferedReader which has already been iterated over (via next()).
matches = []
backup = fh.tell()
if is_binary_file and kwargs.get('encoding', 'binary') == 'binary':
matches = self._find_matches(fh, self._binary_formats,
**kwargs)
if kwargs.get('encoding', None) != 'binary':
# We can always turn a binary file into a text file, but the
# reverse doesn't make sense.
matches += self._find_matches(fh, self._text_formats, **kwargs)
fh.seek(backup)
elif not is_binary_file:
raise ValueError("Cannot decode text source (%r) as binary."
% file)
# else we are a binary_file and our encoding did not exclude binary
# so we have already handled that condition
if len(matches) > 1:
raise UnrecognizedFormatError("File format for %r is ambiguous,"
" may be one of: %r"
% (file, [m for m, s in matches]))
elif len(matches) == 0:
raise UnrecognizedFormatError("Could not detect the format of %r"
% file)
return matches[0]
def _find_matches(self, file, lookup, **kwargs):
matches = []
for format in lookup.values():
if format.sniffer_function is not None:
is_format, skwargs = format.sniffer_function(file, **kwargs)
file.seek(0)
if is_format:
matches.append((format.name, skwargs))
return matches
@stable(as_of="0.4.0")
def read(self, file, format=None, into=None, verify=True, **kwargs):
"""Read `file` as `format` into an object.
Parameters
----------
file : openable (filepath, URL, filehandle, etc.)
The file to read. Something that is understood by `skbio.io.open`.
format : str, optional
The format of the file if known. If None, the format will be
inferred from the file.
into : type or None, optional
The object which will be returned. If None, a generator will be
returned.
verify : bool, optional
When True, will double check the `format` if provided.
kwargs : dict, optional
Keyword arguments will be passed to their respective handlers
(`skbio.io.open` and the reader for `format`)
Returns
-------
object or generator
An instance of `into` if `into` is not None else generator
Raises
------
ValueError
Raised when `format` and `into` are both None.
UnrecognizedFormatError
Raised when a reader could not be found for a given `format` or the
format could not be guessed.
FormatIdentificationWarning
Raised when `verify` is True and the sniffer of a `format` did
not agree that `file` is a member of `format`
ArgumentOverrideWarning
Raised when `verify` is True and a user-supplied argument is
overriding the suggestion provided by the sniffer of `format`.
"""
# Context managers do not compose well with generators. We have to
# duplicate the logic so that the file will stay open while yielding.
# Otherwise the context exits as soon as the generator is returned
# (making any iteration fail as the file is closed from its
# perspective).
if into is None:
if format is None:
raise ValueError("`into` and `format` cannot both be None")
gen = self._read_gen(file, format, into, verify, kwargs)
# This is done so that any errors occur immediately instead of
# on the first call from __iter__
            # eta-reduction is possible, but we want the type to be
            # GeneratorType
return (x for x in itertools.chain([next(gen)], gen))
else:
return self._read_ret(file, format, into, verify, kwargs)
def _read_ret(self, file, fmt, into, verify, kwargs):
io_kwargs = self._find_io_kwargs(kwargs)
with _resolve_file(file, **io_kwargs) as (file, _, _):
reader, kwargs = self._init_reader(file, fmt, into, verify, kwargs,
io_kwargs)
return reader(file, **kwargs)
def _read_gen(self, file, fmt, into, verify, kwargs):
io_kwargs = self._find_io_kwargs(kwargs)
# We needed to get the io_kwargs from kwargs for things like
# _resolve_file and for verifying a format.
# kwargs should still retain the contents of io_kwargs because the
# actual reader will also need them.
with _resolve_file(file, **io_kwargs) as (file, _, _):
reader, kwargs = self._init_reader(file, fmt, into, verify, kwargs,
io_kwargs)
generator = reader(file, **kwargs)
while True:
yield next(generator)
def _find_io_kwargs(self, kwargs):
return {k: kwargs[k] for k in _open_kwargs if k in kwargs}
def _init_reader(self, file, fmt, into, verify, kwargs, io_kwargs):
skwargs = {}
if fmt is None:
fmt, skwargs = self.sniff(file, **io_kwargs)
elif verify:
sniffer = self.get_sniffer(fmt)
if sniffer is not None:
backup = file.tell()
is_format, skwargs = sniffer(file, **io_kwargs)
file.seek(backup)
if not is_format:
warn("%r does not look like a %s file"
% (file, fmt), FormatIdentificationWarning)
for key in skwargs:
if key not in kwargs:
kwargs[key] = skwargs[key]
elif kwargs[key] != skwargs[key]:
warn('Best guess was: %s=%r, continuing with user'
' supplied: %r' % (key, skwargs[key],
kwargs[key]),
ArgumentOverrideWarning)
reader = self.get_reader(fmt, into)
if reader is None:
raise UnrecognizedFormatError(
"Cannot read %r from %r, no %s reader found." %
(fmt, file, into.__name__ if into else 'generator'))
return reader, kwargs
@stable(as_of="0.4.0")
def write(self, obj, format, into, **kwargs):
"""Write `obj` as `format` into a file.
Parameters
----------
obj : object
The object to write as `format`
format : str
The format to write `obj` as
into : openable (filepath, URL, filehandle, etc.)
What to write `obj` to. Something that is understood by
`skbio.io.open`.
kwargs : dict, optional
Keyword arguments will be passed to their respective handlers
(`skbio.io.open` and the writer for `format`)
Returns
-------
openable (filepath, URL, filehandle, etc.)
Will pass back the user argument for `into` as a convenience.
Raises
------
UnrecognizedFormatError
Raised when a writer for writing `obj` as `format` could not be
found.
"""
# The simplest functionality here.
cls = None
if not isinstance(obj, types.GeneratorType):
cls = obj.__class__
writer = self.get_writer(format, cls)
if writer is None:
raise UnrecognizedFormatError(
"Cannot write %r into %r, no %s writer found." %
(format, into, obj.__class__.__name__))
writer(obj, into, **kwargs)
return into
@stable(as_of="0.4.0")
def monkey_patch(self):
"""Monkey-patch `read` and `write` methods onto registered classes.
Will modify classes which have been registered to a reader or writer
to have `read` and `write` methods which will contain documentation
specifying useable formats for that class.
The actual functionality will be a pass-through to `skbio.io.read`
and `skbio.io.write` respectively.
"""
reads = set()
writes = set()
for lookup in self._lookups:
for format in lookup.values():
reads |= format.monkey_patched_readers
writes |= format.monkey_patched_writers
for cls in reads:
self._apply_read(cls)
for cls in writes:
self._apply_write(cls)
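    # Illustrative effect of monkey_patch() (class name hypothetical): a
    # class registered to at least one reader/writer gains convenience
    # methods, e.g.
    #
    #     obj = SomeRegisteredClass.read('input.txt', format='myformat')
    #     obj.write('output.txt')  # format defaults to default_write_format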
def _apply_read(registry, cls):
"""Add read method if any formats have a reader for `cls`."""
read_formats = registry.list_read_formats(cls)
@classmethod
def read(cls, file, format=None, **kwargs):
return registry.read(file, into=cls, format=format, **kwargs)
imports = registry._import_paths(read_formats)
doc_list = registry._formats_for_docs(read_formats, imports)
read.__func__.__doc__ = _read_docstring % {
'name': cls.__name__,
'list': doc_list,
'see': '\n'.join(imports)
}
cls.read = read
def _apply_write(registry, cls):
"""Add write method if any formats have a writer for `cls`."""
write_formats = registry.list_write_formats(cls)
if not hasattr(cls, 'default_write_format'):
raise NotImplementedError(
"Classes with registered writers must provide a "
"`default_write_format`. Please add `default_write_format`"
" to '%s'." % cls.__name__)
def write(self, file, format=cls.default_write_format, **kwargs):
return registry.write(self, into=file, format=format, **kwargs)
imports = registry._import_paths(write_formats)
doc_list = registry._formats_for_docs(write_formats, imports)
write.__doc__ = _write_docstring % {
'name': cls.__name__,
'list': doc_list,
'see': '\n'.join(imports),
'default': cls.default_write_format
}
cls.write = write
def _import_paths(self, formats):
lines = []
for fmt in formats:
lines.append("skbio.io.format." + fmt)
return lines
def _formats_for_docs(self, formats, imports):
lines = []
for fmt, imp in zip(formats, imports):
lines.append("- ``'%s'`` (:mod:`%s`)" % (fmt, imp))
return '\n'.join(lines)
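# For example, _formats_for_docs(['myformat'], ['skbio.io.format.myformat'])
# would produce the doc line:
#     - ``'myformat'`` (:mod:`skbio.io.format.myformat`)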
_read_docstring = """Create a new ``%(name)s`` instance from a file.
This is a convenience method for :func:`skbio.io.registry.read`. For
more information about the I/O system in scikit-bio, please see
:mod:`skbio.io`.
Supported file formats include:
%(list)s
Parameters
----------
file : openable (filepath, URL, filehandle, etc.)
The location to read the given `format`. Something that is
understood by :func:`skbio.io.util.open`. Filehandles are not
        automatically closed; it is the responsibility of the caller.
format : str, optional
The format must be a format name with a reader for ``%(name)s``.
If a `format` is not provided or is None, it will attempt to
guess the format.
kwargs : dict, optional
Keyword arguments passed to :func:`skbio.io.registry.read` and
the file format reader for ``%(name)s``.
Returns
-------
%(name)s
A new instance.
See Also
--------
write
skbio.io.registry.read
skbio.io.util.open
%(see)s
"""
_write_docstring = """Write an instance of ``%(name)s`` to a file.
This is a convenience method for :func:`skbio.io.registry.write`.
For more information about the I/O system in scikit-bio, please
see :mod:`skbio.io`.
Supported file formats include:
%(list)s
Parameters
----------
file : openable (filepath, URL, filehandle, etc.)
The location to write the given `format` into. Something
that is understood by :func:`skbio.io.util.open`. Filehandles
are not automatically closed, it is the responsibility of the
caller.
format : str
The format must be a registered format name with a writer for
``%(name)s``.
Default is `'%(default)s'`.
kwargs : dict, optional
Keyword arguments passed to :func:`skbio.io.registry.write`
and the file format writer.
See Also
--------
read
skbio.io.registry.write
skbio.io.util.open
%(see)s
"""
class Format(object):
"""Defines a format on which readers/writers/sniffer can be registered.
Parameters
----------
name : str
The name of this format.
encoding : str, optional
What the default encoding of this format is. If set to 'binary' then
all registered handlers will receive an :class:`io.BufferedReader` or
:class:`io.BufferedWriter` instead of an :class:`io.TextIOBase`. The
user will also be unable to override the encoding in that case.
newline : str, optional
What the default newline handling of this format is. Default is to use
universal newline handling.
"""
@property
@stable(as_of="0.4.0")
def name(self):
"""The name of this format."""
return self._name
@property
@stable(as_of="0.4.0")
def is_binary_format(self):
"""Return True if this is a binary format."""
return self._encoding == 'binary'
@property
@stable(as_of="0.4.0")
def sniffer_function(self):
"""The sniffer function associated with this format."""
return self._sniffer_function
@property
@stable(as_of="0.4.0")
def readers(self):
"""Dictionary that maps classes to their writers for this format."""
return self._readers
@property
@stable(as_of="0.4.0")
def writers(self):
"""Dictionary that maps classes to their writers for this format."""
return self._writers
@property
@stable(as_of="0.4.0")
def monkey_patched_readers(self):
"""Set of classes bound to readers to monkey patch."""
return self._monkey_patch['read']
@property
@stable(as_of="0.4.0")
def monkey_patched_writers(self):
"""Set of classes bound to writers to monkey patch."""
return self._monkey_patch['write']
def __init__(self, name, encoding=None, newline=None):
self._encoding = encoding
self._newline = newline
self._name = name
self._sniffer_function = None
self._readers = {}
self._writers = {}
self._monkey_patch = {'read': set(), 'write': set()}
@stable(as_of="0.4.0")
def sniffer(self, override=False):
"""Decorate a function to act as the sniffer for this format.
The function should take one argument which will be an implementation
of either :class:`io.TextIOBase` or :class:`io.BufferedReader`
depending on if the format is text or binary, respectively.
The sniffer will always receive a filehandle which is pointing to the
beginning of the file. It must return a tuple of bool and a dict of
suggested keyword arguments (if any) to pass to the reader.
.. note:: Keyword arguments are not permitted in `sniffers`.
`Sniffers` may not raise exceptions; if an exception is thrown by a
`sniffer`, the user will be asked to report it on our `issue tracker
<https://github.com/biocore/scikit-bio/issues/>`_.
Parameters
----------
override : bool, optional
            If True, the existing sniffer will be overridden.
Raises
------
DuplicateRegistrationError
When `override` is False and a sniffer is already registered for
this format.
Examples
--------
>>> from skbio.io.registry import Format
>>> # If developing a new format for skbio, use the create_format()
>>> # factory instead of this constructor.
>>> myformat = Format('myformat')
>>> @myformat.sniffer()
... def myformat_sniffer(fh):
... check = fh.read(8) == "myformat"
... if check:
... version = int(fh.read(1))
... return True, {'version': version}
... return False, {}
...
>>> myformat_sniffer([u"myformat2\\n", u"some content\\n"])
(True, {'version': 2})
>>> myformat_sniffer([u"something else\\n"])
(False, {})
"""
        if not isinstance(override, bool):
raise InvalidRegistrationError("`override` must be a bool not %r"
% override)
if not override and self._sniffer_function is not None:
raise DuplicateRegistrationError("A sniffer is already registered"
" to format: %s" % self._name)
def decorator(sniffer):
@wraps(sniffer)
def wrapped_sniffer(file, encoding=self._encoding, errors='ignore',
newline=self._newline, **kwargs):
self._validate_encoding(encoding)
if encoding == 'binary':
# Errors is irrelevant so set to default to prevent raising
# a usage exception in open.
errors = _open_kwargs['errors']
with open_file(file, mode='r', encoding=encoding,
newline=newline, errors=errors, **kwargs) as fh:
try:
                        # Some formats may have headers which indicate their
                        # format. Sniffers should be able to rely on the
                        # filehandle to point at the beginning of the file.
fh.seek(0)
return sniffer(fh)
except UnicodeDecodeError:
pass
except Exception:
warn("'%s' has encountered a problem.\nPlease"
" send the following to our issue tracker at\n"
"https://github.com/biocore/scikit-bio/issues\n\n"
"%s" % (sniffer.__name__, traceback.format_exc()),
FormatIdentificationWarning)
return False, {}
self._sniffer_function = wrapped_sniffer
return wrapped_sniffer
return decorator
@stable(as_of="0.4.0")
def reader(self, cls, monkey_patch=True, override=False):
"""Decorate a function to act as the reader for a class in this format.
The function should take an argument which will be an implementation
of either :class:`io.TextIOBase` or :class:`io.BufferedReader`
depending on if the format is text or binary, respectively. Any kwargs
given by the user which are not handled by :func:`skbio.io.util.open`
will be passed into the function. Any kwarg with a default of
`FileSentinel` will transform user input for that parameter into a
filehandle or `None` if not provided.
Parameters
----------
cls : type or None
The class which the function will be registered to handle. If
None, it is assumed that the function will produce a generator.
monkey_patch : bool, optional
Whether to allow an IORegistry to attach a `read` method to `cls`
with this format listed as an option.
override : bool, optional
If True, any existing readers for `cls` in this format will be
            overridden.
Raises
------
DuplicateRegistrationError
When `override` is False and a reader is already registered to
`cls` for this format.
Examples
--------
>>> from skbio.io.registry import Format, IORegistry
>>> registry = IORegistry()
>>> myformat = Format('myformat')
>>> registry.add_format(myformat)
>>> # If developing a new format for skbio, use the create_format()
>>> # factory instead of the above.
>>> class MyObject(object):
... def __init__(self, content):
... self.content = content
...
>>> @myformat.reader(MyObject)
... def myformat_reader(fh):
... return MyObject(fh.readlines()[1:])
...
>>> registry.monkey_patch() # If developing skbio, this isn't needed
>>> MyObject.read([u"myformat2\\n", u"some content here!\\n"],
... format='myformat').content
[u'some content here!\\n']
"""
self._check_registration(cls)
def decorator(reader_function):
file_params = find_sentinels(reader_function, FileSentinel)
# This split has to occur for the same reason as in IORegistry.read
if cls is not None:
@wraps(reader_function)
def wrapped_reader(file, encoding=self._encoding,
newline=self._newline, **kwargs):
file_keys, files, io_kwargs = self._setup_locals(
file_params, file, encoding, newline, kwargs)
with open_files(files, mode='r', **io_kwargs) as fhs:
                        # The primary file is at the end of fhs because
                        # append is cheaper than insert
kwargs.update(zip(file_keys, fhs[:-1]))
return reader_function(fhs[-1], **kwargs)
else:
@wraps(reader_function)
def wrapped_reader(file, encoding=self._encoding,
newline=self._newline, **kwargs):
file_keys, files, io_kwargs = self._setup_locals(
file_params, file, encoding, newline, kwargs)
with open_files(files, mode='r', **io_kwargs) as fhs:
kwargs.update(zip(file_keys, fhs[:-1]))
generator = reader_function(fhs[-1], **kwargs)
                        # Iterate explicitly; `while True: yield next(...)`
                        # breaks under PEP 479 (Python 3.7+).
                        for item in generator:
                            yield item
self._add_reader(cls, wrapped_reader, monkey_patch, override)
return wrapped_reader
return decorator
@stable(as_of="0.4.0")
def writer(self, cls, monkey_patch=True, override=False):
"""Decorate a function to act as the writer for a class in this format.
The function should take an instance of `cls` as its first argument
and the second argument is a filehandle which will be an implementation
of either :class:`io.TextIOBase` or :class:`io.BufferedWriter`
depending on if the format is text or binary, respectively. Any kwargs
given by the user which are not handled by :func:`skbio.io.util.open`
will be passed into the function. Any kwarg with a default of
`FileSentinel` will transform user input for that parameter into a
filehandle or `None` if not provided.
Parameters
----------
cls : type or None
The class which the function will be registered to handle. If
None, it is assumed that the function will consume a generator.
monkey_patch : bool, optional
Whether to allow an IORegistry to attach a `write` method to `cls`
with this format listed as an option.
override : bool, optional
If True, any existing writers for `cls` in this format will be
            overridden.
Raises
------
DuplicateRegistrationError
When `override` is False and a writer is already registered to
`cls` for this format.
Examples
--------
>>> from skbio.io.registry import Format, IORegistry
>>> registry = IORegistry()
>>> myformat = Format('myformat')
>>> registry.add_format(myformat)
>>> # If developing a new format for skbio, use the create_format()
>>> # factory instead of the above.
>>> class MyObject(object):
... default_write_format = 'myformat'
... def __init__(self, content):
... self.content = content
...
>>> @myformat.writer(MyObject)
        ... def myformat_writer(obj, fh):
... fh.write(u"myformat2\\n")
... for c in obj.content:
... fh.write(c)
...
>>> registry.monkey_patch() # If developing skbio, this isn't needed
>>> obj = MyObject([u"some content here!\\n"])
>>> obj.write([], format='myformat')
[u'myformat2\\n', u'some content here!\\n']
"""
self._check_registration(cls)
def decorator(writer_function):
file_params = find_sentinels(writer_function, FileSentinel)
@wraps(writer_function)
def wrapped_writer(obj, file, encoding=self._encoding,
newline=self._newline, **kwargs):
file_keys, files, io_kwargs = self._setup_locals(
file_params, file, encoding, newline, kwargs)
with open_files(files, mode='w', **io_kwargs) as fhs:
kwargs.update(zip(file_keys, fhs[:-1]))
writer_function(obj, fhs[-1], **kwargs)
self._add_writer(cls, wrapped_writer, monkey_patch, override)
return wrapped_writer
return decorator
def _check_registration(self, cls):
if cls is not None and not inspect.isclass(cls):
raise InvalidRegistrationError("`cls` must be a class or None, not"
" %r" % cls)
def _setup_locals(self, file_params, file, encoding, newline, kwargs):
self._validate_encoding(encoding)
io_kwargs = self._pop_io_kwargs(kwargs, encoding, newline)
file_keys, files = self._setup_file_args(kwargs, file_params)
files.append(file)
return file_keys, files, io_kwargs
def _validate_encoding(self, encoding):
if encoding != self._encoding:
if self._encoding == 'binary':
raise ValueError("Encoding must be 'binary' for %r"
% self.name)
if encoding == 'binary':
raise ValueError("Encoding must not be 'binary' for %r"
% self.name)
def _pop_io_kwargs(self, kwargs, encoding, newline):
io_kwargs = dict(encoding=encoding, newline=newline)
for key in _open_kwargs:
if key in kwargs:
io_kwargs[key] = kwargs.pop(key)
return io_kwargs
def _setup_file_args(self, kwargs, file_params):
file_keys = []
files = []
for param in file_params:
arg = kwargs.get(param, None)
if arg is not None:
file_keys.append(param)
files.append(arg)
else:
# set to None to mask FileSentinel when user neglected argument
kwargs[param] = None
return file_keys, files
def _add_writer(self, cls, writer, monkey_patch, override):
if cls in self._writers and not override:
raise DuplicateRegistrationError("There is already a writer"
" registered to %s in format: %s"
% (cls, self._name))
self._writers[cls] = writer
if monkey_patch and cls is not None:
self._monkey_patch['write'].add(cls)
def _add_reader(self, cls, reader, monkey_patch, override):
if cls in self._readers and not override:
raise DuplicateRegistrationError("There is already a reader"
" registered to %s in format: %s"
% (cls, self._name))
self._readers[cls] = reader
if monkey_patch and cls is not None:
self._monkey_patch['read'].add(cls)
io_registry = IORegistry()
@wraps(IORegistry.sniff)
def sniff(file, **kwargs):
return io_registry.sniff(file, **kwargs)
@wraps(IORegistry.read)
def read(file, format=None, into=None, verify=True, **kwargs):
return io_registry.read(file, format=format, into=into, verify=verify,
**kwargs)
@wraps(IORegistry.write)
def write(obj, format, into, **kwargs):
return io_registry.write(obj, format, into, **kwargs)
@wraps(IORegistry.create_format)
def create_format(*args, **kwargs):
return io_registry.create_format(*args, **kwargs)
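# A minimal end-to-end sketch (illustrative, not part of this module) of
# defining and using a format through the module-level helpers above:
#
#     myformat = create_format('myformat')
#
#     @myformat.sniffer()
#     def _sniffer(fh):
#         return fh.read(8) == 'myformat', {}
#
#     @myformat.reader(None)
#     def _reader(fh):
#         for line in fh:
#             yield line
#
#     lines = list(read([u'myformat\n', u'data\n'], format='myformat'))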
| demis001/scikit-bio | skbio/io/registry.py | Python | bsd-3-clause | 42,113 | ["scikit-bio"] | c71b3b46791e89c911e3684caeb7b5d99a28de07b6328d240dc1de7d7e0bba9a |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import calendar
from datetime import datetime, date
from dateutil import relativedelta
from lxml import etree
import json
import time
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.addons.resource.faces import task as Task
from openerp.osv import fields, osv
from openerp.tools.translate import _
class project_task_type(osv.osv):
_name = 'project.task.type'
_description = 'Task Stage'
_order = 'sequence'
_columns = {
'name': fields.char('Stage Name', required=True, translate=True),
'description': fields.text('Description'),
'sequence': fields.integer('Sequence'),
'case_default': fields.boolean('Default for New Projects',
help="If you check this field, this stage will be proposed by default on each new project. It will not assign this stage to existing projects."),
'project_ids': fields.many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id', 'Projects'),
'legend_priority': fields.text(
'Priority Management Explanation', translate=True,
help='Explanation text to help users using the star and priority mechanism on stages or issues that are in this stage.'),
'legend_blocked': fields.char(
'Kanban Blocked Explanation', translate=True,
help='Override the default value displayed for the blocked state for kanban selection, when the task or issue is in that stage.'),
'legend_done': fields.char(
'Kanban Valid Explanation', translate=True,
help='Override the default value displayed for the done state for kanban selection, when the task or issue is in that stage.'),
'legend_normal': fields.char(
'Kanban Ongoing Explanation', translate=True,
help='Override the default value displayed for the normal state for kanban selection, when the task or issue is in that stage.'),
'fold': fields.boolean('Folded in Kanban View',
                               help='This stage is folded in the kanban view when'
                                    ' there are no records in that stage to display.'),
}
    def _get_default_project_ids(self, cr, uid, ctx=None):
project_id = self.pool['project.task']._get_default_project_id(cr, uid, context=ctx)
if project_id:
return [project_id]
return None
_defaults = {
'sequence': 1,
'project_ids': _get_default_project_ids,
}
class project(osv.osv):
_name = "project.project"
_description = "Project"
_inherits = {'account.analytic.account': "analytic_account_id",
"mail.alias": "alias_id"}
_inherit = ['mail.thread', 'ir.needaction_mixin']
_period_number = 5
def _auto_init(self, cr, context=None):
""" Installation hook: aliases, project.project """
# create aliases for all projects and avoid constraint errors
alias_context = dict(context, alias_model_name='project.task')
return self.pool.get('mail.alias').migrate_to_alias(cr, self._name, self._table, super(project, self)._auto_init,
'project.task', self._columns['alias_id'], 'id', alias_prefix='project+', alias_defaults={'project_id':'id'}, context=alias_context)
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
        if user == SUPERUSER_ID:
return super(project, self).search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
if context and context.get('user_preference'):
cr.execute("""SELECT project.id FROM project_project project
LEFT JOIN account_analytic_account account ON account.id = project.analytic_account_id
LEFT JOIN project_user_rel rel ON rel.project_id = project.id
WHERE (account.user_id = %s or rel.uid = %s)"""%(user, user))
return [(r[0]) for r in cr.fetchall()]
return super(project, self).search(cr, user, args, offset=offset, limit=limit, order=order,
context=context, count=count)
def onchange_partner_id(self, cr, uid, ids, part=False, context=None):
partner_obj = self.pool.get('res.partner')
val = {}
if not part:
return {'value': val}
if 'pricelist_id' in self.fields_get(cr, uid, context=context):
pricelist = partner_obj.read(cr, uid, part, ['property_product_pricelist'], context=context)
pricelist_id = pricelist.get('property_product_pricelist', False) and pricelist.get('property_product_pricelist')[0] or False
val['pricelist_id'] = pricelist_id
return {'value': val}
def _get_projects_from_tasks(self, cr, uid, task_ids, context=None):
tasks = self.pool.get('project.task').browse(cr, uid, task_ids, context=context)
project_ids = [task.project_id.id for task in tasks if task.project_id]
return self.pool.get('project.project')._get_project_and_parents(cr, uid, project_ids, context)
def _get_project_and_parents(self, cr, uid, ids, context=None):
""" return the project ids and all their parent projects """
res = set(ids)
while ids:
cr.execute("""
SELECT DISTINCT parent.id
FROM project_project project, project_project parent, account_analytic_account account
WHERE project.analytic_account_id = account.id
AND parent.analytic_account_id = account.parent_id
AND project.id IN %s
""", (tuple(ids),))
ids = [t[0] for t in cr.fetchall()]
res.update(ids)
return list(res)
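    # e.g. for projects A -> B -> C linked through their analytic accounts
    # (A being the root), calling this with [C.id] returns the ids of C, B
    # and A.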
def _get_project_and_children(self, cr, uid, ids, context=None):
""" retrieve all children projects of project ids;
return a dictionary mapping each project to its parent project (or None)
"""
res = dict.fromkeys(ids, None)
while ids:
cr.execute("""
SELECT project.id, parent.id
FROM project_project project, project_project parent, account_analytic_account account
WHERE project.analytic_account_id = account.id
AND parent.analytic_account_id = account.parent_id
AND parent.id IN %s
""", (tuple(ids),))
dic = dict(cr.fetchall())
res.update(dic)
ids = dic.keys()
return res
def _progress_rate(self, cr, uid, ids, names, arg, context=None):
child_parent = self._get_project_and_children(cr, uid, ids, context)
# compute planned_hours, total_hours, effective_hours specific to each project
cr.execute("""
SELECT project_id, COALESCE(SUM(planned_hours), 0.0),
COALESCE(SUM(total_hours), 0.0), COALESCE(SUM(effective_hours), 0.0)
FROM project_task
LEFT JOIN project_task_type ON project_task.stage_id = project_task_type.id
WHERE project_task.project_id IN %s AND project_task_type.fold = False
GROUP BY project_id
""", (tuple(child_parent.keys()),))
# aggregate results into res
res = dict([(id, {'planned_hours':0.0, 'total_hours':0.0, 'effective_hours':0.0}) for id in ids])
for id, planned, total, effective in cr.fetchall():
# add the values specific to id to all parent projects of id in the result
while id:
if id in ids:
res[id]['planned_hours'] += planned
res[id]['total_hours'] += total
res[id]['effective_hours'] += effective
id = child_parent[id]
# compute progress rates
for id in ids:
if res[id]['total_hours']:
res[id]['progress_rate'] = round(100.0 * res[id]['effective_hours'] / res[id]['total_hours'], 2)
else:
res[id]['progress_rate'] = 0.0
return res
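    # e.g. hours logged on a child project's tasks are rolled up into each
    # ancestor's totals above, so a parent's progress_rate reflects its
    # whole subtree of non-folded tasks.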
def unlink(self, cr, uid, ids, context=None):
alias_ids = []
mail_alias = self.pool.get('mail.alias')
analytic_account_to_delete = set()
for proj in self.browse(cr, uid, ids, context=context):
if proj.tasks:
raise osv.except_osv(_('Invalid Action!'),
_('You cannot delete a project containing tasks. You can either delete all the project\'s tasks and then delete the project or simply deactivate the project.'))
elif proj.alias_id:
alias_ids.append(proj.alias_id.id)
if proj.analytic_account_id and not proj.analytic_account_id.line_ids:
analytic_account_to_delete.add(proj.analytic_account_id.id)
res = super(project, self).unlink(cr, uid, ids, context=context)
mail_alias.unlink(cr, uid, alias_ids, context=context)
self.pool['account.analytic.account'].unlink(cr, uid, list(analytic_account_to_delete), context=context)
return res
def _get_attached_docs(self, cr, uid, ids, field_name, arg, context):
res = {}
attachment = self.pool.get('ir.attachment')
task = self.pool.get('project.task')
for id in ids:
project_attachments = attachment.search(cr, uid, [('res_model', '=', 'project.project'), ('res_id', '=', id)], context=context, count=True)
task_ids = task.search(cr, uid, [('project_id', '=', id)], context=context)
task_attachments = attachment.search(cr, uid, [('res_model', '=', 'project.task'), ('res_id', 'in', task_ids)], context=context, count=True)
res[id] = (project_attachments or 0) + (task_attachments or 0)
return res
def _task_count(self, cr, uid, ids, field_name, arg, context=None):
res={}
for tasks in self.browse(cr, uid, ids, context):
res[tasks.id] = len(tasks.task_ids)
return res
def _get_alias_models(self, cr, uid, context=None):
""" Overriden in project_issue to offer more options """
return [('project.task', "Tasks")]
def _get_visibility_selection(self, cr, uid, context=None):
""" Overriden in portal_project to offer more options """
return [('public', _('Public project')),
('employees', _('Internal project: all employees can access')),
('followers', _('Private project: followers Only'))]
def attachment_tree_view(self, cr, uid, ids, context):
task_ids = self.pool.get('project.task').search(cr, uid, [('project_id', 'in', ids)])
domain = [
'|',
'&', ('res_model', '=', 'project.project'), ('res_id', 'in', ids),
'&', ('res_model', '=', 'project.task'), ('res_id', 'in', task_ids)]
res_id = ids and ids[0] or False
return {
'name': _('Attachments'),
'domain': domain,
'res_model': 'ir.attachment',
'type': 'ir.actions.act_window',
'view_id': False,
'view_mode': 'kanban,tree,form',
'view_type': 'form',
'limit': 80,
'context': "{'default_res_model': '%s','default_res_id': %d}" % (self._name, res_id)
}
def __get_bar_values(self, cr, uid, obj, domain, read_fields, value_field, groupby_field, context=None):
""" Generic method to generate data for bar chart values using SparklineBarWidget.
This method performs obj.read_group(cr, uid, domain, read_fields, groupby_field).
:param obj: the target model (i.e. crm_lead)
:param domain: the domain applied to the read_group
:param list read_fields: the list of fields to read in the read_group
:param str value_field: the field used to compute the value of the bar slice
:param str groupby_field: the fields used to group
:return list section_result: a list of dicts: [
{ 'value': (int) bar_column_value,
                                                  'tooltip': (str) bar_column_tooltip,
}
]
"""
month_begin = date.today().replace(day=1)
section_result = [{
'value': 0,
'tooltip': (month_begin + relativedelta.relativedelta(months=-i)).strftime('%B'),
} for i in range(self._period_number - 1, -1, -1)]
group_obj = obj.read_group(cr, uid, domain, read_fields, groupby_field, context=context)
pattern = tools.DEFAULT_SERVER_DATE_FORMAT if obj.fields_get(cr, uid, groupby_field)[groupby_field]['type'] == 'date' else tools.DEFAULT_SERVER_DATETIME_FORMAT
for group in group_obj:
group_begin_date = datetime.strptime(group['__domain'][0][2], pattern)
month_delta = relativedelta.relativedelta(month_begin, group_begin_date)
section_result[self._period_number - (month_delta.months + 1)] = {'value': group.get(value_field, 0), 'tooltip': group.get(groupby_field, 0)}
return section_result
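    # Example result shape for _period_number == 5 (values illustrative):
    #     [{'value': 0, 'tooltip': 'January'}, ...,
    #      {'value': 3, 'tooltip': 'May'}]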
def _get_project_task_data(self, cr, uid, ids, field_name, arg, context=None):
obj = self.pool['project.task']
month_begin = date.today().replace(day=1)
date_begin = (month_begin - relativedelta.relativedelta(months=self._period_number - 1)).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
date_end = month_begin.replace(day=calendar.monthrange(month_begin.year, month_begin.month)[1]).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
res = {}
for id in ids:
created_domain = [('project_id', '=', id), ('create_date', '>=', date_begin ), ('create_date', '<=', date_end ), ('stage_id.fold', '=', False)]
res[id] = json.dumps(self.__get_bar_values(cr, uid, obj, created_domain, [ 'create_date'], 'create_date_count', 'create_date', context=context))
return res
# Lambda indirection method to avoid passing a copy of the overridable method when declaring the field
_alias_models = lambda self, *args, **kwargs: self._get_alias_models(*args, **kwargs)
_visibility_selection = lambda self, *args, **kwargs: self._get_visibility_selection(*args, **kwargs)
_columns = {
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the project without removing it."),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of Projects."),
'analytic_account_id': fields.many2one(
'account.analytic.account', 'Contract/Analytic',
help="Link this project to an analytic account if you need financial management on projects. "
"It enables you to connect projects with budgets, planning, cost and revenue analysis, timesheets on projects, etc.",
ondelete="cascade", required=True, auto_join=True),
        'label_tasks': fields.char('Use Tasks as', help="Gives label to tasks on project's kanban view."),
'members': fields.many2many('res.users', 'project_user_rel', 'project_id', 'uid', 'Project Members',
help="Project's members are users who can have an access to the tasks related to this project.", states={'close':[('readonly',True)], 'cancelled':[('readonly',True)]}),
'tasks': fields.one2many('project.task', 'project_id', "Task Activities"),
'planned_hours': fields.function(_progress_rate, multi="progress", string='Planned Time', help="Sum of planned hours of all tasks related to this project and its child projects.",
store = {
'project.project': (_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
'project.task': (_get_projects_from_tasks, ['planned_hours', 'remaining_hours', 'work_ids', 'stage_id'], 20),
}),
'effective_hours': fields.function(_progress_rate, multi="progress", string='Time Spent', help="Sum of spent hours of all tasks related to this project and its child projects.",
store = {
'project.project': (_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
'project.task': (_get_projects_from_tasks, ['planned_hours', 'remaining_hours', 'work_ids', 'stage_id'], 20),
}),
'total_hours': fields.function(_progress_rate, multi="progress", string='Total Time', help="Sum of total hours of all tasks related to this project and its child projects.",
store = {
'project.project': (_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
'project.task': (_get_projects_from_tasks, ['planned_hours', 'remaining_hours', 'work_ids', 'stage_id'], 20),
}),
'progress_rate': fields.function(_progress_rate, multi="progress", string='Progress', type='float', group_operator="avg", help="Percent of tasks closed according to the total of tasks todo.",
store = {
'project.project': (_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
'project.task': (_get_projects_from_tasks, ['planned_hours', 'remaining_hours', 'work_ids', 'stage_id'], 20),
}),
'resource_calendar_id': fields.many2one('resource.calendar', 'Working Time', help="Timetable working hours to adjust the gantt diagram report", states={'close':[('readonly',True)]} ),
'type_ids': fields.many2many('project.task.type', 'project_task_type_rel', 'project_id', 'type_id', 'Tasks Stages', states={'close':[('readonly',True)], 'cancelled':[('readonly',True)]}),
'task_count': fields.function(_task_count, type='integer', string="Tasks",),
'task_ids': fields.one2many('project.task', 'project_id',
domain=[('stage_id.fold', '=', False)]),
'color': fields.integer('Color Index'),
'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
help="Internal email associated with this project. Incoming emails are automatically synchronized"
"with Tasks (or optionally Issues if the Issue Tracker module is installed)."),
'alias_model': fields.selection(_alias_models, "Alias Model", select=True, required=True,
help="The kind of document created when an email is received on this project's email alias"),
'privacy_visibility': fields.selection(_visibility_selection, 'Privacy / Visibility', required=True,
help="Holds visibility of the tasks or issues that belong to the current project:\n"
"- Public: everybody sees everything; if portal is activated, portal users\n"
" see all tasks or issues; if anonymous portal is activated, visitors\n"
" see all tasks or issues\n"
"- Portal (only available if Portal is installed): employees see everything;\n"
" if portal is activated, portal users see the tasks or issues followed by\n"
" them or by someone of their company\n"
"- Employees Only: employees see all tasks or issues\n"
"- Followers Only: employees see only the followed tasks or issues; if portal\n"
" is activated, portal users see the followed tasks or issues."),
'state': fields.selection([('template', 'Template'),
('draft','New'),
('open','In Progress'),
('cancelled', 'Cancelled'),
('pending','Pending'),
('close','Closed')],
'Status', required=True, copy=False),
'monthly_tasks': fields.function(_get_project_task_data, type='char', readonly=True,
string='Project Task By Month'),
'doc_count': fields.function(
_get_attached_docs, string="Number of documents attached", type='integer'
)
}
def _get_type_common(self, cr, uid, context):
ids = self.pool.get('project.task.type').search(cr, uid, [('case_default','=',1)], context=context)
return ids
_order = "sequence, id"
_defaults = {
'active': True,
'type': 'contract',
'label_tasks': 'Tasks',
'state': 'open',
'sequence': 10,
'type_ids': _get_type_common,
'alias_model': 'project.task',
'privacy_visibility': 'employees',
}
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
recipients = super(project, self).message_get_suggested_recipients(cr, uid, ids, context=context)
for data in self.browse(cr, uid, ids, context=context):
if data.partner_id:
reason = _('Customer Email') if data.partner_id.email else _('Customer')
self._message_add_suggested_recipient(cr, uid, recipients, data, partner=data.partner_id, reason= '%s' % reason)
return recipients
    # TODO: why not use a SQL constraint?
def _check_dates(self, cr, uid, ids, context=None):
        for project_data in self.read(cr, uid, ids, ['date_start', 'date'], context=context):
            if project_data['date_start'] and project_data['date']:
                if project_data['date_start'] > project_data['date']:
                    return False
return True
_constraints = [
        (_check_dates, 'Error! Project start-date must be lower than project end-date.', ['date_start', 'date'])
]
def set_template(self, cr, uid, ids, context=None):
return self.setActive(cr, uid, ids, value=False, context=context)
def set_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'close'}, context=context)
def set_cancel(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'cancelled'}, context=context)
def set_pending(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'pending'}, context=context)
def set_open(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'open'}, context=context)
def reset_project(self, cr, uid, ids, context=None):
return self.setActive(cr, uid, ids, value=True, context=context)
def map_tasks(self, cr, uid, old_project_id, new_project_id, context=None):
""" copy and map tasks from old to new project """
if context is None:
context = {}
map_task_id = {}
task_obj = self.pool.get('project.task')
proj = self.browse(cr, uid, old_project_id, context=context)
for task in proj.tasks:
# preserve task name and stage, normally altered during copy
defaults = {'stage_id': task.stage_id.id,
'name': task.name}
map_task_id[task.id] = task_obj.copy(cr, uid, task.id, defaults, context=context)
self.write(cr, uid, [new_project_id], {'tasks':[(6,0, map_task_id.values())]})
task_obj.duplicate_task(cr, uid, map_task_id, context=context)
return True
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
context = dict(context or {})
context['active_test'] = False
proj = self.browse(cr, uid, id, context=context)
if not default.get('name'):
default.update(name=_("%s (copy)") % (proj.name))
res = super(project, self).copy(cr, uid, id, default, context)
self.map_tasks(cr, uid, id, res, context=context)
return res
def duplicate_template(self, cr, uid, ids, context=None):
context = dict(context or {})
data_obj = self.pool.get('ir.model.data')
result = []
for proj in self.browse(cr, uid, ids, context=context):
parent_id = context.get('parent_id', False)
context.update({'analytic_project_copy': True})
new_date_start = time.strftime('%Y-%m-%d')
new_date_end = False
if proj.date_start and proj.date:
start_date = date(*time.strptime(proj.date_start,'%Y-%m-%d')[:3])
end_date = date(*time.strptime(proj.date,'%Y-%m-%d')[:3])
new_date_end = (datetime(*time.strptime(new_date_start,'%Y-%m-%d')[:3])+(end_date-start_date)).strftime('%Y-%m-%d')
context.update({'copy':True})
new_id = self.copy(cr, uid, proj.id, default = {
'name':_("%s (copy)") % (proj.name),
'state':'open',
'date_start':new_date_start,
'date':new_date_end,
'parent_id':parent_id}, context=context)
result.append(new_id)
child_ids = self.search(cr, uid, [('parent_id','=', proj.analytic_account_id.id)], context=context)
parent_id = self.read(cr, uid, new_id, ['analytic_account_id'])['analytic_account_id'][0]
if child_ids:
self.duplicate_template(cr, uid, child_ids, context={'parent_id': parent_id})
        if result:
res_id = result[0]
form_view_id = data_obj._get_id(cr, uid, 'project', 'edit_project')
form_view = data_obj.read(cr, uid, form_view_id, ['res_id'])
tree_view_id = data_obj._get_id(cr, uid, 'project', 'view_project')
tree_view = data_obj.read(cr, uid, tree_view_id, ['res_id'])
search_view_id = data_obj._get_id(cr, uid, 'project', 'view_project_project_filter')
search_view = data_obj.read(cr, uid, search_view_id, ['res_id'])
return {
'name': _('Projects'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'project.project',
'view_id': False,
'res_id': res_id,
'views': [(form_view['res_id'],'form'),(tree_view['res_id'],'tree')],
'type': 'ir.actions.act_window',
'search_view_id': search_view['res_id'],
'nodestroy': True
}
# set active value for a project, its sub projects and its tasks
def setActive(self, cr, uid, ids, value=True, context=None):
task_obj = self.pool.get('project.task')
        for proj in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [proj.id], {'state': value and 'open' or 'template'}, context)
cr.execute('select id from project_task where project_id=%s', (proj.id,))
tasks_id = [x[0] for x in cr.fetchall()]
if tasks_id:
task_obj.write(cr, uid, tasks_id, {'active': value}, context=context)
child_ids = self.search(cr, uid, [('parent_id','=', proj.analytic_account_id.id)])
if child_ids:
                self.setActive(cr, uid, child_ids, value, context=context)
return True
def _schedule_header(self, cr, uid, ids, force_members=True, context=None):
context = context or {}
        if isinstance(ids, (long, int)):
ids = [ids]
projects = self.browse(cr, uid, ids, context=context)
for project in projects:
if (not project.members) and force_members:
raise osv.except_osv(_('Warning!'),_("You must assign members on the project '%s'!") % (project.name,))
resource_pool = self.pool.get('resource.resource')
result = "from openerp.addons.resource.faces import *\n"
result += "import datetime\n"
for project in self.browse(cr, uid, ids, context=context):
u_ids = [i.id for i in project.members]
if project.user_id and (project.user_id.id not in u_ids):
u_ids.append(project.user_id.id)
for task in project.tasks:
if task.user_id and (task.user_id.id not in u_ids):
u_ids.append(task.user_id.id)
calendar_id = project.resource_calendar_id and project.resource_calendar_id.id or False
resource_objs = resource_pool.generate_resources(cr, uid, u_ids, calendar_id, context=context)
for key, vals in resource_objs.items():
result +='''
class User_%s(Resource):
efficiency = %s
''' % (key, vals.get('efficiency', False))
result += '''
def Project():
'''
return result
def _schedule_project(self, cr, uid, project, context=None):
resource_pool = self.pool.get('resource.resource')
calendar_id = project.resource_calendar_id and project.resource_calendar_id.id or False
working_days = resource_pool.compute_working_calendar(cr, uid, calendar_id, context=context)
# TODO: check if we need working_..., default values are ok.
puids = [x.id for x in project.members]
if project.user_id:
puids.append(project.user_id.id)
result = """
def Project_%d():
start = \'%s\'
working_days = %s
resource = %s
""" % (
project.id,
project.date_start or time.strftime('%Y-%m-%d'), working_days,
'|'.join(['User_'+str(x) for x in puids]) or 'None'
)
vacation = calendar_id and tuple(resource_pool.compute_vacation(cr, uid, calendar_id, context=context)) or False
if vacation:
result+= """
vacation = %s
""" % ( vacation, )
return result
    # TODO: do resource allocation and compute availability
def compute_allocation(self, rc, uid, ids, start_date, end_date, context=None):
        if context is None:
context = {}
allocation = {}
return allocation
def schedule_tasks(self, cr, uid, ids, context=None):
context = context or {}
        if isinstance(ids, (long, int)):
ids = [ids]
projects = self.browse(cr, uid, ids, context=context)
result = self._schedule_header(cr, uid, ids, False, context=context)
for project in projects:
result += self._schedule_project(cr, uid, project, context=context)
result += self.pool.get('project.task')._generate_task(cr, uid, project.tasks, ident=4, context=context)
local_dict = {}
exec result in local_dict
projects_gantt = Task.BalancedProject(local_dict['Project'])
for project in projects:
project_gantt = getattr(projects_gantt, 'Project_%d' % (project.id,))
for task in project.tasks:
if task.stage_id and task.stage_id.fold:
continue
p = getattr(project_gantt, 'Task_%d' % (task.id,))
self.pool.get('project.task').write(cr, uid, [task.id], {
'date_start': p.start.strftime('%Y-%m-%d %H:%M:%S'),
'date_end': p.end.strftime('%Y-%m-%d %H:%M:%S')
}, context=context)
if (not task.user_id) and (p.booked_resource):
self.pool.get('project.task').write(cr, uid, [task.id], {
'user_id': int(p.booked_resource[0].name[5:]),
}, context=context)
return True
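    # The source assembled and exec'ed above looks roughly like this
    # (names and values illustrative):
    #
    #     from openerp.addons.resource.faces import *
    #     import datetime
    #     class User_7(Resource):
    #         efficiency = 1.0
    #     def Project():
    #         def Project_42():
    #             start = '2014-01-01'
    #             working_days = [...]
    #             resource = User_7
    #             def Task_13():
    #                 ...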
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
# Prevent double project creation when 'use_tasks' is checked + alias management
create_context = dict(context, project_creation_in_progress=True,
alias_model_name=vals.get('alias_model', 'project.task'),
alias_parent_model_name=self._name)
if vals.get('type', False) not in ('template', 'contract'):
vals['type'] = 'contract'
ir_values = self.pool.get('ir.values').get_default(cr, uid, 'project.config.settings', 'generate_project_alias')
if ir_values:
vals['alias_name'] = vals.get('alias_name') or vals.get('name')
project_id = super(project, self).create(cr, uid, vals, context=create_context)
project_rec = self.browse(cr, uid, project_id, context=context)
values = {'alias_parent_thread_id': project_id, 'alias_defaults': {'project_id': project_id}}
self.pool.get('mail.alias').write(cr, uid, [project_rec.alias_id.id], values, context=context)
return project_id
def write(self, cr, uid, ids, vals, context=None):
# if alias_model has been changed, update alias_model_id accordingly
if vals.get('alias_model'):
model_ids = self.pool.get('ir.model').search(cr, uid, [('model', '=', vals.get('alias_model', 'project.task'))])
vals.update(alias_model_id=model_ids[0])
return super(project, self).write(cr, uid, ids, vals, context=context)
class task(osv.osv):
_name = "project.task"
_description = "Task"
_date_name = "date_start"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_mail_post_access = 'read'
_track = {
'stage_id': {
            # this is only a heuristic; depending on your particular stage configuration it may not match all 'new' stages
'project.mt_task_new': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence <= 1,
'project.mt_task_stage': lambda self, cr, uid, obj, ctx=None: obj.stage_id.sequence > 1,
},
'user_id': {
'project.mt_task_assigned': lambda self, cr, uid, obj, ctx=None: obj.user_id and obj.user_id.id,
},
'kanban_state': {
'project.mt_task_blocked': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'blocked',
'project.mt_task_ready': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'done',
},
}
def _get_default_partner(self, cr, uid, context=None):
project_id = self._get_default_project_id(cr, uid, context)
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return project.partner_id.id
return False
def _get_default_project_id(self, cr, uid, context=None):
""" Gives default section by checking if present in the context """
return (self._resolve_project_id_from_context(cr, uid, context=context) or False)
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
project_id = self._get_default_project_id(cr, uid, context=context)
return self.stage_find(cr, uid, [], project_id, [('fold', '=', False)], context=context)
def _resolve_project_id_from_context(self, cr, uid, context=None):
""" Returns ID of project based on the value of 'default_project_id'
context key, or None if it cannot be resolved to a single
project.
"""
if context is None:
context = {}
if type(context.get('default_project_id')) in (int, long):
return context['default_project_id']
if isinstance(context.get('default_project_id'), basestring):
project_name = context['default_project_id']
project_ids = self.pool.get('project.project').name_search(cr, uid, name=project_name, context=context)
if len(project_ids) == 1:
return project_ids[0][0]
return None
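    # e.g. context={'default_project_id': 42} resolves to 42 directly, while
    # context={'default_project_id': 'Office Design'} goes through
    # name_search() and resolves only if exactly one project matches.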
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
stage_obj = self.pool.get('project.task.type')
order = stage_obj._order
access_rights_uid = access_rights_uid or uid
if read_group_order == 'stage_id desc':
order = '%s desc' % order
search_domain = []
project_id = self._resolve_project_id_from_context(cr, uid, context=context)
if project_id:
search_domain += ['|', ('project_ids', '=', project_id), ('id', 'in', ids)]
else:
search_domain += ['|', ('id', 'in', ids), ('case_default', '=', True)]
stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
# restore order of the search
result.sort(lambda x, y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
fold = {}
for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
fold[stage.id] = stage.fold or False
return result, fold
def _read_group_user_id(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
res_users = self.pool.get('res.users')
project_id = self._resolve_project_id_from_context(cr, uid, context=context)
access_rights_uid = access_rights_uid or uid
if project_id:
ids += self.pool.get('project.project').read(cr, access_rights_uid, project_id, ['members'], context=context)['members']
order = res_users._order
# lame way to allow reverting search, should just work in the trivial case
if read_group_order == 'user_id desc':
order = '%s desc' % order
# de-duplicate and apply search order
ids = res_users._search(cr, uid, [('id','in',ids)], order=order, access_rights_uid=access_rights_uid, context=context)
result = res_users.name_get(cr, access_rights_uid, ids, context=context)
# restore order of the search
result.sort(lambda x,y: cmp(ids.index(x[0]), ids.index(y[0])))
return result, {}
_group_by_full = {
'stage_id': _read_group_stage_ids,
'user_id': _read_group_user_id,
}
def _str_get(self, task, level=0, border='***', context=None):
return border+' '+(task.user_id and task.user_id.name.upper() or '')+(level and (': L'+str(level)) or '')+(' - %.1fh / %.1fh'%(task.effective_hours or 0.0,task.planned_hours))+' '+border+'\n'+ \
border[0]+' '+(task.name or '')+'\n'+ \
(task.description or '')+'\n\n'
# Compute: effective_hours, total_hours, progress
def _hours_get(self, cr, uid, ids, field_names, args, context=None):
res = {}
cr.execute("SELECT task_id, COALESCE(SUM(hours),0) FROM project_task_work WHERE task_id IN %s GROUP BY task_id",(tuple(ids),))
hours = dict(cr.fetchall())
for task in self.browse(cr, uid, ids, context=context):
res[task.id] = {'effective_hours': hours.get(task.id, 0.0), 'total_hours': (task.remaining_hours or 0.0) + hours.get(task.id, 0.0)}
res[task.id]['delay_hours'] = res[task.id]['total_hours'] - task.planned_hours
res[task.id]['progress'] = 0.0
if (task.remaining_hours + hours.get(task.id, 0.0)):
res[task.id]['progress'] = round(min(100.0 * hours.get(task.id, 0.0) / res[task.id]['total_hours'], 99.99),2)
# TDE CHECK: if task.state in ('done','cancelled'):
if task.stage_id and task.stage_id.fold:
res[task.id]['progress'] = 100.0
return res
def onchange_remaining(self, cr, uid, ids, remaining=0.0, planned=0.0):
if remaining and not planned:
return {'value': {'planned_hours': remaining}}
return {}
def onchange_planned(self, cr, uid, ids, planned=0.0, effective=0.0):
return {'value': {'remaining_hours': planned - effective}}
def onchange_project(self, cr, uid, id, project_id, context=None):
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return {'value': {'partner_id': project.partner_id.id}}
return {}
def onchange_user_id(self, cr, uid, ids, user_id, context=None):
vals = {}
if user_id:
vals['date_start'] = fields.datetime.now()
return {'value': vals}
def duplicate_task(self, cr, uid, map_ids, context=None):
mapper = lambda t: map_ids.get(t.id, t.id)
for task in self.browse(cr, uid, map_ids.values(), context):
new_child_ids = set(map(mapper, task.child_ids))
new_parent_ids = set(map(mapper, task.parent_ids))
if new_child_ids or new_parent_ids:
task.write({'parent_ids': [(6,0,list(new_parent_ids))],
'child_ids': [(6,0,list(new_child_ids))]})
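    # e.g. with map_ids={10: 20} (old task 10 copied to new task 20), a
    # copied task whose parent_ids/child_ids still reference 10 is rewritten
    # to reference 20, while links to tasks outside the copy are kept as-is.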
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
if not default.get('name'):
current = self.browse(cr, uid, id, context=context)
default['name'] = _("%s (copy)") % current.name
return super(task, self).copy_data(cr, uid, id, default, context)
def _is_template(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for task in self.browse(cr, uid, ids, context=context):
res[task.id] = True
if task.project_id:
            if not task.project_id.active or task.project_id.state == 'template':
res[task.id] = False
return res
def _get_task(self, cr, uid, ids, context=None):
result = {}
for work in self.pool.get('project.task.work').browse(cr, uid, ids, context=context):
if work.task_id: result[work.task_id.id] = True
return result.keys()
_columns = {
        'active': fields.function(_is_template, store=True, string='Not a Template Task', type='boolean', help="This field is computed automatically and has the same behavior as the boolean 'active' field: if the task is linked to a template or inactive project, it will be hidden unless specifically asked."),
'name': fields.char('Task Summary', track_visibility='onchange', size=128, required=True, select=True),
'description': fields.html('Description'),
'priority': fields.selection([('0','Normal'), ('1','High')], 'Priority', select=True),
'sequence': fields.integer('Sequence', select=True, help="Gives the sequence order when displaying a list of tasks."),
'stage_id': fields.many2one('project.task.type', 'Stage', track_visibility='onchange', select=True,
domain="[('project_ids', '=', project_id)]", copy=False),
'categ_ids': fields.many2many('project.category', string='Tags'),
'kanban_state': fields.selection([('normal', 'In Progress'),('done', 'Ready for next stage'),('blocked', 'Blocked')], 'Kanban State',
track_visibility='onchange',
help="A task's kanban state indicates special situations affecting it:\n"
" * Normal is the default situation\n"
" * Blocked indicates something is preventing the progress of this task\n"
" * Ready for next stage indicates the task is ready to be pulled to the next stage",
required=False, copy=False),
'create_date': fields.datetime('Create Date', readonly=True, select=True),
'write_date': fields.datetime('Last Modification Date', readonly=True, select=True), #not displayed in the view but it might be useful with base_action_rule module (and it needs to be defined first for that)
'date_start': fields.datetime('Starting Date', select=True, copy=False),
'date_end': fields.datetime('Ending Date', select=True, copy=False),
'date_deadline': fields.date('Deadline', select=True, copy=False),
'date_last_stage_update': fields.datetime('Last Stage Update', select=True, copy=False),
'project_id': fields.many2one('project.project', 'Project', ondelete='set null', select=True, track_visibility='onchange', change_default=True),
'parent_ids': fields.many2many('project.task', 'project_task_parent_rel', 'task_id', 'parent_id', 'Parent Tasks'),
'child_ids': fields.many2many('project.task', 'project_task_parent_rel', 'parent_id', 'task_id', 'Delegated Tasks'),
'notes': fields.text('Notes'),
'planned_hours': fields.float('Initially Planned Hours', help='Estimated time to do the task, usually set by the project manager when the task is in draft state.'),
'effective_hours': fields.function(_hours_get, string='Hours Spent', multi='hours', help="Computed using the sum of the task work done.",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids', 'remaining_hours', 'planned_hours'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'remaining_hours': fields.float('Remaining Hours', digits=(16,2), help="Total remaining time, can be re-estimated periodically by the assignee of the task."),
'total_hours': fields.function(_hours_get, string='Total', multi='hours', help="Computed as: Time Spent + Remaining Time.",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids', 'remaining_hours', 'planned_hours'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'progress': fields.function(_hours_get, string='Working Time Progress (%)', multi='hours', group_operator="avg", help="If the task has a progress of 99.99% you should close the task if it's finished or reevaluate the time",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids', 'remaining_hours', 'planned_hours', 'state', 'stage_id'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'delay_hours': fields.function(_hours_get, string='Delay Hours', multi='hours', help="Computed as difference between planned hours by the project manager and the total hours of the task.",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids', 'remaining_hours', 'planned_hours'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'user_id': fields.many2one('res.users', 'Assigned to', select=True, track_visibility='onchange'),
'delegated_user_id': fields.related('child_ids', 'user_id', type='many2one', relation='res.users', string='Delegated To'),
'partner_id': fields.many2one('res.partner', 'Customer'),
'work_ids': fields.one2many('project.task.work', 'task_id', 'Work done'),
'manager_id': fields.related('project_id', 'analytic_account_id', 'user_id', type='many2one', relation='res.users', string='Project Manager'),
'company_id': fields.many2one('res.company', 'Company'),
'id': fields.integer('ID', readonly=True),
'color': fields.integer('Color Index'),
'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
}
_defaults = {
'stage_id': _get_default_stage_id,
'project_id': _get_default_project_id,
'date_last_stage_update': fields.datetime.now,
'kanban_state': 'normal',
'priority': '0',
'progress': 0,
'sequence': 10,
'active': True,
'user_id': lambda obj, cr, uid, ctx=None: uid,
'company_id': lambda self, cr, uid, ctx=None: self.pool.get('res.company')._company_default_get(cr, uid, 'project.task', context=ctx),
'partner_id': lambda self, cr, uid, ctx=None: self._get_default_partner(cr, uid, context=ctx),
'date_start': fields.datetime.now,
}
_order = "priority desc, sequence, date_start, name, id"
def _check_recursion(self, cr, uid, ids, context=None):
for id in ids:
visited_branch = set()
visited_node = set()
res = self._check_cycle(cr, uid, id, visited_branch, visited_node, context=context)
if not res:
return False
return True
def _check_cycle(self, cr, uid, id, visited_branch, visited_node, context=None):
if id in visited_branch: #Cycle
return False
        if id in visited_node: # already tested; no need to process it again
return True
visited_branch.add(id)
visited_node.add(id)
        # visit children using DFS
task = self.browse(cr, uid, id, context=context)
for child in task.child_ids:
res = self._check_cycle(cr, uid, child.id, visited_branch, visited_node, context=context)
if not res:
return False
visited_branch.remove(id)
return True
    def _check_dates(self, cr, uid, ids, context=None):
        if context is None:
            context = {}
        for obj_task in self.browse(cr, uid, ids, context=context):
            start = obj_task.date_start or False
            end = obj_task.date_end or False
            if start and end and start > end:
                return False
        return True
_constraints = [
(_check_recursion, 'Error ! You cannot create recursive tasks.', ['parent_ids']),
        (_check_dates, 'Error ! Task end-date must be greater than task start-date', ['date_start', 'date_end'])
]
# Override view according to the company definition
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
users_obj = self.pool.get('res.users')
if context is None: context = {}
# read uom as admin to avoid access rights issues, e.g. for portal/share users,
# this should be safe (no context passed to avoid side-effects)
obj_tm = users_obj.browse(cr, SUPERUSER_ID, uid, context=context).company_id.project_time_mode_id
tm = obj_tm and obj_tm.name or 'Hours'
res = super(task, self).fields_view_get(cr, uid, view_id, view_type, context, toolbar, submenu=submenu)
if tm in ['Hours','Hour']:
return res
eview = etree.fromstring(res['arch'])
def _check_rec(eview):
if eview.attrib.get('widget','') == 'float_time':
eview.set('widget','float')
for child in eview:
_check_rec(child)
return True
_check_rec(eview)
res['arch'] = etree.tostring(eview)
for f in res['fields']:
if 'Hours' in res['fields'][f]['string']:
res['fields'][f]['string'] = res['fields'][f]['string'].replace('Hours',tm)
return res
def get_empty_list_help(self, cr, uid, help, context=None):
context = dict(context or {})
context['empty_list_help_id'] = context.get('default_project_id')
context['empty_list_help_model'] = 'project.project'
context['empty_list_help_document_name'] = _("tasks")
return super(task, self).get_empty_list_help(cr, uid, help, context=context)
# ----------------------------------------
# Case management
# ----------------------------------------
def stage_find(self, cr, uid, cases, section_id, domain=[], order='sequence', context=None):
""" Override of the base.stage method
Parameter of the stage search taken from the lead:
- section_id: if set, stages must belong to this section or
be a default stage; if not set, stages must be default
stages
"""
if isinstance(cases, (int, long)):
cases = self.browse(cr, uid, cases, context=context)
# collect all section_ids
section_ids = []
if section_id:
section_ids.append(section_id)
for task in cases:
if task.project_id:
section_ids.append(task.project_id.id)
search_domain = []
if section_ids:
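            # Build an OR-ed domain in Odoo's prefix notation; e.g. for three ids:
            # ['|', '|', ('project_ids', '=', id1), ('project_ids', '=', id2), ('project_ids', '=', id3)]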
            search_domain = ['|'] * (len(section_ids) - 1)
for section_id in section_ids:
search_domain.append(('project_ids', '=', section_id))
search_domain += list(domain)
# perform search, return the first found
stage_ids = self.pool.get('project.task.type').search(cr, uid, search_domain, order=order, context=context)
if stage_ids:
return stage_ids[0]
return False
def _check_child_task(self, cr, uid, ids, context=None):
        if context is None:
context = {}
tasks = self.browse(cr, uid, ids, context=context)
for task in tasks:
if task.child_ids:
for child in task.child_ids:
if child.stage_id and not child.stage_id.fold:
raise osv.except_osv(_("Warning!"), _("Child task still open.\nPlease cancel or complete child task first."))
return True
def _delegate_task_attachments(self, cr, uid, task_id, delegated_task_id, context=None):
attachment = self.pool.get('ir.attachment')
attachment_ids = attachment.search(cr, uid, [('res_model', '=', self._name), ('res_id', '=', task_id)], context=context)
new_attachment_ids = []
for attachment_id in attachment_ids:
new_attachment_ids.append(attachment.copy(cr, uid, attachment_id, default={'res_id': delegated_task_id}, context=context))
return new_attachment_ids
def do_delegate(self, cr, uid, ids, delegate_data=None, context=None):
"""
        Delegate tasks to other users.
"""
if delegate_data is None:
delegate_data = {}
assert delegate_data['user_id'], _("Delegated User should be specified")
delegated_tasks = {}
for task in self.browse(cr, uid, ids, context=context):
delegated_task_id = self.copy(cr, uid, task.id, {
'name': delegate_data['name'],
'project_id': delegate_data['project_id'] and delegate_data['project_id'][0] or False,
'stage_id': delegate_data.get('stage_id') and delegate_data.get('stage_id')[0] or False,
'user_id': delegate_data['user_id'] and delegate_data['user_id'][0] or False,
'planned_hours': delegate_data['planned_hours'] or 0.0,
'parent_ids': [(6, 0, [task.id])],
'description': delegate_data['new_task_description'] or '',
'child_ids': [],
'work_ids': []
}, context=context)
self._delegate_task_attachments(cr, uid, task.id, delegated_task_id, context=context)
newname = delegate_data['prefix'] or ''
task.write({
'remaining_hours': delegate_data['planned_hours_me'],
'planned_hours': delegate_data['planned_hours_me'] + (task.effective_hours or 0.0),
'name': newname,
}, context=context)
delegated_tasks[task.id] = delegated_task_id
return delegated_tasks
def set_remaining_time(self, cr, uid, ids, remaining_time=1.0, context=None):
for task in self.browse(cr, uid, ids, context=context):
if (task.stage_id and task.stage_id.sequence <= 1) or (task.planned_hours == 0.0):
self.write(cr, uid, [task.id], {'planned_hours': remaining_time}, context=context)
self.write(cr, uid, ids, {'remaining_hours': remaining_time}, context=context)
return True
def set_remaining_time_1(self, cr, uid, ids, context=None):
return self.set_remaining_time(cr, uid, ids, 1.0, context)
def set_remaining_time_2(self, cr, uid, ids, context=None):
return self.set_remaining_time(cr, uid, ids, 2.0, context)
def set_remaining_time_5(self, cr, uid, ids, context=None):
return self.set_remaining_time(cr, uid, ids, 5.0, context)
def set_remaining_time_10(self, cr, uid, ids, context=None):
return self.set_remaining_time(cr, uid, ids, 10.0, context)
def _store_history(self, cr, uid, ids, context=None):
for task in self.browse(cr, uid, ids, context=context):
self.pool.get('project.task.history').create(cr, uid, {
'task_id': task.id,
'remaining_hours': task.remaining_hours,
'planned_hours': task.planned_hours,
'kanban_state': task.kanban_state,
'type_id': task.stage_id.id,
'user_id': task.user_id.id
}, context=context)
return True
# ------------------------------------------------
# CRUD overrides
# ------------------------------------------------
def create(self, cr, uid, vals, context=None):
context = dict(context or {})
# for default stage
if vals.get('project_id') and not context.get('default_project_id'):
context['default_project_id'] = vals.get('project_id')
# user_id change: update date_start
if vals.get('user_id') and not vals.get('date_start'):
vals['date_start'] = fields.datetime.now()
# context: no_log, because subtype already handle this
create_context = dict(context, mail_create_nolog=True)
task_id = super(task, self).create(cr, uid, vals, context=create_context)
self._store_history(cr, uid, [task_id], context=context)
return task_id
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
# stage change: update date_last_stage_update
if 'stage_id' in vals:
vals['date_last_stage_update'] = fields.datetime.now()
# user_id change: update date_start
if vals.get('user_id') and 'date_start' not in vals:
vals['date_start'] = fields.datetime.now()
# Overridden to reset the kanban_state to normal whenever
# the stage (stage_id) of the task changes.
        if vals and 'kanban_state' not in vals and 'stage_id' in vals:
new_stage = vals.get('stage_id')
vals_reset_kstate = dict(vals, kanban_state='normal')
for t in self.browse(cr, uid, ids, context=context):
write_vals = vals_reset_kstate if t.stage_id.id != new_stage else vals
super(task, self).write(cr, uid, [t.id], write_vals, context=context)
result = True
else:
result = super(task, self).write(cr, uid, ids, vals, context=context)
if any(item in vals for item in ['stage_id', 'remaining_hours', 'user_id', 'kanban_state']):
self._store_history(cr, uid, ids, context=context)
return result
def unlink(self, cr, uid, ids, context=None):
        if context is None:
context = {}
self._check_child_task(cr, uid, ids, context=context)
res = super(task, self).unlink(cr, uid, ids, context)
return res
def _generate_task(self, cr, uid, tasks, ident=4, context=None):
context = context or {}
result = ""
ident = ' '*ident
for task in tasks:
if task.stage_id and task.stage_id.fold:
continue
result += '''
%sdef Task_%s():
%s todo = \"%.2fH\"
%s effort = \"%.2fH\"''' % (ident,task.id, ident,task.remaining_hours, ident,task.total_hours)
start = []
for t2 in task.parent_ids:
start.append("up.Task_%s.end" % (t2.id,))
if start:
result += '''
%s start = max(%s)
''' % (ident,','.join(start))
if task.user_id:
result += '''
%s resource = %s
''' % (ident, 'User_'+str(task.user_id.id))
result += "\n"
return result
# ---------------------------------------------------
# Mail gateway
# ---------------------------------------------------
def message_get_reply_to(self, cr, uid, ids, context=None):
""" Override to get the reply_to of the parent project. """
tasks = self.browse(cr, SUPERUSER_ID, ids, context=context)
project_ids = set([task.project_id.id for task in tasks if task.project_id])
aliases = self.pool['project.project'].message_get_reply_to(cr, uid, list(project_ids), context=context)
return dict((task.id, aliases.get(task.project_id and task.project_id.id or 0, False)) for task in tasks)
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Override to updates the document according to the email. """
if custom_values is None:
custom_values = {}
defaults = {
'name': msg.get('subject'),
'planned_hours': 0.0,
}
defaults.update(custom_values)
res = super(task, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
email_list = tools.email_split(msg.get('to', '') + ',' + msg.get('cc', ''))
new_task = self.browse(cr, uid, res, context=context)
if new_task.project_id and new_task.project_id.alias_name: # check left-part is not already an alias
email_list = filter(lambda x: x.split('@')[0] != new_task.project_id.alias_name, email_list)
partner_ids = filter(lambda x: x, self._find_partner_from_emails(cr, uid, None, email_list, context=context, check_followers=False))
self.message_subscribe(cr, uid, [res], partner_ids, context=context)
return res
def message_update(self, cr, uid, ids, msg, update_vals=None, context=None):
""" Override to update the task according to the email. """
if update_vals is None:
update_vals = {}
maps = {
'cost': 'planned_hours',
}
for line in msg['body'].split('\n'):
line = line.strip()
res = tools.command_re.match(line)
if res:
match = res.group(1).lower()
field = maps.get(match)
if field:
try:
update_vals[field] = float(res.group(2).lower())
except (ValueError, TypeError):
pass
return super(task, self).message_update(cr, uid, ids, msg, update_vals=update_vals, context=context)
class project_work(osv.osv):
_name = "project.task.work"
_description = "Project Task Work"
_columns = {
'name': fields.char('Work summary'),
'date': fields.datetime('Date', select="1"),
'task_id': fields.many2one('project.task', 'Task', ondelete='cascade', required=True, select="1"),
'hours': fields.float('Time Spent'),
'user_id': fields.many2one('res.users', 'Done by', required=True, select="1"),
'company_id': fields.related('task_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True)
}
_defaults = {
'user_id': lambda obj, cr, uid, context: uid,
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S')
}
_order = "date desc"
def create(self, cr, uid, vals, context=None):
if 'hours' in vals and (not vals['hours']):
vals['hours'] = 0.00
if 'task_id' in vals:
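            # Keep the parent task's remaining_hours in sync: subtract the newly
            # logged hours directly in SQL, then invalidate the ORM cache so the
            # stored field is re-read from the database.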
cr.execute('update project_task set remaining_hours=remaining_hours - %s where id=%s', (vals.get('hours',0.0), vals['task_id']))
self.pool.get('project.task').invalidate_cache(cr, uid, ['remaining_hours'], [vals['task_id']], context=context)
return super(project_work,self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if 'hours' in vals and (not vals['hours']):
vals['hours'] = 0.00
if 'hours' in vals:
task_obj = self.pool.get('project.task')
for work in self.browse(cr, uid, ids, context=context):
cr.execute('update project_task set remaining_hours=remaining_hours - %s + (%s) where id=%s', (vals.get('hours',0.0), work.hours, work.task_id.id))
task_obj.invalidate_cache(cr, uid, ['remaining_hours'], [work.task_id.id], context=context)
return super(project_work,self).write(cr, uid, ids, vals, context)
def unlink(self, cr, uid, ids, context=None):
task_obj = self.pool.get('project.task')
for work in self.browse(cr, uid, ids):
cr.execute('update project_task set remaining_hours=remaining_hours + %s where id=%s', (work.hours, work.task_id.id))
task_obj.invalidate_cache(cr, uid, ['remaining_hours'], [work.task_id.id], context=context)
return super(project_work,self).unlink(cr, uid, ids, context=context)
class account_analytic_account(osv.osv):
_inherit = 'account.analytic.account'
_description = 'Analytic Account'
_columns = {
'use_tasks': fields.boolean('Tasks', help="If checked, this contract will be available in the project menu and you will be able to manage tasks or track issues"),
'company_uom_id': fields.related('company_id', 'project_time_mode_id', string="Company UOM", type='many2one', relation='product.uom'),
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
if template_id and 'value' in res:
template = self.browse(cr, uid, template_id, context=context)
res['value']['use_tasks'] = template.use_tasks
return res
def _trigger_project_creation(self, cr, uid, vals, context=None):
'''
        This function is used to decide whether a project needs to be created automatically when an analytic account is created. It returns True if so, False otherwise.
'''
if context is None: context = {}
        return vals.get('use_tasks') and 'project_creation_in_progress' not in context
def project_create(self, cr, uid, analytic_account_id, vals, context=None):
'''
        This function is called at the time of analytic account creation and is used to create a project automatically linked to it if the conditions are met.
'''
project_pool = self.pool.get('project.project')
project_id = project_pool.search(cr, uid, [('analytic_account_id','=', analytic_account_id)])
if not project_id and self._trigger_project_creation(cr, uid, vals, context=context):
project_values = {
'name': vals.get('name'),
'analytic_account_id': analytic_account_id,
'type': vals.get('type','contract'),
}
return project_pool.create(cr, uid, project_values, context=context)
return False
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if vals.get('child_ids', False) and context.get('analytic_project_copy', False):
vals['child_ids'] = []
analytic_account_id = super(account_analytic_account, self).create(cr, uid, vals, context=context)
self.project_create(cr, uid, analytic_account_id, vals, context=context)
return analytic_account_id
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
vals_for_project = vals.copy()
for account in self.browse(cr, uid, ids, context=context):
if not vals.get('name'):
vals_for_project['name'] = account.name
if not vals.get('type'):
vals_for_project['type'] = account.type
self.project_create(cr, uid, account.id, vals_for_project, context=context)
return super(account_analytic_account, self).write(cr, uid, ids, vals, context=context)
def unlink(self, cr, uid, ids, context=None):
proj_ids = self.pool['project.project'].search(cr, uid, [('analytic_account_id', 'in', ids)])
has_tasks = self.pool['project.task'].search(cr, uid, [('project_id', 'in', proj_ids)], count=True, context=context)
if has_tasks:
raise osv.except_osv(_('Warning!'), _('Please remove existing tasks in the project linked to the accounts you want to delete.'))
return super(account_analytic_account, self).unlink(cr, uid, ids, context=context)
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if args is None:
args = []
if context is None:
context={}
if context.get('current_model') == 'project.project':
project_ids = self.search(cr, uid, args + [('name', operator, name)], limit=limit, context=context)
return self.name_get(cr, uid, project_ids, context=context)
return super(account_analytic_account, self).name_search(cr, uid, name, args=args, operator=operator, context=context, limit=limit)
class project_project(osv.osv):
_inherit = 'project.project'
_defaults = {
'use_tasks': True
}
class project_task_history(osv.osv):
"""
Tasks History, used for cumulative flow charts (Lean/Agile)
"""
_name = 'project.task.history'
_description = 'History of Tasks'
_rec_name = 'task_id'
_log_access = False
def _get_date(self, cr, uid, ids, name, arg, context=None):
result = {}
for history in self.browse(cr, uid, ids, context=context):
if history.type_id and history.type_id.fold:
result[history.id] = history.date
continue
cr.execute('''select
date
from
project_task_history
where
task_id=%s and
id>%s
order by id limit 1''', (history.task_id.id, history.id))
res = cr.fetchone()
result[history.id] = res and res[0] or False
return result
def _get_related_date(self, cr, uid, ids, context=None):
result = []
for history in self.browse(cr, uid, ids, context=context):
cr.execute('''select
id
from
project_task_history
where
task_id=%s and
id<%s
order by id desc limit 1''', (history.task_id.id, history.id))
res = cr.fetchone()
if res:
result.append(res[0])
return result
_columns = {
'task_id': fields.many2one('project.task', 'Task', ondelete='cascade', required=True, select=True),
'type_id': fields.many2one('project.task.type', 'Stage'),
'kanban_state': fields.selection([('normal', 'Normal'), ('blocked', 'Blocked'), ('done', 'Ready for next stage')], 'Kanban State', required=False),
'date': fields.date('Date', select=True),
'end_date': fields.function(_get_date, string='End Date', type="date", store={
'project.task.history': (_get_related_date, None, 20)
}),
'remaining_hours': fields.float('Remaining Time', digits=(16, 2)),
'planned_hours': fields.float('Planned Time', digits=(16, 2)),
'user_id': fields.many2one('res.users', 'Responsible'),
}
_defaults = {
'date': fields.date.context_today,
}
class project_task_history_cumulative(osv.osv):
_name = 'project.task.history.cumulative'
_table = 'project_task_history_cumulative'
_inherit = 'project.task.history'
_auto = False
_columns = {
'end_date': fields.date('End Date'),
'nbr_tasks': fields.integer('# of Tasks', readonly=True),
'project_id': fields.many2one('project.project', 'Project'),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'project_task_history_cumulative')
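        # Each history record is expanded into one row per day, from its start
        # date up to its end date (or tomorrow when still open), using
        # generate_series, so the view can feed cumulative flow charts at a
        # daily granularity.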
cr.execute(""" CREATE VIEW project_task_history_cumulative AS (
SELECT
history.date::varchar||'-'||history.history_id::varchar AS id,
history.date AS end_date,
*
FROM (
SELECT
h.id AS history_id,
h.date+generate_series(0, CAST((coalesce(h.end_date, DATE 'tomorrow')::date - h.date) AS integer)-1) AS date,
h.task_id, h.type_id, h.user_id, h.kanban_state,
count(h.task_id) as nbr_tasks,
greatest(h.remaining_hours, 1) AS remaining_hours, greatest(h.planned_hours, 1) AS planned_hours,
t.project_id
FROM
project_task_history AS h
JOIN project_task AS t ON (h.task_id = t.id)
GROUP BY
h.id,
h.task_id,
t.project_id
) AS history
)
""")
class project_category(osv.osv):
""" Category of project's task (or issue) """
_name = "project.category"
_description = "Category of project's task, issue, ..."
_columns = {
'name': fields.char('Name', required=True, translate=True),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
inspyration/odoo
|
addons/project/project.py
|
Python
|
agpl-3.0
| 74,481
|
[
"VisIt"
] |
f50a15db97f99d75bd5c821509f2f824d349ccf9fa247346bcd9d4229035eff7
|
#!/usr/bin/env python3
# coding: utf-8
""" E-VRP is a project about the routing of a fleet of electrical vehicles.
E-VRP is a project developed for the Application of Operational Research
exam at University of Modena and Reggio Emilia.
Copyright (C) 2017 Serena Ziviani, Federico Motta
This file is part of E-VRP.
E-VRP is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
E-VRP is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with E-VRP. If not, see <http://www.gnu.org/licenses/>.
"""
__authors__ = "Serena Ziviani, Federico Motta"
__copyright__ = "E-VRP Copyright (C) 2017"
__license__ = "GPL3"
import math
import time
import copy
import IO
import solution
import utility
class GreedyHeuristic(object):
def __init__(self, abstract_g, cache):
self._abstract_g = abstract_g
self._cache = cache
self._depot = abstract_g.depot
self._solution = None
assert self._depot is not None, 'Could not find depot in graph'
def create_feasible_solution(self):
"""Build a feasible, greedy solution for the problem."""
self._solution = solution.Solution(self._abstract_g, self._cache)
# While customers are not all served:
while self._solution.missing_customers():
self._customer = list(self._solution.missing_customers())
self._temp_route = solution.Route(self._cache, greenest=True)
self.create_feasible_route()
self._solution.routes.append(self._temp_route)
        assert not self._solution.missing_customers(), \
            'self._customer is empty even if sol.missing_customers() is not'
return self._solution
def create_feasible_route(self):
current_node = self._depot
while True:
if not self._solution.missing_customers():
# We have visited all customers: add depot
dest = self._depot
else:
dest = find_nearest(self._solution, current_node, 'customer')
try:
self._temp_route.append(dest)
except solution.BatteryCriticalException:
                IO.Log.debug(f'Inserting node {dest} would push'
                             ' the battery below the critical threshold')
self.handle_insufficient_energy()
except solution.MaximumTimeException:
IO.Log.debug(f'Inserting node {dest} makes self._temp_route'
' exceed the maximum time')
self.handle_max_time_exceeded()
return
except solution.UnfeasibleRouteException as e:
IO.Log.debug('Caught UnfeasibleRouteException in '
'GreedyHeuristic.create_feasible_route() '
f'({str(e)})')
else:
IO.Log.debug(f'Successfully inserted node {dest}')
if dest == self._depot:
return
else:
current_node = dest
def handle_max_time_exceeded(self):
try:
self._temp_route.append(self._depot)
except solution.MaximumTimeException:
IO.Log.debug('Inserting depot still raises a MaximumTimeException')
last = self._temp_route.last_node()
if last is None:
raise SystemExit(f'Max time {self._temp_route.time_limit} too '
'short for this problem!')
else:
self._temp_route.remove(last)
self.handle_max_time_exceeded()
else:
IO.Log.debug(f'Successfully inserted node {self._depot}')
def handle_insufficient_energy(self):
dest = find_nearest(self._solution, self._temp_route.last_node(),
'station')
try:
self._temp_route.append(dest)
except solution.BatteryCriticalException:
IO.Log.debug('Inserting nearest station still raises '
'BatteryCriticalException')
last = self._temp_route.last_node()
if last is None:
raise SystemExit('Total battery energy '
F'{solution.Battery().charge} is too small '
'for this problem!')
else:
self._temp_route.remove(last)
self.handle_insufficient_energy()
else:
IO.Log.debug(f'Successfully inserted node {dest}')
IO.Log.debug(f'Recharging battery in station {dest}')
IO.Log.debug('Old charge: '
f'{self._temp_route.last_battery().charge}')
self._temp_route.last_battery().recharge()
IO.Log.debug('New charge: '
f'{self._temp_route.last_battery().charge}')
def find_nearest(sol, current_node, type_to_find):
min_time = math.inf
min_node = None
for dest, green, short in sol._graph_cache.source_iterator(current_node):
if type_to_find == 'customer':
if dest[2] == type_to_find and \
dest in sol.missing_customers() and \
green.time < min_time:
min_time = green.time
min_node = dest
else:
if dest[2] == type_to_find and green.time < min_time:
min_time = green.time
min_node = dest
return min_node
def two_opt_neighbors(sol):
"""Generator which produces a series of solutions close to the given one.
(close in neighborhood sense)
note: https://docs.python.org/3/glossary.html#term-generator
https://en.wikipedia.org/wiki/2-opt
~> A C - ... ~> A - C - ...
X ) becomes )
<~ D B - ... <~ D - B - ...
(A, B, ... C, D) (A, C, ..., B, D)
"""
mod_sol = copy.deepcopy(sol)
for route in mod_sol.routes:
for i in utility.shuffled_range(len(route._paths) - 1):
if i >= len(route._paths): # should never happen
continue
node_i = route._paths[i].last_node() # node C of the example
for j in utility.shuffled_range(i + 1, len(route._paths)):
if j >= len(route._paths): # should never happen
continue
node_j = route._paths[j].last_node() # node B of the example
try:
route.swap(node_i, node_j)
except solution.UnfeasibleRouteException as e:
IO.Log.debug(f'two_opt_neighbors() generator got '
f'UnfeasibleRouteException: {str(e)}')
continue
else:
yield mod_sol
finally:
mod_sol = copy.deepcopy(sol)
def three_opt_neighbors(sol, _d={}):
"""Generator which produces a series of solutions close to the given one.
(close in neighborhood sense)
note: https://docs.python.org/3/glossary.html#term-generator
https://en.wikipedia.org/wiki/3-opt
    explanation: the three edges are selected by choosing 3 different nodes
(each edge will be the one arriving to that node);
to try all the possible reconnections of the three selected
edges we need to understand which are the possible
permutations of them:
>>> import itertools
>>> for i, j, k in itertools.permutations('ijk'):
>>> print(f'{i}\t{j}\t{k}')
i j k # actual arrangement (will be skipped)
i k j # 'i' is in the same position
j i k # 'k' is in the same position
j k i # that solution must be explored with 3-opt
k i j # that solution must be explored with 3-opt
k j i # 'j' is in the same position
    In practice the arrangements which keep i, j, and k in their
    current positions will be skipped because they are already
    explored by the 2-opt neighborhood visit!
"""
if not utility.CLI.args().use_3_opt_neighborhood:
if 'written_once' not in _d:
IO.Log.debug('To explore 3-opt neighborhood use -3 CLI argument.')
_d['written_once'] = True
return iter(list())
mod_sol = copy.deepcopy(sol)
for route in mod_sol.routes:
for i in utility.shuffled_range(len(route._paths) - 2):
if i >= len(route._paths): # should never happen
continue
node_i = route._paths[i].last_node()
for j in utility.shuffled_range(i + 1, len(route._paths) - 1):
if j >= len(route._paths): # should never happen
continue
node_j = route._paths[j].last_node()
for k in utility.shuffled_range(j + 1, len(route._paths)):
if k >= len(route._paths): # should never happen
continue
node_k = route._paths[k].last_node()
route_bkp = copy.deepcopy(route) # i - j - k
try:
route.swap(node_i, node_j) # j - i - k
route.swap(node_i, node_k) # j - k - i
except solution.UnfeasibleRouteException as e:
IO.Log.debug(f'three_opt_neighbors() generator got '
f'UnfeasibleRouteException: {str(e)}')
else:
yield mod_sol
mod_sol = copy.deepcopy(sol)
route = copy.deepcopy(route_bkp) # i - j - k
try:
route.swap(node_i, node_k) # k - j - i
route.swap(node_j, node_i) # k - i - j
except solution.UnfeasibleRouteException as e:
IO.Log.debug(f'three_opt_neighbors() generator got '
f'UnfeasibleRouteException: {str(e)}')
else:
yield mod_sol
mod_sol = copy.deepcopy(sol)
def move_neighbors(sol):
"""Generator which produces a move neighborhood of the given solution."""
mod_sol = copy.deepcopy(sol)
for route in mod_sol.routes:
for i in utility.shuffled_range(len(route._paths) - 1):
if i >= len(route._paths): # should never happen
continue
node_i = route._paths[i].last_node()
for j in utility.shuffled_range(i + 1, len(route._paths)):
try:
route.remove(node_i) # a remove shifts indexes left by one
route.insert(node_i, j - 1)
except solution.UnfeasibleRouteException as e:
IO.Log.debug(f'Move of node {i} ({node_i}) to position '
f'{j} is not feasible ({str(e)})')
continue
else:
yield mod_sol
finally:
mod_sol = copy.deepcopy(sol)
def swap_neighbors(sol):
"""Generator which produces a swap neighborhood of the given solution."""
mod_sol = copy.deepcopy(sol)
for a in utility.shuffled_range(len(mod_sol.routes) - 1):
route_a = mod_sol.routes[a]
for b in utility.shuffled_range(a + 1, len(mod_sol.routes)):
route_b = mod_sol.routes[b]
for i in utility.shuffled_range(len(route_a._paths)):
if i >= len(route_a._paths): # should never happen
continue
node_i = route_a._paths[i].last_node()
for j in utility.shuffled_range(len(route_b._paths)):
if j >= len(route_b._paths): # should never happen
continue
node_j = route_b._paths[j].last_node()
try:
route_a.remove(node_i)
route_b.remove(node_j)
route_a.insert(node_j, i)
                        route_b.insert(node_i, j)  # complete the swap: node_i moves to route_b
except solution.UnfeasibleRouteException as e:
IO.Log.debug(f'Swap between node {i} ({node_i}) of '
f'route {a} and node {j} ({node_j}) of '
f'route {b} is not feasible ({str(e)})')
continue
else:
yield mod_sol
finally:
mod_sol = copy.deepcopy(sol)
neighborhoods = {'2-opt': two_opt_neighbors,
'3-opt': three_opt_neighbors,
'swap': swap_neighbors,
'move': move_neighbors}
def metaheuristic(initial_solution, max_iter=10**3):
"""Look (in different initial solution neighborhoods) for better solutions.
(a first improving approach is used in neighborhood exploration)
Return the best solution found after max_iter or maximum time exceeded.
"""
actual_solution = copy.deepcopy(initial_solution)
vns_it = 0
best_it = 0
t0 = time.time()
num_explored_solutions = 0
for vns_it in range(max_iter):
# exit if time exceeded
if time.time() > t0 + utility.CLI.args().max_time:
break
# explore each available neighborhood
for k, neighborhood_generator in enumerate(neighborhoods.values()):
# explore each solution in the neighborhood
sol = shake(actual_solution, k)
sol = local_search(sol, neighborhood_generator)
if sol[0] is not None:
# local search found a better solution in the neighborhood
actual_solution = sol[0]
best_it = vns_it
num_explored_solutions += sol[1]
if vns_it >= best_it + 3:
break
t_tot = time.time() - t0
IO.Log.info('VNS summary:')
IO.Log.info(f'{vns_it:>8} iterations')
IO.Log.debug(f'({vns_it - best_it - 1:>7} empty iterations)')
IO.Log.info(f'{t_tot:>12.3f} seconds')
IO.Log.info(f'{num_explored_solutions:>8} explored sol.')
return actual_solution
def local_search(actual_solution, neighborhood):
"""Look in the neighborhood of actual_solution for better neighbors."""
num_explored_solutions = 0
for neighbor in neighborhood(actual_solution):
num_explored_solutions += 1
# return the first improving one
if (neighbor.time < actual_solution.time
or (neighbor.time == actual_solution.time and
neighbor.energy < actual_solution.energy)):
delta_energy = neighbor.energy - actual_solution.energy
delta_time = neighbor.time - actual_solution.time
IO.Log.info(f'VNS found a better solution '
f'({delta_time:>+10.6f} m, '
f'{delta_energy:>+10.1f} J)')
assert not neighbor.missing_customers(), 'There are some ' \
'customers left out'
assert neighbor.is_feasible(), 'neighbor found is not feasible'
return copy.deepcopy(neighbor), num_explored_solutions
    # Could not find a better solution in actual_solution's neighborhood
# => actual_solution is a local optimum for that neighborhood
return None, num_explored_solutions
def shake(sol, k):
"""Make k random feasible swap moves on the given solution."""
for i in range(k):
try:
            sol = next(swap_neighbors(sol))
except StopIteration:
IO.Log.debug(f'Stopped after {i} shake iterations')
return sol
|
sere/E-VRP
|
heuristic.py
|
Python
|
gpl-3.0
| 16,595
|
[
"VisIt"
] |
3aac2c79b09c75e3c0b49a4f48ca27ba06b49c255dc9f4e8ec5b09aa8a83d063
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long
"""
.. _tutorial-use-pass-infra:
How to Use TVM Pass Infra
=========================
**Author**: `Zhi Chen <https://github.com/zhiics>`_
As the number of optimization passes increases in Relay/tir, it becomes intractable to
execute them and maintain their dependencies manually. Therefore, we have
introduced an infrastructure to manage the optimization passes and make it
applicable to different layers of the IR in the TVM stack.
The optimizations of a Relay/tir program can be applied at various granularities,
namely function-level and module-level using :py:class:`tvm.relay.transform.FunctionPass`/
:py:class:`tvm.tir.transform.PrimFuncPass` and :py:class:`tvm.transform.ModulePass`
respectively. Or users can rely on :py:class:`tvm.transform.Sequential` to apply a sequence of passes
on a Relay/tir program where the dependencies between passes can be resolved by the
pass infra. For more details about each type of these passes, please refer to
the :ref:`pass-infra`
This tutorial mainly demonstrates how developers can use the pass infra to perform
a certain optimization and create an optimization pipeline for a Relay program.
The same approach can be used for tir as well.
"""
import numpy as np
import tvm
from tvm import te
import tvm.relay as relay
###############################################################################
# Create An Example Relay Program
# -------------------------------
# First of all, we create a simple Relay program for the tutorial. This program
# will be used by various optimizations of the examples in this tutorial.
# Similarly, users can write a tir primitive function and apply the tir passes.
def example():
shape = (1, 64, 54, 54)
c_data = np.empty(shape).astype("float32")
c = relay.const(c_data)
weight = relay.var("weight", shape=(64, 64, 3, 3))
x = relay.var("x", relay.TensorType((1, 64, 56, 56), "float32"))
conv = relay.nn.conv2d(x, weight)
y = relay.add(c, c)
y = relay.multiply(y, relay.const(2, "float32"))
y = relay.add(conv, y)
z = relay.add(y, c)
z1 = relay.add(y, c)
z2 = relay.add(z, z1)
return relay.Function([x, weight], z2)
###############################################################################
# Optimize the Program
# --------------------
# Now we would like to optimize the program. Relay features a host of
# optimizations. We will select some of them to apply on this example program.
#
# There are multiple ways to optimize a Relay program. Below we will provide
# examples for each of them.
#
# Manually Apply Optimization Passes
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Let's first create a relay Module which contains one or multiple Relay
# functions for optimization.
f = example()
mod = tvm.IRModule.from_expr(f)
# Now we can apply constant folding on the module.
# fold_const here is a callback that doesn't take any parameters.
fold_const = relay.transform.FoldConstant()
# Then, we can invoke the pass on the given module. Note that the constant
# folding pass works at the function level. That is, the optimization will be
# applied to each function in the module. Users don't need to iterate
# through individual functions manually to apply this pass.
mod = fold_const(mod)
# We can see from the updated program that the constants are folded.
print(mod)
###############################################################################
# More optimizations can be applied in a similar manner. For instance, we can
# eliminate the common expressions used by `z` and `z1`.
mod = relay.transform.EliminateCommonSubexpr()(mod)
print(mod)
###############################################################################
# Some optimizations, such as fusion, are parametric as well. For example,
# opt level 0 will not allow operators to be fused together. Users can pass the
# `fuse_opt_level` to enable this.
mod = relay.transform.FuseOps(fuse_opt_level=0)(mod)
# We can observe that the optimized module contains functions that only have
# a single primitive op.
print(mod)
###############################################################################
# Use Sequential to Apply a Sequence of Passes
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Applying passes as above is actually tedious, and it may require users to have
# a better understanding of the dependencies between them. For example, fusion
# currently doesn't work well on let bindings. Therefore, we would not be able
# to fuse operators that were fusable if :py:func:`relay.transform.ToANormalForm` is applied before
# fusion, as this pass generates let bindings for each expression to
# canonicalize a Relay program.
#
# Relay, hence, provides :py:class:`tvm.transform.Sequential` to relieve developers
# from handling these issues explicitly, by letting them specify the required
# passes and packing them as a whole to execute. For example, the same passes
# can now be applied using the sequential style as follows. :py:class:`tvm.transform.Sequential` is
# similar to `torch.nn.sequential <https://pytorch.org/docs/stable/nn.html#torch.nn.Sequential>`_
# and `mxnet.gluon.block <https://mxnet.apache.org/api/python/docs/_modules/mxnet/gluon/block.html>`_.
# For example, `torch.nn.sequential` is used to contain a sequence of PyTorch
# `Modules` that will be added to build a network. It focuses on the network
# layers. Instead, the :py:class:`tvm.transform.Sequential` in our pass infra works on the optimizing
# pass.
# Now let's execute some passes through :py:class:`tvm.transform.Sequential`
f = example()
mod = tvm.IRModule.from_expr(f)
# Gather the passes we are interested in.
seq = tvm.transform.Sequential(
[
relay.transform.FoldConstant(),
relay.transform.EliminateCommonSubexpr(),
relay.transform.FuseOps(fuse_opt_level=2),
]
)
mod1 = seq(mod)
print(mod1)
###############################################################################
# From the transformed Relay program, we can see that there are still two
# identical addition operations. This is because ``EliminateCommonSubexpr``
# was not actually performed. The reason is that only the passes with an
# optimization level less than or equal to 2 will be executed by default under
# :py:class:`tvm.transform.Sequential`. The pass infra,
# however, provides a configuration interface
# for users to customize the optimization level that they want to execute.
with tvm.transform.PassContext(opt_level=3):
mod2 = seq(mod)
print(mod2)
###############################################################################
# Now we can see that only one of the two identical additions is kept.
#
# In addition, users can selectively disable some passes using the
# `disabled_pass` config, which is similar to the `-fno-xxx` options used in
# general-purpose compilers such as Clang and GCC. For example, we can disable
# EliminateCommonSubexpr as follows. The printed module will again show two
# identical addition operations.
with tvm.transform.PassContext(opt_level=3, disabled_pass=["EliminateCommonSubexpr"]):
mod3 = seq(mod)
print(mod3)
##############################################################################
# Implement a Pass Using Python Decorator
# ------------------------------------------
# The next example illustrates how we can orchestrate a customized optimization
# pipeline through the pass infra using Python decorators. This functionality
# greatly eases the implementation of passes. For example, users can simply
# define a decorated class to do function-level optimizations as the following
# example shows. `transform_function` wraps a class to replace all constants
# with a multiple of `c`. Later on, each function in a given module will be
# visited and each constant in the function will be replaced when we invoke the
# customized pass.
@relay.transform.function_pass(opt_level=1)
class CustomPipeline:
"""Simple test function to replace one argument to another."""
def __init__(self, multiplier):
self.multiplier = multiplier
# This function can define a pass.
def transform_function(self, func, mod, ctx):
obj = self
class ReplaceConstant(tvm.relay.ExprMutator):
def visit_constant(self, c):
return relay.multiply(obj.multiplier, c)
return ReplaceConstant().visit(func)
f = example()
mod = tvm.IRModule.from_expr(f)
custom_pass = CustomPipeline(multiplier=relay.const(3, "float32"))
assert custom_pass.info.name == "CustomPipeline"
mod3 = custom_pass(mod)
print(mod3)
##############################################################################
# Debug a Pass
# ------------
# TVM provides users a plug-and-play style debugging mechanism: a special pass
# (``PrintIR``) dumps the IR of the whole module after a certain pass is done.
# A slightly modified version of the sequential pass example
# could look like the following to enable IR dumping for the ``FoldConstant`` optimization.
f = example()
mod = tvm.IRModule.from_expr(f)
seq = tvm.transform.Sequential(
[
relay.transform.FoldConstant(),
tvm.transform.PrintIR(),
relay.transform.EliminateCommonSubexpr(),
relay.transform.FuseOps(),
]
)
###############################################################################
# By inserting the ``PrintIR`` pass after ``FoldConstant``, the pass infra will
# dump out the module IR when ``FoldConstant`` is done. Users can plug in this
# pass after any pass they want to debug for viewing the optimization effect.
#
# There is a more flexible debugging mechanism. One can implement a ``PassInstrument``
# class to execute arbitrary code not only before and/or after each pass but also
# at entering/exiting ``PassContext``. See :ref:`pass_instrument_cpp_backend`
# for more details.
#
# Here we use the :py:func:`tvm.instrument.pass_instrument` decorator to implement
# a PassInstrument class that prints the IR before the execution of each pass:
@tvm.instrument.pass_instrument
class PrintIR:
"""Print the name of the pass, the IR, only before passes execute."""
def run_before_pass(self, mod, info):
print("Running pass: {}", info)
print(mod)
with tvm.transform.PassContext(opt_level=3, instruments=[PrintIR()]):
with tvm.target.Target("llvm"):
# Perform the optimizations.
mod = seq(mod)
print(mod)
print("done")
##############################################################################
# Summary
# -------
# This tutorial has covered how we can write and invoke passes in TVM more
# conveniently using the pass infra. Different ways of invoking a pass are also
# discussed. Using :py:class:`tvm.transform.Sequential` can largely help
# users to ease the work of handling multiple optimization passes and their
# dependencies. In addition, an example is provided to illustrate
# how we can debug a pass using the ``PrintIR`` pass and tracing.
|
Laurawly/tvm-1
|
gallery/how_to/extend_tvm/use_pass_infra.py
|
Python
|
apache-2.0
| 11,740
|
[
"VisIt"
] |
c7d67b3b9012b0517c74a8b45eabe7dc30157724dc3e4efa6f9d0278efc39f29
|
from random import random,randint
import math
def wineprice(rating,age):
peak_age=rating-50
# Calculate price based on rating
price=rating/2
if age>peak_age:
# Past its peak, goes bad in 10 years
price=price*(5-(age-peak_age)/2)
else:
# Increases to 5x original value as it
# approaches its peak
price=price*(5*((age+1)/peak_age))
if price<0: price=0
return price
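# Worked example (added for illustration): wineprice(95.0, 3.0) gives
# peak_age = 45, base price = 47.5; since age < peak_age,
# price = 47.5 * (5 * (4 / 45.0)) ~= 21.1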
def wineset1():
rows=[]
for i in range(300):
# Create a random age and rating
rating=random()*50+50
age=random()*50
# Get reference price
price=wineprice(rating,age)
# Add some noise
price*=(random()*0.2+0.9)
# Add to the dataset
rows.append({'input':(rating,age),
'result':price})
return rows
def euclidean(v1,v2):
d=0.0
for i in range(len(v1)):
d+=(v1[i]-v2[i])**2
return math.sqrt(d)
def getdistances(data,vec1):
distancelist=[]
# Loop over every item in the dataset
for i in range(len(data)):
vec2=data[i]['input']
# Add the distance and the index
distancelist.append((euclidean(vec1,vec2),i))
# Sort by distance
distancelist.sort()
return distancelist
def knnestimate(data,vec1,k=5):
# Get sorted distances
dlist=getdistances(data,vec1)
avg=0.0
# Take the average of the top k results
for i in range(k):
idx=dlist[i][1]
avg+=data[idx]['result']
avg=avg/k
return avg
def inverseweight(dist,num=1.0,const=0.1):
return num/(dist+const)
def subtractweight(dist,const=1.0):
if dist>const:
return 0
else:
return const-dist
def gaussian(dist,sigma=5.0):
return math.e**(-dist**2/(2*sigma**2))
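# Note (added): gaussian(0.0) == 1.0 and gaussian(5.0, sigma=5.0) == e**-0.5 ~= 0.607,
# so nearer neighbours always get a strictly positive, larger weight (unlike
# subtractweight, which can drop to exactly 0)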
def weightedknn(data,vec1,k=5,weightf=gaussian):
# Get distances
dlist=getdistances(data,vec1)
avg=0.0
totalweight=0.0
# Get weighted average
for i in range(k):
dist=dlist[i][0]
idx=dlist[i][1]
weight=weightf(dist)
avg+=weight*data[idx]['result']
totalweight+=weight
if totalweight==0: return 0
avg=avg/totalweight
return avg
def dividedata(data,test=0.05):
trainset=[]
testset=[]
for row in data:
if random()<test:
testset.append(row)
else:
trainset.append(row)
return trainset,testset
def testalgorithm(algf,trainset,testset):
error=0.0
for row in testset:
guess=algf(trainset,row['input'])
error+=(row['result']-guess)**2
#print row['result'],guess
#print error/len(testset)
return error/len(testset)
def crossvalidate(algf,data,trials=100,test=0.1):
error=0.0
for i in range(trials):
trainset,testset=dividedata(data,test)
error+=testalgorithm(algf,trainset,testset)
return error/trials
def wineset2():
rows=[]
for i in range(300):
rating=random()*50+50
age=random()*50
aisle=float(randint(1,20))
bottlesize=[375.0,750.0,1500.0][randint(0,2)]
price=wineprice(rating,age)
price*=(bottlesize/750)
price*=(random()*0.2+0.9)
rows.append({'input':(rating,age,aisle,bottlesize),
'result':price})
return rows
def rescale(data,scale):
scaleddata=[]
for row in data:
scaled=[scale[i]*row['input'][i] for i in range(len(scale))]
scaleddata.append({'input':scaled,'result':row['result']})
return scaleddata
def createcostfunction(algf,data):
def costf(scale):
sdata=rescale(data,scale)
return crossvalidate(algf,sdata,trials=20)
return costf
weightdomain=[(0,10)]*4
def wineset3():
rows=wineset1()
for row in rows:
if random()<0.5:
# Wine was bought at a discount store
row['result']*=0.6
return rows
def probguess(data,vec1,low,high,k=5,weightf=gaussian):
dlist=getdistances(data,vec1)
nweight=0.0
tweight=0.0
for i in range(k):
dist=dlist[i][0]
idx=dlist[i][1]
weight=weightf(dist)
v=data[idx]['result']
# Is this point in the range?
if v>=low and v<=high:
nweight+=weight
tweight+=weight
if tweight==0: return 0
# The probability is the weights in the range
# divided by all the weights
return nweight/tweight
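# Usage sketch (hypothetical input vector): probguess(data, (99.0, 5.0), 40, 80)
# returns the weighted fraction of the k nearest wines whose price falls
# within [40, 80]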
from pylab import *
def cumulativegraph(data,vec1,high,k=5,weightf=gaussian):
t1=arange(0.0,high,0.1)
cprob=array([probguess(data,vec1,0,v,k,weightf) for v in t1])
plot(t1,cprob)
show()
def probabilitygraph(data,vec1,high,k=5,weightf=gaussian,ss=5.0):
# Make a range for the prices
t1=arange(0.0,high,0.1)
# Get the probabilities for the entire range
probs=[probguess(data,vec1,v,v+0.1,k,weightf) for v in t1]
  # Smooth them by adding the gaussian of the nearby probabilities
smoothed=[]
for i in range(len(probs)):
sv=0.0
for j in range(0,len(probs)):
dist=abs(i-j)*0.1
weight=gaussian(dist,sigma=ss)
sv+=weight*probs[j]
smoothed.append(sv)
smoothed=array(smoothed)
plot(t1,smoothed)
show()
|
jefflyn/buddha
|
src/pci/chapter8/numpredict.py
|
Python
|
artistic-2.0
| 4,802
|
[
"Gaussian"
] |
bf6b16c2973ead155c9ed0a453cd7106a44d20774ba0b2229838d8ed118c2ed6
|
# -*- coding: utf-8 -*-
WHITE = 1 # node not yet visited
GRAY = 2 # node discovered, neighbours still being explored
BLACK = 3 # node and all of its neighbours fully explored
class Graph(object):
def __init__(self):
self._edges = {}
self._nodes = {} # ensure nodes are unique
def insert_edge(self, index1, index2):
if index1 not in self._nodes:
self._nodes[index1] = Node(index1)
node1 = self._nodes[index1]
if index2 not in self._nodes:
self._nodes[index2] = Node(index2)
node2 = self._nodes[index2]
        # since this is an undirected graph, record both directions
# record - node1 -> node2
if node1.index() not in self._edges:
self._edges[node1.index()] = [node2]
else:
            # Is node2 already in this list?
exists_p = False
for node in self._edges[node1.index()]:
if node.index() == node2.index():
exists_p = True
if not exists_p:
self._edges[node1.index()].append(node2)
# ... and node2 -> node1
if node2.index() not in self._edges:
self._edges[node2.index()] = [node1]
else:
            # Is node1 already in this list?
exists_p = False
for node in self._edges[node2.index()]:
if node.index() == node1.index():
exists_p = True
if not exists_p:
self._edges[node2.index()].append(node1)
def dump(self):
print("dump".format())
for src in self._edges:
tmp = "<node: {0}, {1} - ".format(src, len(self._edges[src]))
first = True
for dest in self._edges[src]:
tmp += str(dest)
tmp += ","
tmp = tmp[:-1] # trim trailing comma
tmp += ">"
print(tmp)
def dfs_visit(self, node, depth=0):
if isinstance( node, int ):
node = self._nodes[node]
depth += 1
if node.color() == WHITE:
print("\t"*depth + str(node))
node.set_state(GRAY)
# import pdb; pdb.set_trace()
for child in self._edges[node.index()]:
if child.color() == WHITE:
self.dfs_visit(child, depth)
node.set_state(BLACK)
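# Usage sketch (hypothetical indices):
# g = Graph(); g.insert_edge(1, 2); g.insert_edge(2, 3); g.dfs_visit(1)
# prints each reachable node once, indented by its depth in the DFS tree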
class Node(object):
def __init__(self, value):
self._state = WHITE
self._index = value
def __str__(self):
state = None
if self._state == WHITE:
state = "white"
elif self._state == GRAY:
state = "gray"
else:
state = "black"
return "<{0}, {1}>".format(self._index, state)
def color(self):
return self._state
def index(self):
return self._index
def set_state(self, state):
self._state = state
|
kern3020/incubator
|
unrooted/graph.py
|
Python
|
mit
| 2,830
|
[
"VisIt"
] |
dc9e49c3ec16c21097beb341b59c28862571886759374da9008ce4e27811b4ad
|
# -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import unicode_literals
import copy
import os
import re
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches,
)
from django.core.cache.utils import make_template_fragment_key
from django.db import connection, connections
from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, SimpleTestCase, TestCase, TransactionTestCase,
override_settings,
)
from django.test.signals import setting_changed
from django.utils import six, timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control,
patch_response_headers, patch_vary_headers,
)
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
from .models import Poll, expensive_calculation
try: # Use the same idiom as in cache backends
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpicklable(object):
def __getstate__(self):
raise pickle.PickleError()
class UnpicklableType(object):
# Unpicklable using the default pickling protocol on Python 2.
__slots__ = 'a',
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(SimpleTestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertTrue(result)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Non-existent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertIsNone(cache.get("key1"))
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertFalse(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr, 'answer')
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr, 'answer')
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertIsNone(cache.get("expire2"))
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
cache.set_many({'a': 1, 'b': 2})
cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr_version, 'answer')
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr_version, 'answer')
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
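# For illustration only (hypothetical values): with KEY_FUNCTION pointing at
# custom_key_func, a key 'answer' stored under KEY_PREFIX 'p1' at version 2
# becomes 'CUSTOM-p1-2-answer' before it reaches the backend.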
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, **params):
    # `base` pulls in the memcached config from the original settings,
    # `params` holds test-specific overrides, and `_caches_setting_base` is
    # the base config for the tests.
    # This results in the following search order:
    # params -> _caches_setting_base -> base
base = base or {}
setting = {k: base.copy() for k in _caches_setting_base.keys()}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
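# A minimal sketch of the merge order above, assuming an empty `base`:
#
#   caches_setting_for_tests(BACKEND='some.Backend')['v2']
#   # -> {'VERSION': 2, 'BACKEND': 'some.Backend'}
#
# i.e. every alias gets the shared `params` overrides layered on top of its
# own entry from `_caches_setting_base`.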
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
        # Test for cache key conflicts between caches that share a backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertDictEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertDictEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
'''
Using a timeout greater than 30 days makes memcached think
it is an absolute expiration timestamp instead of a relative
offset. Test that we honour this convention. Refs #12399.
'''
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
'''
Passing in None into timeout results in a value that is cached forever
'''
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
added = cache.add('key1', 'new eggs', None)
self.assertEqual(added, False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_zero_timeout(self):
'''
Passing in zero into timeout results in a value that is not cached
'''
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
cache.add('key2', 'ham', 0)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count = count + 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
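    # Note on the expected counts above (an inference from the configured
    # options, hedged accordingly): with MAX_ENTRIES=30, a cull fires once
    # the cache grows past the limit. The default CULL_FREQUENCY of 3 removes
    # roughly a third of the entries each time, leaving 29 of the 49 keys
    # here, while CULL_FREQUENCY=0 empties the cache entirely on a cull, so
    # only the keys set afterwards (19 here) survive. The exact counts are
    # backend-specific; the DB backend overrides test_zero_cull below.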
def test_invalid_keys(self):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
# mimic custom ``make_key`` method being defined since the default will
# never show the below warnings
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached does not allow whitespace or control characters in keys
cache.set('key with spaces', 'value')
self.assertEqual(len(w), 2)
self.assertIsInstance(w[0].message, CacheKeyWarning)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached limits key length to 250
cache.set('a' * 251, 'value')
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, CacheKeyWarning)
finally:
cache.key_func = old_func
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1']),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertDictEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2']),
{'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3']),
{'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertDictEqual(cache.get_many(['ford4', 'arthur4']),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpicklable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
# Shouldn't fail silently if trying to cache an unpicklable type.
with self.assertRaises(pickle.PickleError):
cache.add('unpicklable', Unpicklable())
def test_set_fail_on_pickleerror(self):
with self.assertRaises(pickle.PickleError):
cache.set('unpicklable', Unpicklable())
def test_get_or_set(self):
self.assertIsNone(cache.get('projector'))
self.assertEqual(cache.get_or_set('projector', 42), 42)
self.assertEqual(cache.get('projector'), 42)
def test_get_or_set_callable(self):
def my_callable():
return 'value'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'value')
def test_get_or_set_version(self):
cache.get_or_set('brian', 1979, version=2)
with self.assertRaisesMessage(ValueError, 'You need to specify a value.'):
cache.get_or_set('brian')
with self.assertRaisesMessage(ValueError, 'You need to specify a value.'):
cache.get_or_set('brian', version=1)
self.assertIsNone(cache.get('brian', version=1))
self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
self.assertIsNone(cache.get('brian', version=3))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
        # The super call needs to happen first for the settings override.
super(DBCacheTests, self).setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super(DBCacheTests, self).tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0, interactive=False)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 18)
def test_second_call_doesnt_crash(self):
out = six.StringIO()
management.call_command('createcachetable', stdout=out)
self.assertEqual(out.getvalue(),
"Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Use another table name to avoid the 'table already exists' message.
LOCATION='createcachetable_dry_run_mode'
))
def test_createcachetable_dry_run_mode(self):
out = six.StringIO()
management.call_command('createcachetable', dry_run=True, stdout=out)
output = out.getvalue()
self.assertTrue(output.startswith("CREATE TABLE"))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
out = six.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=out,
)
self.assertEqual(out.getvalue(),
"Cache table 'test cache table' created.\n")
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter(object):
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def allow_migrate(self, db, app_label, **hints):
if app_label == 'django_cache':
return db == 'other'
return None
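# Illustrative sketch of how this router is consulted (hypothetical calls):
# returning None defers the decision to the next router in DATABASE_ROUTERS,
# so only the cache model is pinned to 'other'.
#
#   router = DBCacheRouter()
#   router.allow_migrate('other', 'django_cache')      # -> True
#   router.allow_migrate('default', 'django_cache')    # -> False
#   router.allow_migrate('default', 'some_other_app')  # -> None (no opinion)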
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
multi_db = True
@override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
def test_createcachetable_observes_database_router(self):
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable',
database='default',
verbosity=0, interactive=False)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections['other'].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using='other'):
management.call_command('createcachetable',
database='other',
verbosity=0, interactive=False)
class PicklingSideEffect(object):
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
if self.cache._lock.active_writers:
self.locked = True
return {}
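# PicklingSideEffect works because pickle calls __getstate__ while the value
# is being serialized: if the locmem backend still held its write lock at
# that point, `active_writers` would be non-zero and `locked` would flip to
# True, which test_locking_on_pickle below asserts against.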
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super(LocMemCacheTests, self).setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Check that multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertIsNone(caches['other'].get('value'))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
cache.add('add', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
cache.incr(key)
self.assertEqual(expire, cache._expire_info[_key])
cache.decr(key)
self.assertEqual(expire, cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
memcached_params = {}
for _cache_params in settings.CACHES.values():
if _cache_params['BACKEND'].startswith('django.core.cache.backends.memcached.'):
memcached_params = _cache_params
memcached_never_expiring_params = memcached_params.copy()
memcached_never_expiring_params['TIMEOUT'] = None
memcached_far_future_params = memcached_params.copy()
memcached_far_future_params['TIMEOUT'] = 31536000 # 60*60*24*365, 1 year
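# 31536000 seconds is well past memcached's 30-day threshold (2592000s) for
# relative timeouts, so the backend has to convert it to an absolute Unix
# timestamp for the key to survive (see test_long_timeout above).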
@unittest.skipUnless(memcached_params, "memcached not available")
@override_settings(CACHES=caches_setting_for_tests(base=memcached_params))
class MemcachedCacheTests(BaseCacheTests, TestCase):
def test_invalid_keys(self):
"""
On memcached, we don't introduce a duplicate key validation
step (for speed reasons), we just let the memcached API
library raise its own exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
self.assertRaises(Exception, cache.set, 'key with spaces', 'value')
# memcached limits key length to 250
self.assertRaises(Exception, cache.set, 'a' * 251, 'value')
# Explicitly display a skipped test if no configured cache uses MemcachedCache
@unittest.skipUnless(
memcached_params.get('BACKEND') == 'django.core.cache.backends.memcached.MemcachedCache',
"cache with python-memcached library not available")
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key, cache_config in settings.CACHES.items():
if cache_config['BACKEND'] == 'django.core.cache.backends.memcached.MemcachedCache':
self.assertEqual(caches[cache_key]._cache.pickleProtocol,
pickle.HIGHEST_PROTOCOL)
@override_settings(CACHES=caches_setting_for_tests(base=memcached_never_expiring_params))
def test_default_never_expiring_timeout(self):
# Regression test for #22845
cache.set('infinite_foo', 'bar')
self.assertEqual(cache.get('infinite_foo'), 'bar')
@override_settings(CACHES=caches_setting_for_tests(base=memcached_far_future_params))
def test_default_far_future_timeout(self):
# Regression test for #22845
cache.set('future_foo', 'bar')
self.assertEqual(cache.get('future_foo'), 'bar')
def test_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_zero_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_memcached_deletes_key_on_failed_set(self):
# By default memcached allows objects up to 1MB. For the cache_db session
# backend to always use the current session, memcached needs to delete
# the old key if it fails to set.
# pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can
# tell from a quick check of its source code. This is falling back to
# the default value exposed by python-memcached on my system.
max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576)
cache.set('small_value', 'a')
self.assertEqual(cache.get('small_value'), 'a')
large_value = 'a' * (max_value_length + 1)
cache.set('small_value', large_value)
# small_value should be deleted, or set if configured to accept larger values
value = cache.get('small_value')
self.assertTrue(value is None or value == large_value)
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super(FileBasedCacheTests, self).setUp()
self.dirname = tempfile.mkdtemp()
# Caches location cannot be modified through override_settings / modify_settings,
# hence settings are manipulated directly here and the setting_changed signal
# is triggered manually.
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
setting_changed.send(self.__class__, setting='CACHES', enter=False)
def tearDown(self):
super(FileBasedCacheTests, self).tearDown()
# Call parent first, as cache.clear() may recreate cache base directory
shutil.rmtree(self.dirname)
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
        self.assertTrue(os.path.exists(self.dirname))
def test_cache_write_unpicklable_type(self):
# This fails if not using the highest pickling protocol on Python 2.
cache.set('unpicklable', UnpicklableType())
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(SimpleTestCase):
"""
    Tests for the ability to mix in a custom ``validate_key`` method to
    a custom cache backend that otherwise inherits from a builtin
    backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class CacheClosingTests(SimpleTestCase):
def test_close(self):
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
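# deepcopy keeps the TIMEOUT override from leaking back into
# DEFAULT_MEMORY_CACHES_SETTINGS through the shared inner dict.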
class DefaultNonExpiringCacheKeyTests(SimpleTestCase):
"""Tests that verify that settings having Cache arguments with a TIMEOUT
set to `None` will create Caches that will set non-expiring keys.
This fixes ticket #22085.
"""
def setUp(self):
        # The default 5 minute (300 second) expiration time for keys is
        # defined in the __init__() method of the BaseCache type.
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
        del self.DEFAULT_TIMEOUT
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined inside the __init__() method of the
:class:`django.core.cache.backends.base.BaseCache` type.
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
        default settings will have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIsNone(cache.default_timeout)
self.assertIsNone(cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
keys having the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNotNone(cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
        """Memory caches that have the TIMEOUT parameter set to `None` will
        set a non-expiring key by default.
        """
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class CacheUtils(SimpleTestCase):
"""TestCase for django.utils.cache functions."""
def setUp(self):
self.host = 'www.example.com'
self.path = '/cache/test/'
self.factory = RequestFactory(HTTP_HOST=self.host)
def tearDown(self):
cache.clear()
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = True if not update_cache else update_cache
return request
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
key_prefix = 'localprefix'
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, {'private'}),
('', {'private': True}, {'private'}),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, {'private'}),
('private', {'public': True}, {'public'}),
('public', {'public': True}, {'public'}),
('public', {'private': True}, {'private'}),
('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(SimpleTestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=[
('en', 'English'),
('es', 'Spanish'),
],
)
class CacheI18nTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
# This is tightly coupled to the implementation,
# but it's the most straightforward way to test the key.
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_with_non_ascii_tzname(self):
# Regression test for #17476
class CustomTzName(timezone.UTC):
name = ''
def tzname(self, dt):
return self.name
request = self.factory.get(self.path)
response = HttpResponse()
with timezone.override(CustomTzName()):
CustomTzName.name = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
CustomTzName.name = 'Hora estándar de Argentina' # unicode
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
        # cache with non-empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# Check that we can recover the cache
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# Check that we use etags
self.assertTrue(get_cache_data.has_header('ETag'))
# Check that we can disable etags
with self.settings(USE_ETAGS=False):
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertFalse(get_cache_data.has_header('ETag'))
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
        # change the language again
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
        # change the language again
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# This test passes on Python < 3.3 even without the corresponding code
# in UpdateCacheMiddleware, because pickling a StreamingHttpResponse
# fails (http://bugs.python.org/issue14288). LocMemCache silently
# swallows the exception and doesn't store the response in cache.
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
def csrf_view(request):
return HttpResponse(csrf(request)['csrf_token'])
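# Module-level views used as cacheable targets below: hello_world_view echoes
# a per-call value (handy for telling cache hits from misses by the response
# content), and csrf_view sets a CSRF cookie so that
# test_sensitive_cookie_not_cached can check such responses are never cached.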
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(SimpleTestCase):
def setUp(self):
super(CacheMiddlewareTest, self).setUp()
self.factory = RequestFactory()
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super(CacheMiddlewareTest, self).tearDown()
def test_constructor(self):
"""
        Ensure the constructor distinguishes between CacheMiddleware used as
        middleware and CacheMiddleware used as a view decorator, and sets
        attributes appropriately.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
# Value of DEFAULT_CACHE_ALIAS from django.core.cache
self.assertEqual(as_view_decorator.cache_alias, 'default')
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
# .. but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
# .. even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
csrf_middleware = CsrfViewMiddleware()
cache_middleware = CacheMiddleware()
request = self.factory.get('/view/')
self.assertIsNone(cache_middleware.process_request(request))
csrf_middleware.process_view(request, csrf_view, (), {})
response = csrf_view(request)
response = csrf_middleware.process_response(request, response)
response = cache_middleware.process_response(request, response)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(SimpleTestCase):
"""
Tests various headers w/ TemplateResponse.
Most are probably redundant since they manipulate the same object
anyway, but the ETag header is 'special' because it relies on the
content being complete (which is not necessarily always the case
with a TemplateResponse)
"""
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
@override_settings(USE_ETAGS=False)
def test_without_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertFalse(response.has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_with_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(SimpleTestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key,
'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key,
'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key,
'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
class CacheHandlerTest(SimpleTestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertIs(cache1, cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertIsNot(c[0], c[1])
|
MounirMesselmeni/django
|
tests/cache/tests.py
|
Python
|
bsd-3-clause
| 85,442
|
[
"Brian"
] |
535f34ea543a58cab5ab5c4e52ee34a73e3e0db493288b33156666eae7d5acd0
|
"""
Maximum likelihood covariance estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
# avoid division truncation
from __future__ import division
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator
from ..utils import check_array
from ..utils.extmath import fast_logdet, pinvh
def log_likelihood(emp_cov, precision):
"""Computes the sample mean of the log_likelihood under a covariance model
computes the empirical expected log-likelihood (accounting for the
normalization terms and scaling), allowing for universal comparison (beyond
this software package)
Parameters
----------
emp_cov : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
The precision matrix of the covariance model to be tested
Returns
-------
sample mean of the log-likelihood
"""
p = precision.shape[0]
log_likelihood_ = - np.sum(emp_cov * precision) + fast_logdet(precision)
log_likelihood_ -= p * np.log(2 * np.pi)
log_likelihood_ /= 2.
return log_likelihood_
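# Minimal doctest-style sketch (illustrative, not part of the module): with
# emp_cov = precision = I_p the sum term is -p and the log-determinant is 0,
# so the result is -p * (1 + log(2*pi)) / 2.
#
#   >>> p = 3
#   >>> ll = log_likelihood(np.eye(p), np.eye(p))
#   >>> np.isclose(ll, -p * (1 + np.log(2 * np.pi)) / 2.)
#   True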
def empirical_covariance(X, assume_centered=False):
"""Computes the Maximum likelihood covariance estimator
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
Empirical covariance (Maximum Likelihood Estimator).
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
if assume_centered:
covariance = np.dot(X.T, X) / X.shape[0]
else:
covariance = np.cov(X.T, bias=1)
return covariance
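# Doctest-style sketch (an illustration, not shipped code): with the default
# assume_centered=False the estimate equals the biased sample covariance.
#
#   >>> rng = np.random.RandomState(0)
#   >>> X = rng.randn(100, 3)
#   >>> np.allclose(empirical_covariance(X), np.cov(X.T, bias=1))
#   True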
class EmpiricalCovariance(BaseEstimator):
"""Maximum likelihood covariance estimator
Parameters
----------
store_precision : bool
Specifies if the estimated precision is stored.
assume_centered : bool
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
`covariance_` : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix
`precision_` : 2D ndarray, shape (n_features, n_features)
Estimated pseudo-inverse matrix.
(stored only if store_precision is True)
"""
def __init__(self, store_precision=True, assume_centered=False):
self.store_precision = store_precision
self.assume_centered = assume_centered
def _set_covariance(self, covariance):
"""Saves the covariance and precision estimates
Storage is done accordingly to `self.store_precision`.
Precision stored only if invertible.
Parameters
----------
covariance : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix to be stored, and from which precision
is computed.
"""
covariance = check_array(covariance)
# set covariance
self.covariance_ = covariance
# set precision
if self.store_precision:
self.precision_ = pinvh(covariance)
else:
self.precision_ = None
def get_precision(self):
"""Getter for the precision matrix.
Returns
-------
`precision_` : array-like,
The precision matrix associated to the current covariance object.
"""
if self.store_precision:
precision = self.precision_
else:
precision = pinvh(self.covariance_)
return precision
def fit(self, X, y=None):
"""Fits the Maximum Likelihood Estimator covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples and
n_features is the number of features.
y : not used, present for API consistency purposes.
Returns
-------
self : object
Returns self.
"""
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(
X, assume_centered=self.assume_centered)
self._set_covariance(covariance)
return self
def score(self, X_test, y=None):
"""Computes the log-likelihood of a Gaussian data set with
`self.covariance_` as an estimator of its covariance matrix.
Parameters
----------
X_test : array-like, shape = [n_samples, n_features]
Test data of which we compute the likelihood, where n_samples is
the number of samples and n_features is the number of features.
X_test is assumed to be drawn from the same distribution as
the data used in fit (including centering).
y : not used, present for API consistency purposes.
Returns
-------
res : float
The likelihood of the data set with `self.covariance_` as an
estimator of its covariance matrix.
"""
# compute empirical covariance of the test set
test_cov = empirical_covariance(
X_test - self.location_, assume_centered=True)
# compute log likelihood
res = log_likelihood(test_cov, self.get_precision())
return res
def error_norm(self, comp_cov, norm='frobenius', scaling=True,
squared=True):
"""Computes the Mean Squared Error between two covariance estimators.
(In the sense of the Frobenius norm).
Parameters
----------
comp_cov : array-like, shape = [n_features, n_features]
The covariance to compare with.
norm : str
The type of norm used to compute the error. Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
- 'spectral': sqrt(max(eigenvalues(A^t.A))
where A is the error ``(comp_cov - self.covariance_)``.
scaling : bool
If True (default), the squared error norm is divided by n_features.
If False, the squared error norm is not rescaled.
squared : bool
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
The Mean Squared Error (in the sense of the Frobenius norm) between
`self` and `comp_cov` covariance estimators.
"""
# compute the error
error = comp_cov - self.covariance_
# compute the error norm
if norm == "frobenius":
squared_norm = np.sum(error ** 2)
elif norm == "spectral":
squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
else:
raise NotImplementedError(
"Only spectral and frobenius norms are implemented")
# optionally scale the error norm
if scaling:
squared_norm = squared_norm / error.shape[0]
# finally get either the squared norm or the norm
if squared:
result = squared_norm
else:
result = np.sqrt(squared_norm)
return result
def mahalanobis(self, observations):
"""Computes the Mahalanobis distances of given observations.
The provided observations are assumed to be centered. One may want to
center them using a location estimate first.
Parameters
----------
observations : array-like, shape = [n_observations, n_features]
The observations for which we compute the Mahalanobis distances.
Observations are assumed to be drawn from the same distribution
as the data used in fit (including centering).
Returns
-------
mahalanobis_distance : array, shape = [n_observations,]
Squared Mahalanobis distances of the observations.
"""
precision = self.get_precision()
# compute mahalanobis distances
centered_obs = observations - self.location_
mahalanobis_dist = np.sum(
np.dot(centered_obs, precision) * centered_obs, 1)
return mahalanobis_dist
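# End-to-end usage sketch (hedged; a doctest-style illustration, not part of
# the class):
#
#   >>> rng = np.random.RandomState(42)
#   >>> X = rng.randn(500, 2)
#   >>> est = EmpiricalCovariance().fit(X)
#   >>> est.covariance_.shape
#   (2, 2)
#   >>> est.mahalanobis(X).shape   # squared Mahalanobis distances
#   (500,)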
|
eickenberg/scikit-learn
|
sklearn/covariance/empirical_covariance_.py
|
Python
|
bsd-3-clause
| 9,099
|
[
"Gaussian"
] |
48b0d060325f41746021ca6af3bbba058b74dc500b65631107e21e97f4eab858
|
""" CSAPI exposes update functionalities to the Configuration.
Most of these functions can only be done by administrators
"""
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Core.Utilities import List, Time
from DIRAC.Core.Security.X509Chain import X509Chain # pylint: disable=import-error
from DIRAC.Core.Security import Locations
from DIRAC.ConfigurationSystem.private.Modificator import Modificator
from DIRAC.ConfigurationSystem.Client.Helpers import CSGlobals
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
__RCSID__ = "$Id$"
class CSAPI(object):
""" CSAPI objects need an initialization phase
"""
def __init__(self):
"""
Initialization function
"""
self.csModified = False
self.__baseSecurity = "/Registry"
self.__userDN = ''
self.__userGroup = ''
self.__rpcClient = None
self.__csMod = None
self.__initialized = S_ERROR("Not initialized")
self.initialize()
if not self.__initialized['OK']:
gLogger.error(self.__initialized)
def __getProxyID(self):
proxyLocation = Locations.getProxyLocation()
if not proxyLocation:
gLogger.error("No proxy found!")
return False
chain = X509Chain()
if not chain.loadProxyFromFile(proxyLocation):
gLogger.error("Can't read proxy!", proxyLocation)
return False
retVal = chain.getIssuerCert()
if not retVal['OK']:
gLogger.error("Can't parse proxy!", retVal['Message'])
return False
idCert = retVal['Value']
self.__userDN = idCert.getSubjectDN()['Value']
self.__userGroup = chain.getDIRACGroup()['Value']
return True
def __getCertificateID(self):
certLocation = Locations.getHostCertificateAndKeyLocation()
if not certLocation:
gLogger.error("No certificate found!")
return False
chain = X509Chain()
retVal = chain.loadChainFromFile(certLocation[0])
if not retVal['OK']:
gLogger.error("Can't parse certificate!", retVal['Message'])
return False
idCert = chain.getIssuerCert()['Value']
self.__userDN = idCert.getSubjectDN()['Value']
self.__userGroup = 'host'
return True
def initialize(self):
if self.__initialized['OK']:
return self.__initialized
if not gConfig.useServerCertificate():
res = self.__getProxyID()
else:
res = self.__getCertificateID()
if not res:
self.__initialized = S_ERROR("Cannot locate client credentials")
return self.__initialized
retVal = gConfig.getOption("/DIRAC/Configuration/MasterServer")
if not retVal['OK']:
self.__initialized = S_ERROR("Master server is not known. Is everything initialized?")
return self.__initialized
self.__rpcClient = RPCClient(gConfig.getValue("/DIRAC/Configuration/MasterServer", ""))
self.__csMod = Modificator(self.__rpcClient, "%s - %s - %s" %
(self.__userGroup, self.__userDN, Time.dateTime().strftime("%Y-%m-%d %H:%M:%S")))
retVal = self.downloadCSData()
if not retVal['OK']:
self.__initialized = S_ERROR("Can not download the remote cfg. Is everything initialized?")
return self.__initialized
self.__initialized = S_OK()
return self.__initialized
def downloadCSData(self):
if not self.__csMod:
return S_ERROR("CSAPI not yet initialized")
result = self.__csMod.loadFromRemote()
if not result['OK']:
return result
self.csModified = False
self.__csMod.updateGConfigurationData()
return S_OK()
def listUsers(self, group=False):
if not self.__initialized['OK']:
return self.__initialized
if not group:
return S_OK(self.__csMod.getSections("%s/Users" % self.__baseSecurity))
users = self.__csMod.getValue("%s/Groups/%s/Users" % (self.__baseSecurity, group))
if not users:
return S_OK([])
return S_OK(List.fromChar(users))
def listHosts(self):
if not self.__initialized['OK']:
return self.__initialized
return S_OK(self.__csMod.getSections("%s/Hosts" % self.__baseSecurity))
def describeUsers(self, users=None):
""" describe users by nickname
:param list users: list of users' nicknames
:return: a S_OK(description) of the users in input
"""
if users is None:
users = []
if not self.__initialized['OK']:
return self.__initialized
return S_OK(self.__describeEntity(users))
def describeHosts(self, hosts=None):
if hosts is None:
hosts = []
if not self.__initialized['OK']:
return self.__initialized
return S_OK(self.__describeEntity(hosts, True))
def __describeEntity(self, mask, hosts=False):
if hosts:
csSection = "%s/Hosts" % self.__baseSecurity
else:
csSection = "%s/Users" % self.__baseSecurity
if mask:
entities = [entity for entity in self.__csMod.getSections(csSection) if entity in mask]
else:
entities = self.__csMod.getSections(csSection)
entitiesDict = {}
for entity in entities:
entitiesDict[entity] = {}
for option in self.__csMod.getOptions("%s/%s" % (csSection, entity)):
entitiesDict[entity][option] = self.__csMod.getValue("%s/%s/%s" % (csSection, entity, option))
if not hosts:
groupsDict = self.describeGroups()['Value']
entitiesDict[entity]['Groups'] = []
for group in groupsDict:
if 'Users' in groupsDict[group] and entity in groupsDict[group]['Users']:
entitiesDict[entity]['Groups'].append(group)
entitiesDict[entity]['Groups'].sort()
return entitiesDict
def listGroups(self):
"""
List all groups
"""
if not self.__initialized['OK']:
return self.__initialized
return S_OK(self.__csMod.getSections("%s/Groups" % self.__baseSecurity))
def describeGroups(self, mask=None):
"""
List all groups that are in the mask (or all if no mask) with their properties
"""
if mask is None:
mask = []
if not self.__initialized['OK']:
return self.__initialized
groups = [
group for group in self.__csMod.getSections(
"%s/Groups" %
self.__baseSecurity) if not mask or (
mask and group in mask)]
groupsDict = {}
for group in groups:
groupsDict[group] = {}
for option in self.__csMod.getOptions("%s/Groups/%s" % (self.__baseSecurity, group)):
groupsDict[group][option] = self.__csMod.getValue("%s/Groups/%s/%s" % (self.__baseSecurity, group, option))
if option in ("Users", "Properties"):
groupsDict[group][option] = List.fromChar(groupsDict[group][option])
return S_OK(groupsDict)
def deleteUsers(self, users):
"""
Delete one or more users. Accepts a single username (string) or a list of usernames.
"""
if not self.__initialized['OK']:
return self.__initialized
if isinstance(users, basestring):
users = [users]
usersData = self.describeUsers(users)['Value']
for username in users:
if username not in usersData:
gLogger.warn("User %s does not exist")
continue
userGroups = usersData[username]['Groups']
for group in userGroups:
self.__removeUserFromGroup(group, username)
gLogger.info("Deleted user %s from group %s" % (username, group))
self.__csMod.removeSection("%s/Users/%s" % (self.__baseSecurity, username))
gLogger.info("Deleted user %s" % username)
self.csModified = True
return S_OK(True)
def __removeUserFromGroup(self, group, username):
"""
Remove user from a group
"""
usersInGroup = self.__csMod.getValue("%s/Groups/%s/Users" % (self.__baseSecurity, group))
if usersInGroup is not None:
userList = List.fromChar(usersInGroup, ",")
userPos = userList.index(username)
userList.pop(userPos)
self.__csMod.setOptionValue("%s/Groups/%s/Users" % (self.__baseSecurity, group), ",".join(userList))
def __addUserToGroup(self, group, username):
"""
Add user to a group
"""
usersInGroup = self.__csMod.getValue("%s/Groups/%s/Users" % (self.__baseSecurity, group))
if usersInGroup is not None:
userList = List.fromChar(usersInGroup)
if username not in userList:
userList.append(username)
self.__csMod.setOptionValue("%s/Groups/%s/Users" % (self.__baseSecurity, group), ",".join(userList))
else:
gLogger.warn("User %s is already in group %s" % (username, group))
def addUser(self, username, properties):
"""
Add a user to the cs
:param str username: user name
:param dict properties: dictionary describing user properties:
- DN
- groups
- <extra params>
:return: True/False
"""
if not self.__initialized['OK']:
return self.__initialized
for prop in ("DN", "Groups"):
if prop not in properties:
gLogger.error("Missing property for user", "%s: %s" % (prop, username))
return S_OK(False)
if username in self.listUsers()['Value']:
gLogger.error("User is already registered", username)
return S_OK(False)
groups = self.listGroups()['Value']
for userGroup in properties['Groups']:
if userGroup not in groups:
gLogger.error("User group is not a valid group", "%s %s" % (username, userGroup))
return S_OK(False)
self.__csMod.createSection("%s/Users/%s" % (self.__baseSecurity, username))
for prop in properties:
if prop == "Groups":
continue
self.__csMod.setOptionValue("%s/Users/%s/%s" % (self.__baseSecurity, username, prop), properties[prop])
for userGroup in properties['Groups']:
gLogger.info("Added user %s to group %s" % (username, userGroup))
self.__addUserToGroup(userGroup, username)
gLogger.info("Registered user %s" % username)
self.csModified = True
return S_OK(True)
def modifyUser(self, username, properties, createIfNonExistant=False):
"""
Modify a user
:param str username: user name
:param dict properties: dictionary describing user properties:
- DN
- Groups
- <extra params>
:return: S_OK, Value = True/False
"""
if not self.__initialized['OK']:
return self.__initialized
modifiedUser = False
userData = self.describeUsers([username])['Value']
if username not in userData:
if createIfNonExistant:
gLogger.info("Registering user %s" % username)
return self.addUser(username, properties)
gLogger.error("User is not registered", username)
return S_OK(False)
for prop in properties:
if prop == "Groups":
continue
prevVal = self.__csMod.getValue("%s/Users/%s/%s" % (self.__baseSecurity, username, prop))
if not prevVal or prevVal != properties[prop]:
gLogger.info("Setting %s property for user %s to %s" % (prop, username, properties[prop]))
self.__csMod.setOptionValue("%s/Users/%s/%s" % (self.__baseSecurity, username, prop), properties[prop])
modifiedUser = True
if 'Groups' in properties:
groups = self.listGroups()['Value']
for userGroup in properties['Groups']:
if userGroup not in groups:
gLogger.error("User group is not a valid group", "%s %s" % (username, userGroup))
return S_OK(False)
groupsToBeDeletedFrom = []
groupsToBeAddedTo = []
for prevGroup in userData[username]['Groups']:
if prevGroup not in properties['Groups']:
groupsToBeDeletedFrom.append(prevGroup)
modifiedUser = True
for newGroup in properties['Groups']:
if newGroup not in userData[username]['Groups']:
groupsToBeAddedTo.append(newGroup)
modifiedUser = True
for group in groupsToBeDeletedFrom:
self.__removeUserFromGroup(group, username)
gLogger.info("Removed user %s from group %s" % (username, group))
for group in groupsToBeAddedTo:
self.__addUserToGroup(group, username)
gLogger.info("Added user %s to group %s" % (username, group))
modified = False
if modifiedUser:
modified = True
gLogger.info("Modified user %s" % username)
self.csModified = True
else:
gLogger.info("Nothing to modify for user %s" % username)
return S_OK(modified)
def addGroup(self, groupname, properties):
"""
Add a group to the cs
:param str groupname: group name
:param dict properties: dictionary describing group properties:
- Users
- Properties
- <extra params>
:return: True/False
"""
if not self.__initialized['OK']:
return self.__initialized
if groupname in self.listGroups()['Value']:
gLogger.error("Group is already registered", groupname)
return S_OK(False)
self.__csMod.createSection("%s/Groups/%s" % (self.__baseSecurity, groupname))
for prop in properties:
self.__csMod.setOptionValue("%s/Groups/%s/%s" % (self.__baseSecurity, groupname, prop), properties[prop])
gLogger.info("Registered group %s" % groupname)
self.csModified = True
return S_OK(True)
def modifyGroup(self, groupname, properties, createIfNonExistant=False):
"""
Modify a group
:param str groupname: group name
:param dict properties: dictionary describing group properties:
- Users
- Properties
- <extra params>
:return: True/False
"""
if not self.__initialized['OK']:
return self.__initialized
modifiedGroup = False
groupData = self.describeGroups([groupname])['Value']
if groupname not in groupData:
if createIfNonExistant:
gLogger.info("Registering group %s" % groupname)
return self.addGroup(groupname, properties)
gLogger.error("Group is not registered", groupname)
return S_OK(False)
for prop in properties:
prevVal = self.__csMod.getValue("%s/Groups/%s/%s" % (self.__baseSecurity, groupname, prop))
if not prevVal or prevVal != properties[prop]:
gLogger.info("Setting %s property for group %s to %s" % (prop, groupname, properties[prop]))
self.__csMod.setOptionValue("%s/Groups/%s/%s" % (self.__baseSecurity, groupname, prop), properties[prop])
modifiedGroup = True
if modifiedGroup:
gLogger.info("Modified group %s" % groupname)
self.csModified = True
else:
gLogger.info("Nothing to modify for group %s" % groupname)
return S_OK(True)
def addHost(self, hostname, properties):
"""
Add a host to the cs
:param str hostname: host name
:param dict properties: dictionary describing host properties:
- DN
- Properties
- <extra params>
:return: True/False
"""
if not self.__initialized['OK']:
return self.__initialized
for prop in ("DN", ):
if prop not in properties:
gLogger.error("Missing property for host", "%s %s" % (prop, hostname))
return S_OK(False)
if hostname in self.listHosts()['Value']:
gLogger.error("Host is already registered", hostname)
return S_OK(False)
self.__csMod.createSection("%s/Hosts/%s" % (self.__baseSecurity, hostname))
for prop in properties:
self.__csMod.setOptionValue("%s/Hosts/%s/%s" % (self.__baseSecurity, hostname, prop), properties[prop])
gLogger.info("Registered host %s" % hostname)
self.csModified = True
return S_OK(True)
def addShifter(self, shifters=None):
"""
Adds or modifies one or more shifters. Also adds the shifter section in case it is not present.
Shifter identities are used in several places, mostly for running agents
:param dict shifters: has to be in the form {'ShifterRole':{'User':'aUserName', 'Group':'aDIRACGroup'}}
:return: S_OK/S_ERROR
"""
def getOpsSection():
"""
Where is the shifters section?
"""
vo = CSGlobals.getVO()
setup = CSGlobals.getSetup()
if vo:
res = gConfig.getSections('/Operations/%s/%s/Shifter' % (vo, setup))
if res['OK']:
return S_OK('/Operations/%s/%s/Shifter' % (vo, setup))
res = gConfig.getSections('/Operations/%s/Defaults/Shifter' % vo)
if res['OK']:
return S_OK('/Operations/%s/Defaults/Shifter' % vo)
else:
res = gConfig.getSections('/Operations/%s/Shifter' % setup)
if res['OK']:
return S_OK('/Operations/%s/Shifter' % setup)
res = gConfig.getSections('/Operations/Defaults/Shifter')
if res['OK']:
return S_OK('/Operations/Defaults/Shifter')
return S_ERROR("No shifter section")
if shifters is None:
shifters = {}
if not self.__initialized['OK']:
return self.__initialized
# get current shifters
opsH = Operations()
currentShifterRoles = opsH.getSections('Shifter')
if not currentShifterRoles['OK']:
# we assume the shifter section is not present
currentShifterRoles = []
else:
currentShifterRoles = currentShifterRoles['Value']
currentShiftersDict = {}
for currentShifterRole in currentShifterRoles:
currentShifter = opsH.getOptionsDict('Shifter/%s' % currentShifterRole)
if not currentShifter['OK']:
return currentShifter
currentShifter = currentShifter['Value']
currentShiftersDict[currentShifterRole] = currentShifter
# Removing from shifters what does not need to be changed
for sRole in shifters.keys(): # note the pop below
if sRole in currentShiftersDict:
if currentShiftersDict[sRole] == shifters[sRole]:
shifters.pop(sRole)
# get shifters section to modify
section = getOpsSection()
# Is this section present?
if not section['OK']:
if section['Message'] == "No shifter section":
gLogger.warn(section['Message'])
gLogger.info("Adding shifter section")
vo = CSGlobals.getVO()
if vo:
section = '/Operations/%s/Defaults/Shifter' % vo
else:
section = '/Operations/Defaults/Shifter'
res = self.__csMod.createSection(section)
if not res:
gLogger.error("Section %s not created" % section)
return S_ERROR("Section %s not created" % section)
else:
gLogger.error(section['Message'])
return section
else:
section = section['Value']
# add or modify shifters
for shifter in shifters:
self.__csMod.removeSection(section + '/' + shifter)
self.__csMod.createSection(section + '/' + shifter)
self.__csMod.createSection(section + '/' + shifter + '/' + 'User')
self.__csMod.createSection(section + '/' + shifter + '/' + 'Group')
self.__csMod.setOptionValue(section + '/' + shifter + '/' + 'User', shifters[shifter]['User'])
self.__csMod.setOptionValue(section + '/' + shifter + '/' + 'Group', shifters[shifter]['Group'])
self.csModified = True
return S_OK(True)
def modifyHost(self, hostname, properties, createIfNonExistant=False):
"""
Modify a host
:param str hostname: host name
:param dict properties: dictionary describing host properties:
- DN
- Properties
- <extra params>
:return: True/False
"""
if not self.__initialized['OK']:
return self.__initialized
modifiedHost = False
hostData = self.describeHosts([hostname])['Value']
if hostname not in hostData:
if createIfNonExistant:
gLogger.info("Registering host %s" % hostname)
return self.addHost(hostname, properties)
gLogger.error("Host is not registered", hostname)
return S_OK(False)
for prop in properties:
prevVal = self.__csMod.getValue("%s/Hosts/%s/%s" % (self.__baseSecurity, hostname, prop))
if not prevVal or prevVal != properties[prop]:
gLogger.info("Setting %s property for host %s to %s" % (prop, hostname, properties[prop]))
self.__csMod.setOptionValue("%s/Hosts/%s/%s" % (self.__baseSecurity, hostname, prop), properties[prop])
modifiedHost = True
if modifiedHost:
gLogger.info("Modified host %s" % hostname)
self.csModified = True
else:
gLogger.info("Nothing to modify for host %s" % hostname)
return S_OK(True)
def syncUsersWithCFG(self, usersCFG):
"""
Sync users with the cfg contents. Usernames have to be sections containing
DN, Groups, and extra properties as parameters
"""
if not self.__initialized['OK']:
return self.__initialized
done = True
for user in usersCFG.listSections():
properties = {}
propList = usersCFG[user].listOptions()
for prop in propList:
if prop == "Groups":
properties[prop] = List.fromChar(usersCFG[user][prop])
else:
properties[prop] = usersCFG[user][prop]
if not self.modifyUser(user, properties, createIfNonExistant=True):
done = False
return S_OK(done)
def sortUsersAndGroups(self):
self.__csMod.sortAlphabetically("%s/Users" % self.__baseSecurity)
self.__csMod.sortAlphabetically("%s/Hosts" % self.__baseSecurity)
for group in self.__csMod.getSections("%s/Groups" % self.__baseSecurity):
usersOptionPath = "%s/Groups/%s/Users" % (self.__baseSecurity, group)
users = self.__csMod.getValue(usersOptionPath)
if users:
usersList = sorted(List.fromChar(users))
sortedUsers = ", ".join(usersList)
if users != sortedUsers:
self.__csMod.setOptionValue(usersOptionPath, sortedUsers)
def checkForUnexistantUsersInGroups(self):
allUsers = self.__csMod.getSections("%s/Users" % self.__baseSecurity)
allGroups = self.__csMod.getSections("%s/Groups" % self.__baseSecurity)
for group in allGroups:
usersInGroup = self.__csMod.getValue("%s/Groups/%s/Users" % (self.__baseSecurity, group))
if usersInGroup:
filteredUsers = []
usersInGroup = List.fromChar(usersInGroup)
for user in usersInGroup:
if user in allUsers:
filteredUsers.append(user)
self.__csMod.setOptionValue("%s/Groups/%s/Users" % (self.__baseSecurity, group),
",".join(filteredUsers))
def commitChanges(self, sortUsers=True):
if not self.__initialized['OK']:
return self.__initialized
if self.csModified:
self.checkForUnexistantUsersInGroups()
if sortUsers:
self.sortUsersAndGroups()
retVal = self.__csMod.commit()
if not retVal['OK']:
gLogger.error("Can't commit new configuration data", "%s" % retVal['Message'])
return retVal
return self.downloadCSData()
return S_OK()
def commit(self):
""" Commit the accumulated changes to the CS server
"""
if not self.__initialized['OK']:
return self.__initialized
if self.csModified:
retVal = self.__csMod.commit()
if not retVal['OK']:
gLogger.error("Can't commit new configuration data", "%s" % retVal['Message'])
return retVal
return self.downloadCSData()
return S_OK()
def mergeFromCFG(self, cfg):
""" Merge the internal CFG data with the input
"""
if not self.__initialized['OK']:
return self.__initialized
self.__csMod.mergeFromCFG(cfg)
self.csModified = True
return S_OK()
def modifyValue(self, optionPath, newValue):
"""Modify an existing value at the specified options path.
"""
if not self.__initialized['OK']:
return self.__initialized
prevVal = self.__csMod.getValue(optionPath)
if prevVal is None:
return S_ERROR('Trying to set %s to %s but option does not exist' % (optionPath, newValue))
gLogger.verbose("Changing %s from \n%s \nto \n%s" % (optionPath, prevVal, newValue))
self.__csMod.setOptionValue(optionPath, newValue)
self.csModified = True
return S_OK('Modified %s' % optionPath)
def setOption(self, optionPath, optionValue):
"""Create an option at the specified path.
"""
if not self.__initialized['OK']:
return self.__initialized
self.__csMod.setOptionValue(optionPath, optionValue)
self.csModified = True
return S_OK('Created new option %s = %s' % (optionPath, optionValue))
def setOptionComment(self, optionPath, comment):
"""Create an option at the specified path.
"""
if not self.__initialized['OK']:
return self.__initialized
self.__csMod.setComment(optionPath, comment)
self.csModified = True
return S_OK('Set option comment %s : %s' % (optionPath, comment))
def delOption(self, optionPath):
""" Delete an option
"""
if not self.__initialized['OK']:
return self.__initialized
if not self.__csMod.removeOption(optionPath):
return S_ERROR("Couldn't delete option %s" % optionPath)
self.csModified = True
return S_OK('Deleted option %s' % optionPath)
def createSection(self, sectionPath, comment=""):
""" Create a new section
"""
if not self.__initialized['OK']:
return self.__initialized
self.__csMod.createSection(sectionPath)
self.csModified = True
if comment:
self.__csMod.setComment(sectionPath, comment)
return S_OK()
def delSection(self, sectionPath):
""" Delete a section
"""
if not self.__initialized['OK']:
return self.__initialized
if not self.__csMod.removeSection(sectionPath):
return S_ERROR("Could not delete section %s " % sectionPath)
self.csModified = True
return S_OK()
def copySection(self, originalPath, targetPath):
""" Copy a whole section to a new location
"""
if not self.__initialized['OK']:
return self.__initialized
cfg = self.__csMod.getCFG()
sectionCfg = cfg[originalPath]
result = self.createSection(targetPath)
if not result['OK']:
return result
if not self.__csMod.mergeSectionFromCFG(targetPath, sectionCfg):
return S_ERROR("Could not merge cfg into section %s" % targetPath)
self.csModified = True
return S_OK()
def moveSection(self, originalPath, targetPath):
""" Move a whole section to a new location
"""
result = self.copySection(originalPath, targetPath)
if not result['OK']:
return result
result = self.delSection(originalPath)
if not result['OK']:
return result
self.csModified = True
return S_OK()
def mergeCFGUnderSection(self, sectionPath, cfg):
""" Merge the given cfg under a certain section
"""
if not self.__initialized['OK']:
return self.__initialized
result = self.createSection(sectionPath)
if not result['OK']:
return result
if not self.__csMod.mergeSectionFromCFG(sectionPath, cfg):
return S_ERROR("Could not merge cfg into section %s" % sectionPath)
self.csModified = True
return S_OK()
def mergeWithCFG(self, cfg):
""" Merge the given cfg with the current config
"""
if not self.__initialized['OK']:
return self.__initialized
self.__csMod.mergeFromCFG(cfg)
self.csModified = True
return S_OK()
def getCurrentCFG(self):
""" Get the current CFG as it is
"""
if not self.__initialized['OK']:
return self.__initialized
return S_OK(self.__csMod.getCFG())
def showDiff(self):
""" Just shows the differences accumulated within the Modificator object
"""
diffData = self.__csMod.showCurrentDiff()
gLogger.notice("Accumulated diff with master CS")
for line in diffData:
if line[0] in ('+', '-'):
gLogger.notice(line)
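# Hedged usage sketch (assumes a configured DIRAC client with a valid proxy;
# the user name, DN, and group below are illustrative):
#
#   csAPI = CSAPI()
#   res = csAPI.addUser('jdoe', {'DN': '/DC=org/DC=example/CN=John Doe',
#                                'Groups': ['dirac_user']})
#   if res['OK'] and res['Value']:
#       csAPI.commitChanges()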
|
fstagni/DIRAC
|
ConfigurationSystem/Client/CSAPI.py
|
Python
|
gpl-3.0
| 27,525
|
[
"DIRAC"
] |
c8902cd5b2de3595e2830d2103447649ca2af2839bb54c2bf7ffe31a724e628b
|
# -*- coding: utf-8 -*-
"""Test sequences for graphiness.
"""
# Copyright (C) 2004-2018 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import heapq
import networkx as nx
__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult (dschult@colgate.edu)'
'Joel Miller (joel.c.miller.research@gmail.com)'
'Ben Edwards'
'Brian Cloteaux <brian.cloteaux@nist.gov>'])
__all__ = ['is_graphical',
'is_multigraphical',
'is_pseudographical',
'is_digraphical',
'is_valid_degree_sequence_erdos_gallai',
'is_valid_degree_sequence_havel_hakimi',
]
def is_graphical(sequence, method='eg'):
"""Returns True if sequence is a valid degree sequence.
A degree sequence is valid if some graph can realize it.
Parameters
----------
sequence : list or iterable container
A sequence of integer node degrees
method : "eg" | "hh"
The method used to validate the degree sequence.
"eg" corresponds to the Erdős-Gallai algorithm, and
"hh" to the Havel-Hakimi algorithm.
Returns
-------
valid : bool
True if the sequence is a valid degree sequence and False if not.
Examples
--------
>>> G = nx.path_graph(4)
>>> sequence = (d for n, d in G.degree())
>>> nx.is_graphical(sequence)
True
References
----------
Erdős-Gallai
[EG1960]_, [choudum1986]_
Havel-Hakimi
[havel1955]_, [hakimi1962]_, [CL1996]_
"""
if method == 'eg':
valid = is_valid_degree_sequence_erdos_gallai(list(sequence))
elif method == 'hh':
valid = is_valid_degree_sequence_havel_hakimi(list(sequence))
else:
msg = "`method` must be 'eg' or 'hh'"
raise nx.NetworkXException(msg)
return valid
def _basic_graphical_tests(deg_sequence):
# Sort and perform some simple tests on the sequence
if not nx.utils.is_list_of_ints(deg_sequence):
raise nx.NetworkXUnfeasible
p = len(deg_sequence)
num_degs = [0] * p
dmax, dmin, dsum, n = 0, p, 0, 0
for d in deg_sequence:
# Reject if degree is negative or is not less than the sequence length
if d < 0 or d >= p:
raise nx.NetworkXUnfeasible
# Process only the non-zero integers
elif d > 0:
dmax, dmin, dsum, n = max(dmax, d), min(dmin, d), dsum + d, n + 1
num_degs[d] += 1
# Reject sequence if it has odd sum or is oversaturated
if dsum % 2 or dsum > n * (n - 1):
raise nx.NetworkXUnfeasible
return dmax, dmin, dsum, n, num_degs
def is_valid_degree_sequence_havel_hakimi(deg_sequence):
r"""Returns True if deg_sequence can be realized by a simple graph.
The validation proceeds using the Havel-Hakimi theorem.
Worst-case run time is $O(s)$ where $s$ is the sum of the sequence.
Parameters
----------
deg_sequence : list
A list of integers where each element specifies the degree of a node
in a graph.
Returns
-------
valid : bool
True if deg_sequence is graphical and False if not.
Notes
-----
The ZZ condition says that for the sequence d if
.. math::
|d| \geq \frac{(\max(d) + \min(d) + 1)^2}{4\min(d)}
then d is graphical. This was shown in Theorem 6 in [1]_.
References
----------
.. [1] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
[havel1955]_, [hakimi1962]_, [CL1996]_
"""
try:
dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence)
except nx.NetworkXUnfeasible:
return False
# Accept if sequence has no non-zero degrees or passes the ZZ condition
if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1):
return True
modstubs = [0] * (dmax + 1)
# Successively reduce degree sequence by removing the maximum degree
while n > 0:
# Retrieve the maximum degree in the sequence
while num_degs[dmax] == 0:
dmax -= 1
# If there are not enough stubs to connect to, then the sequence is
# not graphical
if dmax > n - 1:
return False
# Remove largest stub in list
num_degs[dmax], n = num_degs[dmax] - 1, n - 1
# Reduce the next dmax largest stubs
mslen = 0
k = dmax
for i in range(dmax):
while num_degs[k] == 0:
k -= 1
num_degs[k], n = num_degs[k] - 1, n - 1
if k > 1:
modstubs[mslen] = k - 1
mslen += 1
# Add back to the list any non-zero stubs that were removed
for i in range(mslen):
stub = modstubs[i]
num_degs[stub], n = num_degs[stub] + 1, n + 1
return True
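# Doctest-style sketch (illustrative): K4 has degree sequence [3, 3, 3, 3];
# a degree can never reach the sequence length.
#
#   >>> is_valid_degree_sequence_havel_hakimi([3, 3, 3, 3])
#   True
#   >>> is_valid_degree_sequence_havel_hakimi([4, 1, 1])
#   False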
def is_valid_degree_sequence_erdos_gallai(deg_sequence):
r"""Returns True if deg_sequence can be realized by a simple graph.
The validation is done using the Erdős-Gallai theorem [EG1960]_.
Parameters
----------
deg_sequence : list
A list of integers
Returns
-------
valid : bool
True if deg_sequence is graphical and False if not.
Notes
-----
This implementation uses an equivalent form of the Erdős-Gallai criterion.
Worst-case run time is $O(n)$ where $n$ is the length of the sequence.
Specifically, a sequence d is graphical if and only if the
sum of the sequence is even and for all strong indices k in the sequence,
.. math::
\sum_{i=1}^{k} d_i \leq k(k-1) + \sum_{j=k+1}^{n} \min(d_j, k)
= k(n-1) - \left( k \sum_{j=0}^{k-1} n_j - \sum_{j=0}^{k-1} j n_j \right)
A strong index k is any index where d_k >= k and the value n_j is the
number of occurrences of j in d. The maximal strong index is called the
Durfee index.
This particular rearrangement comes from the proof of Theorem 3 in [2]_.
The ZZ condition says that for the sequence d if
.. math::
|d| \geq \frac{(\max(d) + \min(d) + 1)^2}{4\min(d)}
then d is graphical. This was shown in Theorem 6 in [2]_.
References
----------
.. [1] A. Tripathi and S. Vijay. "A note on a theorem of Erdős & Gallai",
Discrete Mathematics, 265, pp. 417-420 (2003).
.. [2] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
[EG1960]_, [choudum1986]_
"""
try:
dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence)
except nx.NetworkXUnfeasible:
return False
# Accept if sequence has no non-zero degrees or passes the ZZ condition
if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1):
return True
# Perform the EG checks using the reformulation of Zverovich and Zverovich
k, sum_deg, sum_nj, sum_jnj = 0, 0, 0, 0
for dk in range(dmax, dmin - 1, -1):
if dk < k + 1: # Check if already past Durfee index
return True
if num_degs[dk] > 0:
run_size = num_degs[dk] # Process a run of identical-valued degrees
if dk < k + run_size: # Check if end of run is past Durfee index
run_size = dk - k # Adjust back to Durfee index
sum_deg += run_size * dk
for v in range(run_size):
sum_nj += num_degs[k + v]
sum_jnj += (k + v) * num_degs[k + v]
k += run_size
if sum_deg > k * (n - 1) - k * sum_nj + sum_jnj:
return False
return True
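# Doctest-style sketch (illustrative): a triangle realizes [2, 2, 2]; any
# sequence with odd sum is rejected.
#
#   >>> is_valid_degree_sequence_erdos_gallai([2, 2, 2])
#   True
#   >>> is_valid_degree_sequence_erdos_gallai([1])
#   False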
def is_multigraphical(sequence):
"""Returns True if some multigraph can realize the sequence.
Parameters
----------
sequence : list
A list of integers
Returns
-------
valid : bool
True if deg_sequence is a multigraphic degree sequence and False if not.
Notes
-----
The worst-case run time is $O(n)$ where $n$ is the length of the sequence.
References
----------
.. [1] S. L. Hakimi. "On the realizability of a set of integers as
degrees of the vertices of a linear graph", J. SIAM, 10, pp. 496-506
(1962).
"""
deg_sequence = list(sequence)
if not nx.utils.is_list_of_ints(deg_sequence):
return False
dsum, dmax = 0, 0
for d in deg_sequence:
if d < 0:
return False
dsum, dmax = dsum + d, max(dmax, d)
if dsum % 2 or dsum < 2 * dmax:
return False
return True
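# Doctest-style sketch (illustrative): parallel edges make [3, 3] realizable,
# but the sum must still dominate twice the maximum degree.
#
#   >>> is_multigraphical([3, 3])
#   True
#   >>> is_multigraphical([4, 2])   # dsum < 2 * dmax
#   False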
def is_pseudographical(sequence):
"""Returns True if some pseudograph can realize the sequence.
Every nonnegative integer sequence with an even sum is pseudographical
(see [1]_).
Parameters
----------
sequence : list or iterable container
A sequence of integer node degrees
Returns
-------
valid : bool
True if the sequence is a pseudographic degree sequence and False if not.
Notes
-----
The worst-case run time is $O(n)$ where n is the length of the sequence.
References
----------
.. [1] F. Boesch and F. Harary. "Line removal algorithms for graphs
and their degree lists", IEEE Trans. Circuits and Systems, CAS-23(12),
pp. 778-782 (1976).
"""
s = list(sequence)
if not nx.utils.is_list_of_ints(s):
return False
return sum(s) % 2 == 0 and min(s) >= 0
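# Doctest-style sketch (illustrative): only nonnegativity and an even sum
# are required once self-loops and parallel edges are allowed.
#
#   >>> is_pseudographical([2, 2, 2])
#   True
#   >>> is_pseudographical([2, 1])   # odd sum
#   False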
def is_digraphical(in_sequence, out_sequence):
r"""Returns True if some directed graph can realize the in- and out-degree
sequences.
Parameters
----------
in_sequence : list or iterable container
A sequence of integer node in-degrees
out_sequence : list or iterable container
A sequence of integer node out-degrees
Returns
-------
valid : bool
True if the in- and out-degree sequences are digraphical, False if not.
Notes
-----
This algorithm is from Kleitman and Wang [1]_.
The worst case runtime is $O(s \times \log n)$ where $s$ and $n$ are the
sum and length of the sequences respectively.
References
----------
.. [1] D.J. Kleitman and D.L. Wang
Algorithms for Constructing Graphs and Digraphs with Given Valences
and Factors, Discrete Mathematics, 6(1), pp. 79-88 (1973)
"""
in_deg_sequence = list(in_sequence)
out_deg_sequence = list(out_sequence)
if not nx.utils.is_list_of_ints(in_deg_sequence):
return False
if not nx.utils.is_list_of_ints(out_deg_sequence):
return False
# Process the sequences and form two heaps to store degree pairs with
# either zero or non-zero out degrees
sumin, sumout, nin, nout = 0, 0, len(in_deg_sequence), len(out_deg_sequence)
maxn = max(nin, nout)
maxin = 0
if maxn == 0:
return True
stubheap, zeroheap = [], []
for n in range(maxn):
in_deg, out_deg = 0, 0
if n < nout:
out_deg = out_deg_sequence[n]
if n < nin:
in_deg = in_deg_sequence[n]
if in_deg < 0 or out_deg < 0:
return False
sumin, sumout, maxin = sumin + in_deg, sumout + out_deg, max(maxin, in_deg)
if in_deg > 0:
stubheap.append((-1 * out_deg, -1 * in_deg))
elif out_deg > 0:
zeroheap.append(-1 * out_deg)
if sumin != sumout:
return False
heapq.heapify(stubheap)
heapq.heapify(zeroheap)
modstubs = [(0, 0)] * (maxin + 1)
# Successively reduce degree sequence by removing the maximum out degree
while stubheap:
# Take the first value in the sequence with non-zero in degree
(freeout, freein) = heapq.heappop(stubheap)
freein *= -1
if freein > len(stubheap) + len(zeroheap):
return False
# Attach out stubs to the nodes with the most in stubs
mslen = 0
for i in range(freein):
if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0]):
stubout = heapq.heappop(zeroheap)
stubin = 0
else:
(stubout, stubin) = heapq.heappop(stubheap)
if stubout == 0:
return False
# Check if target is now totally connected
if stubout + 1 < 0 or stubin < 0:
modstubs[mslen] = (stubout + 1, stubin)
mslen += 1
# Add back the nodes to the heap that still have available stubs
for i in range(mslen):
stub = modstubs[i]
if stub[1] < 0:
heapq.heappush(stubheap, stub)
else:
heapq.heappush(zeroheap, stub[0])
if freeout < 0:
heapq.heappush(zeroheap, freeout)
return True
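# Doctest-style sketch (illustrative): a directed 2-cycle realizes the pair
# below; mismatched in/out totals can never be realized.
#
#   >>> is_digraphical([1, 1], [1, 1])
#   True
#   >>> is_digraphical([1], [2])   # sum(in) != sum(out)
#   False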
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/networkx/algorithms/graphical.py
|
Python
|
gpl-3.0
| 13,009
|
[
"Brian"
] |
844e37e5aa1c60a1ceefffc155407f45b2b482c63885aa386752208b8906c38a
|
# test driver for the second half of the SavePythonState test. it
# sets up the testing environment, loads the state file (passed
# on the command line), and verifies the resulting image
import sys
from paraview import smtesting
print sys.argv
smtesting.ProcessCommandLineArguments()
execfile(sys.argv[1])
_view = GetActiveView()
_view.ViewSize = [300, 300]
_view.SMProxy.UpdateVTKObjects()
if not smtesting.DoRegressionTesting(_view.SMProxy):
# This will lead to VTK object leaks.
sys.exit(1)
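# Hedged invocation sketch (paths are illustrative; the harness passes the
# saved state file as the first argument plus the usual smtesting options for
# the data directory, baseline image, and temporary directory):
#
#   pvpython SavePythonState.py /tmp/saved_state.py -D <data-dir> -V <baseline-image> -T <temp-dir>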
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/ParaViewCore/ServerManager/Default/Testing/Python/SavePythonState.py
|
Python
|
gpl-3.0
| 502
|
[
"ParaView",
"VTK"
] |
e89b6a4762c9dfc57de90a97da92888bbc5598ed4eb07671d579bf8e6959fe23
|
""" Test_RSS_Policy_DTPolicy
"""
from mock import MagicMock
import unittest
from DIRAC import gLogger
import DIRAC.ResourceStatusSystem.Policy.DowntimePolicy as moduleTested
################################################################################
class DTPolicy_TestCase(unittest.TestCase):
def setUp(self):
"""Setup"""
gLogger.setLevel("DEBUG")
self.moduleTested = moduleTested
self.testClass = self.moduleTested.DowntimePolicy
self.DTCommand = MagicMock()
def tearDown(self):
"""TearDown"""
del self.testClass
del self.moduleTested
################################################################################
# Tests
class DTPolicy_Success(DTPolicy_TestCase):
def test_instantiate(self):
"""tests that we can instantiate one object of the tested class"""
policy = self.testClass()
self.assertEqual("DowntimePolicy", policy.__class__.__name__)
def test_evaluate(self):
"""tests the evaluate method"""
policy = self.testClass()
# command failing
self.DTCommand.doCommand.return_value = {"OK": False, "Message": "Grumpy command"}
policy.setCommand(self.DTCommand)
res = policy.evaluate()
self.assertTrue(res["OK"])
self.assertEqual("Grumpy command", res["Value"]["Reason"])
self.assertEqual("Error", res["Value"]["Status"])
# command failing /2
self.DTCommand.doCommand.return_value = {
"OK": True,
"Value": {"Severity": "XYZ", "EndDate": "Y", "DowntimeID": "123", "Description": "blah"},
}
self.assertEqual("Error", res["Value"]["Status"])
res = policy.evaluate()
self.assertTrue(res["OK"])
# command result empty
self.DTCommand.doCommand.return_value = {"OK": True, "Value": None}
res = policy.evaluate()
self.assertTrue(res["OK"])
self.assertEqual("Active", res["Value"]["Status"])
self.assertEqual("No DownTime announced", res["Value"]["Reason"])
# command result with a DT
self.DTCommand.doCommand.return_value = {
"OK": True,
"Value": {"Severity": "OUTAGE", "EndDate": "Y", "DowntimeID": "123", "Description": "blah"},
}
policy.command = self.DTCommand
res = policy.evaluate()
self.assertTrue(res["OK"])
self.assertEqual("Banned", res["Value"]["Status"])
self.assertEqual("123 blah", res["Value"]["Reason"])
# command mock
self.DTCommand.doCommand.return_value = {
"OK": True,
"Value": {"Severity": "WARNING", "EndDate": "Y", "DowntimeID": "123", "Description": "blah"},
}
policy.command = self.DTCommand
res = policy.evaluate()
self.assertTrue(res["OK"])
self.assertEqual("Degraded", res["Value"]["Status"])
self.assertEqual("123 blah", res["Value"]["Reason"])
################################################################################
################################################################################
if __name__ == "__main__":
suite = unittest.defaultTestLoader.loadTestsFromTestCase(DTPolicy_TestCase)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(DTPolicy_Success))
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
DIRACGrid/DIRAC
|
src/DIRAC/ResourceStatusSystem/Policy/test/Test_RSS_Policy_DTPolicy.py
|
Python
|
gpl-3.0
| 3,478
|
[
"DIRAC"
] |
0dfbf7d49cdfe87b8d3060fd983e2505700035d932a4c6273fc86da3cf664e40
|
import uuid
from django.db import models
from django.utils.translation import ugettext_lazy as _
from core.models.mixins import AuditableMixin
class Visit(AuditableMixin):
id = models.UUIDField(
default=uuid.uuid4, primary_key=True, editable=False,
verbose_name=_("id")
)
link = models.ForeignKey(
'core.Link', editable=False, on_delete=models.PROTECT,
db_index=True, verbose_name=_("link")
)
ip_address = models.GenericIPAddressField(
protocol='both', verbose_name=_("ip address")
)
class Meta:
ordering = ['date_creation', ]
verbose_name = _("visit")
verbose_name_plural = _("visits")
def __str__(self):
return "%s" % self.ip_address
def get_absolute_url(self):
return self.parent.get_absolute_url()
@property
def company(self):
return self.parent.company
@property
def parent(self):
return self.link
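# Illustrative usage sketch (assumes Django is configured and a core.Link
# instance ``some_link`` exists; both names are hypothetical):
#
#   visit = Visit.objects.create(link=some_link, ip_address='203.0.113.7')
#   str(visit)        # -> '203.0.113.7'
#   visit.company     # delegated to the parent link's company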
|
ikcam/django-skeleton
|
core/models/visit.py
|
Python
|
bsd-3-clause
| 960
|
[
"VisIt"
] |
696fd245493630a92823f79bf4d8e85dab4c5efdb5ab5461df16c0c32a6f0520
|
"""
Defines how the GUI reads Abaqus files
"""
from __future__ import print_function
from six import iteritems
from six.moves import range
import os
import numpy as np
import vtk
from vtk import vtkLine, vtkTriangle, vtkQuad, vtkTetra
from vtk.util.numpy_support import numpy_to_vtk
from pyNastran.gui.gui_objects.gui_result import GuiResult
#from pyNastran.gui.qt_files.result import Result
from pyNastran.converters.dev.abaqus.abaqus import Abaqus
#from pyNastran.converters.cart3d.cart3d_result import Cart3dGeometry, Cart3dResult
#from pyNastran.converters.cart3d.input_c3d_reader import read_input_c3d
#from pyNastran.converters.cart3d.input_cntl_reader import read_input_cntl
class AbaqusIO(object):
def __init__(self):
pass
def get_abaqus_wildcard_geometry_results_functions(self):
"""dynamic named method for loading abaqus input files"""
data = ('Abaqus',
'Abaqus (*.inp)', self.load_abaqus_geometry,
None, None
)
return data
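# A note on the tuple above (an editorial gloss, not original code): pyNastran's
# GUI appears to consume it as (format_name, geometry_wildcard, geometry_loader,
# results_wildcard, results_loader); the two trailing Nones mean this
# development reader registers no results loader, so only geometry is shown.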
#def _remove_old_geometry(self, geom_filename):
#skip_reading = False
#params_to_delete = (
#'case_keys', 'icase', 'iSubcaseNameMap',
#'result_cases', 'eid_map', 'nid_map'
#)
#if geom_filename is None or geom_filename is '':
#skip_reading = True
#return skip_reading
#else:
#self.turn_text_off()
#self.grid.Reset()
#self.result_cases = {}
#self.ncases = 0
#for param in params_to_delete:
#if hasattr(self, param): # TODO: is this correct???
#try:
#delattr(self, param)
#except AttributeError:
#print('param =', param, hasattr(self, param))
#skip_reading = False
##self.scalarBar.VisibilityOff()
#self.scalarBar.Modified()
#return skip_reading
#def _remove_old_cart3d_geometry(self, filename):
##return self._remove_old_geometry(filename)
#self.eid_map = {}
#self.nid_map = {}
#if filename is None:
##self.emptyResult = vtk.vtkFloatArray()
##self.vectorResult = vtk.vtkFloatArray()
#self.scalarBar.VisibilityOff()
#skip_reading = True
#else:
#self.turn_text_off()
#self.grid.Reset()
##self.gridResult.Reset()
##self.gridResult.Modified()
#self.result_cases = {}
#self.ncases = 0
#try:
#del self.case_keys
#del self.icase
#del self.iSubcaseNameMap
#except:
## print("cant delete geo")
#pass
##print(dir(self))
#skip_reading = False
##self.scalarBar.VisibilityOff()
#self.scalarBar.Modified()
#return skip_reading
def load_abaqus_geometry(self, abaqus_filename, dirname, name='main', plot=True):
"""loads abaqus input files into the gui"""
skip_reading = self._remove_old_cart3d_geometry(abaqus_filename)
if skip_reading:
return
self.eid_map = {}
self.nid_map = {}
model = Abaqus(log=self.log, debug=False)
self.model_type = 'abaqus'
#self.model_type = model.model_type
model.read_abaqus_inp(abaqus_filename)
n_r2d2 = 0
n_cpe3 = 0
n_cpe4 = 0
n_cpe4r = 0
n_coh2d4 = 0
n_c3d10h = 0
n_cohax4 = 0
n_cax3 = 0
n_cax4r = 0
nnodes = 0
nelements = 0
all_nodes = []
for part_name, part in iteritems(model.parts):
nids = part.nids - 1
nodes = part.nodes
nnodes += nodes.shape[0]
if part.r2d2 is not None:
n_r2d2 += part.r2d2.shape[0]
if part.cpe3 is not None:
n_cpe3 += part.cpe3.shape[0]
if part.cpe4 is not None:
n_cpe4 += part.cpe4.shape[0]
if part.cpe4r is not None:
n_cpe4r += part.cpe4r.shape[0]
if part.coh2d4 is not None:
n_coh2d4 += part.coh2d4.shape[0]
if part.cohax4 is not None:
n_cohax4 += part.cohax4.shape[0]
if part.cax3 is not None:
n_cax3 += part.cax3.shape[0]
if part.cax4r is not None:
n_cax4r += part.cax4r.shape[0]
if part.c3d10h is not None:
n_c3d10h += part.c3d10h.shape[0]
all_nodes.append(nodes)
nelements += n_r2d2 + n_cpe3 + n_cpe4 + n_cpe4r + n_coh2d4 + n_c3d10h + n_cohax4 + n_cax3 + n_cax4r
assert nelements > 0, nelements
#nodes = model.nodes
#elements = model.elements
self.nNodes = nnodes
self.nElements = nelements
self.grid.Allocate(self.nElements, 1000)
points = vtk.vtkPoints()
points.SetNumberOfPoints(self.nNodes)
self.nid_map = {}
assert nodes is not None
nnodes = nodes.shape[0]
if len(all_nodes) == 1:
nodes = all_nodes[0]
else:
nodes = np.vstack(all_nodes)
mmax = np.amax(nodes, axis=0)
mmin = np.amin(nodes, axis=0)
dim_max = (mmax - mmin).max()
self.create_global_axes(dim_max)
data_type = vtk.VTK_FLOAT
points_array = numpy_to_vtk(
num_array=nodes,
deep=True,
array_type=data_type
)
points.SetData(points_array)
nid_offset = -1
for part_name, part in iteritems(model.parts):
nnodesi = part.nodes.shape[0]
n_r2d2 = 0
n_cpe3 = 0
n_cpe4 = 0
n_cpe4r = 0
n_coh2d4 = 0
n_c3d10h = 0
n_cohax4 = 0
n_cax3 = 0
n_cax4r = 0
if part.r2d2 is not None:
n_r2d2 += part.r2d2.shape[0]
if part.cpe3 is not None:
n_cpe3 += part.cpe3.shape[0]
if part.cpe4 is not None:
n_cpe4 += part.cpe4.shape[0]
if part.cpe4r is not None:
n_cpe4r += part.cpe4r.shape[0]
if part.coh2d4 is not None:
n_coh2d4 += part.coh2d4.shape[0]
if part.cohax4 is not None:
n_cohax4 += part.cohax4.shape[0]
if part.cax3 is not None:
n_cax3 += part.cax3.shape[0]
if part.cax4r is not None:
n_cax4r += part.cax4r.shape[0]
# solids
if part.c3d10h is not None:
n_c3d10h += part.c3d10h.shape[0]
if n_r2d2:
eids = part.r2d2[:, 0]
node_ids = part.r2d2[:, 1:] + nid_offset
for eid, node_ids in zip(eids, node_ids):
elem = vtkLine()
elem.GetPointIds().SetId(0, node_ids[0])
elem.GetPointIds().SetId(1, node_ids[1])
self.grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
if n_cpe3:
eids = part.cpe3[:, 0]
node_ids = part.cpe3[:, 1:] + nid_offset
for eid, node_ids in zip(eids, node_ids):
elem = vtkTriangle()
elem.GetPointIds().SetId(0, node_ids[0])
elem.GetPointIds().SetId(1, node_ids[1])
elem.GetPointIds().SetId(2, node_ids[2])
self.grid.InsertNextCell(5, elem.GetPointIds())  # 5 == vtk.VTK_TRIANGLE
if n_cpe4:
eids = part.cpe4[:, 0]
node_ids = part.cpe4[:, 1:] + nid_offset
for eid, node_ids in zip(eids, node_ids):
elem = vtkQuad()
elem.GetPointIds().SetId(0, node_ids[0])
elem.GetPointIds().SetId(1, node_ids[1])
elem.GetPointIds().SetId(2, node_ids[2])
elem.GetPointIds().SetId(3, node_ids[3])
self.grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
if n_cpe4r:
eids = part.cpe4r[:, 0]
node_ids = part.cpe4r[:, 1:] + nid_offset
for eid, node_ids in zip(eids, node_ids):
elem = vtkQuad()
elem.GetPointIds().SetId(0, node_ids[0])
elem.GetPointIds().SetId(1, node_ids[1])
elem.GetPointIds().SetId(2, node_ids[2])
elem.GetPointIds().SetId(3, node_ids[3])
self.grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
if n_coh2d4:
eids = part.coh2d4[:, 0]
node_ids = part.coh2d4[:, 1:] + nid_offset
for eid, node_ids in zip(eids, node_ids):
elem = vtkQuad()
elem.GetPointIds().SetId(0, node_ids[0])
elem.GetPointIds().SetId(1, node_ids[1])
elem.GetPointIds().SetId(2, node_ids[2])
elem.GetPointIds().SetId(3, node_ids[3])
self.grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
if n_cohax4:
eids = part.cohax4[:, 0]
node_ids = part.cohax4[:, 1:] + nid_offset
for eid, node_ids in zip(eids, node_ids):
elem = vtkQuad()
elem.GetPointIds().SetId(0, node_ids[0])
elem.GetPointIds().SetId(1, node_ids[1])
elem.GetPointIds().SetId(2, node_ids[2])
elem.GetPointIds().SetId(3, node_ids[3])
self.grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
if n_cax3:
eids = part.cax3[:, 0]
node_ids = part.cax3[:, 1:] + nid_offset
for eid, node_ids in zip(eids, node_ids):
elem = vtkTriangle()
elem.GetPointIds().SetId(0, node_ids[0])
elem.GetPointIds().SetId(1, node_ids[1])
elem.GetPointIds().SetId(2, node_ids[2])
self.grid.InsertNextCell(5, elem.GetPointIds())  # 5 == vtk.VTK_TRIANGLE
if n_cax4r:
eids = part.cax4r[:, 0]
node_ids = part.cax4r[:, 1:] + nid_offset
for eid, node_ids in zip(eids, node_ids):
elem = vtkQuad()
elem.GetPointIds().SetId(0, node_ids[0])
elem.GetPointIds().SetId(1, node_ids[1])
elem.GetPointIds().SetId(2, node_ids[2])
elem.GetPointIds().SetId(3, node_ids[3])
self.grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
# solids
if n_c3d10h:
eids = part.c3d10h[:, 0]
node_ids = part.c3d10h[:, 1:] + nid_offset
for eid, node_ids in zip(eids, node_ids):
#for eid, node_ids in part.c3d10h:
elem = vtkTetra()
elem.GetPointIds().SetId(0, node_ids[0])
elem.GetPointIds().SetId(1, node_ids[1])
elem.GetPointIds().SetId(2, node_ids[2])
elem.GetPointIds().SetId(3, node_ids[3])
self.grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
nid_offset += nnodesi
self.grid.SetPoints(points)
self.grid.Modified()
if hasattr(self.grid, 'Update'):
self.grid.Update()
# loadCart3dResults - regions/loads
#self.turn_text_on()
self.scalarBar.VisibilityOn()
self.scalarBar.Modified()
note = ''
self.iSubcaseNameMap = {1: ['Abaqus%s' % note, '']}
#form = []
cases = {}
ID = 1
form, cases, icase = self._fill_abaqus_case(cases, ID, nodes, nelements, model)
#self._fill_cart3d_results(cases, form, icase, ID, model)
self._finish_results_io2(form, cases)
def clear_abaqus(self):
"""does nothing"""
pass
def load_abaqus_results(self, cart3d_filename, dirname):
"""does nothing"""
raise NotImplementedError()
def _fill_abaqus_case(self, cases, ID, nodes, nelements, model):
"""creates the result objects for abaqus"""
#return [], {}, 0
#nelements = elements.shape[0]
nnodes = nodes.shape[0]
element_ids = np.arange(1, nelements + 1)
node_ids = np.arange(1, nnodes + 1)
#cnormals = model.get_normals(shift_nodes=False)
#cnnodes = cnormals.shape[0]
#assert cnnodes == nelements, len(cnnodes)
#print('nnodes =', nnodes)
#print('nelements =', nelements)
#print('regions.shape =', regions.shape)
subcase_id = 0
labels = ['NodeID', 'ElementID']
#cart3d_geo = Cart3dGeometry(subcase_id, labels,
#nids, eids, regions, cnormals,
#uname='Cart3dGeometry')
nid_res = GuiResult(ID, header='NodeID', title='NodeID',
location='node', scalar=node_ids)
eid_res = GuiResult(ID, header='ElementID', title='ElementID',
location='centroid', scalar=element_ids)
cases = {
0 : (nid_res, (0, 'NodeID')),
1 : (eid_res, (0, 'ElementID')),
}
geometry_form = [
('NodeID', 0, []),
('ElementID', 1, []),
]
form = [
('Geometry', None, geometry_form),
]
icase = 2
return form, cases, icase
|
saullocastro/pyNastran
|
pyNastran/converters/dev/abaqus/abaqus_io.py
|
Python
|
lgpl-3.0
| 13,782
|
[
"VTK"
] |
c630138812e9173e2948889238357be23d7fddc8b3abc7bef67415a738be59c1
|
r"""
This module is a ParaViewWeb server application.
The following command line illustrate how to use it::
$ pvpython .../pv_web_catalyst.py --data-dir /.../path-to-your-data-directory
--data-dir is used to list that directory on the server and let the client choose a file to load.
Any ParaViewWeb executable script comes with a set of standard arguments that
can be overridden if need be::
--port 8080
Port number on which the HTTP server will listen to.
--content /path-to-web-content/
Directory that you want to server as static web content.
By default, this variable is empty, which means that we rely on another server
to deliver the static content; the current process then only focuses on the
WebSocket connectivity of clients.
--authKey vtkweb-secret
Secret key that should be provided by the client to allow it to make any
WebSocket communication. If none is given, the client will assume that the
server expects "vtkweb-secret" as the secret key.
"""
# import to process args
import os
import math
# import paraview modules.
import paraview
# for 4.1 compatibility till we fix ColorArrayName and ColorAttributeType usage.
paraview.compatibility.major = 4
paraview.compatibility.minor = 1
from paraview import simple
from paraview.web import wamp as pv_wamp
from paraview.web import protocols as pv_protocols
from vtk.web import server
try:
import argparse
except ImportError:
# since Python 2.6 and earlier don't have argparse, we simply provide
# the source for the same as _argparse and we use it instead.
from vtk.util import _argparse as argparse
# import annotations
from autobahn.wamp import register as exportRpc
# =============================================================================
# Handle function helpers
# =============================================================================
def convert_to_float(v):
return float(v)
# -----------------------------------------------------------------------------
def convert_to_float_array(v):
return [ float(v) ]
# -----------------------------------------------------------------------------
def update_property(handle, value):
prop = handle['proxy'].GetProperty(handle['property'])
if 'convert' in handle:
prop.SetData(handle['convert'](value))
else:
prop.SetData(value)
# -----------------------------------------------------------------------------
def create_property_handle(proxy, property_name, convert = None):
handle = { 'proxy': proxy, 'property': property_name, 'update': update_property }
if convert:
handle['convert'] = convert
return handle
# -----------------------------------------------------------------------------
def update_representation(handle, value):
proxy = handle['proxy']
if value in handle['color_list']:
for propName in handle['color_list'][value]:
prop = proxy.GetProperty(propName)
if prop is not None:
prop.SetData(handle['color_list'][value][propName])
if 'override_location' in handle:
proxy.ColorAttributeType = handle['override_location']
# -----------------------------------------------------------------------------
def create_representation_handle(representation_proxy, colorByList, array_location = None):
handle = { 'proxy': representation_proxy, 'color_list': colorByList, 'update': update_representation }
if array_location:
handle['override_location'] = array_location
return handle
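# Illustrative sketch of the handle pattern (hypothetical proxy/property names,
# not part of the original script):
#     contour = simple.Contour(Input=reader)
#     h = create_property_handle(contour, 'Isosurfaces', convert_to_float_array)
#     h['update'](h, 0.5)   # dispatches to update_property -> Isosurfaces=[0.5]
# Representation handles work the same way, but swap color-related properties
# per the chosen array via update_representation.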
# =============================================================================
# Define a pipeline object
# =============================================================================
class CatalystBasePipeline(object):
def __init__(self):
self.field_data = {}
self.handles = {}
self.metadata = {}
def apply_pipeline(self, input_data, time_steps):
'''
Method called when data to process is ready
'''
self.metadata['time'] = { "default": time_steps[0], "type": "range", "values": time_steps, "label": "time", "priority": 0 }
def add_key(self, key, default_value, data_type, values, label, priority, handles):
self.handles[key] = handles
self.metadata[key] = {
"default": default_value,
"type": data_type,
"values": values,
"label": label,
"priority": priority
}
def register_data(self, fieldName, location, scalarRange, lutType):
self.field_data[fieldName] = {
"ColorArrayName": (location, fieldName),
"Range": scalarRange,
"LookupTable": self._create_lookup_table(fieldName, scalarRange, lutType),
"ScalarOpacityFunction": self._create_piecewise_function(scalarRange)
}
def update_argument(self, key, value):
for handle in self.handles[key]:
handle['update'](handle, value)
def get_metadata(self):
return self.metadata
def _create_data_values(self, scalarRange, number_of_values):
inc = float(scalarRange[1]-scalarRange[0]) / float(number_of_values)
values = []
for i in range(number_of_values+1):
values.append(float(scalarRange[0] + (float(i)*inc) ))
return values
def _create_lookup_table(self, name, scalarRange, lutType):
if lutType == 'blueToRed':
return simple.GetLookupTableForArray( name, 1, RGBPoints=[scalarRange[0], 0.231373, 0.298039, 0.752941, (scalarRange[0]+scalarRange[1])/2, 0.865003, 0.865003, 0.865003, scalarRange[1], 0.705882, 0.0156863, 0.14902], VectorMode='Magnitude', NanColor=[0.0, 0.0, 0.0], ColorSpace='Diverging', ScalarRangeInitialized=1.0, LockScalarRange=1)
else:
return simple.GetLookupTableForArray( name, 1, RGBPoints=[scalarRange[0], 0.0, 0.0, 1.0, scalarRange[1], 1.0, 0.0, 0.0], VectorMode='Magnitude', NanColor=[0.0, 0.0, 0.0], ColorSpace='HSV', ScalarRangeInitialized=1.0, LockScalarRange=1)
def _create_piecewise_function(self, scalarRange):
return simple.CreatePiecewiseFunction( Points=[scalarRange[0], 0.0, 0.5, 0.0, scalarRange[1], 1.0, 0.5, 0.0] )
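# Layout reminder (editorial comment): RGBPoints is a flat list of
# (value, r, g, b) tuples -- here the range endpoints and, for the diverging
# map, the midpoint -- while CreatePiecewiseFunction's Points is a flat list of
# (x, opacity, midpoint, sharpness) tuples.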
# =============================================================================
# Create custom Catalyst Pipeline Manager class to handle clients requests
# =============================================================================
class _PVCatalystManager(pv_wamp.PVServerProtocol):
dataDir = None
authKey = "vtkweb-secret"
plugins = None
pipeline_handler = None
@staticmethod
def add_arguments(parser):
parser.add_argument("--data-dir", default=os.getcwd(), help="path to data directory to list", dest="path")
parser.add_argument("--plugins", default="", help="List of fully qualified path names to plugin objects to load", dest="plugins")
@staticmethod
def configure(args):
_PVCatalystManager.authKey = args.authKey
_PVCatalystManager.dataDir = args.path
_PVCatalystManager.plugins = args.plugins
def initialize(self):
# Bring used components
self.registerVtkWebProtocol(pv_protocols.ParaViewWebStartupPluginLoader(_PVCatalystManager.plugins))
self.registerVtkWebProtocol(pv_protocols.ParaViewWebMouseHandler())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPort())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPortImageDelivery())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPortGeometryDelivery())
# Update authentication key to use
self.updateSecret(_PVCatalystManager.authKey)
view = simple.GetRenderView()
view.Background = [1,1,1]
@exportRpc("catalyst.file.open")
def openFileFromPath(self, files):
fileToLoad = []
number_of_time_steps = 1
if type(files) == list:
number_of_time_steps = len(files)
for file in files:
fileToLoad.append(os.path.join(_PVCatalystManager.dataDir, file))
else:
fileToLoad.append(os.path.join(_PVCatalystManager.dataDir, files))
self.time_steps = [ i for i in range(number_of_time_steps)]
reader = simple.OpenDataFile(fileToLoad)
if _PVCatalystManager.pipeline_handler:
_PVCatalystManager.pipeline_handler.apply_pipeline(reader, self.time_steps)
@exportRpc("catalyst.pipeline.initialize")
def initializePipeline(self, conf):
if _PVCatalystManager.pipeline_handler and "initialize_pipeline" in dir(_PVCatalystManager.pipeline_handler):
_PVCatalystManager.pipeline_handler.initialize_pipeline(conf)
@exportRpc("catalyst.active.argument.update")
def updateActiveArgument(self, key, value):
if key == "time":
simple.GetAnimationScene().TimeKeeper.Time = float(value)
elif _PVCatalystManager.pipeline_handler:
_PVCatalystManager.pipeline_handler.update_argument(key, value)
@exportRpc("catalyst.arguments.get")
def getArguments(self):
if _PVCatalystManager.pipeline_handler:
return _PVCatalystManager.pipeline_handler.get_metadata()
else:
return { "time": {
"default": "0",
"type": "range",
"values": self.time_steps,
"label": "time",
"priority": 0 } }
# =============================================================================
# Main: Parse args and start server
# =============================================================================
if __name__ == "__main__":
# Create argument parser
parser = argparse.ArgumentParser(description="ParaView/Web Pipeline Manager web-application")
# Add arguments
server.add_arguments(parser)
_PVCatalystManager.add_arguments(parser)
# Extract arguments
args = parser.parse_args()
# Configure our current application
_PVCatalystManager.configure(args)
# Start server
server.start_webserver(options=args, protocol=_PVCatalystManager)
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/Web/Python/paraview/web/pv_web_catalyst.py
|
Python
|
gpl-3.0
| 10,291
|
[
"ParaView",
"VTK"
] |
0319d8bc1da5ccfb921e06d15f6555d09fd01a5e47ae5e03337c478160b2669c
|
#
# When publishing work that uses these basis sets, please use the following citation:
#
# K.G. Dyall, Theor. Chem. Acc. (1998) 99:366; addendum Theor. Chem. Acc. (2002) 108:365;
# revision Theor. Chem. Acc. (2006) 115:441. Basis sets available from the Dirac web site,
# http://dirac.chem.sdu.dk.
Pt = [[0, -1, [62378099.0,1]], [0, -1, [16610117.0,1]], [0, -1, [5696578.5,1]],
[0, -1, [2175602.4,1]], [0, -1, [909700.19,1]], [0, -1, [402379.22,1]],
[0, -1, [186860.49,1]], [0, -1, [89836.786,1]], [0, -1, [44412.747,1]],
[0, -1, [22441.78,1]], [0, -1, [11580.965,1]], [0, -1, [6101.9949,1]],
[0, -1, [3281.1068,1]], [0, -1, [1798.7943,1]], [0, -1, [1005.5354,1]],
[0, -1, [573.94387,1]], [0, -1, [335.55893,1]], [0, -1, [203.4416,1]],
[0, -1, [126.91635,1]], [0, -1, [79.359993,1]], [0, -1, [47.803711,1]],
[0, -1, [29.780005,1]], [0, -1, [18.271802,1]], [0, -1, [11.249627,1]],
[0, -1, [6.9501429,1]], [0, -1, [4.2935494,1]], [0, -1, [2.5603157,1]],
[0, -1, [1.5006061,1]], [0, -1, [0.85208386,1]], [0, -1, [0.46593475,1]],
[0, -1, [0.22835045,1]], [0, -1, [0.11429192,1]], [0, -1, [0.057390836,1]],
[0, -1, [0.028637438,1]],
[1, 0, [56847512.0,1]], [1, 0, [16555699.0,1]], [1, 0, [5235957.2,1]],
[1, 0, [1783964.1,1]], [1, 0, [644821.16,1]], [1, 0, [245142.11,1]],
[1, 0, [97475.601,1]], [1, 0, [40432.196,1]], [1, 0, [17491.403,1]],
[1, 0, [7899.194,1]], [1, 0, [3722.8512,1]], [1, 0, [1826.1078,1]],
[1, 0, [928.40132,1]], [1, 0, [486.58729,1]], [1, 0, [261.66269,1]],
[1, 0, [143.58357,1]], [1, 0, [79.905847,1]], [1, 0, [45.234049,1]],
[1, 0, [26.101909,1]], [1, 0, [15.033503,1]], [1, 0, [8.4691343,1]],
[1, 0, [4.7845135,1]], [1, 0, [2.6400203,1]], [1, 0, [1.4379999,1]],
[1, 0, [0.76620194,1]], [1, 0, [0.39734304,1]], [1, 0, [0.18131035,1]],
[1, 0, [0.084697942,1]], [1, 0, [0.039577815,1]], [1, 0, [0.018381289,1]],
[2, 0, [68782.903,1]], [2, 0, [16486.23,1]], [2, 0, [5473.3773,1]],
[2, 0, [2173.6928,1]], [2, 0, [970.62154,1]], [2, 0, [470.04055,1]],
[2, 0, [240.73543,1]], [2, 0, [128.5685,1]], [2, 0, [70.502724,1]],
[2, 0, [39.458468,1]], [2, 0, [22.233244,1]], [2, 0, [12.415785,1]],
[2, 0, [6.8533848,1]], [2, 0, [3.7296816,1]], [2, 0, [1.9597861,1]],
[2, 0, [0.99159256,1]], [2, 0, [0.48067665,1]], [2, 0, [0.22109331,1]],
[2, 0, [0.094413319,1]],
[3, 0, [1515.2867,1]], [3, 0, [512.9344,1]], [3, 0, [218.62924,1]],
[3, 0, [104.94393,1]], [3, 0, [53.68568,1]], [3, 0, [28.511821,1]],
[3, 0, [15.399494,1]], [3, 0, [8.2234678,1]], [3, 0, [4.2572104,1]],
[3, 0, [2.064042,1]], [3, 0, [0.88882536,1]], [3, 0, [0.35215489,1]], ]
At = [[0,0,[6.0375427E+07,1]], [0,0,[1.6075673E+07,1]], [0,0,[5.5075645E+06,1]],
[0,0,[2.1004143E+06,1]], [0,0,[8.7917001E+05,1]], [0,0,[3.9045618E+05,1]],
[0,0,[1.8273552E+05,1]], [0,0,[8.8838178E+04,1]], [0,0,[4.4616106E+04,1]],
[0,0,[2.2979252E+04,1]], [0,0,[1.2095138E+04,1]], [0,0,[6.4907973E+03,1]],
[0,0,[3.5524824E+03,1]], [0,0,[1.9845589E+03,1]], [0,0,[1.1323731E+03,1]],
[0,0,[6.6017267E+02,1]], [0,0,[3.9392589E+02,1]], [0,0,[2.4021531E+02,1]],
[0,0,[1.4900652E+02,1]], [0,0,[9.4000919E+01,1]], [0,0,[5.9699821E+01,1]],
[0,0,[3.7696078E+01,1]], [0,0,[2.3386529E+01,1]], [0,0,[1.5284999E+01,1]],
[0,0,[9.4028096E+00,1]], [0,0,[5.9292233E+00,1]], [0,0,[3.8706010E+00,1]],
[0,0,[2.2177953E+00,1]], [0,0,[1.3405729E+00,1]], [0,0,[8.4309331E-01,1]],
[0,0,[5.6531041E-01,1]], [0,0,[2.9318475E-01,1]], [0,0,[1.4823369E-01,1]],
[0,0,[7.0506504E-02,1]],
[1,0,[7.2251407E+07,1]], [1,0,[2.4932854E+07,1]], [1,0,[8.8937376E+06,1]],
[1,0,[3.3454100E+06,1]], [1,0,[1.3140444E+06,1]], [1,0,[5.3608132E+05,1]],
[1,0,[2.2603474E+05,1]], [1,0,[9.8286709E+04,1]], [1,0,[4.4052614E+04,1]],
[1,0,[2.0355617E+04,1]], [1,0,[9.7031233E+03,1]], [1,0,[4.7758375E+03,1]],
[1,0,[2.4266900E+03,1]], [1,0,[1.2710526E+03,1]], [1,0,[6.8407924E+02,1]],
[1,0,[3.7693166E+02,1]], [1,0,[2.1194901E+02,1]], [1,0,[1.2075749E+02,1]],
[1,0,[6.9304384E+01,1]], [1,0,[4.0524414E+01,1]], [1,0,[2.3598368E+01,1]],
[1,0,[1.3497835E+01,1]], [1,0,[7.8076208E+00,1]], [1,0,[4.4667435E+00,1]],
[1,0,[2.5211716E+00,1]], [1,0,[1.3993280E+00,1]], [1,0,[7.5966311E-01,1]],
[1,0,[3.9017758E-01,1]], [1,0,[1.9315865E-01,1]], [1,0,[9.3229402E-02,1]],
[1,0,[4.3409155E-02,1]],
[2,0,[1.9034949E+05,1]], [2,0,[4.4572260E+04,1]], [2,0,[1.4282757E+04,1]],
[2,0,[5.4662239E+03,1]], [2,0,[2.3620925E+03,1]], [2,0,[1.1150297E+03,1]],
[2,0,[5.6183005E+02,1]], [2,0,[2.9682816E+02,1]], [2,0,[1.6269028E+02,1]],
[2,0,[9.1351283E+01,1]], [2,0,[5.2299012E+01,1]], [2,0,[3.0186267E+01,1]],
[2,0,[1.7352767E+01,1]], [2,0,[9.9256123E+00,1]], [2,0,[5.6533216E+00,1]],
[2,0,[3.1660082E+00,1]], [2,0,[1.7309287E+00,1]], [2,0,[9.5105066E-01,1]],
[2,0,[5.7159618E-01,1]], [2,0,[3.3761399E-01,1]], [2,0,[1.3892468E-01,1]],
[3,0,[2.9832111E+03,1]], [3,0,[9.8834645E+02,1]], [3,0,[4.1900451E+02,1]],
[3,0,[2.0048009E+02,1]], [3,0,[1.0365833E+02,1]], [3,0,[5.5917913E+01,1]],
[3,0,[3.1127876E+01,1]], [3,0,[1.7542308E+01,1]], [3,0,[9.8863737E+00,1]],
[3,0,[5.5184395E+00,1]], [3,0,[2.9904382E+00,1]], [3,0,[1.5189767E+00,1]],
[3,0,[5.4776846E-01,1]], [3,0,[2.3105119E-01,1]],]
Rn = [[0, 0, [5.8493417E+07, 1]],
[0, 0, [1.5572798E+07, 1]],
[0, 0, [5.3333328E+06, 1]],
[0, 0, [2.0330517E+06, 1]],
[0, 0, [8.5087888E+05, 1]],
[0, 0, [3.7801886E+05, 1]],
[0, 0, [1.7710489E+05, 1]],
[0, 0, [8.6290758E+04, 1]],
[0, 0, [4.3508460E+04, 1]],
[0, 0, [2.2535909E+04, 1]],
[0, 0, [1.1939902E+04, 1]],
[0, 0, [6.4461109E+03, 1]],
[0, 0, [3.5440515E+03, 1]],
[0, 0, [1.9869386E+03, 1]],
[0, 0, [1.1385593E+03, 1]],
[0, 0, [6.6723524E+02, 1]],
[0, 0, [3.9890566E+02, 1]],
[0, 0, [2.4217648E+02, 1]],
[0, 0, [1.4867618E+02, 1]],
[0, 0, [9.2956939E+01, 1]],
[0, 0, [6.0123581E+01, 1]],
[0, 0, [3.9915796E+01, 1]],
[0, 0, [2.5935503E+01, 1]],
[0, 0, [1.6159454E+01, 1]],
[0, 0, [1.0223800E+01, 1]],
[0, 0, [6.3449788E+00, 1]],
[0, 0, [3.8363427E+00, 1]],
[0, 0, [2.3604729E+00, 1]],
[0, 0, [1.5191980E+00, 1]],
[0, 0, [9.7956364E-01, 1]],
[0, 0, [5.5528078E-01, 1]],
[0, 0, [3.0273335E-01, 1]],
[0, 0, [1.5648740E-01, 1]],
[0, 0, [7.5495409E-02, 1]],
[1, 0, [7.0924046E+07, 1]],
[1, 0, [2.4693625E+07, 1]],
[1, 0, [8.8672052E+06, 1]],
[1, 0, [3.3547783E+06, 1]],
[1, 0, [1.3239675E+06, 1]],
[1, 0, [5.4209233E+05, 1]],
[1, 0, [2.2929556E+05, 1]],
[1, 0, [9.9919950E+04, 1]],
[1, 0, [4.4803647E+04, 1]],
[1, 0, [2.0690977E+04, 1]],
[1, 0, [9.8561410E+03, 1]],
[1, 0, [4.8475196E+03, 1]],
[1, 0, [2.4612010E+03, 1]],
[1, 0, [1.2883984E+03, 1]],
[1, 0, [6.9330927E+02, 1]],
[1, 0, [3.8218239E+02, 1]],
[1, 0, [2.1511306E+02, 1]],
[1, 0, [1.2277465E+02, 1]],
[1, 0, [7.0702303E+01, 1]],
[1, 0, [4.1526024E+01, 1]],
[1, 0, [2.4312206E+01, 1]],
[1, 0, [1.3982374E+01, 1]],
[1, 0, [8.1468966E+00, 1]],
[1, 0, [4.6976116E+00, 1]],
[1, 0, [2.6679804E+00, 1]],
[1, 0, [1.4953134E+00, 1]],
[1, 0, [8.1950783E-01, 1]],
[1, 0, [4.2793063E-01, 1]],
[1, 0, [2.1421569E-01, 1]],
[1, 0, [1.0428020E-01, 1]],
[1, 0, [4.8791153E-02, 1]],
[2, 0, [2.0199161E+05, 1]],
[2, 0, [4.7238642E+04, 1]],
[2, 0, [1.5126665E+04, 1]],
[2, 0, [5.7844545E+03, 1]],
[2, 0, [2.4973963E+03, 1]],
[2, 0, [1.1777957E+03, 1]],
[2, 0, [5.9297434E+02, 1]],
[2, 0, [3.1309964E+02, 1]],
[2, 0, [1.7156664E+02, 1]],
[2, 0, [9.6362164E+01, 1]],
[2, 0, [5.5193697E+01, 1]],
[2, 0, [3.1898083E+01, 1]],
[2, 0, [1.8380362E+01, 1]],
[2, 0, [1.0536172E+01, 1]],
[2, 0, [6.0178963E+00, 1]],
[2, 0, [3.3827501E+00, 1]],
[2, 0, [1.8576181E+00, 1]],
[2, 0, [1.0285955E+00, 1]],
[2, 0, [6.4621438E-01, 1]],
[2, 0, [3.8953183E-01, 1]],
[2, 0, [1.6015524E-01, 1]],
[3, 0, [3.1949970E+03, 1]],
[3, 0, [1.0562594E+03, 1]],
[3, 0, [4.4732007E+02, 1]],
[3, 0, [2.1386933E+02, 1]],
[3, 0, [1.1057421E+02, 1]],
[3, 0, [5.9687149E+01, 1]],
[3, 0, [3.3257795E+01, 1]],
[3, 0, [1.8779755E+01, 1]],
[3, 0, [1.0611834E+01, 1]],
[3, 0, [5.9454147E+00, 1]],
[3, 0, [3.2392478E+00, 1]],
[3, 0, [1.6591272E+00, 1]],
[3, 0, [6.0504992E-01, 1]],
[3, 0, [2.7149796E-01, 1]],]
# flake8: noqa
|
sunqm/pyscf
|
pyscf/gto/basis/dyall_qz.py
|
Python
|
apache-2.0
| 8,871
|
[
"DIRAC"
] |
59265386df97b977ddbdeaa2161915e0dcbf381b88fe504bc1ca2e275b22e890
|
# Copyright (C) 2016-2019 The ESPResSo project
# Copyright (C) 2014 Olaf Lenz
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script generates code_info.pyx
#
import inspect
import sys
import os
# find featuredefs.py
moduledir = os.path.dirname(inspect.getfile(inspect.currentframe()))
sys.path.append(os.path.join(moduledir, '..', '..', 'config'))
import featuredefs
if len(sys.argv) != 3:
print("Usage: {} DEFFILE PYXFILE".format(sys.argv[0]), file=sys.stderr)
exit(2)
deffilename, cfilename = sys.argv[1:3]
print("Reading definitions from " + deffilename + "...")
defs = featuredefs.defs(deffilename)
print("Done.")
# generate cpp-file
print("Writing " + cfilename + "...")
cfile = open(cfilename, 'w')
cfile.write("""
# This file is autogenerated by gen_code_info.py.
# DO NOT EDIT MANUALLY, CHANGES WILL BE LOST
include "myconfig.pxi"
def features():
\"\"\"Returns list of features compiled into ESPResSo core\"\"\"
f=[]
""")
template = """
IF {0} == 1:
f.append("{0}")
"""
for feature in defs.allfeatures:
cfile.write(template.format(feature))
cfile.write("""
return sorted(f)
def all_features():
return {}
""".format(defs.allfeatures))
cfile.close()
print("Done.")
|
KaiSzuttor/espresso
|
src/python/espressomd/gen_code_info.py
|
Python
|
gpl-3.0
| 1,849
|
[
"ESPResSo"
] |
1dd781ab55f995124555d307781dc9643039d00005fa45780b8f632d549ec43f
|
#!/usr/bin/env python
"""Small script to test timing of loading records into a BioSQL database.
"""
from __future__ import print_function
import time
# set up the connection
from Bio import GenBank
from BioSQL import BioSeqDatabase
server = BioSeqDatabase.open_database(host="192.168.0.192", user="root",
passwd="", db="pythonloadtest")
# remove the database if it already exists
db_name = "testload"
try:
server[db_name]
server.remove_database(db_name)
except KeyError:
pass
db = server.new_database(db_name)
input_file = "/home/hack/install/biopython/Tests/GenBank/cor6_6.gb"
handle = open(input_file, "r")
parser = GenBank.FeatureParser()
iterator = GenBank.Iterator(handle, parser)
# -- do the timing part
start_time = time.time()
num_records = db.load(iterator)
end_time = time.time()
elapsed_time = end_time - start_time
print("Loading")
print("\tDid %s records in %s seconds for\n\t%f records per second" % \
(num_records, elapsed_time, float(num_records) / float(elapsed_time)))
|
updownlife/multipleK
|
dependencies/biopython-1.65/Scripts/Performance/biosql_performance_load.py
|
Python
|
gpl-2.0
| 1,048
|
[
"Biopython"
] |
cf737a084ab05c8508eaf571724cd3f0facfab7dbbf4558a0a046b03a003c4ce
|
#! /usr/bin/env python
from openturns import *
from math import *
TESTPREAMBLE()
RandomGenerator().SetSeed(0)
try :
# 1D tests
distribution = Dirac(0.7)
print "Distribution ", distribution
# Is this distribution elliptical ?
print "Elliptical = ", distribution.isElliptical()
# Is this distribution continuous ?
print "Continuous = ", distribution.isContinuous()
# Test for realization of distribution
oneRealization = distribution.getRealization()
print "oneRealization=", oneRealization
# Test for sampling
size = 10000
oneSample = distribution.getSample( size )
print "oneSample first=", oneSample[0], " last=", oneSample[size - 1]
print "mean=", oneSample.computeMean()
sampleCovariance = oneSample.computeCovariance()[0, 0]
if (fabs(sampleCovariance) < 1.0e-16):
sampleCovariance = 0.0
print "covariance=", sampleCovariance
# Define a point
point = NumericalPoint( distribution.getDimension(), 0.0 )
print "Point= ", point
# Show PDF and CDF of point
PDF = distribution.computePDF( point )
print "pdf =", PDF
print "pdf (FD)=", (distribution.computeCDF( point + NumericalPoint(1, 0) ) - distribution.computeCDF( point + NumericalPoint(1, -1) ))
CDF = distribution.computeCDF( point )
print "cdf=", CDF
# Define a point
point = NumericalPoint( distribution.getSupport(distribution.getRange())[0])
print "Point= ", point
# Show PDF and CDF of point
PDF = distribution.computePDF( point )
print "pdf =", PDF
print "pdf (FD)=", (distribution.computeCDF( point + NumericalPoint(1, 0) ) - distribution.computeCDF( point + NumericalPoint(1, -1) ))
CDF = distribution.computeCDF( point )
print "cdf=", CDF
CF = distribution.computeCharacteristicFunction( 0.5 )
print "characteristic function=", CF
GF = distribution.computeGeneratingFunction( 0.5 + 0.3j )
print "generating function=", GF
quantile = distribution.computeQuantile( 0.95 )
print "quantile=", quantile
print "cdf(quantile)=", distribution.computeCDF(quantile)
mean = distribution.getMean()
print "mean=", mean
standardDeviation = distribution.getStandardDeviation()
print "standard deviation=", standardDeviation
skewness = distribution.getSkewness()
print "skewness=", skewness
kurtosis = distribution.getKurtosis()
print "kurtosis=", kurtosis
covariance = distribution.getCovariance()
print "covariance=", covariance
parameters = distribution.getParametersCollection()
print "parameters=", parameters
for i in range(6):
print "standard moment n=", i, " value=", distribution.getStandardMoment(i)
print "Standard representative=", distribution.getStandardRepresentative()
# N-D tests
dim = 4
distribution = Dirac(NumericalPoint(dim, 2.3))
print "Distribution ", distribution
# Is this distribution elliptical ?
print "Elliptical = ", distribution.isElliptical()
# Is this distribution continuous ?
print "Continuous = ", distribution.isContinuous()
# Test for realization of distribution
oneRealization = distribution.getRealization()
print "oneRealization=", oneRealization
# Test for sampling
size = 10000
oneSample = distribution.getSample( size )
print "oneSample first=", oneSample[0], " last=", oneSample[size - 1]
print "mean=", oneSample.computeMean()
sampleCovariance = oneSample.computeCovariance()
for i in range(dim):
for j in range(i + 1):
if (fabs(sampleCovariance[i, j]) < 1.0e-16):
sampleCovariance[i, j] = 0.0
print "covariance=", sampleCovariance
# Define a point
point = NumericalPoint( dim, 0.0 )
print "Point= ", point
# Show PDF and CDF of point
PDF = distribution.computePDF( point )
print "pdf =", PDF
print "pdf (FD)=", (distribution.computeCDF( point + NumericalPoint(dim, 0) ) - distribution.computeCDF( point + NumericalPoint(dim, -1) ))
CDF = distribution.computeCDF( point )
print "cdf=", CDF
# Define a point
point = NumericalPoint( distribution.getSupport(distribution.getRange())[0])
print "Point= ", point
# Show PDF and CDF of point
PDF = distribution.computePDF( point )
print "pdf =", PDF
print "pdf (FD)=", (distribution.computeCDF( point + NumericalPoint(dim, 0) ) - distribution.computeCDF( point + NumericalPoint(dim, -1) ))
CDF = distribution.computeCDF( point )
print "cdf=", CDF
quantile = distribution.computeQuantile( 0.95 )
print "quantile=", quantile
print "cdf(quantile)=", distribution.computeCDF(quantile)
mean = distribution.getMean()
print "mean=", mean
standardDeviation = distribution.getStandardDeviation()
print "standard deviation=", standardDeviation
skewness = distribution.getSkewness()
print "skewness=", skewness
kurtosis = distribution.getKurtosis()
print "kurtosis=", kurtosis
covariance = distribution.getCovariance()
print "covariance=", covariance
parameters = distribution.getParametersCollection()
print "parameters=", parameters
for i in range(6):
print "standard moment n=", i, " value=", distribution.getStandardMoment(i)
print "Standard representative=", distribution.getStandardRepresentative()
except :
import sys
print "t_Dirac.py", sys.exc_type, sys.exc_value
|
dbarbier/privot
|
python/test/t_Dirac_std.py
|
Python
|
lgpl-3.0
| 5,455
|
[
"DIRAC"
] |
3f23ac4c55e270fb44405980df30ebe031f04a9deff422ed7481c18316ad5d68
|
#!/usr/bin/python
# Copyright 2017 SuperDARN Canada
#
# brian.py
# 2018-01-30
# Communicate with all processes to administrate the borealis software
import sys
import os
import zmq
import time
import threading
from datetime import datetime, timedelta
sys.path.append(os.environ["BOREALISPATH"])
if __debug__: # TODO need to get build flavour from scons environment, 'release' may be 'debug'
sys.path.append(os.environ["BOREALISPATH"] + '/build/debug/utils/protobuf')
else:
sys.path.append(os.environ["BOREALISPATH"] + '/build/release/utils/protobuf')
import driverpacket_pb2
import sigprocpacket_pb2
import rxsamplesmetadata_pb2
import processeddata_pb2
# TODO: Socket options to look at: IDENTITY, AFFINITY, LINGER
# TODO: USE send_multipart, with human-readable identities, then the router knows how to handle the
# response to the request, see chapter 3, figures 29/30
RADCTRL_EXPHAN_IDEN = b"RADCTRL_EXPHAN_IDEN"
RADCTRL_DSP_IDEN = b"RADCTRL_DSP_IDEN"
RADCTRL_DRIVER_IDEN = b"RADCTRL_DRIVER_IDEN"
RADCTRL_BRIAN_IDEN = b"RADCTRL_BRIAN_IDEN"
DRIVER_RADCTRL_IDEN = b"DRIVER_RADCTRL_IDEN"
DRIVER_DSP_IDEN = b"DRIVER_DSP_IDEN"
DRIVER_BRIAN_IDEN = b"DRIVER_BRIAN_IDEN"
EXPHAN_RADCTRL_IDEN = b"EXPHAN_RADCTRL_IDEN"
EXPHAN_DSP_IDEN = b"EXPHAN_DSP_IDEN"
DSP_RADCTRL_IDEN = b"DSP_RADCTRL_IDEN"
DSP_DRIVER_IDEN = b"DSP_DRIVER_IDEN"
DSP_EXPHAN_IDEN = b"DSP_EXPHAN_IDEN"
DSP_DW_IDEN = b"DSP_DW_IDEN"
DSPBEGIN_BRIAN_IDEN = b"DSPBEGIN_BRIAN_IDEN"
DSPEND_BRIAN_IDEN = b"DSPEND_BRIAN_IDEN"
DW_DSP_IDEN = b"DW_DSP_IDEN"
BRIAN_RADCTRL_IDEN = b"BRIAN_RADCTRL_IDEN"
BRIAN_DRIVER_IDEN = b"BRIAN_DRIVER_IDEN"
BRIAN_DSPBEGIN_IDEN = b"BRIAN_DSPBEGIN_IDEN"
BRIAN_DSPEND_IDEN = b"BRIAN_DSPEND_IDEN"
ROUTER_ADDRESS="tcp://127.0.0.1:7878"
TIME = 0.087 #0.069
def create_sockets(identities, router_addr=ROUTER_ADDRESS):
"""Creates a DEALER socket for each identity in the list argument. Each socket is then connected
to the router
:param identities: Unique identities to give to sockets.
:type identities: List
:param router_addr: Address of the router socket, defaults to ROUTER_ADDRESS
:type router_addr: string, optional
:returns: Newly created and connected sockets.
:rtype: List
"""
context = zmq.Context().instance()
num_sockets = len(identities)
sockets = [context.socket(zmq.DEALER) for _ in range(num_sockets)]
for sk, iden in zip(sockets, identities):
sk.setsockopt(zmq.IDENTITY, iden)
sk.connect(router_addr)
return sockets
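# Hedged usage sketch (not in the original file): a process that talks to two
# peers creates one DEALER per conversation, e.g.
#     to_radctrl, to_dsp = create_sockets([EXPHAN_RADCTRL_IDEN, EXPHAN_DSP_IDEN])
# Each socket shares the process-wide zmq context and connects to the router.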
def recv_data(socket, sender_iden, pprint):
"""Receives data from a socket and verifies it comes from the correct sender.
:param socket: Socket to recv from.
:type socket: Zmq socket
:param sender_iden: Identity of the expected sender.
:type sender_iden: String
:param pprint: A function to pretty print the message
:type pprint: function
:returns: Received data
:rtype: String or Protobuf or None
"""
recv_identity, empty, data = socket.recv_multipart()
if recv_identity != sender_iden:
err_msg = "Expected identity {}, received from identity {}."
err_msg = err_msg.format(sender_iden, recv_identity)
pprint(err_msg)
return None
else:
return data
recv_reply = recv_request = recv_pulse = recv_data
def send_data(socket, recv_iden, msg):
"""Sends data to another identity.
:param socket: Socket to send from.
:type socket: Zmq socket.
:param recv_iden: The identity to send to.
:type recv_iden: String
:param msg: The data message to send.
:type msg: String
"""
frames = [recv_iden, b"", b"{}".format(msg)]
socket.send_multipart(frames)
send_reply = send_request = send_pulse = send_data
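# The aliases above bind one implementation to several role-specific names
# (reply/request/pulse), purely so call sites read like the protocol they speak.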
def radar_control(context=None):
"""Thread function for radar control
This function simulates the flow of data between radar control and other parts of the radar
system.
:param context: zmq context, if None, then this method will get one
:type context: zmq context, optional
"""
ids = [RADCTRL_EXPHAN_IDEN, RADCTRL_DSP_IDEN, RADCTRL_DRIVER_IDEN, RADCTRL_BRIAN_IDEN]
sockets_list = create_sockets(ids)
radar_control_to_exp_handler = sockets_list[0]
radar_control_to_dsp = sockets_list[1]
radar_control_to_driver = sockets_list[2]
radar_control_to_brian = sockets_list[3]
def printing(msg):
RADAR_CONTROL = "\033[33m" + "RADAR_CONTROL: " + "\033[0m"
sys.stdout.write(RADAR_CONTROL + msg + "\n")
time.sleep(1)
count = 0
time_counter = 0
time_inc = 0.0
while True:
#radar_control sends a request for an experiment to experiment_handler
printing("Requesting experiment")
send_request(radar_control_to_exp_handler, EXPHAN_RADCTRL_IDEN, "Requesting Experiment")
# radar_control receives new experiment
reply = recv_reply(radar_control_to_exp_handler, EXPHAN_RADCTRL_IDEN, printing)
reply_output = "Experiment handler sent -> {}".format(reply)
printing(reply_output)
start = time.time()
sigp = sigprocpacket_pb2.SigProcPacket()
sigp.sequence_time = TIME
sigp.sequence_num = count
count += 1
#Brian requests sequence metadata for timeouts
request = recv_request(radar_control_to_brian, BRIAN_RADCTRL_IDEN, printing)
request_output = "Brian requested -> {}".format(request)
printing(request_output)
send_reply(radar_control_to_brian, BRIAN_RADCTRL_IDEN, sigp.SerializeToString())
middle = time.time()
printing("brian time {}".format(middle-start))
#Radar control receives request for metadata from DSP
send_reply(radar_control_to_dsp, DSP_RADCTRL_IDEN, sigp.SerializeToString())
middle2 = time.time()
printing("dsp time {}".format(middle2-middle))
# sending pulses to driver
printing("Sending pulses")
num_pulses = range(8)
for i in num_pulses:
if i == num_pulses[0]:
pulse = "sob_pulse"
elif i == num_pulses[-1]:
pulse = "eob_pulse"
else:
pulse = str(i)
send_pulse(radar_control_to_driver, DRIVER_RADCTRL_IDEN, pulse)
end = time.time()
printing("Time {}".format(end-start))
time_counter += 1
time_inc += end - start
print("time_inc {}".format(time_inc))
if time_inc > 3.0:
printing("Number of averages: {}".format(time_counter))
time_counter = 0
time_inc = 0.0
def experiment_handler(context=None):
"""Thread function for experiment handler
This function simulates the flow of data between experiment handler and other parts of the radar
system.
:param context: zmq context, if None, then this method will get one
:type context: zmq context, optional
"""
ids = [EXPHAN_RADCTRL_IDEN, EXPHAN_DSP_IDEN]
sockets_list = create_sockets(ids)
exp_handler_to_radar_control = sockets_list[0]
exp_handler_to_dsp = sockets_list[1]
def printing(msg):
EXPERIMENT_HANDLER = "\033[34m" + "EXPERIMENT HANDLER: " + "\033[0m"
sys.stdout.write(EXPERIMENT_HANDLER + msg + "\n")
def update_experiment():
# Recv complete processed data from DSP
send_request(exp_handler_to_dsp, DSP_EXPHAN_IDEN, "Need completed data")
data = recv_data(exp_handler_to_dsp, DSP_EXPHAN_IDEN, printing)
data_output = "Dsp sent -> {}".format(data)
printing(data_output)
thread = threading.Thread(target=update_experiment)
thread.daemon = True
thread.start()
time.sleep(1)
while True:
# experiment_handler replies with an experiment to radar_control
request = recv_request(exp_handler_to_radar_control, RADCTRL_EXPHAN_IDEN, printing)
request_msg = "Radar control made request -> {}.".format(request)
printing(request_msg)
# sending experiment back to radar control
printing("Sending experiment")
send_reply(exp_handler_to_radar_control, RADCTRL_EXPHAN_IDEN, "Giving experiment")
def driver(context=None):
"""Thread function for driver
This function simulates the flow of data between the driver and other parts of the radar
system.
:param context: zmq context, if None, then this method will get one
:type context: zmq context, optional
"""
ids = [DRIVER_DSP_IDEN, DRIVER_RADCTRL_IDEN, DRIVER_BRIAN_IDEN]
sockets_list = create_sockets(ids)
driver_to_dsp = sockets_list[0]
driver_to_radar_control = sockets_list[1]
driver_to_brian = sockets_list[2]
def printing(msg):
DRIVER = "\033[34m" + "DRIVER: " + "\033[0m"
sys.stdout.write(DRIVER + msg + "\n")
time.sleep(1)
sq = 0
while True:
#getting pulses from radar control
while True:
pulse = recv_pulse(driver_to_radar_control, RADCTRL_DRIVER_IDEN, printing)
printing("Received pulse {}".format(pulse))
if pulse == "eob_pulse":
break
start = time.time()
time.sleep(TIME)
end = time.time()
samps_meta = rxsamplesmetadata_pb2.RxSamplesMetadata()
samps_meta.sequence_time = end - start
samps_meta.sequence_num = sq
sq += 1
#sending sequence data to dsp
request = recv_request(driver_to_dsp, DSP_DRIVER_IDEN, printing)
request_output = "Dsp sent -> {}".format(request)
printing(request_output)
send_reply(driver_to_dsp, DSP_DRIVER_IDEN, samps_meta.SerializeToString())
#sending collected data to brian
request = recv_request(driver_to_brian, BRIAN_DRIVER_IDEN, printing)
request_output = "Brian sent -> {}".format(request)
printing(request_output)
send_reply(driver_to_brian, BRIAN_DRIVER_IDEN, samps_meta.SerializeToString())
def dsp(context=None):
"""Thread function for dsp
This function simulates the flow of data between dsp and other parts of the radar
system.
:param context: zmq context, if None, then this method will get one
:type context: zmq context, optional
"""
ids = [DSP_RADCTRL_IDEN, DSP_DRIVER_IDEN, DSP_EXPHAN_IDEN, DSP_DW_IDEN, DSPBEGIN_BRIAN_IDEN,
DSPEND_BRIAN_IDEN]
sockets_list = create_sockets(ids)
dsp_to_radar_control = sockets_list[0]
dsp_to_driver = sockets_list[1]
dsp_to_experiment_handler = sockets_list[2]
dsp_to_data_write = sockets_list[3]
dsp_to_brian_begin = sockets_list[4]
dsp_to_brian_end = sockets_list[5]
def printing(msg):
DSP = "\033[35m" + "DSP: " + "\033[0m"
sys.stdout.write(DSP + msg + "\n")
time.sleep(1)
first_time = True
while True:
printing("Requesting metadata from radar control")
# # DSP makes a request for new sequence processing metadata to radar control
#send_request(dsp_to_radar_control, RADCTRL_DSP_IDEN,"Need metadata")
# Radar control sends back metadata
reply = recv_reply(dsp_to_radar_control, RADCTRL_DSP_IDEN, printing)
sigp = sigprocpacket_pb2.SigProcPacket()
sigp.ParseFromString(reply)
reply_output = "Radar control sent -> sequence {} time {}".format(sigp.sequence_num,
sigp.sequence_time)
printing(reply_output)
# request data from driver
send_request(dsp_to_driver, DRIVER_DSP_IDEN, "Need data to process")
reply = recv_reply(dsp_to_driver, DRIVER_DSP_IDEN, printing)
meta = rxsamplesmetadata_pb2.RxSamplesMetadata()
meta.ParseFromString(reply)
reply_output = "Driver sent -> time {}".format(meta.sequence_time)
printing(reply_output)
# Copy samples to device
time.sleep(TIME * 0.50)
# acknowledge start of work
request = recv_request(dsp_to_brian_begin, BRIAN_DSPBEGIN_IDEN, printing)
request_output = "Brian sent -> {}".format(request)
printing(request_output)
send_data(dsp_to_brian_begin, BRIAN_DSPBEGIN_IDEN, "Ack start of work, "
"sqnum {}".format(sigp.sequence_num))
# doing work!
def do_work(sqn_num):
sequence_num = sqn_num
start = time.time()
time.sleep(TIME * 0.9)
end = time.time()
proc_data = processeddata_pb2.ProcessedData()
proc_data.processing_time = end - start
proc_data.sequence_num = sequence_num
# acknowledge end of work
request = recv_request(dsp_to_brian_end, BRIAN_DSPEND_IDEN, printing)
request_output = "Brian sent -> {}".format(request)
printing(request_output)
send_data(dsp_to_brian_end, BRIAN_DSPEND_IDEN, proc_data.SerializeToString())
# send data to experiment handler
request = recv_request(dsp_to_experiment_handler, EXPHAN_DSP_IDEN, printing)
request_output = "Experiment handler sent -> {}".format(request)
printing(request_output)
send_data(dsp_to_experiment_handler, EXPHAN_DSP_IDEN, "All the datas")
# send data to data write
request = recv_request(dsp_to_data_write, DW_DSP_IDEN, printing)
request_output = "Data write sent -> {}".format(request)
printing(request_output)
send_data(dsp_to_data_write, DW_DSP_IDEN, "All the datas")
thread = threading.Thread(target=do_work, args=(sigp.sequence_num,))
thread.daemon = True
thread.start()
def data_write(context=None):
"""
Thread for data_write sockets testing
:param context: zmq context, if None, then this method will get one
:return:
"""
# Request socket to dsp (processed samples)
ids = [DW_DSP_IDEN]
sockets_list = create_sockets(ids)
data_write_to_dsp = sockets_list[0]
def printing(msg):
DATA_WRITE = "\033[32m" + "DATA WRITE: " + "\033[0m"
sys.stdout.write(DATA_WRITE + msg + "\n")
time.sleep(1)
while True:
# Request processed data
send_request(data_write_to_dsp, DSP_DW_IDEN, "Requesting processed data")
data = recv_data(data_write_to_dsp, DSP_DW_IDEN, printing)
data_output = "Dsp sent -> {}".format(data)
printing(data_output)
late_counter = 0
def sequence_timing():
"""Thread function for sequence timing
This function simulates the flow of data between brian's sequence timing and other parts of the
radar system. This function serves to check whether the sequence timing is working as expected
and to rate control the system to make sure the processing can handle data rates.
:param context: zmq context, if None, then this method will get one
:type context: zmq context, optional
"""
ids = [BRIAN_RADCTRL_IDEN, BRIAN_DRIVER_IDEN, BRIAN_DSPBEGIN_IDEN, BRIAN_DSPEND_IDEN]
sockets_list = create_sockets(ids)
brian_to_radar_control = sockets_list[0]
brian_to_driver = sockets_list[1]
brian_to_dsp_begin = sockets_list[2]
brian_to_dsp_end = sockets_list[3]
sequence_poller = zmq.Poller()
sequence_poller.register(brian_to_radar_control, zmq.POLLIN)
sequence_poller.register(brian_to_dsp_begin, zmq.POLLIN)
sequence_poller.register(brian_to_dsp_end, zmq.POLLIN)
sequence_poller.register(brian_to_driver, zmq.POLLIN)
def printing(msg):
SEQUENCE_TIMING = "\033[31m" + "SEQUENCE TIMING: " + "\033[0m"
sys.stdout.write(SEQUENCE_TIMING + msg + "\n")
context = zmq.Context().instance()
start_new_sock = context.socket(zmq.PAIR)
start_new_sock.bind("inproc://start_new")
def start_new():
""" This function serves to rate control the system. If processing is faster than the
sequence time than the speed of the driver is the limiting factor. If processing takes
longer than sequence time, than the dsp unit limits the speed of the system.
"""
start_new = context.socket(zmq.PAIR)
start_new.connect("inproc://start_new")
want_to_start = False
good_to_start = True
extra_good_to_start = False
dsp_finish_counter = 2
while True:
if want_to_start and good_to_start and dsp_finish_counter:
#Acknowledge new sequence can begin to Radar Control by requesting new sequence
#metadata
printing("Requesting metadata from Radar control")
send_request(brian_to_radar_control, RADCTRL_BRIAN_IDEN, "Requesting metadata")
want_to_start = good_to_start = False
dsp_finish_counter -= 1
message = start_new.recv()
if message == "want_to_start":
want_to_start = True
if message == "good_to_start":
good_to_start = True
if message == "extra_good_to_start":
dsp_finish_counter = 1
print("WTS {}, GTS {}, EGTS {}".format(want_to_start, good_to_start, extra_good_to_start))
thread = threading.Thread(target=start_new)
thread.daemon = True
thread.start()
time.sleep(1)
pulse_seq_times = {}
driver_times = {}
processing_times = {}
first_time = True
processing_done = True
while True:
if first_time:
#Request new sequence metadata
printing("Requesting metadata from Radar control")
send_request(brian_to_radar_control, RADCTRL_BRIAN_IDEN, "Requesting metadata")
first_time = False
start = time.time()
socks = dict(sequence_poller.poll())
end = time.time()
printing("Poller time {}".format(end-start))
if brian_to_driver in socks and socks[brian_to_driver] == zmq.POLLIN:
#Receive metadata of completed sequence from driver such as timing
reply = recv_reply(brian_to_driver, DRIVER_BRIAN_IDEN, printing)
meta = rxsamplesmetadata_pb2.RxSamplesMetadata()
meta.ParseFromString(reply)
reply_output = "Driver sent -> time {}, sqnum {}".format(meta.sequence_time, meta.sequence_num)
printing(reply_output)
driver_times[meta.sequence_num] = meta.sequence_time
#Requesting acknowledgement of work begins from DSP
printing("Requesting work begins from DSP")
send_request(brian_to_dsp_begin, DSPBEGIN_BRIAN_IDEN, "Requesting work begins")
#acknowledge we want to start something new
start_new_sock.send("want_to_start")
if brian_to_radar_control in socks and socks[brian_to_radar_control] == zmq.POLLIN:
#Get new sequence metadata from radar control
reply = recv_reply(brian_to_radar_control, RADCTRL_BRIAN_IDEN, printing)
sigp = sigprocpacket_pb2.SigProcPacket()
sigp.ParseFromString(reply)
reply_output = "Radar control sent -> sequence {} time {}".format(sigp.sequence_num,
sigp.sequence_time)
printing(reply_output)
pulse_seq_times[sigp.sequence_num] = sigp.sequence_time
#Request acknowledgement of sequence from driver
printing("Requesting ack from driver")
send_request(brian_to_driver, DRIVER_BRIAN_IDEN, "Requesting ack")
if brian_to_dsp_begin in socks and socks[brian_to_dsp_begin] == zmq.POLLIN:
#Get acknowledgement that work began in processing.
reply = recv_reply(brian_to_dsp_begin, DSPBEGIN_BRIAN_IDEN, printing)
reply_output = "Dsp sent -> {}".format(reply)
printing(reply_output)
#Requesting acknowledgement of work ends from DSP
printing("Requesting work end from DSP")
send_request(brian_to_dsp_end, DSPEND_BRIAN_IDEN, "Requesting work ends")
#acknowledge that we are good and able to start something new
start_new_sock.send("good_to_start")
if brian_to_dsp_end in socks and socks[brian_to_dsp_end] == zmq.POLLIN:
global late_counter
#Receive ack that work finished on previous sequence.
reply = recv_reply(brian_to_dsp_end, DSPEND_BRIAN_IDEN, printing)
proc_d = processeddata_pb2.ProcessedData()
proc_d.ParseFromString(reply)
reply_output = "Dsp sent -> time {}, sqnum {}".format(proc_d.processing_time, proc_d.sequence_num)
printing(reply_output)
print(proc_d.sequence_num)
processing_times[proc_d.sequence_num] = proc_d.processing_time
if proc_d.sequence_num != 0:
if proc_d.processing_time > processing_times[proc_d.sequence_num-1]:
late_counter +=1
else:
late_counter = 0
printing("Late counter {}".format(late_counter))
#acknowledge that we are good and able to start something new
start_new_sock.send("extra_good_to_start")
def router():
context = zmq.Context().instance()
router = context.socket(zmq.ROUTER)
router.bind(ROUTER_ADDRESS)
sys.stdout.write("Starting router!\n")
while True:
dd = router.recv_multipart()
#sys.stdout.write(dd)
sender, receiver, empty, data = dd
output = "Router input/// Sender -> {}: Receiver -> {}: empty: Data -> {}\n".format(*dd)
#sys.stdout.write(output)
frames = [receiver,sender,empty,data]
output = "Router output/// Receiver -> {}: Sender -> {}: empty: Data -> {}\n".format(*frames)
#sys.stdout.write(output)
router.send_multipart(frames)
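# Envelope note (editorial comment): each DEALER sends [receiver, empty, data];
# the ROUTER prepends the sender's identity on receipt, giving the four frames
# unpacked above, so forwarding is just swapping the first two frames.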
if __name__ == "__main__":
import threading
sys.stdout.write("BRIAN_TESTER: Main\n")
threads = []
threads.append(threading.Thread(target=router))
time.sleep(0.5)
threads.append(threading.Thread(target=radar_control))
threads.append(threading.Thread(target=experiment_handler))
threads.append(threading.Thread(target=sequence_timing))
threads.append(threading.Thread(target=driver))
threads.append(threading.Thread(target=dsp))
threads.append(threading.Thread(target=data_write))
for thread in threads:
thread.daemon = True
thread.start()
sys.stdout.write("BRIAN_TESTER: Threads all set up\n")
while True:
time.sleep(1)
|
SuperDARNCanada/placeholderOS
|
tools/simulators/brian_testing/brian_test.py
|
Python
|
gpl-3.0
| 22,607
|
[
"Brian"
] |
93d4572f9662fc8091608b00d64e5953b4305f11a39662127035be93bbfb1179
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""INPCRD structure files in MDAnalysis --- :mod:`MDAnalysis.coordinates.INPCRD`
================================================================================
Read coordinates in Amber_ coordinate/restart file (suffix "inpcrd").
.. _Amber: http://ambermd.org/formats.html#restart
Classes
-------
.. autoclass:: INPReader
:members:
"""
from . import base
class INPReader(base.SingleFrameReaderBase):
"""Reader for Amber restart files."""
format = ['INPCRD', 'RESTRT']
units = {'length': 'Angstrom'}
def _read_first_frame(self):
# Read header
with open(self.filename, 'r') as inf:
self.title = inf.readline().strip()
line = inf.readline().split()
self.n_atoms = int(line[0])
self.ts = self._Timestep(self.n_atoms, **self._ts_kwargs)
try:
time = float(line[1])
except IndexError:
pass
else:
self.ts.time = time
self.ts.frame = 0
for p in range(self.n_atoms // 2):
line = inf.readline()
# each float is f12.7, 6 floats a line
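# e.g. a line "  12.3456789  -4.5678901 ..." is sliced into 12-char fields:
# float(line[0:12]), float(line[12:24]), ... matching Amber's 6F12.7 layout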
for i, dest in enumerate([(2*p, 0), (2*p, 1), (2*p, 2),
(2*p + 1, 0), (2*p + 1, 1), (2*p + 1, 2)]):
self.ts._pos[dest] = float(line[i*12:(i+1)*12])
# Read last coordinate if necessary
if self.n_atoms % 2:
line = inf.readline()
for i in range(3):
self.ts._pos[-1, i] = float(line[i*12:(i+1)*12])
@staticmethod
def parse_n_atoms(filename, **kwargs):
with open(filename, 'r') as f:
f.readline()
n_atoms = int(f.readline().split()[0])
return n_atoms
|
MDAnalysis/mdanalysis
|
package/MDAnalysis/coordinates/INPCRD.py
|
Python
|
gpl-2.0
| 2,866
|
[
"Amber",
"MDAnalysis"
] |
86bae6566cc64d65f1109ead74f960068bcf6f8136b35979d6e9154e1f401ef6
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
# pyeq2 is a collection of equations expressed as Python classes
#
# Copyright (C) 2013 James R. Phillips
# 2548 Vera Cruz Drive
# Birmingham, AL 35235 USA
#
# email: zunzun@zunzun.com
#
# License: BSD-style (see LICENSE.txt in main source directory)
import sys, os
if os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..') not in sys.path:
sys.path.append(os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..'))
import pyeq2
import numpy
numpy.seterr(all='ignore')
import pyeq2.Model_3D_BaseClass
class ExtremeValueA(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Extreme Value A"
_HTML = 'z = a * exp(-exp(-(x-b)/c)-(x-b)/c+1) + d * exp(-exp(-(y-f)/g)-(y-f)/g+1)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
g = inCoeffs[5]
try:
temp = a * numpy.exp(-numpy.exp(-(x_in-b)/c)-(x_in-b)/c+1.0)
temp += d * numpy.exp(-numpy.exp(-(y_in-f)/g)-(y_in-f)/g+1.0)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except Exception:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * exp(-exp(-(x_in-b)/c)-(x_in-b)/c+1.0) + d * exp(-exp(-(y_in-f)/g)-(y_in-f)/g+1.0);\n"
return s
class ExtremeValueB(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Extreme Value B"
_HTML = 'z = a * exp(-exp(-(x-b)/c)-(x-b)/c+1) * exp(-exp(-(y-d)/f)-(y-d)/f+1)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
try:
temp = a * numpy.exp(-numpy.exp(-(x_in-b)/c)-(x_in-b)/c+1.0) * numpy.exp(-numpy.exp(-(y_in-d)/f)-(y_in-d)/f+1.0)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except Exception:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * exp(-exp(-(x_in-b)/c)-(x_in-b)/c+1.0) * exp(-exp(-(y_in-d)/f)-(y_in-d)/f+1.0);\n"
return s
class GaussianA(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Gaussian A"
_HTML = 'z = a * exp(-0.5 * (((x-b)/c)<sup>2</sup> + ((y-d)/f)<sup>2</sup>))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
try:
temp = a * numpy.exp(-0.5 * (numpy.square((x_in-b)/c) + numpy.square((y_in-d)/f)))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except Exception:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * exp(-0.5 * (pow((x_in-b)/c, 2.0) + pow((y_in-d)/f, 2.0)));\n"
return s
class GaussianB(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Gaussian B"
_HTML = 'z = a * exp(-0.5 * (((x-b)/c)<sup>2</sup>)) + d * exp(-0.5 * (((y-f)/g)<sup>2</sup>))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
g = inCoeffs[5]
try:
temp = a * numpy.exp(-0.5 * (numpy.square((x_in-b)/c)))
temp += d * numpy.exp(-0.5 * (numpy.square((y_in-f)/g)))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except Exception:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * exp(-0.5 * (pow((x_in-b)/c, 2.0))) + d * exp(-0.5 * (pow((y_in-f)/g, 2.0)));\n"
return s
class LogNormalA(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Log-Normal A"
_HTML = 'z = a * exp(-0.5 * (((ln(x)-b)/c)<sup>2</sup> + ((ln(y)-d)/f)<sup>2</sup>))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = True
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = True
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.LogX(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.LogY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
LogX = inDataCacheDictionary['LogX'] # only need to perform this dictionary look-up once
LogY = inDataCacheDictionary['LogY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
try:
temp = a * numpy.exp(-0.5 * (numpy.square((LogX-b)/c) + numpy.square((LogY-d)/f)))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except Exception:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * exp(-0.5 * (pow((log(x_in)-b)/c, 2.0) + pow((log(y_in)-d)/f, 2.0)));\n"
return s
class LogNormalB(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Log-Normal B"
_HTML = 'z = a * exp(-0.5 * (((ln(x)-b)/c)<sup>2</sup>)) + d * exp(-0.5 * (((ln(y)-f)/g)<sup>2</sup>))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = True
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = True
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.LogX(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.LogY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
LogX = inDataCacheDictionary['LogX'] # only need to perform this dictionary look-up once
LogY = inDataCacheDictionary['LogY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
g = inCoeffs[5]
try:
temp = a * numpy.exp(-0.5 * (numpy.square((LogX-b)/c)))
temp += d * numpy.exp(-0.5 * (numpy.square((LogY-f)/g)))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except Exception:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * exp(-0.5 * (pow((log(x_in)-b)/c, 2.0))) + d * exp(-0.5 * (pow((log(y_in)-f)/g, 2.0)));\n"
return s
class LogisticA(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Logistic A"
_HTML = 'z = 4a * exp(-((x-b)/c))/((1+exp(-((x-b)/c)))<sup>2</sup>) + 4d * exp(-((y-f)/g))/((1+exp(-((y-f)/g)))<sup>2</sup>)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
g = inCoeffs[5]
try:
temp = 4.0 * a * numpy.exp(-((x_in-b)/c)) / numpy.square(1.0 + numpy.exp(-((x_in-b)/c)))
temp += 4.0 * d * numpy.exp(-((y_in-f)/g)) / numpy.square(1.0 + numpy.exp(-((y_in-f)/g)))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except Exception:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = 4.0 * a * exp(-((x_in-b)/c)) / pow(1.0 + exp(-((x_in-b)/c)), 2.0) + 4.0 * d * exp(-((y_in-f)/g)) / pow(1.0 + exp(-((y_in-f)/g)), 2.0);\n"
return s
class LogisticB(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Logistic B"
_HTML = 'z = 16a * exp(-((x-b)/c)-((y-d)/f)) / ((1+exp(-((x-b)/c)))<sup>2</sup> * (1+exp(-((y-d)/f)))<sup>2</sup>)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
try:
temp = 16.0 * a * numpy.exp(-((x_in-b)/c)-((y_in-d)/f)) / (numpy.square(1.0 + numpy.exp(-((x_in-b)/c))) * numpy.square(1.0 + numpy.exp(-((y_in-d)/f))))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except Exception:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = 16.0 * a * exp(-((x_in-b)/c)-((y_in-d)/f)) / (pow(1.0 + exp(-((x_in-b)/c)), 2.0) * pow(1.0 + exp(-((y_in-d)/f)), 2.0));\n"
return s
class LorentzianA(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Lorentzian A"
_HTML = 'z = a / ((1+((x-b)/c)<sup>2</sup>)*(1+((y-d)/f)<sup>2</sup>))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
try:
temp = a / ((1.0 + numpy.square((x_in-b)/c)) * (1.0 + numpy.square((y_in-d)/f)))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except Exception:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a / ((1.0 + pow((x_in-b)/c, 2.0)) * (1.0 + pow((y_in-d)/f, 2.0)));\n"
return s
class LorentzianB(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Lorentzian B"
_HTML = 'z = a / (1+((x-b)/c)<sup>2</sup>) + d * (1+((y-f)/g)<sup>2</sup>)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
g = inCoeffs[5]
try:
temp = a / (1.0 + numpy.square((x_in-b)/c)) + d * (1.0 + numpy.square((y_in-f)/g))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except Exception:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a / (1.0 + pow((x_in-b)/c, 2.0)) + d * (1.0 + pow((y_in-f)/g, 2.0));\n"
return s
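# A minimal fitting sketch, assuming the usual pyeq2 workflow (the columnar
# "x y z" data string below is made up): pick a model and fitting target,
# convert the text data, then solve.
#
#   import pyeq2
#   model = pyeq2.Models_3D.Peak.GaussianA('SSQABS')
#   data = '1.0 1.0 0.1\n2.0 2.0 0.9\n3.0 3.0 0.2\n4.0 1.0 0.1\n5.0 2.0 0.05\n6.0 3.0 0.02'
#   pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(data, model, False)
#   model.Solve()
#   print(model.solvedCoefficients)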
|
burkesquires/pyeq2
|
Models_3D/Peak.py
|
Python
|
bsd-2-clause
| 22,451
|
[
"Gaussian"
] |
9a31750d027472fac1340ff952fb70f909214c7b12e135866e66b1c73c895422
|
#!usr/bin/env python
from pyspace.planet import PlanetArray
from pyevtk.hl import pointsToVTK
def dump_vtk(pa, filename, base = ".", **data):
"""Dumps vtk output to file 'base/filename'"""
if not data:
data = {'v_x': pa.v_x, 'v_y': pa.v_y, 'v_z': pa.v_z}
pointsToVTK(base + "/" + filename, pa.x, pa.y, pa.z, data = data)
def get_planet_array(*args, **kwargs):
"""Returns a PlanetArray"""
pa = PlanetArray(*args, **kwargs)
return pa
|
adityapb/pyspace
|
pyspace/utils.py
|
Python
|
gpl-3.0
| 467
|
[
"VTK"
] |
387f5a51268ea633292cdfcc43387a4b0743caad742572dd7aec31f8f810b15f
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 5 12:26:40 2014
:module-author: steffen
:filename: config.py
This module provides a high-level layer for reading and writing config files.
There must be a file called "config.ini" in the root-folder of the project.
The file has to be of the following structure to be imported correctly.
# this is a comment \n
# the filestructure is like: \n
\n
[netCDF] \n
RootFolder = c://netCDF \n
FilePrefix = cd2_ \n
\n
[mySQL] \n
host = localhost \n
user = guest \n
password = root \n
database = znes \n
\n
[SectionName] \n
OptionName = value \n
Option2 = value2 \n
"""
import configparser as cp
import logging
import os
FILENAME = 'config.ini'
FILE = os.path.join(os.path.expanduser("~"), '.oemof', FILENAME)
cfg = cp.RawConfigParser()
_loaded = False
def load_config(filename):
"""
Load data from config file to `cfg` that can be accessed by get, set
afterwards.
Specify absolute or relative path to your config file.
:param filename: Relative or absolute path
:type filename: str
"""
if filename is None:
filename = ''
abs_filename = os.path.join(os.getcwd(), filename)
global FILE
# find the config file
if os.path.isfile(filename):
FILE = filename
elif os.path.isfile(abs_filename):
FILE = abs_filename
elif os.path.isfile(FILE):
pass
else:
if os.path.dirname(filename):
file_not_found = filename
else:
file_not_found = abs_filename
file_not_found_message(file_not_found)
# load config
init(FILE)
def file_not_found_message(file_not_found):
"""
Show error message incl. help if file not found
:param file_not_found: path of the config file that could not be found
:type file_not_found: str
"""
logging.error(
"""
Config file {file} cannot be found. Make sure this file exists!
An exemplary section in the config file looks as follows
[database]
username = username under which to connect to the database
database = name of the database from which to read
host = host to connect to
port = port to connect to
For further advice, see in the docs (https://oemofdb.readthedocs.io)
how to format the config.
""".format(
file=file_not_found
)
)
def main():
pass
def init(FILE):
"""
Read config file
:param FILE: Absolute path to config file (incl. filename)
:type FILE: str
"""
try:
cfg.read(FILE)
global _loaded
_loaded = True
except Exception:
file_not_found_message(FILE)
def get(section, key):
"""
returns the value of a given key of a given section of the main
config file.
:param section: the section.
:type section: str.
:param key: the key.
:type key: str.
:returns: the value, cast to float, int or boolean where possible;
if no cast is successful, the raw string is returned.
"""
# FILE = 'config_misc'
if not _loaded:
init(FILE)
try:
return cfg.getfloat(section, key)
except Exception:
try:
return cfg.getint(section, key)
except Exception:
try:
return cfg.getboolean(section, key)
except Exception:
return cfg.get(section, key)
def set(section, key, value):
"""
sets a value to a [section] key - pair.
if the section doesn't exist yet, it will be created.
:param section: the section.
:type section: str.
:param key: the key.
:type key: str.
:param value: the value.
:type value: float, int, str.
"""
if not _loaded:
init(FILE)
if not cfg.has_section(section):
cfg.add_section(section)
cfg.set(section, key, value)
with open(FILE, 'w') as configfile:
cfg.write(configfile)
if __name__ == "__main__":
main()
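# A minimal usage sketch (section/option names below are made up):
#
#   from oemof.db import config
#   config.load_config('config.ini')      # falls back to ~/.oemof/config.ini
#   host = config.get('mySQL', 'host')    # cast to float/int/bool if possible
#   config.set('mySQL', 'port', '3306')   # creates the section if missing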
|
oemof/oemof_pg
|
src/oemof/db/config.py
|
Python
|
gpl-3.0
| 3,939
|
[
"NetCDF"
] |
a7811a7df492635313b42d9a669dcb90a6b80d8759f5030c6a3a110875746c31
|
'''
This script demonstrates how to create a periodic Gaussian process
using the *gpiso* function.
'''
import numpy as np
import matplotlib.pyplot as plt
from sympy import sin, exp, pi
from rbf.basis import get_r, get_eps, RBF
from rbf.gproc import gpiso
np.random.seed(1)
period = 5.0
cls = 0.5 # characteristic length scale
var = 1.0 # variance
r = get_r() # get symbolic variables
eps = get_eps()
# create a symbolic expression of the periodic covariance function
expr = exp(-sin(r*pi/period)**2/eps**2)
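# (this is the standard periodic "exp-sine-squared" covariance,
# k(r) = exp(-sin^2(pi*r/period)/eps^2), so samples repeat every `period`)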
# define a periodic RBF using the symbolic expression
basis = RBF(expr)
# define a Gaussian process using the periodic RBF
gp = gpiso(basis, eps=cls, var=var)
t = np.linspace(-10, 10, 1000)[:,None]
sample = gp.sample(t) # draw a sample
mu,sigma = gp(t) # evaluate mean and std. dev.
# plot the results
fig,ax = plt.subplots(figsize=(6,4))
ax.grid(True)
ax.plot(t[:,0], mu, 'b-', label='mean')
ax.fill_between(
t[:,0], mu - sigma, mu + sigma,
color='b', alpha=0.2, edgecolor='none', label='std. dev.')
ax.plot(t, sample, 'k', label='sample')
ax.set_xlim((-10.0, 10.0))
ax.set_ylim((-2.5*var, 2.5*var))
ax.legend(loc=4, fontsize=10)
ax.tick_params(labelsize=10)
ax.set_xlabel('time', fontsize=10)
ax.set_title('periodic Gaussian process', fontsize=10)
fig.tight_layout()
plt.savefig('../figures/gproc.e.png')
plt.show()
|
treverhines/RBF
|
docs/scripts/gproc.e.py
|
Python
|
mit
| 1,343
|
[
"Gaussian"
] |
e3cdb541351bd51e9028a1c3d85af8769ded351ac77c7ee191ce8029b37bf82a
|
# Graph Algorithms using basic python Constructs.
# Narayana Chikkam, Dec, 22, 2015.
from collections import defaultdict
from heapq import *
import itertools
import copy
from lib.unionfind import (
UnionFind
)
from lib.prioritydict import (
priorityDictionary
)
class Vertex:
def __init__(self, id):
self.id = id
self.neighbours = {}
def addNeighbour(self, id, weight):
self.neighbours[id] = weight
def __str__(self):
return str(self.id) + ': ' + str(self.neighbours.keys())
def getNeighbours(self):
return self.neighbours # .keys()
def getName(self):
return self.id
def getWeight(self, id):
return self.neighbours[id]
class Graph:
def __init__(self):
self.v = {}
self.count = 0
def addVertex(self, key):
self.count += 1
newV = Vertex(key)
self.v[key] = newV
def getVertex(self, id):
if id in self.v.keys():
return self.v[id]
return None
def __contains__(self, id):
return id in self.v.keys()
# vertexOne, vertexTwo, cost-of-the-edge
def addEdge(self, vertexOne, vertexTwo, weight=None):
if vertexOne not in self.v.keys():
self.addVertex(vertexOne)
if vertexTwo not in self.v.keys():
self.addVertex(vertexTwo)
self.v[vertexOne].addNeighbour(vertexTwo, weight)
# vertexOne, vertexTwo, cost-of-the-edge
def updateEdge(self, vertexOne, vertexTwo, weight=None):
self.v[vertexOne].addNeighbour(vertexTwo, weight)
def getVertices(self):
return self.v.keys()
def __str__(self):
ret = "{ "
for v in self.v.keys():
ret += str(self.v[v].__str__()) + ", "
return ret + " }"
def __iter__(self):
return iter(self.v.values())
def getNeighbours(self, vertex):
if vertex not in self.v.keys():
raise "Node %s not in graph" % vertex
return self.v[vertex].neighbours # .keys()
def getEdges(self):
edges = []
for node in self.v.keys():
neighbours = self.v[node].getNeighbours()
for w in neighbours:
# tuple, srcVertex, dstVertex, weightBetween
edges.append((node, w, neighbours[w]))
return edges
def findIsolated(self):
isolated = []
for node in self.v:
deadNode = False
reachable = True
# dead node, can't reach any other node from this
if len(self.v[node].getNeighbours()) == 0:
deadNode = True
# reachable from other nodes ?
nbrs = [n.neighbours.keys() for n in self.v.values()]
# flatten the nested list
nbrs = list(itertools.chain(*nbrs))
if node not in nbrs:
reachable = False
if deadNode and not reachable:
isolated.append(node)
return isolated
def getPath(self, start, end, path=[]):
path = path + [start]
if start == end:
return path
if start not in self.v:
return None
for vertex in self.v[start].getNeighbours():
if vertex not in path:
extended_path = self.getPath(vertex,
end,
path)
if extended_path:
return extended_path
return None
def getAllPaths(self, start, end, path=[]):
path = path + [start]
if start == end:
return [path]
if start not in self.v:
return []
paths = []
for vertex in self.v[start].getNeighbours():
if vertex not in path:
extended_paths = self.getAllPaths(vertex,
end,
path)
for p in extended_paths:
paths.append(p)
return paths
def inDegree(self, vertex):
"""
how many edges coming into this vertex
"""
nbrs = [n.neighbours.keys() for n in self.v.values()]
# flatten the nested list
nbrs = list(itertools.chain(*nbrs))
return nbrs.count(vertex)
def outDegree(self, vertex):
"""
how many vertices are neighbours to this vertex
"""
adj_vertices = self.v[vertex].getNeighbours()
return len(adj_vertices)
"""
The degree of a vertex is the number of edges connecting to it;
a loop is counted twice.
For an undirected Graph, deg(v) = indegree(v) + outdegree(v)
"""
def getDegree(self, vertex):
return self.inDegree(vertex) + self.outDegree(vertex)
def verifyDegreeSumFormula(self):
"""Handshaking lemma - Vdeg(v) = 2 |E| """
degSum = 0
for v in self.v:
degSum += self.getDegree(v)
return degSum == (2 * len(self.getEdges()))
def delta(self):
""" the minimum degree of the Graph V """
min = 2**64
for vertex in self.v:
vertex_degree = self.getDegree(vertex)
if vertex_degree < min:
min = vertex_degree
return min
def Delta(self):
""" the maximum degree of the Graph V """
max = -2**64
for vertex in self.v:
vertex_degree = self.getDegree(vertex)
if vertex_degree > max:
max = vertex_degree
return max
def degreeSequence(self):
"""
The degree sequence is the non-increasing sorted list of the vertices' degrees.
Isomorphic graphs have the same degree sequence. However,
two graphs with the same degree sequence are not necessarily
isomorphic.
More-Info:
http://en.wikipedia.org/wiki/Graph_realization_problem
"""
seq = []
for vertex in self.v:
seq.append(self.getDegree(vertex))
seq.sort(reverse=True)
return tuple(seq)
# helper to check if the given sequence is in non-increasing Order ;)
@staticmethod
def sortedInDescendingOrder(seq):
return all(x >= y for x, y in zip(seq, seq[1:]))
@staticmethod
def isGraphicSequence(seq):
"""
Assumes that the degreeSequence is a list of non negative integers
http://en.wikipedia.org/wiki/Erd%C5%91s%E2%80%93Gallai_theorem
"""
# Check to ensure there are an even number of odd degrees
if sum(seq) % 2 != 0:
return False
# Erdos-Gallai theorem
for k in range(1, len(seq) + 1):
leftSum = sum(seq[:(k)])
rightSum = k * (k - 1) + sum([min(x, k) for x in seq[k:]])
if leftSum > rightSum:
return False
return True
@staticmethod
def isGraphicSequenceIterative(s):
# successively reduce degree sequence by removing node of maximum degree
# as in Havel-Hakimi algorithm
while s:
s.sort() # sort in increasing order
if s[0] < 0:
return False # check if removed too many from some node
d = s.pop() # pop largest degree
if d == 0:
return True # done! rest must be zero due to ordering
# degree must be <= number of available nodes
if d > len(s):
return False
# remove edges to nodes of next higher degrees
# s.reverse() # to make it easy to get at higher degree nodes.
for i in range(len(s) - 1, len(s) - (d + 1), -1):
s[i] -= 1
# should never get here b/c either d==0, d>len(s) or d<0 before s=[]
return False
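# A small illustration (hypothetical sequences): both checks agree, e.g.
#   Graph.isGraphicSequence([3, 3, 2, 2, 1, 1])           -> True
#   Graph.isGraphicSequenceIterative([3, 3, 2, 2, 1, 1])  -> True
#   Graph.isGraphicSequence([3, 2, 1])                    -> False
# (a vertex of degree 3 would need 3 neighbours among only 2 other vertices;
# note the iterative check sorts and consumes the list passed to it)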
def density(self):
"""
In mathematics, a dense graph is a graph in which the number of edges
is close to the maximal number of edges. The opposite, a graph with
only a few edges, is a sparse graph. The distinction between sparse
and dense graphs is rather vague, and depends on the context.
For undirected simple graphs, the graph density is defined as:
D = 2*E / (V*(V-1))
For a complete Graph, the Density D is 1
"""
""" method to calculate the density of a graph """
V = len(self.v.keys())
E = len(self.getEdges())
return 2.0 * E / (V * (V - 1))
"""
Choose an arbitrary node x of the graph G as the starting point
Determine the set A of all the nodes which can be reached from x.
If A is equal to the set of nodes of G, the graph is connected; otherwise
it is disconnected.
"""
def isConnected(self, start=None):
if start is None:
start = next(iter(self.v))
reachables = self.dfs(start, [])
return len(reachables) == len(self.v.keys())
"""
ToDo: USE CLR Approach for this Later
"""
def dfs(self, start, path=[]):
path = path + [start]
for v in self.v[start].getNeighbours().keys():
if v not in path:
path = self.dfs(v, path)
return path
"""
CLR style
"""
def CLR_Dfs(self):
paths = []
for v in self.v.keys():
explored = self.dfs(v, [])
if len(explored) == len(self.v.keys()):
paths.append(explored)
return paths
def BFS(self, start):
# initialize lists
maxV = len(self.v.keys())
processed = [False] * (maxV) # which vertices have been processed
discovered = [False] * (maxV) # which vertices have been found
parent = [-1] * (maxV) # discovery relation
q = [] # queue of vertices to visit */
# enqueue(&q,start);
q.append(start)
discovered[start] = True
while (len(q) != 0):
v = q.pop(0)
processed[v] = True
nbrs = self.v[v].getNeighbours().keys()
# print nbrs
for n in nbrs:
# if processed[n] == False
if discovered[n] == False:
q.append(n)
discovered[n] = True
parent[n] = v
return (discovered, parent)
def findPath(self, start, end, parents, path):
if ((start == end) or (end == -1)):
path.append(start)
else:
self.findPath(start, parents[end], parents, path)
path.append(end)
"""
Find path between two given nodes
"""
def find_path(self, start, end, path=[]):
path = path + [start]
if start == end:
return path
if start not in self.v:
return None
for node in self.v[start].getNeighbours().keys():
if node not in path:
newpath = self.find_path(node, end, path)
if newpath:
return newpath
return None
"""
Find all paths
"""
def find_all_paths(self, start, end, path=[]):
path = path + [start]
if start == end:
return [path]
if start not in self.v:
return []
paths = []
for node in self.v[start].getNeighbours().keys():
if node not in path:
newpaths = self.find_all_paths(node, end, path)
for newpath in newpaths:
paths.append(newpath)
return paths
"""
Find shorted path w.r.t no of vertices on the path
"""
def find_shortest_path(self, start, end, path=[]):
path = path + [start]
if start == end:
return path
if start not in self.v:
return None
shortest = None
for node in self.v[start].getNeighbours().keys():
if node not in path:
newpath = self.find_shortest_path(node, end, path)
if newpath:
if not shortest or len(newpath) < len(shortest):
shortest = newpath
return shortest
"""
Prim's algorithm - greedy strategy: repeatedly add the cheapest edge
leaving the tree built so far, so the partial result always stays a
single connected tree (Kruskal's, by contrast, grows a forest).
Prefer Prim's algorithm if you don't know all the edge weights up
front, or if you only need a partial solution on the graph.
"""
def mspPrims(self):
nodes = list(self.v.keys())
edges = [(u, v, c) for u in self.v.keys()
for v, c in self.v[u].getNeighbours().items()]
return self.prim(nodes, edges)
def prim(self, nodes, edges):
conn = defaultdict(list)
for n1, n2, c in edges: # makes graph undirected
conn[n1].append((c, n1, n2))
conn[n2].append((c, n2, n1))
mst = []
used = set()
used.add(nodes[0])
usable_edges = conn[nodes[0]][:]
heapify(usable_edges)
while usable_edges:
cost, n1, n2 = heappop(usable_edges)
if n2 not in used:
used.add(n2)
mst.append((n1, n2, cost))
for e in conn[n2]:
if e[2] not in used:
heappush(usable_edges, e)
return mst
"""
Kruskals begins with forest and merge into a tree
"""
def mspKrushkals(self):
edges = [(c, u, v) for u in self.v.keys()
for v, c in self.v[u].getNeighbours().items()]
return self.krushkal(edges)
def pprint(self):
print("{ ", end=" ")
for u in self.v.keys():
print(u, end=" ")
print(": { ", end=" ")
for v in self.v[u].getNeighbours().keys():
print(v, ":", self.v[u].getNeighbours()[v], end=" ")
print(" }", end=" ")
print(" }\n")
def krushkal(self, edges):
"""
Return the minimum spanning tree of an undirected graph G.
G should be represented in such a way that iter(G) lists its
vertices, iter(G[u]) lists the neighbors of u, G[u][v] gives the
length of edge u,v, and G[u][v] should always equal G[v][u].
The tree is returned as a list of edges.
"""
# Kruskal's algorithm: sort edges by weight, and add them one at a time.
# We use Kruskal's algorithm, first because it is very simple to
# implement once UnionFind exists, and second, because the only slow
# part (the sort) is sped up by being built in to Python.
subtrees = UnionFind()
tree = []
for c, u, v in sorted(edges): # take from small weight to large in order
if subtrees[u] != subtrees[v]:
tree.append((u, v, c))
subtrees.union(u, v)
return tree
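# A minimal MST usage sketch (vertex names and weights are made up). prim()
# symmetrizes the edge list itself and krushkal() relies on union-find, so
# adding each edge in one direction is enough here:
#
#   g = Graph()
#   g.addEdge('a', 'b', 1)
#   g.addEdge('b', 'c', 2)
#   g.addEdge('a', 'c', 3)
#   print(g.mspPrims())      # [('a', 'b', 1), ('b', 'c', 2)]
#   print(g.mspKrushkals())  # same tree, as (u, v, cost) tuples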
def adj(self, missing=float('inf')): # makes the adj dict with all possible cells, similar to a matrix
"""
G= { 0 : { 1 : 6, 2 : 4 }
1 : { 2 : 3, 5 : 7 }
2 : { 3 : 9, 4 : 1 }
3 : { 4 : 1 }
4 : { 5 : 5, 6 : 2 }
5 : { }
6 : { }
}
adj(G) >>
{ 0: {0: 0, 1: 6, 2: 4, 3: inf, 4: inf, 5: inf, 6: inf},
1: {0: inf, 1: 0, 2: 3, 3: inf, 4: inf, 5: 7, 6: inf},
2: {0: inf, 1: inf, 2: 0, 3: 9, 4: 1, 5: inf, 6: inf},
3: {0: inf, 1: inf, 2: inf, 3: 0, 4: 1, 5: inf, 6: inf},
4: {0: inf, 1: inf, 2: inf, 3: inf, 4: 0, 5: 5, 6: 2},
5: {0: inf, 1: inf, 2: inf, 3: inf, 4: inf, 5: 0, 6: inf},
6: {0: inf, 1: inf, 2: inf, 3: inf, 4: inf, 5: inf, 6: 0}
}
"""
vertices = self.v.keys()
return {v1:
{v2: 0 if v1 == v2 else self.v[v1].getNeighbours().get(v2, missing) for v2 in vertices
}
for v1 in vertices
}
def floyds(self):
"""
All pair shortest Path
Idea:
for k in (0, n):
for i in (0, n):
for j in (0, n):
g[i][j] = min(graph[i][j], graph[i][k]+graph[k][j])
Find the shortest distance between every pair of vertices in the weighted Graph G
"""
d = self.adj() # prepare the adjacency list representation for the algorithm
vertices = self.v.keys()
for v2 in vertices:
d = {v1: {v3: min(d[v1][v3], d[v1][v2] + d[v2][v3])
for v3 in vertices}
for v1 in vertices}
return d
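# Illustration (tiny made-up graph): for edges a->b with weight 1 and b->c
# with weight 2, floyds() returns a dict of dicts where
# floyds()['a']['c'] == 3, and pairs with no connecting path keep float('inf').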
def reachability(self):
""" Idea: graph reachability floyd-warshall
for k in (0, n):
for i in (0, n):
for j in (0, n):
g[i][j] = graph[i][j] || (graph[i][k]&&graph[k][j]))
"""
vertices = self.v.keys()
d = self.adj(0.0)
for u in vertices:
for v in vertices:
if u == v or d[u][v]:
d[u][v] = True
else:
d[u][v] = False
for v2 in vertices:
d = {v1: {v3: d[v1][v3] or (d[v1][v2] and d[v2][v3]) # path for v1->v3 or v1->v2, v2-?v3
for v3 in vertices}
for v1 in vertices}
return d
def pathRecoveryFloydWarshall(self):
d = self.adj() # missing edges default to float('inf')
vertices = self.v.keys()
parentMap = copy.deepcopy(d)
for v1 in vertices:
for v2 in vertices:
if (v1 == v2) or d[v1][v2] == float('inf'):
parentMap[v1][v2] = -1
else:
parentMap[v1][v2] = v1
for i in vertices:
for j in vertices:
for k in vertices:
temp = d[i][k] + d[k][j]
if temp < d[i][j]:
d[i][j] = temp
parentMap[i][j] = parentMap[k][j]
return parentMap
def getFloydPath(self, parentMap, u, v, path=[]):
"""
recursive procedure to get the path from parentMap matrix
"""
path.append(v)
if u != v and v != -1:
self.getFloydPath(parentMap, u, parentMap[u][v], path)
# from active recipes - handy thoughts to think about heap for this
# algorithm
def dijkstra(self, start, end=None):
"""
Find shortest paths from the start vertex to all
vertices nearer than or equal to the end.
The input graph G is assumed to have the following
representation: A vertex can be any object that can
be used as an index into a dictionary. G is a
dictionary, indexed by vertices. For any vertex v,
G[v] is itself a dictionary, indexed by the neighbors
of v. For any edge v->w, G[v][w] is the length of
the edge. This is related to the representation in
<http://www.python.org/doc/essays/graphs.html>
Of course, G and G[v] need not be Python dict objects;
they can be any other object that obeys dict protocol,
for instance a wrapper in which vertices are URLs
and a call to G[v] loads the web page and finds its links.
The output is a pair (D,P) where D[v] is the distance
from start to v and P[v] is the predecessor of v along
the shortest path from s to v.
Dijkstra's algorithm is only guaranteed to work correctly
when all edge lengths are positive. This code does not
verify this property for all edges (only the edges seen
before the end vertex is reached), but will correctly
compute shortest paths even for some graphs with negative
edges, and will raise an exception if it discovers that
a negative edge has caused it to make a mistake.
Example graph from Introduction to Algorithms (1st edition), page 528:
G = { 's':{'u':10, 'x':5},
'u':{'v':1, 'x':2},
'v':{'y':4},
'x':{'u':3, 'v':9, 'y':2},
'y':{'s':7, 'v':6}
}
"""
G = self.adj()
D = {} # dictionary of final distances
P = {} # dictionary of predecessors
Q = priorityDictionary() # est.dist. of non-final vert.
Q[start] = 0
for v in Q:
D[v] = Q[v]
if v == end:
break
for w in G[v]:
vwLength = D[v] + G[v][w]
if w in D:
if vwLength < D[w]:
raise ValueError(
"Dijkstra: found better path to already-final vertex")
elif w not in Q or vwLength < Q[w]:
Q[w] = vwLength
P[w] = v
return D, P
def shortestPathDijkstra(self, start, end):
"""
Find a single shortest path from the given start vertex
to the given end vertex.
The input has the same conventions as Dijkstra().
The output is a list of the vertices in order along
the shortest path.
"""
D, P = self.dijkstra(start, end)
Path = []
while True:
Path.append(end)
if end == start:
break
end = P[end]
Path.reverse()
return Path
"""
smart snippet on the dijkstra alg:
def shortestPath(graph, start, end):
queue = [(0, start, [])]
seen = set()
while True:
(cost, v, path) = heapq.heappop(queue)
if v not in seen:
path = path + [v]
seen.add(v)
if v == end:
return cost, path
for (next, c) in graph[v].iteritems():
heapq.heappush(queue, (cost + c, next, path))
"""
def strongly_connected_components(self):
"""
Tarjan's Algorithm (named for its discoverer, Robert Tarjan) is a graph theory algorithm
for finding the strongly connected components of a graph.
Based on: http://en.wikipedia.org/wiki/Tarjan's_strongly_connected_components_algorithm
"""
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors of `node`
try:
successors = self.v[node].getNeighbours().keys()
except KeyError:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited; recurse on it
strongconnect(successor)
lowlinks[node] = min(lowlinks[node], lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node], index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node:
break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in self.v.keys():
if node not in lowlinks:
strongconnect(node)
return result
def computeFirstUsingSCC(self, initFirst):
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
first = {}
def computeFirst(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors of `node`
try:
successors = self.v[node].getNeighbours().keys()
except KeyError:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited; recurse on it
computeFirst(successor)
lowlinks[node] = min(lowlinks[node], lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node], index[successor])
first[node] |= set(
first[successor] - set(['epsilon'])).union(set(initFirst[node])) # (*union!*)
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
# FIRST[w] := FIRST[v]; (*distribute!*)
first[successor] = set(
first[node] - set(['epsilon'])).union(set(initFirst[successor])) # (*distribute!*)
connected_component.append(successor)
if successor == node:
break
component = tuple(connected_component)
# storing the result
result.append(component)
for v in initFirst:
first[v] = initFirst[v] # (*init!*)
# print "init First assignment: ", first
for node in self.v.keys():
if node not in lowlinks:
computeFirst(node)
return first
def computeFollowUsingSCC(self, FIRST, initFollow):
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
follow = {}
def computeFollow(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors of `node`
try:
successors = self.v[node].getNeighbours().keys()
except KeyError:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited; recurse on it
computeFollow(successor)
lowlinks[node] = min(lowlinks[node], lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node], index[successor])
follow[node] |= follow[successor] # (*union!*)
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
follow[successor] = follow[node]
connected_component.append(successor)
if successor == node:
break
component = tuple(connected_component)
# storing the result
result.append(component)
for v in initFollow:
follow[v] = initFollow[v] # (*init!*)
for node in self.v.keys():
if node not in lowlinks:
computeFollow(node)
return follow
|
NicovincX2/Python-3.5
|
Algorithmique/Algorithme/Algorithme de la théorie des graphes/graph_lib.py
|
Python
|
gpl-3.0
| 28,701
|
[
"VisIt"
] |
4822b175bb2b8513f862c346ea5602e8dea083b5b74bd28f8a12a90d45691663
|
import ocl as cam
import camvtk
import time
import vtk
import datetime
if __name__ == "__main__":
myscreen = camvtk.VTKScreen()
myscreen.setAmbient(20,20,20)
myscreen.camera.SetPosition(20, 30, 50)
myscreen.camera.SetFocalPoint(5, 5, 0)
stl = camvtk.STLSurf(filename="stl/demo.stl")
#stl = camvtk.STLSurf(filename="demo2.stl")
print "STL surface read"
myscreen.addActor(stl)
stl.SetWireframe()
stl.SetColor((0.5,0.5,0.5))
#stl.SetFlat()
polydata = stl.src.GetOutput()
s= cam.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
print "STLSurf with ", s.size(), " triangles"
cutterDiameter=1
cutter = cam.CylCutter(cutterDiameter)
#print cutter.str()
#print cc.type
minx=0
dx=1
maxx=10
miny=0
dy=1
maxy=10
z=-10
bucketSize = 20
#pftp = cam.ParallelFinish()
#pftp.initCLPoints(minx,dx,maxx,miny,dy,maxy,z)
#pftp.initSTLSurf(s, bucketSize)
#pftp.dropCutterSTL1(cutter)
#print " made ", pftp.dcCalls, " drop-cutter calls"
#exit
pf2 = cam.ParallelFinish()
pf2.initCLPoints(minx,dx,maxx,miny,dy,maxy,z)
pf2.initSTLSurf(s, bucketSize)
pf2.dropCutterSTL2(cutter)
print " made ", pf2.dcCalls, " drop-cutter calls"
#clpoints = pftp.getCLPoints()
#ccpoints = pftp.getCCPoints()
clpoints = pf2.getCLPoints()
ccpoints = pf2.getCCPoints()
#CLPointGrid(minx,dx,maxx,miny,dy,maxy,z)
nv=0
nn=0
ne=0
nf=0
t = camvtk.Text()
t.SetPos( (myscreen.width-200, myscreen.height-30) )
myscreen.addActor( t)
t2 = camvtk.Text()
t2.SetPos( (myscreen.width-200, 30) )
myscreen.addActor( t2)
t3 = camvtk.Text()
t3.SetPos( (30, 30))
myscreen.addActor( t3)
t4 = camvtk.Text()
t4.SetPos( (30, myscreen.height-60))
myscreen.addActor( t4)
n=0
precl = cam.Point()
#w2if = vtk.vtkWindowToImageFilter()
#w2if.SetInput(myscreen.renWin)
#lwr = vtk.vtkPNGWriter()
#lwr.SetInput( w2if.GetOutput() )
#w2if.Modified()
#lwr.SetFileName("tux1.png")
for cl,cc in zip(clpoints,ccpoints):
camEye = myscreen.camera.GetFocalPoint()
camPos = myscreen.camera.GetPosition()
postext = "(%3.3f, %3.3f, %3.3f)" % (camPos[0], camPos[1], camPos[2])
eyetext = "(%3.3f, %3.3f, %3.3f)" % (camEye[0], camEye[1], camEye[2])
camtext = "Camera LookAt: "+eyetext+"\nCamera Pos: "+ postext
t4.SetText(camtext)
t.SetText(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
xtext = "%3.3f" % cl.x
ytext = "%3.3f" % cl.y
ztext = "%3.3f" % cl.z
t2.SetText( "X: " + xtext + "\nY: " + ytext + "\nZ: " + ztext )
if cc.type==cam.CCType.FACET:
nf+=1
col = (0,1,1)
elif cc.type == cam.CCType.VERTEX:
nv+=1
col = (0,1,0)
elif cc.type == cam.CCType.EDGE:
ne+=1
col = (1,0,0)
elif cc.type == cam.CCType.NONE:
#print "type=NONE!"
nn+=1
col = (1,1,1)
#if cl.isInside(t):
# col = (0, 1, 0)
#else:
# col = (1, 0, 0)
trilist = pf2.getTrianglesUnderCutter(cl, cutter)
#print "at cl=", cl.str() , " where len(trilist)=", len(trilist)
t3.SetText("Total Triangles: "+ str(s.size()) +"\nUnder Cutter (red): "+str(len(trilist)))
stl2 = camvtk.STLSurf(filename=None, triangleList=trilist, color=(1,0,0)) # a new surface with only triangles under cutter
stl2.SetWireframe()
#stl2.SetFlat()
myscreen.addActor(stl2)
trilist=[]
cutactor = camvtk.Cylinder(center=(cl.x,cl.y,cl.z),
radius=cutterDiameter/2,
height=20,
rotXYZ=(90,0,0),
color=(0.7,0,1))
myscreen.addActor( cutactor )
myscreen.addActor( camvtk.Sphere(center=(cl.x,cl.y,cl.z) ,radius=0.03, color=col) )
"""
if n==0:
precl = cl
else:
d = cl-precl
if (d.norm() < 90):
myscreen.addActor( camvtk.Line( p1=(precl.x, precl.y, precl.z), p2=(cl.x, cl.y, cl.z), color=(0,1,1) ) )
precl = cl
"""
n=n+1
#myscreen.addActor( camvtk.Point(center=(cl2.x,cl2.y,cl2.z+0.2) , color=(0.6,0.2,0.9)) )
#myscreen.addActor( camvtk.Point(center=(cc.x,cc.y,cc.z), color=col) )
#print cc.type
#myscreen.camera.Azimuth( 0.2 )
time.sleep(0.1)
myscreen.render()
#w2if.Modified()
#lwr.SetFileName("kdbig"+ ('%05d' % n)+".png")
#lwr.Write()
#raw_input("Press Enter to continue")
myscreen.removeActor(stl2)
myscreen.removeActor( cutactor )
print "none=",nn," vertex=",nv, " edge=",ne, " facet=",nf, " sum=", nn+nv+ne+nf
print len(clpoints), " cl points evaluated"
#lwr.Write()
for n in range(1,36):
t.SetText(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
myscreen.camera.Azimuth( 1 )
time.sleep(0.01)
myscreen.render()
#lwr.SetFileName("kd_frame"+ ('%03d' % n)+".png")
#w2if.Modified()
#lwr.Write()
myscreen.iren.Start()
raw_input("Press Enter to terminate")
|
tectronics/opencamlib
|
scripts/kdtree_movie2.py
|
Python
|
gpl-3.0
| 5,564
|
[
"VTK"
] |
302de45d11850d7f2db06a336038d76d2e0bc4fdcffb7e597d3ec6bd50eb2d9d
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: Re-enable once we add mypy annotations for the base container API
# type: ignore
"""
kubevirt driver with support for nodes (vms)
"""
import json
import time
import hashlib
from datetime import datetime
from libcloud.common.types import LibcloudError
from libcloud.common.kubernetes import KubernetesBasicAuthConnection
from libcloud.common.kubernetes import KubernetesDriverMixin
from libcloud.common.kubernetes import VALID_RESPONSE_CODES
from libcloud.compute.types import Provider, NodeState
from libcloud.compute.base import NodeDriver, NodeSize, Node
from libcloud.compute.base import NodeImage, NodeLocation, StorageVolume
__all__ = [
"KubeVirtNodeDriver"
]
ROOT_URL = '/api/v1/'
KUBEVIRT_URL = '/apis/kubevirt.io/v1alpha3/'
class KubeVirtNodeDriver(KubernetesDriverMixin, NodeDriver):
type = Provider.KUBEVIRT
name = "kubevirt"
website = 'https://www.kubevirt.io'
connectionCls = KubernetesBasicAuthConnection
NODE_STATE_MAP = {
'pending': NodeState.PENDING,
'running': NodeState.RUNNING,
'stopped': NodeState.STOPPED
}
def list_nodes(self, location=None):
namespaces = []
if location is not None:
namespaces.append(location.name)
else:
for ns in self.list_locations():
namespaces.append(ns.name)
dormant = []
live = []
for ns in namespaces:
req = KUBEVIRT_URL + 'namespaces/' + ns + \
"/virtualmachines"
result = self.connection.request(req)
if result.status != 200:
continue
result = result.object
for item in result['items']:
if not item['spec']['running']:
dormant.append(item)
else:
live.append(item)
vms = []
for vm in dormant:
vms.append(self._to_node(vm, is_stopped=True))
for vm in live:
vms.append(self._to_node(vm, is_stopped=False))
return vms
def get_node(self, id=None, name=None):
"get a vm by name or id"
if not id and not name:
raise ValueError("This method needs id or name to be specified")
nodes = self.list_nodes()
if id:
node_gen = filter(lambda x: x.id == id,
nodes)
if name:
node_gen = filter(lambda x: x.name == name,
nodes)
try:
return next(node_gen)
except StopIteration:
raise ValueError("Node does not exist")
def start_node(self, node):
# make sure it is stopped
if node.state is NodeState.RUNNING:
return True
name = node.name
namespace = node.extra['namespace']
req = KUBEVIRT_URL + 'namespaces/' + namespace +\
'/virtualmachines/' + name
data = {"spec": {"running": True}}
headers = {"Content-Type": "application/merge-patch+json"}
try:
result = self.connection.request(req, method="PATCH",
data=json.dumps(data),
headers=headers)
return result.status in VALID_RESPONSE_CODES
except Exception:
raise
def stop_node(self, node):
# check if running
if node.state is NodeState.STOPPED:
return True
name = node.name
namespace = node.extra['namespace']
req = KUBEVIRT_URL + 'namespaces/' + namespace + \
'/virtualmachines/' + name
headers = {"Content-Type": "application/merge-patch+json"}
data = {"spec": {"running": False}}
try:
result = self.connection.request(req, method="PATCH",
data=json.dumps(data),
headers=headers)
return result.status in VALID_RESPONSE_CODES
except Exception:
raise
def reboot_node(self, node):
"""
Rebooting a node.
"""
namespace = node.extra['namespace']
name = node.name
method = 'DELETE'
try:
result = self.connection.request(KUBEVIRT_URL + 'namespaces/' +
namespace +
'/virtualmachineinstances/' +
name,
method=method)
return result.status in VALID_RESPONSE_CODES
except Exception:
raise
def destroy_node(self, node):
"""
Terminating a VMI and deleting the VM resource backing it
"""
namespace = node.extra['namespace']
name = node.name
# find and delete services for this VM only
services = self.ex_list_services(namespace=namespace, node_name=name)
for service in services:
service_name = service['metadata']['name']
self.ex_delete_service(namespace=namespace,
service_name=service_name)
# stop the vmi
self.stop_node(node)
try:
result = self.connection.request(KUBEVIRT_URL +
'namespaces/' +
namespace +
'/virtualmachines/' + name,
method='DELETE')
return result.status in VALID_RESPONSE_CODES
except Exception:
raise
# only has container disk support atm with no persistence
def create_node(self, name, image, location=None, ex_memory=128, ex_cpu=1,
ex_disks=None, ex_network=None,
ex_termination_grace_period=0,
ports=None):
"""
Creating a VM with a containerDisk.
:param name: A name to give the VM. The VM will be identified by
this name and atm it cannot be changed after it is set.
:type name: ``str``
:param image: Either a libcloud NodeImage or a string.
In both cases it must point to a Docker image with an
embedded disk.
May be a URI like `kubevirt/cirros-registry-disk-demo`,
kubevirt will automatically pull it from
https://hub.docker.com/u/URI.
For more info visit:
https://kubevirt.io/user-guide/docs/latest/creating-virtual-machines/disks-and-volumes.html#containerdisk
:type image: `str`
:param location: The namespace where the VM will live.
(default is 'default')
:type location: ``str``
:param ex_memory: The RAM in MB to be allocated to the VM
:type ex_memory: ``int``
:param ex_cpu: The amount of CPU to be allocated, in milliCPUs,
i.e. 400 means 0.4 of a core, 1000 means 1 core
and 3000 means 3 cores.
:type ex_cpu: ``int``
:param ex_disks: A list containing disk dictionaries.
Each dictionary should have the
following optional keys:
-bus: can be "virtio", "sata", or "scsi"
-device: can be "lun" or "disk"
The following are required keys:
-disk_type: atm only "persistentVolumeClaim"
is supported
-name: The name of the disk configuration
-claim_name: the name of the
Persistent Volume Claim
If you wish a new Persistent Volume Claim can be
created by providing the following:
required:
-size: the desired size (implied in GB)
-storage_class_name: the name of the storage class to # NOQA
be used for the creation of the
Persistent Volume Claim.
Make sure it allows for
dynamic provisioning.
optional:
-access_mode: default is ReadWriteOnce
-volume_mode: default is `Filesystem`,
it can also be `Block`
:type ex_disks: `list` of `dict`. For each `dict` the types
for its keys are:
-bus: `str`
-device: `str`
-disk_type: `str`
-name: `str`
-claim_name: `str`
(for creating a claim:)
-size: `int`
-storage_class_name: `str`
-volume_mode: `str`
-access_mode: `str`
        :param ex_network: Only the pod type is supported, and in the
                           configuration masquerade or bridge are the
                           accepted values.
                           The parameter must be a tuple or list with
                           (network_type, interface, name)
        :type ex_network: `iterable` (tuple or list) [network_type, interface, name]
                          network_type: `str` | only "pod" is accepted at the moment
                          interface: `str` | "masquerade" or "bridge"
                          name: `str`
:param ports: A dictionary with keys: 'ports_tcp' and 'ports_udp'
'ports_tcp' value is a list of ints that indicate
the ports to be exposed with TCP protocol,
and 'ports_udp' is a list of ints that indicate
the ports to be exposed with UDP protocol.
        :type ports: `dict` with keys
                     'ports_tcp': `list` of `int`
                     'ports_udp': `list` of `int`
"""
        # all valid disk types; only containerDisk and persistentVolumeClaim
        # are implemented below, support for the rest is planned
DISK_TYPES = {'containerDisk', 'ephemeral', 'configMap', 'dataVolume',
'cloudInitNoCloud', 'persistentVolumeClaim', 'emptyDisk',
'cloudInitConfigDrive', 'hostDisk'}
if location is not None:
namespace = location.name
else:
namespace = 'default'
# vm template to be populated
vm = {
"apiVersion": "kubevirt.io/v1alpha3",
"kind": "VirtualMachine",
"metadata": {
"labels": {
"kubevirt.io/vm": name
},
"name": name
},
"spec": {
"running": False,
"template": {
"metadata": {
"labels": {
"kubevirt.io/vm": name
}
},
"spec": {
"domain": {
"devices": {
"disks": [],
"interfaces": [],
"networkInterfaceMultiqueue": False,
},
"machine": {
"type": ""
},
"resources": {
"requests": {},
"limits": {}
},
},
"networks": [],
"terminationGracePeriodSeconds": ex_termination_grace_period, # NOQA
"volumes": []
}
}
}
}
memory = str(ex_memory) + "Mi"
vm['spec']['template']['spec']['domain']['resources'][
'requests']['memory'] = memory
vm['spec']['template']['spec']['domain']['resources'][
'limits']['memory'] = memory
        # values below 10 are interpreted as whole cores, larger values
        # as milliCPUs (e.g. 400 becomes "400m")
        if ex_cpu < 10:
cpu = int(ex_cpu)
vm['spec']['template']['spec']['domain'][
'resources']['requests']['cpu'] = cpu
vm['spec']['template']['spec']['domain'][
'resources']['limits']['cpu'] = cpu
else:
cpu = str(ex_cpu) + "m"
vm['spec']['template']['spec']['domain']['resources'][
'requests']['cpu'] = cpu
vm['spec']['template']['spec']['domain']['resources'][
'limits']['cpu'] = cpu
i = 0
        # ex_disks may be None, in which case no extra disks are added
        for disk in ex_disks or []:
disk_type = disk.get('disk_type')
bus = disk.get('bus', 'virtio')
disk_name = disk.get('name', 'disk{}'.format(i))
i += 1
device = disk.get('device', 'disk')
            if disk_type not in DISK_TYPES:
                raise ValueError("The possible values for this "
                                 "parameter are: ", DISK_TYPES)
            if disk_type not in ('containerDisk', 'persistentVolumeClaim'):
                raise NotImplementedError("Only containerDisk and "
                                          "persistentVolumeClaim disks "
                                          "are supported at the moment")
# depending on disk_type, in the future,
# when more will be supported,
# additional elif should be added
if disk_type == "containerDisk":
try:
image = disk['image']
except KeyError:
raise KeyError('A container disk needs a '
'containerized image')
volumes_dict = {'containerDisk': {'image': image},
'name': disk_name}
if disk_type == "persistentVolumeClaim":
if 'claim_name' in disk:
claimName = disk['claim_name']
if claimName not in self.ex_list_persistent_volume_claims(
namespace=namespace
):
if ('size' not in disk or "storage_class_name"
not in disk):
msg = ("disk['size'] and "
"disk['storage_class_name'] "
"are both required to create "
"a new claim.")
raise KeyError(msg)
size = disk['size']
storage_class = disk['storage_class_name']
volume_mode = disk.get('volume_mode', 'Filesystem')
access_mode = disk.get('access_mode', 'ReadWriteOnce')
self.create_volume(size=size, name=claimName,
location=location,
ex_storage_class_name=storage_class,
ex_volume_mode=volume_mode,
ex_access_mode=access_mode)
                else:
                    msg = ("You must provide the claim_name of an existing "
                           "claim, or, if you want a new one to be created, "
                           "additionally provide the desired size and the "
                           "storage_class_name of a storage class that "
                           "allows dynamic provisioning, so a Persistent "
                           "Volume Claim can be created.")
                    raise KeyError(msg)
volumes_dict = {'persistentVolumeClaim': {
'claimName': claimName},
'name': disk_name}
disk_dict = {device: {'bus': bus}, 'name': disk_name}
vm['spec']['template']['spec']['domain'][
'devices']['disks'].append(disk_dict)
vm['spec']['template']['spec']['volumes'].append(volumes_dict)
        # add the boot disk from the image parameter as a containerDisk
if isinstance(image, NodeImage):
image = image.name
volumes_dict = {'containerDisk': {'image': image},
'name': 'boot-disk'}
disk_dict = {'disk': {'bus': 'virtio'}, 'name': 'boot-disk'}
vm['spec']['template']['spec']['domain'][
'devices']['disks'].append(disk_dict)
vm['spec']['template']['spec']['volumes'].append(volumes_dict)
# network
if ex_network:
interface = ex_network[1]
network_name = ex_network[2]
network_type = ex_network[0]
# add a default network
else:
interface = 'masquerade'
network_name = "netw1"
network_type = "pod"
network_dict = {network_type: {}, 'name': network_name}
interface_dict = {interface: {}, 'name': network_name}
ports = ports or {}
if ports.get('ports_tcp'):
ports_to_expose = []
for port in ports['ports_tcp']:
ports_to_expose.append(
{
'port': port,
'protocol': 'TCP'
}
)
interface_dict[interface]['ports'] = ports_to_expose
if ports.get('ports_udp'):
ports_to_expose = interface_dict[interface].get('ports', [])
for port in ports.get('ports_udp'):
ports_to_expose.append(
{
'port': port,
'protocol': 'UDP'
}
)
interface_dict[interface]['ports'] = ports_to_expose
vm['spec']['template']['spec'][
'networks'].append(network_dict)
vm['spec']['template']['spec']['domain']['devices'][
'interfaces'].append(interface_dict)
method = "POST"
data = json.dumps(vm)
req = KUBEVIRT_URL + "namespaces/" + namespace + "/virtualmachines/"
try:
self.connection.request(req, method=method, data=data)
except Exception:
raise
# check if new node is present
nodes = self.list_nodes()
for node in nodes:
if node.name == name:
self.start_node(node)
return node
def list_images(self, location=None):
"""
        If a location (namespace) is provided, only the images
        in that namespace are returned; otherwise all of them are.
"""
nodes = self.list_nodes()
if location:
namespace = location.name
            nodes = list(filter(lambda x: x.extra[
                'namespace'] == namespace, nodes))
name_set = set()
images = []
for node in nodes:
if node.image.name in name_set:
continue
name_set.add(node.image.name)
images.append(node.image)
return images
def list_locations(self):
"""
        Locations here map to Kubernetes namespaces.
"""
req = ROOT_URL + "namespaces"
namespaces = []
result = self.connection.request(req).object
for item in result['items']:
name = item['metadata']['name']
ID = item['metadata']['uid']
namespaces.append(NodeLocation(id=ID, name=name,
country='',
driver=self.connection.driver))
return namespaces
def list_sizes(self, location=None):
namespace = ''
if location:
namespace = location.name
nodes = self.list_nodes()
sizes = []
for node in nodes:
if not namespace:
sizes.append(node.size)
elif namespace == node.extra['namespace']:
sizes.append(node.size)
return sizes
def create_volume(self, size, name,
location=None,
ex_storage_class_name='',
ex_volume_mode='Filesystem',
ex_access_mode='ReadWriteOnce',
ex_dynamic=True,
ex_reclaim_policy='Recycle',
ex_volume_type=None,
ex_volume_params=None,
):
"""
:param size: The size in Gigabytes
:type size: `int`
        :param ex_volume_type: The type of volume to be created; it depends
                            on the underlying cloud where Kubernetes
                            is deployed. K8s supports the following types:
                            -gcePersistentDisk
                            -awsElasticBlockStore
                            -azureFile
                            -azureDisk
                            -csi
                            -fc (Fibre Channel)
                            -flexVolume
                            -flocker
                            -nfs
                            -iSCSI
                            -rbd (Ceph Block Device)
                            -cephFS
                            -cinder (OpenStack block storage)
                            -glusterfs
                            -vsphereVolume
                            -quobyte Volumes
                            -hostPath (single-node testing only; local storage is not supported in any way and WILL NOT WORK in a multi-node cluster) # NOQA
                            -portworx Volumes
                            -scaleIO Volumes
                            -storageOS
        :type ex_volume_type: `str`
        :param ex_volume_params: A dict with the key:value pairs that the
                              chosen ex_volume_type needs, in the form
                              {key1: value1, key2: value2, ...},
                              where key1, key2... are keys specific
                              to that volume type.
                              e.g. for the nfs volume type:
                              {server: "172.0.0.0", path: "/tmp"}
                              for the awsElasticBlockStore volume type:
                              {fsType: 'ext4', volumeID: "1234"}
        :type ex_volume_params: `dict`
"""
if ex_dynamic:
if location is None:
msg = "Please provide a namespace for the PVC."
raise ValueError(msg)
vol = self._create_volume_dynamic(
size=size,
name=name,
storage_class_name=ex_storage_class_name,
namespace=location.name,
volume_mode=ex_volume_mode,
access_mode=ex_access_mode)
return vol
else:
if ex_volume_type is None or ex_volume_params is None:
msg = ("An ex_volume_type must be provided from the list "
"of supported clouds, as well as the ex_volume_params "
"necessesary for your volume type choice.")
raise ValueError(msg)
pv = {
'apiVersion': 'v1',
'kind': 'PersistentVolume',
'metadata': {
'name': name,
},
'spec': {
'capacity': {
'storage': str(size) + 'Gi'
},
'volumeMode': ex_volume_mode,
'accessModes': [ex_access_mode],
'persistentVolumeReclaimPolicy': ex_reclaim_policy,
'storageClassName': ex_storage_class_name,
'mountOptions': [], # beta, to add in the future
ex_volume_type: ex_volume_params,
}
}
req = ROOT_URL + "persistentvolumes/"
method = 'POST'
data = json.dumps(pv)
try:
self.connection.request(req, method=method, data=data)
except Exception:
raise
# make sure that the volume was created
volumes = self.list_volumes()
for volume in volumes:
if volume.name == name:
return volume
def _create_volume_dynamic(self, size, name, storage_class_name,
volume_mode='Filesystem', namespace='default',
access_mode='ReadWriteOnce'):
"""
        Create a Persistent Volume Claim for storage; the desired
        size is therefore required in the arguments.
        This method assumes dynamic provisioning of the
        Persistent Volume, so the storage_class given should
        allow for it (by default it usually does), or unbound
        Persistent Volumes should already have been created by an admin.
        :param name: The name of the PVC, an arbitrary string of
                     lowercase letters
        :type name: `str`
        :param size: The amount of gigabytes desired
        :type size: `int`
        :param namespace: The namespace where the claim will live
        :type namespace: `str`
        :param storage_class_name: The storage class the PVC should
                                   be bound to
        :type storage_class_name: `str`
        :param access_mode: The desired access mode, e.g. "ReadOnlyMany"
        :type access_mode: `str`
"""
pvc = {
'apiVersion': 'v1',
'kind': 'PersistentVolumeClaim',
'metadata': {
'name': name
},
'spec': {
'accessModes': [],
'volumeMode': volume_mode,
'resources': {
'requests': {
'storage': ''
}
},
}
}
pvc['spec']['accessModes'].append(access_mode)
if storage_class_name is not None:
pvc['spec']['storageClassName'] = storage_class_name
else:
raise ValueError("The storage class name must be provided of a"
"storage class which allows for dynamic "
"provisioning")
pvc['spec']['resources']['requests']['storage'] = str(size) + 'Gi'
method = "POST"
req = ROOT_URL + "namespaces/" + namespace + "/persistentvolumeclaims"
data = json.dumps(pvc)
try:
result = self.connection.request(req, method=method, data=data)
except Exception:
raise
if result.object['status']['phase'] != "Bound":
for _ in range(3):
req = ROOT_URL + "namespaces/" + namespace + \
"/persistentvolumeclaims/" + name
try:
result = self.connection.request(req).object
except Exception:
raise
if result['status']['phase'] == "Bound":
break
time.sleep(3)
# check that the pv was created and bound
volumes = self.list_volumes()
for volume in volumes:
if volume.extra['pvc']['name'] == name:
return volume
def _bind_volume(self, volume, namespace='default'):
"""
        This method is for unbound volumes that were statically
        provisioned. It will bind them to a PVC so they can be used by
        a Kubernetes resource.
"""
if volume.extra['is_bound']:
return # volume already bound
storage_class = volume.extra['storage_class_name']
size = volume.size
name = volume.name + "-pvc"
volume_mode = volume.extra['volume_mode']
access_mode = volume.extra['access_modes'][0]
vol = self._create_volume_dynamic(size=size, name=name,
storage_class_name=storage_class,
volume_mode=volume_mode,
namespace=namespace,
access_mode=access_mode)
return vol
def destroy_volume(self, volume):
# first delete the pvc
method = 'DELETE'
if volume.extra['is_bound']:
pvc = volume.extra['pvc']['name']
namespace = volume.extra['pvc']['namespace']
req = ROOT_URL + "namespaces/" + namespace + \
"/persistentvolumeclaims/" + pvc
try:
result = self.connection.request(req, method=method)
except Exception:
raise
pv = volume.name
req = ROOT_URL + "persistentvolumes/" + pv
try:
result = self.connection.request(req, method=method)
return result.status
except Exception:
raise
def attach_volume(self, node, volume, device='disk',
ex_bus='virtio', ex_name=None):
"""
        Attach a bound volume to a node by adding it to the VM spec
        as a persistentVolumeClaim disk.
        :param device: "disk" or "lun"
        :param ex_bus: the disk bus, e.g. "virtio"
        :param ex_name: optional disk name; defaults to the claim name
"""
# volume must be bound to a claim
if not volume.extra['is_bound']:
volume = self._bind_volume(volume, node.extra['namespace'])
if volume is None:
raise LibcloudError("Selected Volume (PV) could not be bound "
"(to a PVC), please select another volume",
driver=self)
claimName = volume.extra['pvc']['name']
if ex_name is None:
name = claimName
else:
name = ex_name
namespace = volume.extra['pvc']['namespace']
# check if vm is stopped
self.stop_node(node)
# check if it is the same namespace
if node.extra['namespace'] != namespace:
msg = "The PVC and the VM must be in the same namespace"
raise ValueError(msg)
vm = node.name
req = KUBEVIRT_URL + 'namespaces/' + namespace + '/virtualmachines/'\
+ vm
disk_dict = {device: {'bus': ex_bus}, 'name': name}
volumes_dict = {'persistentVolumeClaim': {'claimName': claimName},
'name': name}
# Get all the volumes of the vm
try:
result = self.connection.request(req).object
except Exception:
raise
disks = result['spec']['template']['spec']['domain'][
'devices']['disks']
volumes = result['spec']['template']['spec']['volumes']
disks.append(disk_dict)
volumes.append(volumes_dict)
# now patch the new volumes and disks lists into the resource
headers = {"Content-Type": "application/merge-patch+json"}
data = {'spec': {
'template': {
'spec': {
'volumes': volumes,
'domain': {
'devices':
{'disks': disks}
}
}
}
}
}
try:
result = self.connection.request(req, method="PATCH",
data=json.dumps(data),
headers=headers)
if 'pvcs' in node.extra:
node.extra['pvcs'].append(claimName)
else:
node.extra['pvcs'] = [claimName]
            return result.status in VALID_RESPONSE_CODES
except Exception:
raise
def detach_volume(self, volume, ex_node):
"""
        Detaches a volume from a node. The node must be given, since a PVC
        can have more than one VMI pointing to it.
"""
# vmi must be stopped
self.stop_node(ex_node)
claimName = volume.extra['pvc']['name']
name = ex_node.name
namespace = ex_node.extra['namespace']
req = KUBEVIRT_URL + 'namespaces/' + namespace + '/virtualmachines/'\
+ name
headers = {"Content-Type": "application/merge-patch+json"}
# Get all the volumes of the vm
try:
result = self.connection.request(req).object
except Exception:
raise
disks = result['spec']['template']['spec']['domain'][
'devices']['disks']
volumes = result['spec']['template']['spec']['volumes']
        to_delete = None
        # use a separate loop variable so the volume argument
        # is not shadowed
        for vol in volumes:
            if 'persistentVolumeClaim' in vol:
                if vol['persistentVolumeClaim']['claimName'] == claimName:
                    to_delete = vol['name']
                    volumes.remove(vol)
                    break
if not to_delete:
msg = "The given volume is not attached to the given VM"
raise ValueError(msg)
for disk in disks:
if disk['name'] == to_delete:
disks.remove(disk)
break
# now patch the new volumes and disks lists into the resource
data = {'spec': {
'template': {
'spec': {
'volumes': volumes,
'domain': {
'devices':
{'disks': disks}
}
}
}
}
}
try:
result = self.connection.request(req, method="PATCH",
data=json.dumps(data),
headers=headers)
ex_node.extra['pvcs'].remove(claimName)
            return result.status in VALID_RESPONSE_CODES
except Exception:
raise
def ex_list_persistent_volume_claims(self, namespace="default"):
pvc_req = ROOT_URL + "namespaces/" + namespace + \
"/persistentvolumeclaims"
try:
result = self.connection.request(pvc_req).object
except Exception:
raise
pvcs = [item['metadata']['name'] for item in result['items']]
return pvcs
def ex_list_storage_classes(self):
# sc = storage class
sc_req = "/apis/storage.k8s.io/v1/storageclasses"
try:
result = self.connection.request(sc_req).object
except Exception:
raise
scs = [item['metadata']['name'] for item in result['items']]
return scs
def list_volumes(self):
"""
        Lists the cluster's persistent volumes that are in the
        Available or Bound phase.
"""
volumes = []
pv_rec = ROOT_URL + "/persistentvolumes/"
try:
result = self.connection.request(pv_rec).object
except Exception:
raise
for item in result['items']:
if item['status']['phase'] not in {'Available', 'Bound'}:
continue
ID = item['metadata']['uid']
            size = item['spec']['capacity']['storage']
            # capacity is assumed to be expressed in Gi
            size = int(size.rstrip('Gi'))
extra = {'pvc': {}}
extra['storage_class_name'] = item['spec']['storageClassName']
extra['is_bound'] = item['status']['phase'] == "Bound"
extra['access_modes'] = item['spec']['accessModes']
extra['volume_mode'] = item['spec']['volumeMode']
if extra['is_bound']:
extra['pvc']['name'] = item['spec']['claimRef']['name']
extra['pvc']['namespace'] = item['spec']['claimRef'][
'namespace']
extra['pvc']['uid'] = item['spec']['claimRef']['uid']
name = extra['pvc']['name']
else:
name = item['metadata']['name']
volume = StorageVolume(id=ID, name=name, size=size,
driver=self.connection.driver,
extra=extra)
volumes.append(volume)
return volumes
def _ex_connection_class_kwargs(self):
kwargs = {}
if hasattr(self, 'key_file'):
kwargs['key_file'] = self.key_file
if hasattr(self, 'cert_file'):
kwargs['cert_file'] = self.cert_file
return kwargs
def _to_node(self, vm, is_stopped=False):
"""
"""
ID = vm['metadata']['uid']
name = vm['metadata']['name']
driver = self.connection.driver
extra = {'namespace': vm['metadata']['namespace']}
extra['pvcs'] = []
memory = 0
if 'limits' in vm['spec']['template']['spec'][
'domain']['resources']:
if 'memory' in vm['spec']['template']['spec'][
'domain']['resources']['limits']:
memory = vm['spec']['template']['spec'][
'domain']['resources']['limits']['memory']
elif vm['spec']['template']['spec']['domain']['resources'].get(
'requests', None):
if vm['spec']['template']['spec'][
'domain']['resources']['requests'].get('memory', None):
memory = vm['spec']['template']['spec'][
'domain']['resources']['requests']['memory']
        if not isinstance(memory, int):
            # normalize the memory string to an integer number of MB;
            # 'Gi' must be checked before 'G' since 'G' is a substring
            if memory.endswith('Mi'):
                memory = int(memory.rstrip('Mi'))
            elif memory.endswith('M'):
                memory = int(memory.rstrip('M'))
            elif memory.endswith('Gi'):
                memory = int(memory.rstrip('Gi')) * 1024
            elif memory.endswith('G'):
                memory = int(memory.rstrip('G')) * 1000
cpu = 1
if vm['spec']['template']['spec'][
'domain']['resources'].get('limits', None):
if vm['spec']['template']['spec']['domain']['resources'][
'limits'].get('cpu', None):
cpu = vm['spec']['template']['spec'][
'domain']['resources']['limits']['cpu']
elif vm['spec']['template']['spec'][
'domain']['resources'].get('requests', None) and vm[
'spec']['template']['spec'][
'domain']['resources']['requests'].get('cpu', None):
cpu = vm['spec']['template']['spec'][
'domain']['resources']['requests']['cpu']
elif vm['spec']['template']['spec']['domain'].get('cpu', None):
cpu = vm['spec']['template']['spec']['domain'][
'cpu'].get('cores', 1)
        if not isinstance(cpu, int):
            # cpu may be expressed in milliCPUs, e.g. "500m"
            cpu = int(cpu.rstrip('m'))
extra_size = {'cpus': cpu}
size_name = "{} vCPUs, {}MB Ram".format(str(cpu), str(memory))
size_id = hashlib.md5(size_name.encode("utf-8")).hexdigest()
size = NodeSize(id=size_id, name=size_name, ram=memory,
disk=0, bandwidth=0, price=0,
driver=driver, extra=extra_size)
extra['memory'] = memory
extra['cpu'] = cpu
image_name = "undefined"
for volume in vm['spec']['template'][
'spec']['volumes']:
for k, v in volume.items():
                if isinstance(v, dict) and 'image' in v:
                    image_name = v['image']
image = NodeImage(image_name, image_name, driver)
if 'volumes' in vm['spec']['template']['spec']:
for volume in vm['spec']['template']['spec']['volumes']:
if 'persistentVolumeClaim' in volume:
extra['pvcs'].append(volume[
'persistentVolumeClaim']['claimName'])
port_forwards = []
services = self.ex_list_services(namespace=extra['namespace'],
node_name=name)
for service in services:
service_type = service['spec'].get('type')
for port_pair in service['spec']['ports']:
protocol = port_pair.get('protocol')
public_port = port_pair.get('port')
local_port = port_pair.get('targetPort')
try:
int(local_port)
except ValueError:
local_port = public_port
port_forwards.append({
'local_port': local_port,
'public_port': public_port,
'protocol': protocol,
'service_type': service_type
})
extra['port_forwards'] = port_forwards
if is_stopped:
state = NodeState.STOPPED
public_ips = None
private_ips = None
return Node(id=ID, name=name, state=state,
public_ips=public_ips,
private_ips=private_ips,
driver=driver, size=size,
image=image, extra=extra)
# getting image and image_ID from the container
req = ROOT_URL + "namespaces/" + extra['namespace'] + "/pods"
result = self.connection.request(req).object
pod = None
for pd in result['items']:
if 'metadata' in pd and 'ownerReferences' in pd['metadata']:
if pd['metadata']['ownerReferences'][0]['name'] == name:
pod = pd
if pod is None or 'containerStatuses' not in pod['status']:
state = NodeState.PENDING
public_ips = None
private_ips = None
return Node(id=ID, name=name, state=state,
public_ips=public_ips,
private_ips=private_ips,
driver=driver, size=size,
image=image, extra=extra)
extra['pod'] = {'name': pod['metadata']['name']}
for cont_status in pod['status']['containerStatuses']:
            # only 2 containers are present: the launcher and the
            # compute (VMI) container
if cont_status['name'] != 'compute':
image = NodeImage(ID, cont_status['image'],
driver)
state = NodeState.RUNNING if "running" in cont_status[
'state'] else NodeState.PENDING
public_ips = None
created_at = datetime.strptime(vm['metadata']['creationTimestamp'],
'%Y-%m-%dT%H:%M:%SZ')
if 'podIPs' in pod['status']:
private_ips = [ip['ip'] for ip in pod['status']['podIPs']]
else:
private_ips = []
return Node(id=ID, name=name, state=state,
public_ips=public_ips,
private_ips=private_ips,
driver=driver, size=size,
image=image, extra=extra,
created_at=created_at)
def ex_list_services(self, namespace='default', node_name=None,
service_name=None):
'''
        If node_name is given, only the services that concern
        that node are returned.
'''
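        # Illustrative call, not part of the driver; `conn` and the node
        # name 'testvm' are assumptions:
        #   conn.ex_list_services(namespace='default', node_name='testvm')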
params = None
if service_name is not None:
params = {'fieldSelector': 'metadata.name={}'.format(service_name)}
req = ROOT_URL + '/namespaces/{}/services'.format(namespace)
result = self.connection.request(req, params=params).object['items']
if node_name:
res = []
for service in result:
if node_name in service['metadata'].get('name', ""):
res.append(service)
return res
return result
def ex_create_service(self, node, ports, service_type="NodePort",
cluster_ip=None, load_balancer_ip=None,
override_existing_ports=False):
'''
        Each node has a single service of one type, on which the exposed
        ports are declared. If a service already exists, the ports declared
        here will be exposed alongside the existing ones; set
        override_existing_ports=True to delete the existing exposed ports
        and expose just the ones in the ports argument.
        :param node: the libcloud node for which the ports will be exposed
        :type node: libcloud `Node` class
        :param ports: a list of dictionaries with keys --> values:
                     'port' --> port to be exposed on the service
                     'target_port' --> port on the pod/node, optional;
                                       if empty it gets the same
                                       value as the 'port' value
                     'protocol' --> either 'UDP' or 'TCP', defaults to TCP
                     'name' --> a name for the service
                     If ports is an empty `list` and a service of this
                     type exists, then the service will be deleted.
        :type ports: `list` of `dict` where each `dict` has keys --> values:
                    'port' --> `int`
                    'target_port' --> `int`
                    'protocol' --> `str`
                    'name' --> `str`
        :param service_type: Valid types are ClusterIP, NodePort, LoadBalancer
        :type service_type: `str`
        :param cluster_ip: This can be set with an IP string value if you
                           want to manually set the service's internal IP.
                           If the value is not correct the method will fail;
                           this value can't be updated.
        :type cluster_ip: `str`
        :param load_balancer_ip: IP to request for a LoadBalancer service,
                                 if the cloud provider supports it
        :type load_balancer_ip: `str`
        :param override_existing_ports: Set to True if you want to delete the
                                        existing ports exposed by the service
                                        and keep just the ones declared in the
                                        present ports argument.
                                        By default it is False, and if the
                                        service already exists the ports will
                                        be added to the existing ones.
        :type override_existing_ports: `boolean`
'''
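        # Illustrative usage sketch, not part of the driver: `conn` and
        # `node` are assumptions.
        #
        #   node = conn.list_nodes()[0]
        #   conn.ex_create_service(node,
        #                          ports=[{'port': 22, 'protocol': 'TCP'}],
        #                          service_type='NodePort')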
# check if service exists first
namespace = node.extra.get('namespace', 'default')
service_name = 'service-{}-{}'.format(service_type.lower(), node.name)
service_list = self.ex_list_services(namespace=namespace,
service_name=service_name)
ports_to_expose = []
        # if ports has a falsy value like None or 0
if not ports:
ports = []
for port_group in ports:
if not port_group.get('target_port', None):
port_group['target_port'] = port_group['port']
if not port_group.get('name', ""):
port_group['name'] = 'port-{}'.format(port_group['port'])
ports_to_expose.append(
{'protocol': port_group.get('protocol', 'TCP'),
'port': int(port_group['port']),
'targetPort': int(port_group['target_port']),
'name': port_group['name']})
headers = None
data = None
if len(service_list) > 0:
if not ports:
result = True
for service in service_list:
service_name = service['metadata']['name']
result = result and self.ex_delete_service(
namespace=namespace,
service_name=service_name)
return result
else:
method = 'PATCH'
spec = {'ports': ports_to_expose}
                if not override_existing_ports:
                    existing_ports = service_list[0]['spec']['ports']
                    # list.extend() returns None, so build the combined
                    # list explicitly
                    spec = {'ports': existing_ports + ports_to_expose}
data = json.dumps({'spec': spec})
headers = {"Content-Type": "application/merge-patch+json"}
req = "{}/namespaces/{}/services/{}".format(
ROOT_URL, namespace, service_name
)
else:
if not ports:
raise ValueError("Argument ports is empty but there is no "
"service of {} type to be deleted".format(
service_type
))
method = 'POST'
service = {
'kind': 'Service',
'apiVersion': 'v1',
'metadata': {
'name': service_name,
'labels': {
'service': 'kubevirt.io'
}
},
'spec': {
'type': "",
'selector': {
"kubevirt.io/vm": node.name
},
'ports': []
},
}
service['spec']['ports'] = ports_to_expose
service['spec']['type'] = service_type
if cluster_ip is not None:
service['spec']['clusterIP'] = cluster_ip
if service_type == "LoadBalancer" and load_balancer_ip is not None:
service['spec']['loadBalancerIP'] = load_balancer_ip
data = json.dumps(service)
req = "{}/namespaces/{}/services".format(ROOT_URL, namespace)
try:
result = self.connection.request(req, method=method, data=data,
headers=headers)
except Exception:
raise
return result.status in VALID_RESPONSE_CODES
def ex_delete_service(self, namespace, service_name):
req = "{}/namespaces/{}/services/{}".format(ROOT_URL, namespace,
service_name)
headers = {"Content-Type": "application/yaml"}
try:
result = self.connection.request(req, method="DELETE",
headers=headers)
except Exception:
raise
return result.status in VALID_RESPONSE_CODES
|
Kami/libcloud
|
libcloud/compute/drivers/kubevirt.py
|
Python
|
apache-2.0
| 51,562
|
[
"VisIt"
] |
ac1c2d26baaac818c7aecafe76303071ac01dea299126a873e762822aeed2ffe
|
""" The Script class provides a simple way for users to specify an executable
or file to run (and is also a simple example of a workflow module).
"""
import os
import sys
import re
import stat
import shlex
import distutils.spawn # pylint: disable=no-name-in-module,no-member,import-error
from DIRAC import gLogger
from DIRAC.Core.Utilities.Subprocess import systemCall
from DIRAC.Workflow.Modules.ModuleBase import ModuleBase
class Script(ModuleBase):
""" Module for running executable
"""
#############################################################################
def __init__(self, log=None):
""" c'tor
"""
if log is not None:
self.log = log
else:
self.log = gLogger.getSubLogger('Script')
super(Script, self).__init__(self.log)
# Set defaults for all workflow parameters here
self.executable = ''
self.applicationName = ''
self.applicationVersion = ''
self.applicationLog = ''
self.arguments = ''
self.workflow_commons = None
self.step_commons = None
self.environment = None
self.callbackFunction = None
self.bufferLimit = 52428800
#############################################################################
def _resolveInputVariables(self):
""" By convention the workflow parameters are resolved here.
"""
super(Script, self)._resolveInputVariables()
super(Script, self)._resolveInputStep()
self.arguments = self.step_commons.get('arguments', self.arguments)
if not self.arguments.strip():
self.arguments = self.workflow_commons.get('arguments', self.arguments)
#############################################################################
def _initialize(self):
""" simple checks
"""
if not self.executable:
raise RuntimeError('No executable defined')
def _setCommand(self):
""" set the command that will be executed
"""
self.command = self.executable
if os.path.exists(os.path.basename(self.executable)):
self.executable = os.path.basename(self.executable)
      if not os.access('%s/%s' % (os.getcwd(), self.executable),
                       os.R_OK | os.X_OK):
        # doc in https://docs.python.org/2/library/stat.html#stat.S_IRWXU
        os.chmod('%s/%s' % (os.getcwd(), self.executable),
                 stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
self.command = '%s/%s' % (os.getcwd(), self.executable)
    elif re.search(r'\.py$', self.executable):
self.command = '%s %s' % (sys.executable, self.executable)
elif distutils.spawn.find_executable(self.executable): # pylint: disable=no-member
self.command = self.executable
if self.arguments:
self.command = '%s %s' % (self.command, self.arguments)
self.log.info('Command is: %s' % self.command)
def _executeCommand(self):
""" execute the self.command (uses systemCall)
"""
failed = False
outputDict = systemCall(timeout=0,
cmdSeq=shlex.split(self.command),
env=self.environment,
callbackFunction=self.callbackFunction,
bufferLimit=self.bufferLimit)
    if not outputDict['OK']:
      failed = True
      self.log.error('System call execution failed:', '\n' + str(outputDict['Message']))
    # systemCall may not return a 'Value' entry on failure; fall back
    # to a generic non-zero status in that case
    status, stdout, stderr = outputDict.get('Value', (1, '', ''))[0:3]
if status:
failed = True
self.log.error("Non-zero status while executing", "%s exited with status %s" % (self.command, status))
else:
self.log.info("%s execution completed with status %s" % (self.command, status))
self.log.verbose(stdout)
self.log.verbose(stderr)
if os.path.exists(self.applicationLog):
self.log.verbose('Removing existing %s' % self.applicationLog)
os.remove(self.applicationLog)
with open('%s/%s' % (os.getcwd(), self.applicationLog), 'w') as fopen:
fopen.write("<<<<<<<<<< %s Standard Output >>>>>>>>>>\n\n%s " % (self.executable, stdout))
if stderr:
fopen.write("<<<<<<<<<< %s Standard Error >>>>>>>>>>\n\n%s " % (self.executable, stderr))
self.log.info("Output written to %s, execution complete." % (self.applicationLog))
if failed:
self._exitWithError(status)
def _exitWithError(self, status):
""" Here because of possible extensions.
:param str status: the status of the application becomes the status of the workflow,
and may be interpreted by JobWrapper (e.g. for rescheduling cases)
"""
raise RuntimeError("'%s' Exited With Status %s" % (os.path.basename(self.executable).split('_')[0], status),
status)
def _finalize(self):
""" simply finalize
"""
applicationString = os.path.basename(self.executable).split('_')[0]
if self.applicationName and self.applicationName.lower() != 'unknown':
applicationString += ' (%s %s)' % (self.applicationName, self.applicationVersion)
status = "%s successful" % applicationString
super(Script, self)._finalize(status)
|
fstagni/DIRAC
|
Workflow/Modules/Script.py
|
Python
|
gpl-3.0
| 5,035
|
[
"DIRAC"
] |
42abbd62ab388a5c698124303a03a3c371c22b6cef772a5920b7ba804eb8ca09
|
# coding: utf-8
from __future__ import unicode_literals, division, print_function
import unittest
import random
from custodian.custodian import Job, ErrorHandler, Custodian, Validator
import os
import glob
import shutil
import subprocess
import yaml
"""
Created on Jun 1, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Jun 1, 2012"
class ExitCodeJob(Job):
def __init__(self, exitcode=0):
self.exitcode = exitcode
def setup(self):
pass
def run(self):
return subprocess.Popen('exit {}'.format(self.exitcode), shell=True)
def postprocess(self):
pass
class ExampleJob(Job):
def __init__(self, jobid, params=None):
if params is None:
params = {"initial": 0, "total": 0}
self.jobid = jobid
self.params = params
def setup(self):
self.params["initial"] = 0
self.params["total"] = 0
def run(self):
        sequence = [random.uniform(0, 1) for _ in range(100)]
self.params["total"] = self.params["initial"] + sum(sequence)
def postprocess(self):
pass
@property
def name(self):
return "ExampleJob{}".format(self.jobid)
class ExampleHandler(ErrorHandler):
def __init__(self, params):
self.params = params
def check(self):
return self.params["total"] < 50
def correct(self):
self.params["initial"] += 1
return {"errors": "total < 50", "actions": "increment by 1"}
class ExampleHandler2(ErrorHandler):
"""
    This handler always results in an error.
"""
def __init__(self, params):
self.params = params
self.has_error = False
def check(self):
return True
def correct(self):
self.has_error = True
return {"errors": "Unrecoverable error", "actions": None}
class ExampleHandler2b(ExampleHandler2):
"""
    This handler always results in an error, but no runtime error is raised.
"""
raises_runtime_error = False
def correct(self):
self.has_error = True
return {"errors": "Unrecoverable error", "actions": []}
class ExampleValidator1(Validator):
def check(self):
return False
class ExampleValidator2(Validator):
def check(self):
return True
class CustodianTest(unittest.TestCase):
def setUp(self):
self.cwd = os.getcwd()
os.chdir(os.path.abspath(os.path.dirname(__file__)))
def test_exitcode_error(self):
c = Custodian([], [ExitCodeJob(0)])
c.run()
c = Custodian([], [ExitCodeJob(1)])
self.assertRaises(RuntimeError, c.run)
c = Custodian([], [ExitCodeJob(1)],
terminate_on_nonzero_returncode=False)
c.run()
def test_run(self):
njobs = 100
params = {"initial": 0, "total": 0}
c = Custodian([ExampleHandler(params)],
[ExampleJob(i, params) for i in range(njobs)],
max_errors=njobs)
output = c.run()
self.assertEqual(len(output), njobs)
d = ExampleHandler(params).as_dict()
def test_run_interrupted(self):
njobs = 100
params = {'initial': 0, 'total': 0}
c = Custodian([ExampleHandler(params)],
[ExampleJob(i, params) for i in range(njobs)],
max_errors=njobs)
self.assertEqual(c.run_interrupted(), njobs)
self.assertEqual(c.run_interrupted(), njobs)
total_done = 1
while total_done < njobs:
c.jobs[njobs - 1].run()
if params['total'] > 50:
self.assertEqual(c.run_interrupted(), njobs - total_done)
total_done += 1
def test_unrecoverable(self):
njobs = 100
params = {"initial": 0, "total": 0}
h = ExampleHandler2(params)
c = Custodian([h],
[ExampleJob(i, params) for i in range(njobs)],
max_errors=njobs)
self.assertRaises(RuntimeError, c.run)
self.assertTrue(h.has_error)
h = ExampleHandler2b(params)
c = Custodian([h],
[ExampleJob(i, params) for i in range(njobs)],
max_errors=njobs)
c.run()
self.assertTrue(h.has_error)
def test_validators(self):
njobs = 100
params = {"initial": 0, "total": 0}
c = Custodian([ExampleHandler(params)],
[ExampleJob(i, params) for i in range(njobs)],
[ExampleValidator1()],
max_errors=njobs)
output = c.run()
self.assertEqual(len(output), njobs)
njobs = 100
params = {"initial": 0, "total": 0}
c = Custodian([ExampleHandler(params)],
[ExampleJob(i, params) for i in range(njobs)],
[ExampleValidator2()],
max_errors=njobs)
self.assertRaises(RuntimeError, c.run)
def test_from_spec(self):
spec = """jobs:
- jb: custodian.vasp.jobs.VaspJob
params:
final: False
suffix: .relax1
- jb: custodian.vasp.jobs.VaspJob
params:
final: True
suffix: .relax2
settings_override: {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}
jobs_common_params:
$vasp_cmd: ["mpirun", "-machinefile", "$PBS_NODEFILE", "-np", "24", "/opt/vasp/5.4.1/bin/vasp"]
handlers:
- hdlr: custodian.vasp.handlers.VaspErrorHandler
- hdlr: custodian.vasp.handlers.AliasingErrorHandler
- hdlr: custodian.vasp.handlers.MeshSymmetryErrorHandler
validators:
- vldr: custodian.vasp.validators.VasprunXMLValidator
custodian_params:
$scratch_dir: $TMPDIR"""
os.environ["TMPDIR"] = "/tmp/random"
os.environ["PBS_NODEFILE"] = "whatever"
        d = yaml.safe_load(spec)
c = Custodian.from_spec(d)
self.assertEqual(c.jobs[0].vasp_cmd[2], "whatever")
self.assertEqual(c.scratch_dir, "/tmp/random")
self.assertEqual(len(c.jobs), 2)
self.assertEqual(len(c.handlers), 3)
self.assertEqual(len(c.validators), 1)
def tearDown(self):
for f in glob.glob("custodian.*.tar.gz"):
os.remove(f)
try:
os.remove("custodian.json")
except OSError:
pass # Ignore if file cannot be found.
os.chdir(self.cwd)
class CustodianCheckpointTest(unittest.TestCase):
def setUp(self):
self.cwd = os.getcwd()
os.chdir(os.path.join(os.path.dirname(__file__), "..", "..",
"test_files", "checkpointing"))
        shutil.copy('backup.tar.gz', 'custodian.chk.3.tar.gz')
def test_checkpoint_loading(self):
njobs = 5
params = {"initial": 0, "total": 0}
c = Custodian([ExampleHandler(params)],
[ExampleJob(i, params) for i in range(njobs)],
[ExampleValidator1()],
max_errors=100, checkpoint=True)
self.assertEqual(len(c.run_log), 3)
self.assertEqual(len(c.run()), 5)
def tearDown(self):
os.remove("custodian.json")
os.chdir(self.cwd)
if __name__ == "__main__":
unittest.main()
|
xhqu1981/custodian
|
custodian/tests/test_custodian.py
|
Python
|
mit
| 7,328
|
[
"VASP"
] |
60fec0c6b64e714f46d6d56e7eca141df9b549797b70a3e966684b26cd7107ac
|