| text (stringlengths 12-1.05M) | repo_name (stringlengths 5-86) | path (stringlengths 4-191) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32 12-1.05M) | keyword (listlengths 1-23) | text_hash (stringlengths 64) |
|---|---|---|---|---|---|---|---|
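As a rough illustration of how a single record fills these columns, here is a hypothetical sketch (the dict layout and the name `row` are assumptions for illustration; the values are copied from the first record below):

```python
# Hypothetical shape of one record of this dump, using the column names above.
row = {
    "text": "...",  # full source-file contents (12 B to 1.05 MB per record)
    "repo_name": "jolyonb/edx-platform",
    "path": "common/test/acceptance/tests/studio/test_studio_split_test.py",
    "language": "Python",
    "license": "agpl-3.0",
    "size": 47048,
    "keyword": ["VisIt"],
    "text_hash": "30859828626782b961ccfdf418557632918ae60a51b2d0fc4fbd16d5dacb7027",
}
```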
"""
Acceptance tests for Studio related to the split_test module.
"""
import math
from bok_choy.promise import Promise
from selenium.webdriver.support.ui import Select
from base_studio_test import StudioCourseTest
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.studio.container import ContainerPage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage, CourseOutlineUnit
from common.test.acceptance.pages.studio.settings_group_configurations import GroupConfigurationsPage
from common.test.acceptance.pages.studio.utils import add_advanced_component
from common.test.acceptance.pages.studio.xblock_editor import XBlockEditorView
from common.test.acceptance.tests.helpers import create_user_partition_json
from test_studio_container import ContainerBase
from xmodule.partitions.partitions import Group
class SplitTestMixin(object):
"""
Mixin that contains useful methods for split_test module testing.
"""
def verify_groups(self, container, active_groups, inactive_groups, verify_missing_groups_not_present=True):
"""
Check that the groups appear and are correctly categorized as active or inactive.
Also checks that the "add missing groups" button/link is not present, unless a value of False is passed
for verify_missing_groups_not_present.
"""
def wait_for_xblocks_to_render():
# First xblock is the container for the page, subtract 1.
return (len(active_groups) + len(inactive_groups) == len(container.xblocks) - 1, len(active_groups))
Promise(wait_for_xblocks_to_render, "Number of xblocks on the page are incorrect").fulfill()
def check_xblock_names(expected_groups, actual_blocks):
self.assertEqual(len(expected_groups), len(actual_blocks))
for idx, expected in enumerate(expected_groups):
self.assertEqual(expected, actual_blocks[idx].name)
check_xblock_names(active_groups, container.active_xblocks)
check_xblock_names(inactive_groups, container.inactive_xblocks)
# Verify inactive xblocks appear after active xblocks
check_xblock_names(active_groups + inactive_groups, container.xblocks[1:])
if verify_missing_groups_not_present:
self.verify_add_missing_groups_button_not_present(container)
def verify_add_missing_groups_button_not_present(self, container):
"""
Checks that the "add missing groups" button/link is not present.
"""
def missing_groups_button_not_present():
button_present = container.missing_groups_button_present()
return (not button_present, not button_present)
Promise(missing_groups_button_not_present, "Add missing groups button should not be showing.").fulfill()
class SplitTest(ContainerBase, SplitTestMixin):
"""
Tests for creating and editing split test instances in Studio.
"""
__test__ = True
shard = 15
def setUp(self):
super(SplitTest, self).setUp()
# This call must be made after the course fixture has been installed.
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Configuration alpha,beta',
'first',
[Group("0", 'alpha'), Group("1", 'beta')]
),
create_user_partition_json(
1,
'Configuration 0,1,2',
'second',
[Group("0", 'Group 0'), Group("1", 'Group 1'), Group("2", 'Group 2')]
),
],
},
})
def populate_course_fixture(self, course_fixture):
course_fixture.add_advanced_settings(
{u"advanced_modules": {"value": ["split_test"]}}
)
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit')
)
)
)
def verify_add_missing_groups_button_not_present(self, container):
"""
Checks that the "add missing groups" button/link is not present.
"""
def missing_groups_button_not_present():
button_present = container.missing_groups_button_present()
return (not button_present, not button_present)
Promise(missing_groups_button_not_present, "Add missing groups button should not be showing.").fulfill()
def create_poorly_configured_split_instance(self):
"""
Creates a split test instance with a missing group and an inactive group.
Returns the container page.
"""
unit = self.go_to_unit_page()
add_advanced_component(unit, 0, 'split_test')
container = self.go_to_nested_container_page()
container.edit()
component_editor = XBlockEditorView(self.browser, container.locator)
component_editor.set_select_value_and_save('Group Configuration', 'Configuration alpha,beta')
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Configuration alpha,beta',
'first',
[Group("0", 'alpha'), Group("2", 'gamma')]
)
],
},
})
return self.go_to_nested_container_page()
def test_create_and_select_group_configuration(self):
"""
Tests creating a split test instance on the unit page, and then
assigning the group configuration.
"""
unit = self.go_to_unit_page()
add_advanced_component(unit, 0, 'split_test')
container = self.go_to_nested_container_page()
container.edit()
component_editor = XBlockEditorView(self.browser, container.locator)
component_editor.set_select_value_and_save('Group Configuration', 'Configuration alpha,beta')
self.verify_groups(container, ['alpha', 'beta'], [])
# Switch to the other group configuration. Must navigate again to the container page so
# that there is only a single "editor" on the page.
container = self.go_to_nested_container_page()
container.edit()
component_editor = XBlockEditorView(self.browser, container.locator)
component_editor.set_select_value_and_save('Group Configuration', 'Configuration 0,1,2')
self.verify_groups(container, ['Group 0', 'Group 1', 'Group 2'], ['Group ID 0', 'Group ID 1'])
# Reload the page to make sure the groups were persisted.
container = self.go_to_nested_container_page()
self.verify_groups(container, ['Group 0', 'Group 1', 'Group 2'], ['Group ID 0', 'Group ID 1'])
def test_delete_inactive_group(self):
"""
Test deleting an inactive group.
"""
container = self.create_poorly_configured_split_instance()
# The inactive group is the 2nd group, but it is the first one
# with a visible delete button, so use index 0
container.delete(0)
self.verify_groups(container, ['alpha'], [], verify_missing_groups_not_present=False)
class GroupConfigurationsNoSplitTest(StudioCourseTest):
"""
Tests how the Group Configuration page should look when the split_test module is not enabled.
"""
shard = 15
def setUp(self):
super(GroupConfigurationsNoSplitTest, self).setUp()
self.group_configurations_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def test_no_content_experiment_sections(self):
"""
Scenario: if split_test module is not present in Advanced Settings, content experiment
parts of the Group Configurations page are not shown.
Given I have a course with split_test module not enabled
Then when I go to the Group Configurations page there are no content experiment sections
"""
self.group_configurations_page.visit()
self.assertFalse(self.group_configurations_page.experiment_group_sections_present)
class GroupConfigurationsTest(ContainerBase, SplitTestMixin):
"""
Tests that Group Configurations page works correctly with previously
added configurations in Studio
"""
__test__ = True
shard = 15
def setUp(self):
super(GroupConfigurationsTest, self).setUp()
self.page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.outline_page = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def _assert_fields(self, config, cid=None, name='', description='', groups=None):
self.assertEqual(config.mode, 'details')
if name:
self.assertIn(name, config.name)
if cid:
self.assertEqual(cid, config.id)
else:
# Make sure that an id is present on the page and is not empty.
# We do not check the value of the id, because it is generated randomly
# and cannot be predicted.
self.assertTrue(config.id)
# Expand the configuration
config.toggle()
if description:
self.assertIn(description, config.description)
if groups:
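# Each group gets an equal share of the traffic, rounded down (e.g. 3 groups -> 33% each).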
allocation = int(math.floor(100 / len(groups)))
self.assertEqual(groups, [group.name for group in config.groups])
for group in config.groups:
self.assertEqual(str(allocation) + "%", group.allocation)
# Collapse the configuration
config.toggle()
def _add_split_test_to_vertical(self, number, group_configuration_metadata=None):
"""
Add split test to vertical #`number`.
If `group_configuration_metadata` is not None, use it to assign group configuration to split test.
"""
vertical = self.course_fixture.get_nested_xblocks(category="vertical")[number]
if group_configuration_metadata:
split_test = XBlockFixtureDesc('split_test', 'Test Content Experiment', metadata=group_configuration_metadata)
else:
split_test = XBlockFixtureDesc('split_test', 'Test Content Experiment')
self.course_fixture.create_xblock(vertical.locator, split_test)
return split_test
def populate_course_fixture(self, course_fixture):
course_fixture.add_advanced_settings({
u"advanced_modules": {"value": ["split_test"]},
})
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit')
)
)
)
def create_group_configuration_experiment(self, groups, associate_experiment):
"""
Creates a Group Configuration containing a list of groups.
Optionally creates a Content Experiment and associates it with that Group Configuration.
Returns the group configuration, or a (group configuration, experiment xblock) tuple.
"""
# Create a new group configuration
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(0, "Name", "Description.", groups),
],
},
})
if associate_experiment:
# Assign newly created group configuration to experiment
vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
split_test = XBlockFixtureDesc('split_test', 'Test Content Experiment', metadata={'user_partition_id': 0})
self.course_fixture.create_xblock(vertical.locator, split_test)
# Go to the Group Configuration Page
self.page.visit()
config = self.page.experiment_group_configurations[0]
if associate_experiment:
return config, split_test
return config
def publish_unit_in_lms_and_view(self, courseware_page, publish=True):
"""
Publish the first unit from the course outline page and view it in the LMS.
When publish is False, the unit is only viewed, not published.
"""
self.outline_page.visit()
self.outline_page.expand_all_subsections()
section = self.outline_page.section_at(0)
unit = section.subsection_at(0).unit_at(0).go_to()
# I publish and view in LMS and it is rendered correctly
if publish:
unit.publish()
unit.view_published_version()
self.assertEqual(len(self.browser.window_handles), 2)
courseware_page.wait_for_page()
def get_select_options(self, page, selector):
"""
Get the list of options of the dropdown specified by `selector` on the given page.
"""
select_element = page.q(css=selector)
self.assertTrue(select_element.is_present())
return [option.text for option in Select(select_element[0]).options]
def test_no_group_configurations_added(self):
"""
Scenario: Ensure that the message telling me to create a new group configuration is
shown when no group configurations have been added.
Given I have a course without group configurations
When I go to the Group Configuration page in Studio
Then I see "You have not created any group configurations yet." message
"""
self.page.visit()
self.assertTrue(self.page.experiment_group_sections_present)
self.assertTrue(self.page.no_experiment_groups_message_is_present)
self.assertIn(
"You have not created any group configurations yet.",
self.page.no_experiment_groups_message_text
)
def test_group_configurations_have_correct_data(self):
"""
Scenario: Ensure that the group configuration is rendered correctly in expanded/collapsed mode.
Given I have a course with 2 group configurations
And I go to the Group Configuration page in Studio
And I work with the first group configuration
And I see `name`, `id` are visible and have correct values
When I expand the first group configuration
Then I see `description` and `groups` appear and also have correct values
And I do the same checks for the second group configuration
"""
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Name of the Group Configuration',
'Description of the group configuration.',
[Group("0", 'Group 0'), Group("1", 'Group 1')]
),
create_user_partition_json(
1,
'Name of second Group Configuration',
'Second group configuration.',
[Group("0", 'Alpha'), Group("1", 'Beta'), Group("2", 'Gamma')]
),
],
},
})
self.page.visit()
config = self.page.experiment_group_configurations[0]
# No groups are shown when the configuration is collapsed
self.assertEqual(len(config.groups), 0)
self._assert_fields(
config,
cid="0", name="Name of the Group Configuration",
description="Description of the group configuration.",
groups=["Group 0", "Group 1"]
)
config = self.page.experiment_group_configurations[1]
self._assert_fields(
config,
name="Name of second Group Configuration",
description="Second group configuration.",
groups=["Alpha", "Beta", "Gamma"]
)
def test_can_create_and_edit_group_configuration(self):
"""
Scenario: Ensure that the group configuration can be created and edited correctly.
Given I have a course without group configurations
When I click button 'Create new Group Configuration'
And I set new name and description, change name for the 2nd default group, add one new group
And I click button 'Create'
Then I see the new group configuration is added and has correct data
When I edit the group configuration
And I change the name and description, add new group, remove old one and change name for the Group A
And I click button 'Save'
Then I see the group configuration is saved successfully and has the new data
"""
self.page.visit()
self.assertEqual(len(self.page.experiment_group_configurations), 0)
# Create new group configuration
self.page.create_experiment_group_configuration()
config = self.page.experiment_group_configurations[0]
config.name = "New Group Configuration Name"
config.description = "New Description of the group configuration."
config.groups[1].name = "New Group Name"
# Add new group
config.add_group() # Group C
# Save the configuration
self.assertEqual(config.get_text('.action-primary'), "Create")
self.assertFalse(config.delete_button_is_present)
config.save()
self._assert_fields(
config,
name="New Group Configuration Name",
description="New Description of the group configuration.",
groups=["Group A", "New Group Name", "Group C"]
)
# Edit the group configuration
config.edit()
# Update fields
self.assertTrue(config.id)
config.name = "Second Group Configuration Name"
config.description = "Second Description of the group configuration."
self.assertEqual(config.get_text('.action-primary'), "Save")
# Add new group
config.add_group() # Group D
# Remove group with name "New Group Name"
config.groups[1].remove()
# Rename Group A
config.groups[0].name = "First Group"
# Save the configuration
config.save()
self._assert_fields(
config,
name="Second Group Configuration Name",
description="Second Description of the group configuration.",
groups=["First Group", "Group C", "Group D"]
)
def test_focus_management_in_experiment_group_inputs(self):
"""
Scenario: Ensure that selecting the focus inputs in the groups list
sets the .is-focused class on the fieldset
Given I have a course with experiment group configurations
When I click the name of the first group
Then the fieldset wrapping the group names should get the class .is-focused
When I click away from the first group
Then the fieldset should not have class .is-focused anymore
"""
self.page.visit()
self.page.create_experiment_group_configuration()
config = self.page.experiment_group_configurations[0]
group_a = config.groups[0]
# Assert the fieldset doesn't have .is-focused class
self.assertFalse(self.page.q(css="fieldset.groups-fields.is-focused").visible)
# Click on the Group A input field
self.page.q(css=group_a.prefix).click()
# Assert the fieldset has .is-focused class applied
self.assertTrue(self.page.q(css="fieldset.groups-fields.is-focused").visible)
# Click away
self.page.q(css=".page-header").click()
# Assert the fieldset doesn't have .is-focused class
self.assertFalse(self.page.q(css="fieldset.groups-fields.is-focused").visible)
def test_use_group_configuration(self):
"""
Scenario: Ensure that the group configuration can be used by the split_test module correctly
Given I have a course without group configurations
When I create new group configuration
And I set new name and add a new group, save the group configuration
And I go to the unit page in Studio
And I add new advanced module "Content Experiment"
When I assign created group configuration to the module
Then I see the module has correct groups
"""
self.page.visit()
# Create new group configuration
self.page.create_experiment_group_configuration()
config = self.page.experiment_group_configurations[0]
config.name = "New Group Configuration Name"
# Add new group
config.add_group()
config.groups[2].name = "New group"
# Save the configuration
config.save()
split_test = self._add_split_test_to_vertical(number=0)
container = ContainerPage(self.browser, split_test.locator)
container.visit()
container.edit()
component_editor = XBlockEditorView(self.browser, container.locator)
component_editor.set_select_value_and_save('Group Configuration', 'New Group Configuration Name')
self.verify_groups(container, ['Group A', 'Group B', 'New group'], [])
def test_container_page_active_verticals_names_are_synced(self):
"""
Scenario: Ensure that the Content Experiment displays synced vertical names and correct groups.
Given I have a course with group configuration
And I go to the Group Configuration page in Studio
And I edit the name of the group configuration, add new group and remove old one
And I change the name for the group "New group" to "Second Group"
And I go to the Container page in Studio
And I edit the Content Experiment
Then I see the group configuration name is changed in `Group Configuration` dropdown
And the group configuration name is changed on container page
And I see the module has 2 active groups and one inactive
And I see "Add missing groups" link exists
When I click on "Add missing groups" link
Then I see the module has 3 active groups and one inactive
"""
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Name of the Group Configuration',
'Description of the group configuration.',
[Group("0", 'Group A'), Group("1", 'Group B'), Group("2", 'Group C')]
),
],
},
})
# Add split test to vertical and assign newly created group configuration to it
split_test = self._add_split_test_to_vertical(number=0, group_configuration_metadata={'user_partition_id': 0})
self.page.visit()
config = self.page.experiment_group_configurations[0]
config.edit()
config.name = "Second Group Configuration Name"
# `Group C` -> `Second Group`
config.groups[2].name = "Second Group"
# Add new group
config.add_group() # Group D
# Remove Group A
config.groups[0].remove()
# Save the configuration
config.save()
container = ContainerPage(self.browser, split_test.locator)
container.visit()
container.edit()
component_editor = XBlockEditorView(self.browser, container.locator)
self.assertEqual(
"Second Group Configuration Name",
component_editor.get_selected_option_text('Group Configuration')
)
component_editor.cancel()
self.assertIn(
"Second Group Configuration Name",
container.get_xblock_information_message()
)
self.verify_groups(
container, ['Group B', 'Second Group'], ['Group ID 0'],
verify_missing_groups_not_present=False
)
# Click the add button and verify that the groups were added on the page
container.add_missing_groups()
self.verify_groups(container, ['Group B', 'Second Group', 'Group D'], ['Group ID 0'])
def test_can_cancel_creation_of_group_configuration(self):
"""
Scenario: Ensure that creation of the group configuration can be canceled correctly.
Given I have a course without group configurations
When I click button 'Create new Group Configuration'
And I set new name and description, add 1 additional group
And I click button 'Cancel'
Then I see that there are no new group configurations in the course
"""
self.page.visit()
self.assertEqual(len(self.page.experiment_group_configurations), 0)
# Create new group configuration
self.page.create_experiment_group_configuration()
config = self.page.experiment_group_configurations[0]
config.name = "Name of the Group Configuration"
config.description = "Description of the group configuration."
# Add new group
config.add_group() # Group C
# Cancel the configuration
config.cancel()
self.assertEqual(len(self.page.experiment_group_configurations), 0)
def test_can_cancel_editing_of_group_configuration(self):
"""
Scenario: Ensure that editing of the group configuration can be canceled correctly.
Given I have a course with group configuration
When I go to the edit mode of the group configuration
And I set new name and description, add 2 additional groups
And I click button 'Cancel'
Then I see that new changes were discarded
"""
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Name of the Group Configuration',
'Description of the group configuration.',
[Group("0", 'Group 0'), Group("1", 'Group 1')]
),
create_user_partition_json(
1,
'Name of second Group Configuration',
'Second group configuration.',
[Group("0", 'Alpha'), Group("1", 'Beta'), Group("2", 'Gamma')]
),
],
},
})
self.page.visit()
config = self.page.experiment_group_configurations[0]
config.name = "New Group Configuration Name"
config.description = "New Description of the group configuration."
# Add 2 new groups
config.add_group() # Group C
config.add_group() # Group D
# Cancel the configuration
config.cancel()
self._assert_fields(
config,
name="Name of the Group Configuration",
description="Description of the group configuration.",
groups=["Group 0", "Group 1"]
)
def test_group_configuration_validation(self):
"""
Scenario: Ensure that validation of the group configuration works correctly.
Given I have a course without group configurations
And I create new group configuration with 2 default groups
When I set only description and try to save
Then I see error message "Group Configuration name is required."
When I set a name
And I delete the name of one of the groups and try to save
Then I see error message "All groups must have a name"
When I delete all the groups and try to save
Then I see error message "There must be at least one group."
When I add a group and try to save
Then I see the group configuration is saved successfully
"""
def try_to_save_and_verify_error_message(message):
# Try to save
config.save()
# Verify that configuration is still in editing mode
self.assertEqual(config.mode, 'edit')
# Verify error message
self.assertEqual(message, config.validation_message)
self.page.visit()
# Create new group configuration
self.page.create_experiment_group_configuration()
# Leave empty required field
config = self.page.experiment_group_configurations[0]
config.description = "Description of the group configuration."
try_to_save_and_verify_error_message("Group Configuration name is required.")
# Set required field
config.name = "Name of the Group Configuration"
config.groups[1].name = ''
try_to_save_and_verify_error_message("All groups must have a name.")
config.groups[0].remove()
config.groups[0].remove()
try_to_save_and_verify_error_message("There must be at least one group.")
config.add_group()
# Save the configuration
config.save()
self._assert_fields(
config,
name="Name of the Group Configuration",
description="Description of the group configuration.",
groups=["Group A"]
)
def test_group_configuration_empty_usage(self):
"""
Scenario: When group configuration is not used, ensure that the link to outline page works correctly.
Given I have a course without group configurations
And I create new group configuration with 2 default groups
Then I see a link to the outline page
When I click on the outline link
Then I see the outline page
"""
# Create a new group configuration
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
"Name",
"Description.",
[Group("0", "Group A"), Group("1", "Group B")]
),
],
},
})
# Go to the Group Configuration Page and click on outline anchor
self.page.visit()
config = self.page.experiment_group_configurations[0]
config.toggle()
config.click_outline_anchor()
# Waiting for the page load and verify that we've landed on course outline page
self.outline_page.wait_for_page()
def test_group_configuration_non_empty_usage(self):
"""
Scenario: When group configuration is used, ensure that the links to units using a group configuration work correctly.
Given I have a course without group configurations
And I create new group configuration with 2 default groups
And I create a unit and assign the newly created group configuration
And open the Group Configuration page
Then I see a link to the newly created unit
When I click on the unit link
Then I see correct unit page
"""
# Create a new group configuration
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
"Name",
"Description.",
[Group("0", "Group A"), Group("1", "Group B")]
),
],
},
})
# Assign newly created group configuration to unit
vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
self.course_fixture.create_xblock(
vertical.locator,
XBlockFixtureDesc('split_test', 'Test Content Experiment', metadata={'user_partition_id': 0})
)
unit = CourseOutlineUnit(self.browser, vertical.locator)
# Go to the Group Configuration Page and click unit anchor
self.page.visit()
config = self.page.experiment_group_configurations[0]
config.toggle()
usage = config.usages[0]
config.click_unit_anchor()
unit = ContainerPage(self.browser, vertical.locator)
# Waiting for the page load and verify that we've landed on the unit page
unit.wait_for_page()
self.assertIn(unit.name, usage)
def test_can_delete_unused_group_configuration(self):
"""
Scenario: Ensure that the user can delete unused group configuration.
Given I have a course with 2 group configurations
And I go to the Group Configuration page
When I delete the Group Configuration with name "Configuration 1"
Then I see that there is one Group Configuration
When I edit the Group Configuration with name "Configuration 2"
And I delete the Group Configuration with name "Configuration 2"
Then I see that there are no Group Configurations
"""
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Configuration 1',
'Description of the group configuration.',
[Group("0", 'Group 0'), Group("1", 'Group 1')]
),
create_user_partition_json(
1,
'Configuration 2',
'Second group configuration.',
[Group("0", 'Alpha'), Group("1", 'Beta'), Group("2", 'Gamma')]
)
],
},
})
self.page.visit()
self.assertEqual(len(self.page.experiment_group_configurations), 2)
config = self.page.experiment_group_configurations[1]
# Delete first group configuration via detail view
config.delete()
self.assertEqual(len(self.page.experiment_group_configurations), 1)
config = self.page.experiment_group_configurations[0]
config.edit()
self.assertFalse(config.delete_button_is_disabled)
# Delete first group configuration via edit view
config.delete()
self.assertEqual(len(self.page.experiment_group_configurations), 0)
def test_cannot_delete_used_group_configuration(self):
"""
Scenario: Ensure that the user cannot delete a used group configuration.
Given I have a course with group configuration that is used in the Content Experiment
When I go to the Group Configuration page
Then I do not see delete button and I see a note about that
When I edit the Group Configuration
Then I do not see delete button and I see the note about that
"""
# Create a new group configuration
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
"Name",
"Description.",
[Group("0", "Group A"), Group("1", "Group B")]
)
],
},
})
vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
self.course_fixture.create_xblock(
vertical.locator,
XBlockFixtureDesc('split_test', 'Test Content Experiment', metadata={'user_partition_id': 0})
)
# Go to the Group Configuration Page and click unit anchor
self.page.visit()
config = self.page.experiment_group_configurations[0]
self.assertTrue(config.delete_button_is_disabled)
self.assertIn('Cannot delete when in use by an experiment', config.delete_note)
config.edit()
self.assertTrue(config.delete_button_is_disabled)
self.assertIn('Cannot delete when in use by an experiment', config.delete_note)
def test_easy_access_from_experiment(self):
"""
Scenario: When a Content Experiment uses a Group Configuration,
ensure that the link to that Group Configuration works correctly.
Given I have a course with two Group Configurations
And Content Experiment is assigned to one Group Configuration
Then I see a link to Group Configuration
When I click on the Group Configuration link
Then I see the Group Configurations page
And I see that appropriate Group Configuration is expanded.
"""
# Create a new group configuration
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
"Name",
"Description.",
[Group("0", "Group A"), Group("1", "Group B")]
),
create_user_partition_json(
1,
'Name of second Group Configuration',
'Second group configuration.',
[Group("0", 'Alpha'), Group("1", 'Beta'), Group("2", 'Gamma')]
),
],
},
})
# Assign newly created group configuration to unit
vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
self.course_fixture.create_xblock(
vertical.locator,
XBlockFixtureDesc('split_test', 'Test Content Experiment', metadata={'user_partition_id': 1})
)
unit = ContainerPage(self.browser, vertical.locator)
unit.visit()
experiment = unit.xblocks[0]
group_configuration_link_name = experiment.group_configuration_link_name
experiment.go_to_group_configuration_page()
self.page.wait_for_page()
# Appropriate Group Configuration is expanded.
self.assertFalse(self.page.experiment_group_configurations[0].is_expanded)
self.assertTrue(self.page.experiment_group_configurations[1].is_expanded)
self.assertEqual(
group_configuration_link_name,
self.page.experiment_group_configurations[1].name
)
def test_details_error_validation_message(self):
"""
Scenario: When a Content Experiment uses a Group Configuration, ensure
that an error validation message appears if necessary.
Given I have a course with a Group Configuration containing two Groups
And a Content Experiment is assigned to that Group Configuration
When I go to the Group Configuration Page
Then I do not see an error icon and message in the Group Configuration details view.
When I add a Group
Then I see an error icon and message in the Group Configuration details view
"""
# Create group configuration and associated experiment
config, _ = self.create_group_configuration_experiment([Group("0", "Group A"), Group("1", "Group B")], True)
# Display details view
config.toggle()
# Check that error icon and message are not present
self.assertFalse(config.details_error_icon_is_present)
self.assertFalse(config.details_message_is_present)
# Add a group
config.toggle()
config.edit()
config.add_group()
config.save()
# Display details view
config.toggle()
# Check that error icon and message are present
self.assertTrue(config.details_error_icon_is_present)
self.assertTrue(config.details_message_is_present)
self.assertIn(
"This content experiment has issues that affect content visibility.",
config.details_message_text
)
def test_details_warning_validation_message(self):
"""
Scenario: When a Content Experiment uses a Group Configuration, ensure
that a warning validation message appears if necessary.
Given I have a course with a Group Configuration containing three Groups
And a Content Experiment is assigned to that Group Configuration
When I go to the Group Configuration Page
Then I do not see a warning icon and message in the Group Configuration details view.
When I remove a Group
Then I see a warning icon and message in the Group Configuration details view
"""
# Create group configuration and associated experiment
config, _ = self.create_group_configuration_experiment([Group("0", "Group A"), Group("1", "Group B"), Group("2", "Group C")], True)
# Display details view
config.toggle()
# Check that warning icon and message are not present
self.assertFalse(config.details_warning_icon_is_present)
self.assertFalse(config.details_message_is_present)
# Remove a group
config.toggle()
config.edit()
config.groups[2].remove()
config.save()
# Display details view
config.toggle()
# Check that warning icon and message are present
self.assertTrue(config.details_warning_icon_is_present)
self.assertTrue(config.details_message_is_present)
self.assertIn(
"This content experiment has issues that affect content visibility.",
config.details_message_text
)
def test_edit_warning_message_empty_usage(self):
"""
Scenario: When a Group Configuration is not used, ensure that there is no warning icon or message.
Given I have a course with a Group Configuration containing two Groups
When I edit the Group Configuration
Then I do not see a warning icon and message
"""
# Create a group configuration with no associated experiment and display edit view
config = self.create_group_configuration_experiment([Group("0", "Group A"), Group("1", "Group B")], False)
config.edit()
# Check that warning icon and message are not present
self.assertFalse(config.edit_warning_icon_is_present)
self.assertFalse(config.edit_warning_message_is_present)
def test_edit_warning_message_non_empty_usage(self):
"""
Scenario: When a Group Configuration is used, ensure that there is a warning icon and message.
Given I have a course with a Group Configuration containing two Groups
When I edit the Group Configuration
Then I see a warning icon and message
"""
# Create a group configuration with an associated experiment and display edit view
config, _ = self.create_group_configuration_experiment([Group("0", "Group A"), Group("1", "Group B")], True)
config.edit()
# Check that warning icon and message are present
self.assertTrue(config.edit_warning_icon_is_present)
self.assertTrue(config.edit_warning_message_is_present)
self.assertIn(
"This configuration is currently used in content experiments. If you make changes to the groups, you may need to edit those experiments.",
config.edit_warning_message_text
)
def publish_unit_and_verify_groups_in_lms(self, courseware_page, group_names, publish=True):
"""
Publish the first unit and verify that the LMS courseware page shows the given groups.
"""
self.publish_unit_in_lms_and_view(courseware_page, publish)
self.assertEqual(u'split_test', courseware_page.xblock_component_type())
self.assertTrue(courseware_page.q(css=".split-test-select").is_present())
rendered_group_names = self.get_select_options(page=courseware_page, selector=".split-test-select")
self.assertListEqual(group_names, rendered_group_names)
def test_split_test_LMS_staff_view(self):
"""
Scenario: Ensure that the split test is correctly rendered in LMS staff mode, both as-is
and after inactive group removal.
Given I have a course with group configurations and a split test assigned to the first group configuration
Then I publish split test and view it in LMS in staff view
And it is rendered correctly
Then I go to group configuration and delete group
Then I publish split test and view it in LMS in staff view
And it is rendered correctly
Then I go to split test and delete inactive vertical
Then I publish unit and view unit in LMS in staff view
And it is rendered correctly
"""
config, split_test = self.create_group_configuration_experiment([Group("0", "Group A"), Group("1", "Group B"), Group("2", "Group C")], True)
container = ContainerPage(self.browser, split_test.locator)
# render in LMS correctly
courseware_page = CoursewarePage(self.browser, self.course_id)
self.publish_unit_and_verify_groups_in_lms(courseware_page, [u'Group A', u'Group B', u'Group C'])
# I go to group configuration and delete group
self.page.visit()
self.page.q(css='.group-toggle').first.click()
config.edit()
config.groups[2].remove()
config.save()
self.page.q(css='.group-toggle').first.click()
self._assert_fields(config, name="Name", description="Description", groups=["Group A", "Group B"])
self.browser.close()
self.browser.switch_to_window(self.browser.window_handles[0])
# render in LMS to see how inactive vertical is rendered
self.publish_unit_and_verify_groups_in_lms(
courseware_page,
[u'Group A', u'Group B', u'Group ID 2 (inactive)'],
publish=False
)
self.browser.close()
self.browser.switch_to_window(self.browser.window_handles[0])
# I go to split test and delete inactive vertical
container.visit()
container.delete(0)
# render in LMS again
self.publish_unit_and_verify_groups_in_lms(courseware_page, [u'Group A', u'Group B'])
| jolyonb/edx-platform | common/test/acceptance/tests/studio/test_studio_split_test.py | Python | agpl-3.0 | 47,048 | ["VisIt"] | 30859828626782b961ccfdf418557632918ae60a51b2d0fc4fbd16d5dacb7027 |
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import unittest as ut
import espressomd
import espressomd.analyze
import espressomd.lb
import numpy as np
@ut.skipIf((not espressomd.has_features(["LB_GPU"])) or
espressomd.has_features(["SHANCHEN"]), "Features not available, skipping test!")
class RemoveTotalMomentumTest(ut.TestCase):
def test(self):
dt = 0.01
skin = 0.1
agrid = 1.0
fric = 20.0
visc = 1.0
dens = 12.0
s = espressomd.System(box_l=[1.0, 1.0, 1.0])
s.seed = s.cell_system.get_state()['n_nodes'] * [1234]
s.box_l = [10, 10, 10]
s.time_step = dt
s.cell_system.skin = skin
for i in range(100):
r = s.box_l * np.random.random(3)
v = [1., 1., 1.] * np.random.random(3)
# Make sure that id gaps work correctly
s.part.add(id=2 * i, pos=r, v=v)
if espressomd.has_features(["MASS"]):
# Avoid masses too small for the time step
s.part[:].mass = 2. * (0.1 + np.random.random(100))
lbf = espressomd.lb.LBFluidGPU(
agrid=agrid, fric=fric, dens=dens, visc=visc, tau=dt)
s.actors.add(lbf)
s.integrator.run(300)
lbf.remove_total_momentum()
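# After removing the total momentum, the combined particle and fluid linear momentum should be (close to) zero.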
p = np.array(s.analysis.analyze_linear_momentum())
self.assertAlmostEqual(np.max(p), 0., places=3)
self.assertAlmostEqual(np.min(p), 0., places=3)
if __name__ == "__main__":
#print("Features: ", espressomd.features())
ut.main()
| hmenke/espresso | testsuite/python/lbgpu_remove_total_momentum.py | Python | gpl-3.0 | 2,247 | ["ESPResSo"] | 2a34fe51d993211acf77a5e20c477e9a1f55e950ac6bd3c9f8e23102d74d5f00 |
|
# This file is part of PyImgur.
# PyImgur is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# PyImgur is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with PyImgur. If not, see <http://www.gnu.org/licenses/>.
"""
PyImgur - The Simple Way of Using Imgur
PyImgur is a python wrapper of the popular image hosting and sharing website
imgur.com. It makes the process of writing applications that use Imgur faster,
easier and less frustrating by automatically handling a lot of stuff for you.
For instance you'll only need to use your client_id when you instantiate the
Imgur object and when changing authentication. For the REST API this value
needs to be sent with every request, but PyImgur handles this automatically.
Before using PyImgur, or the Imgur REST API in general, you'll need to register
your application here: https://api.imgur.com/oauth2/addclient
For more information on usage visit https://github.com/Damgaard/PyImgur
"""
from base64 import b64encode
import os.path
import re
import sys
PY3 = sys.version_info.major == 3
if PY3:
from urllib.parse import urlparse # pylint: disable=no-name-in-module,import-error
else:
from urlparse import urlparse
import requests # NOQA
from pyimgur import request # NOQA
__version__ = '0.5.3'
MASHAPE_BASE = "https://imgur-apiv3.p.mashape.com"
IMGUR_BASE = "https://api.imgur.com"
AUTHORIZE_URL = ("{}/oauth2/authorize?"
"client_id={}&response_type={}&state={}")
EXCHANGE_URL = "{}/oauth2/token"
REFRESH_URL = "{}/oauth2/token"
def _change_object(from_object, to_object):
from_object.__class__ = to_object.__class__
from_object.__dict__ = to_object.__dict__
from_object.__repr__ = to_object.__repr__
def _get_album_or_image(json, imgur):
"""Return a gallery image/album depending on what the json represent."""
if json['is_album']:
return Gallery_album(json, imgur, has_fetched=False)
return Gallery_image(json, imgur)
class Basic_object(object):
"""Contains basic functionality shared by a lot of PyImgur's classes."""
def __getattr__(self, attribute):
if not self._has_fetched:
self.refresh()
return getattr(self, attribute)
raise AttributeError("{0} instance has no attribute '{1}'".format(
type(self).__name__, attribute))
def __init__(self, json_dict, imgur, has_fetched=True):
self._has_fetched = has_fetched
self._imgur = imgur
self._populate(json_dict)
def __repr__(self):
return "<{0} {1}>".format(type(self).__name__, self.id)
@property
def _delete_or_id_hash(self):
if self._imgur.access_token:
return self.id
else:
return self.deletehash
def _populate(self, json_dict):
for key, value in json_dict.items():
setattr(self, key, value)
# TODO: ups will need to be likes, because that's what the webinterface
# is. But we also have "voted", which is the current user's vote on it.
# Update certain attributes for certain objects, to be linked to lazily
# created objects rather than string IDs or similar.
if isinstance(self, Album) or isinstance(self, Image):
if "favorite" in vars(self):
self.is_favorited = self.favorite
del self.favorite
if "nsfw" in vars(self):
self.is_nsfw = self.nsfw
del self.nsfw
if isinstance(self, Image):
if "animated" in vars(self):
self.is_animated = self.animated
del self.animated
if "link" in vars(self):
base, sep, ext = self.link.rpartition('.')
self.link_small_square = base + "s" + sep + ext
self.link_big_square = base + "b" + sep + ext
self.link_small_thumbnail = base + "t" + sep + ext
self.link_medium_thumbnail = base + "m" + sep + ext
self.link_large_thumbnail = base + "l" + sep + ext
self.link_huge_thumbnail = base + "h" + sep + ext
if isinstance(self, Album):
if "account_url" in vars(self):
self.author = User({'url': self.account_url}, self._imgur,
has_fetched=False)
del self.account_url
if "cover" in vars(self) and self.cover is not None: # pylint: disable=access-member-before-definition
self.cover = Image({'id': self.cover}, self._imgur,
has_fetched=False)
if "images" in vars(self):
self.images = [Image(img, self._imgur, has_fetched=False) for
img in self.images]
if "images_count" in vars(self):
del self.images_count
elif isinstance(self, Comment):
if "author" in vars(self):
self.author = User({'url': self.author}, self._imgur,
has_fetched=False)
# The problem with this naming is that children / parent are the normal
# terminology for tree structures such as this. But elsewhere the
# children are referred to as replies; for instance, a comment can
# be replied to, not procreated with. I've decided to use replies
# and parent_comment as a compromise, where both attributes should
# be individually obvious even if their connection may not be.
if "author_id" in vars(self):
# author_id is not used for anything, and can also be gotten
# with comment.author.id which fits with how the id of anything
# else is gotten. So having it here only complicates the API.
del self.author_id
if "children" in vars(self):
self.replies = [Comment(com, self._imgur) for com in
self.children]
del self.children
if "comment" in vars(self):
self.text = self.comment
del self.comment
if "deleted" in vars(self):
self.is_deleted = self.deleted
del self.deleted
if "image_id" in vars(self):
self.permalink = ("http://imgur.com/gallery/{0}/comment/"
"{1}".format(self.image_id, self.id))
self.image = Image({'id': self.image_id}, self._imgur,
has_fetched=False)
del self.image_id
if "parent_id" in vars(self):
if self.parent_id == 0: # Top level comment
self.parent = None
else:
self.parent = Comment({'id': self.parent_id}, self._imgur,
has_fetched=False)
del self.parent_id
elif isinstance(self, Gallery_image):
if "account_url" in vars(self):
self.author = User({'url': self.account_url}, self._imgur,
has_fetched=False)
del self.account_url
elif isinstance(self, Message):
# Should be gotten via self.author.id
if "account_id" in vars(self):
del self.account_id
if "from" in vars(self):
# Use getattr and delattr here as doing self.from gives a
# syntax error because "from" is a protected keyword in Python.
self.author = User({'url': getattr(self, "from")}, self._imgur,
has_fetched=False)
delattr(self, "from")
if "parent_id" in vars(self):
self.first_message = Message({'id': self.parent_id},
self._imgur, has_fetched=False)
del self.parent_id
elif isinstance(self, Notification):
# Cannot be used for any calls.
# Also, since Notifications can only be returned for the
# authenticated user, the id can be found with get_user('me').id
if "account_id" in vars(self):
del self.account_id
if "viewed" in vars(self):
self.is_viewed = self.viewed
del self.viewed
if "content" in vars(self):
if "subject" in self.content: # pylint: disable=access-member-before-definition
self.content = Message(self.content, self._imgur, True)
elif "caption" in self.content:
self.content = Comment(self.content, self._imgur, True) # pylint: disable=redefined-variable-type
elif isinstance(self, User) and 'url' in vars(self):
self.name = self.url
del self.url
# NOTE: In the API an Image's popularity is noted as its score, but
# referred to on the web end as points. A Comment has the points
# attribute which is simply likes - dislikes. One might think this
# is the same thing for images, but comparing the two numbers shows
# that they are different. Usually by a small margin, but
# sometimes a very substantial margin. I'm not sure how score is
# calculated or of its relationship to likes and dislikes.
# NOTE: Image has the attribute "nsfw" which doesn't exist in
# documentation.
def refresh(self):
"""
Refresh this objects attributes to the newest values.
Attributes that weren't added to the object before, due to lazy
loading, will be added by calling refresh.
"""
resp = self._imgur._send_request(self._INFO_URL)
self._populate(resp)
self._has_fetched = True
# NOTE: What if the object has been deleted in the meantime? That might
# give a pretty cryptic error.
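# Lazy-loading sketch (illustrative; `im` is an Imgur instance and the album id is a
# placeholder). Objects created with has_fetched=False only hold the attributes they
# were given; the first access to a missing attribute goes through __getattr__,
# which calls refresh():
#
#     album = pyimgur.Album({'id': 'abc123'}, im, has_fetched=False)
#     album.views        # triggers refresh() -> fetches _INFO_URL, then returns the value
#     album.refresh()    # can also be called explicitly to re-fetch the newest values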
class Album(Basic_object):
"""
An album is a collection of images.
:ivar author: The user that authored the album. None if anonymous.
:ivar cover: The album's cover image.
:ivar datetime: Time inserted into the gallery, epoch time.
:ivar deletehash: For anonymous uploads, this is used to delete the album.
:ivar description: A short description of the album.
:ivar id: The ID for the album.
:ivar images: A list of the images in this album. Only set at instantiation
if created with Imgur.get_album. But even if it isn't set, then you can
still access the attribute. This will make PyImgur fetch the newest
version of all attributes for this class, including images. So it will
work as though images had been set all along.
:ivar is_favorited: Has the logged in user favorited this album?
:ivar is_nsfw: Is the album Not Safe For Work (contains gore/porn)?
:ivar layout: The view layout of the album.
:ivar link: The URL link to the album.
:ivar public: The privacy level of the album; you can only view public
albums if not logged in as the album owner.
:ivar section: ??? - No info in Imgur documentation.
:ivar title: The album's title
:ivar views: Total number of views the album has received.
"""
def __init__(self, json_dict, imgur, has_fetched=True):
self._INFO_URL = (imgur._base_url + "/3/album/"
"{0}".format(json_dict['id']))
self.deletehash = None
super(Album, self).__init__(json_dict, imgur, has_fetched)
def add_images(self, images):
"""
Add images to the album.
:param images: A list of the images we want to add to the album. Can be
Image objects, ids or a combination of the two. Images that you
cannot add (non-existing or not owned by you) will not cause
exceptions, but fail silently.
"""
url = self._imgur._base_url + "/3/album/{0}/add".format(self.id)
params = {'ids': images}
return self._imgur._send_request(url, needs_auth=True, params=params,
method="POST")
def delete(self):
"""Delete this album."""
url = (self._imgur._base_url + "/3/album/"
"{0}".format(self._delete_or_id_hash))
return self._imgur._send_request(url, method="DELETE")
def favorite(self):
"""
Favorite the album.
Favoriting an already favorited album will unfavor it.
"""
url = self._imgur._base_url + "/3/album/{0}/favorite".format(self.id)
return self._imgur._send_request(url, needs_auth=True, method="POST")
def remove_images(self, images):
"""
Remove images from the album.
:param images: A list of the images we want to remove from the album.
Can be Image objects, ids or a combination of the two. Images that
you cannot remove (non-existing, not owned by you or not part of
album) will not cause exceptions, but fail silently.
"""
url = (self._imgur._base_url + "/3/album/{0}/"
"remove_images".format(self._delete_or_id_hash))
# NOTE: Returns True and everything seem to be as it should in testing.
# Seems most likely to be upstream bug.
params = {'ids': images}
return self._imgur._send_request(url, params=params, method="DELETE")
def set_images(self, images):
"""
Set the images in this album.
:param images: A list of the images we want the album to contain.
Can be Image objects, ids or a combination of the two. Images that
you cannot set (non-existing or not owned by you) will not cause
exceptions, but fail silently.
"""
url = (self._imgur._base_url + "/3/album/"
"{0}/".format(self._delete_or_id_hash))
params = {'ids': images}
return self._imgur._send_request(url, needs_auth=True, params=params,
method="POST")
def submit_to_gallery(self, title, bypass_terms=False):
"""
Add this to the gallery.
Require that the authenticated user has accepted gallery terms and
verified their email.
:param title: The title of the new gallery item.
:param bypass_terms: If the user has not accepted Imgur's terms yet,
this method will return an error. Set this to True to by-pass the
terms.
"""
url = self._imgur._base_url + "/3/gallery/{0}".format(self.id)
payload = {'title': title, 'terms': '1' if bypass_terms else '0'}
self._imgur._send_request(url, needs_auth=True, params=payload,
method='POST')
item = self._imgur.get_gallery_album(self.id)
_change_object(self, item)
return self
def update(self, title=None, description=None, images=None, cover=None,
layout=None, privacy=None):
"""
Update the album's information.
Arguments with the value None will retain their old values.
:param title: The title of the album.
:param description: A description of the album.
:param images: A list of the images we want the album to contain.
Can be Image objects, ids or a combination of the two. Images that
you cannot set (non-existing or not owned by you) will not cause
exceptions, but fail silently.
:param privacy: The albums privacy level, can be public, hidden or
secret.
:param cover: The id of the cover image.
:param layout: The way the album is displayed, can be blog, grid,
horizontal or vertical.
"""
url = (self._imgur._base_url + "/3/album/"
"{0}".format(self._delete_or_id_hash))
is_updated = self._imgur._send_request(url, params=locals(),
method='POST')
if is_updated:
self.title = title or self.title
self.description = description or self.description
self.layout = layout or self.layout
self.privacy = privacy or self.privacy
if cover is not None:
self.cover = (cover if isinstance(cover, Image)
else Image({'id': cover}, self._imgur,
has_fetched=False))
if images:
self.images = [img if isinstance(img, Image) else
Image({'id': img}, self._imgur, False)
for img in images]
return is_updated
class Comment(Basic_object):
"""
A comment a user has made.
Users can comment on Gallery album, Gallery image or other Comments.
:ivar album_cover: If this Comment is on an Album, this will be the Album's
cover Image.
:ivar author: The user that created the comment.
:ivar datetime: Time inserted into the gallery, epoch time.
:ivar deletehash: For anonymous uploads, this is used to delete the image.
:ivar downs: The total number of dislikes (downvotes) the comment has
received.
:ivar image: The image the comment belongs to.
:ivar is_deleted: Has the comment been deleted?
:ivar on_album: Is the image part of an album.
:ivar parent: The comment this one has replied to, if it is a top-level
comment i.e. it's a comment directly to the album / image then it will
be None.
:ivar permalink: A permanent link to the comment.
:ivar points: ups - downs.
:ivar replies: A list of comment replies to this comment. This variable is
only available if the comment was returned via Album.get_comments().
Use get_replies instead to get the replies if this variable is not
available.
:ivar text: The comment's text.
:ivar ups: The total number of likes (upvotes) the comment has received.
:ivar vote: The currently logged in user's vote on the comment.
"""
def __init__(self, json_dict, imgur, has_fetched=True):
self.deletehash = None
self._INFO_URL = (imgur._base_url + "/3/comment/"
"{0}".format(json_dict['id']))
super(Comment, self).__init__(json_dict, imgur, has_fetched)
def delete(self):
"""Delete the comment."""
url = (self._imgur._base_url + "/3/image/"
"{0}".format(self._delete_or_id_hash))
return self._imgur._send_request(url, method='DELETE')
# NOTE: Gives a 403 permission denied error on comment 77087313, which
# was made by me.
def downvote(self):
"""Downvote this comment."""
url = self._imgur._base_url + "/3/comment/{0}/vote/down".format(self.id)
return self._imgur._send_request(url, needs_auth=True, method='POST')
def get_replies(self):
"""Get the replies to this comment."""
url = self._imgur._base_url + "/3/comment/{0}/replies".format(self.id)
json = self._imgur._send_request(url)
child_comments = json['children']
return [Comment(com, self._imgur) for com in child_comments]
def reply(self, text):
"""Make a comment reply."""
url = self._imgur._base_url + "/3/comment/{0}".format(self.id)
payload = {'image_id': self.image.id, 'comment': text}
resp = self._imgur._send_request(url, params=payload, needs_auth=True,
method='POST')
return Comment(resp, imgur=self._imgur, has_fetched=False)
'''
Testing this method would give Imgur false positives.
Discussions with Imgur will hopefully produce a way of consistently testing
this without giving Imgur problems.
def report(self):
"""Reply comment for being inappropriate."""
pass
'''
def upvote(self):
"""Upvote this comment."""
url = self._imgur._base_url + "/3/comment/{0}/vote/up".format(self.id)
return self._imgur._send_request(url, needs_auth=True, method='POST')
class Gallery_item(object):
"""Functionality shared by Gallery_image and Gallery_album."""
def comment(self, text):
"""
Make a top-level comment to this.
:param text: The comment text.
"""
url = self._imgur._base_url + "/3/comment"
payload = {'image_id': self.id, 'comment': text}
resp = self._imgur._send_request(url, params=payload, needs_auth=True,
method='POST')
return Comment(resp, imgur=self._imgur, has_fetched=False)
def downvote(self):
"""
Dislike this.
A downvote will replace a neutral vote or an upvote. Downvoting
something the authenticated user has already downvoted will set the
vote to neutral.
"""
url = self._imgur._base_url + "/3/gallery/{0}/vote/down".format(self.id)
return self._imgur._send_request(url, needs_auth=True, method='POST')
def get_comments(self):
"""Get a list of the top-level comments."""
url = self._imgur._base_url + "/3/gallery/{0}/comments".format(self.id)
resp = self._imgur._send_request(url)
return [Comment(com, self._imgur) for com in resp]
def remove_from_gallery(self):
"""Remove this image from the gallery."""
url = self._imgur._base_url + "/3/gallery/{0}".format(self.id)
self._imgur._send_request(url, needs_auth=True, method='DELETE')
if isinstance(self, Image):
item = self._imgur.get_image(self.id)
else:
item = self._imgur.get_album(self.id)
_change_object(self, item)
return self
def upvote(self):
"""
Like this.
An upvote will replace a neutral vote or a downvote. Upvoting
something the authenticated user has already upvoted will set the vote
to neutral.
"""
url = self._imgur._base_url + "/3/gallery/{0}/vote/up".format(self.id)
return self._imgur._send_request(url, needs_auth=True, method='POST')
class Image(Basic_object):
"""
An image uploaded to Imgur.
:ivar bandwidth: Bandwidth consumed by the image in bytes.
:ivar datetime: Time inserted into the gallery, epoch time.
:ivar deletehash: For anonymous uploads, this is used to delete the image.
:ivar description: A short description of the image.
:ivar height: The height of the image in pixels.
:ivar id: The ID for the image.
:ivar is_animated: Is the image animated?
:ivar is_favorited: Has the logged-in user favorited this image?
:ivar is_nsfw: Is the image Not Safe For Work (contains gore/porn)?
:ivar link: The URL link to the image.
:ivar link_big_square: The URL to a big square thumbnail of the image.
:ivar link_huge_thumbnail: The URL to a huge thumbnail of the image.
:ivar link_large_square: The URL to a large square thumbnail of the image.
:ivar link_large_thumbnail: The URL to a large thumbnail of the image.
:ivar link_medium_thumbnail: The URL to a medium thumbnail of the image.
:ivar link_small_square: The URL to a small square thumbnail of the image.
:ivar section: ??? - No info in Imgur documentation.
:ivar size: The size of the image in bytes.
:ivar title: The image's title.
:ivar views: Total number of views the image has received.
:ivar width: The width of the image in pixels.
"""
def __init__(self, json_dict, imgur, has_fetched=True):
self._INFO_URL = (imgur._base_url + "/3/image/"
"{0}".format(json_dict['id']))
self.deletehash = None
super(Image, self).__init__(json_dict, imgur, has_fetched)
def delete(self):
"""Delete the image."""
url = (self._imgur._base_url + "/3/image/"
"{0}".format(self._delete_or_id_hash))
return self._imgur._send_request(url, method='DELETE')
def download(self, path='', name=None, overwrite=False, size=None):
"""
Download the image.
:param path: The image will be downloaded to the folder specified at
path. If path is empty (the default), the current working directory
will be used.
:param name: The name the image will be stored as (not including the
file extension). If name is None, the title of the image will be
used. If the image doesn't have a title, its id will be used. Note
that if the name given by name or title is an invalid filename,
then the hash will be used as the name instead.
:param overwrite: If True, overwrite an already existing file with the
same name as the one we want to save the file as.
:param size: Instead of downloading the image in its original size, we
can choose to instead download a thumbnail of it. Options are
'small_square', 'big_square', 'small_thumbnail',
'medium_thumbnail', 'large_thumbnail' or 'huge_thumbnail'.
:returns: Name of the new file.
"""
def save_as(filename):
local_path = os.path.join(path, filename)
if os.path.exists(local_path) and not overwrite:
raise Exception("Trying to save as {0}, but file "
"already exists.".format(local_path))
with open(local_path, 'wb') as out_file:
out_file.write(resp.content)
return local_path
valid_sizes = {'small_square': 's', 'big_square': 'b',
'small_thumbnail': 't', 'medium_thumbnail': 'm',
'large_thumbnail': 'l', 'huge_thumbnail': 'h'}
if size is not None:
size = size.lower().replace(' ', '_')
if size not in valid_sizes:
raise LookupError('Invalid size. Valid options are: {0}'.format(
", " .join(valid_sizes.keys())))
suffix = valid_sizes.get(size, '')
base, sep, ext = self.link.rpartition('.')
resp = requests.get(base + suffix + sep + ext)
if name or self.title:
try:
return save_as((name or self.title) + suffix + sep + ext)
except IOError:
pass
# Invalid filename
return save_as(self.id + suffix + sep + ext)
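# Example of the download flow above (a hedged sketch; "my_client_id" and
# the image id are placeholders). The size argument maps to the one-letter
# Imgur thumbnail suffixes listed in valid_sizes:
#
#   import pyimgur
#   im = pyimgur.Imgur("my_client_id")
#   image = im.get_image("S1jmapR")
#   path = image.download(path=".", size="small_square", overwrite=True)
#   print(path)  # e.g. "./<title or id>s.jpg"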
def favorite(self):
"""
Favorite the image.
Favoriting an already favorited image will unfavorite it.
"""
url = self._imgur._base_url + "/3/image/{0}/favorite".format(self.id)
return self._imgur._send_request(url, needs_auth=True, method='POST')
def submit_to_gallery(self, title, bypass_terms=False):
"""
Add this to the gallery.
Requires that the authenticated user has accepted the gallery terms and
verified their email.
:param title: The title of the new gallery item.
:param bypass_terms: If the user has not accepted Imgur's terms yet,
this method will return an error. Set this to True to bypass the
terms.
"""
url = self._imgur._base_url + "/3/gallery/{0}".format(self.id)
payload = {'title': title, 'terms': '1' if bypass_terms else '0'}
self._imgur._send_request(url, needs_auth=True, params=payload,
method='POST')
item = self._imgur.get_gallery_image(self.id)
_change_object(self, item)
return self
def update(self, title=None, description=None):
"""Update the image with a new title and/or description."""
url = (self._imgur._base_url + "/3/image/"
"{0}".format(self._delete_or_id_hash))
is_updated = self._imgur._send_request(url, params=locals(),
method='POST')
if is_updated:
self.title = title or self.title
self.description = description or self.description
return is_updated
class Imgur:
"""
The base class containing general functionality for Imgur.
You should create an Imgur object at the start of your code and use it to
interact with Imgur. You shouldn't directly initialize any other classes,
but instead use the methods in this class to get them.
"""
def __init__(self, client_id, client_secret=None, access_token=None,
refresh_token=None, verify=True, mashape_key=None):
"""
Initialize the Imgur object.
Before using PyImgur, or the Imgur REST API in general, you need to
register your application with Imgur. This can be done at
https://api.imgur.com/oauth2/addclient
:param client_id: Your applications client_id.
:param client_secret: Your applications client_secret. This is only
needed when a user needs to authorize the app.
:param access_token: The secret key used to access the user's data. It
can be thought of as the user's password and username combined into
one, and is used to access the user's account. It expires after 1
hour.
:param refresh_token: is used to request new access_tokens. Since
access_tokens expire after 1 hour, we need a way to request new
ones without going through the entire authorization step again. It
does not expire.
:param verify: Verify SSL certificate of server
(can result in SSLErrors)?
"""
self.is_authenticated = False
self.access_token = access_token
self.client_id = client_id
self.client_secret = client_secret
self.DEFAULT_LIMIT = 100
self.ratelimit_clientlimit = None
self.ratelimit_clientremaining = None
self.ratelimit_userlimit = None
self.ratelimit_userremaining = None
self.ratelimit_userreset = None
self.refresh_token = refresh_token
self.verify = verify
self.mashape_key = mashape_key
if self.mashape_key:
self._base_url = MASHAPE_BASE
else:
self._base_url = IMGUR_BASE
def _send_request(self, url, needs_auth=False, **kwargs):
"""
Handles top level functionality for sending requests to Imgur.
This means:
- Raising a client-side error if authentication is insufficient.
- Adding authentication information to the request.
- Splitting the request into multiple requests for pagination.
- Retrying calls for certain server-side errors.
- Refreshing the access token automatically if it has expired.
- Updating ratelimit info.
:param needs_auth: Is authentication as a user needed for the execution
of this method?
"""
# TODO: Add automatic test for timed_out access_tokens and
# automatically refresh it before carrying out the request.
if self.access_token is None and needs_auth:
# TODO: Use inspect to insert name of method in error msg.
raise Exception("Authentication as a user is required to use this "
"method.")
if self.access_token is None:
# Not authenticated as a user. Use anonymous access.
auth = {'Authorization': 'Client-ID {0}'.format(self.client_id)}
else:
auth = {'Authorization': 'Bearer {0}'.format(self.access_token)}
if self.mashape_key:
auth.update({'X-Mashape-Key': self.mashape_key})
content = []
is_paginated = False
if 'limit' in kwargs:
is_paginated = True
limit = kwargs['limit'] or self.DEFAULT_LIMIT
del kwargs['limit']
page = 0
base_url = url
url = base_url.format(page)
kwargs['authentication'] = auth
while True:
result = request.send_request(url, verify=self.verify, **kwargs)
new_content, ratelimit_info = result
if is_paginated and new_content and limit > len(new_content):
content += new_content
page += 1
url = base_url.format(page)
else:
if is_paginated:
content = (content + new_content)[:limit]
else:
content = new_content
break
# Note: When the cache is implemented, it's important that the
# ratelimit info doesn't get updated with the ratelimit info in the
# cache since that's likely incorrect.
for key, value in ratelimit_info.items():
setattr(self, key[2:].replace('-', '_'), value)
return content
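# Illustration of how callers use the pagination handled above (a sketch,
# not original code): a paginated endpoint embeds a literal '{}' that
# _send_request fills with the page number, and 'limit' caps the total
# number of returned items. This mirrors the pattern in User.get_images:
#
#   url = self._base_url + "/3/account/{0}/images/{1}".format("some_user", '{}')
#   images = self._send_request(url, limit=250)  # requests pages 0, 1, 2, ...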
def authorization_url(self, response, state=""):
"""
Return the authorization url that's needed to authorize as a user.
:param response: Can be either code or pin. If it's code the user will
be redirected to your redirect url with the code as a get parameter
after authorizing your application. If it's pin then after
authorizing your application, the user will instead be shown a pin
on Imgur's website. Both code and pin are used to get an
access_token and refresh token with the exchange_code and
exchange_pin functions respectively.
:param state: This optional parameter indicates any state which may be
useful to your application upon receipt of the response. Imgur
round-trips this parameter, so your application receives the same
value it sent. Possible uses include redirecting the user to the
correct resource in your site, nonces, and
cross-site-request-forgery mitigations.
"""
return AUTHORIZE_URL.format(self._base_url, self.client_id, response, state)
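# Sketch of the pin-based authorization flow built on this method (assumes
# a registered application; the ids and pin below are placeholders):
#
#   im = pyimgur.Imgur("my_client_id", "my_client_secret")
#   auth_url = im.authorization_url('pin')
#   # Send the user to auth_url, then ask them for the pin Imgur displays.
#   access_token, refresh_token = im.exchange_pin("the_pin_from_the_user")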
def change_authentication(self, client_id=None, client_secret=None,
access_token=None, refresh_token=None):
"""Change the current authentication."""
# TODO: Add error checking so you cannot change client_id and retain
# access_token. Because that doesn't make sense.
self.client_id = client_id or self.client_id
self.client_secret = client_secret or self.client_secret
self.access_token = access_token or self.access_token
self.refresh_token = refresh_token or self.refresh_token
def create_album(self, title=None, description=None, images=None,
cover=None):
"""
Create a new Album.
:param title: The title of the album.
:param description: The album's description.
:param images: A list of the images that will be added to the album
after it's created. Can be Image objects, ids or a combination of
the two. Images that you cannot add (non-existing or not owned by
you) will not cause exceptions, but will fail silently.
:param cover: The id of the image you want as the album's cover image.
:returns: The newly created album.
"""
url = self._base_url + "/3/album/"
payload = {'ids': images, 'title': title,
'description': description, 'cover': cover}
resp = self._send_request(url, params=payload, method='POST')
return Album(resp, self, has_fetched=False)
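# Album creation sketch (not part of the original module; the image ids
# are placeholders):
#
#   album = im.create_album(title="Holiday", description="Rainy days",
#                           images=["S1jmapR", "Mjbq3Nt"])
#   print(album.id)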
'''
Not currently implemented for 3 reasons.
It requires recaptcha info, which makes using it via the API inconvenient.
It is hard to test.
It creates users on Imgur that don't correspond to actual users.
def create_user(self, username):
"""Create this user on Imgur."""
pass
'''
def exchange_code(self, code):
"""Exchange one-use code for an access_token and request_token."""
params = {'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'authorization_code',
'code': code}
result = self._send_request(EXCHANGE_URL.format(self._base_url),
params=params, method='POST',
data_field=None)
self.access_token = result['access_token']
self.refresh_token = result['refresh_token']
return self.access_token, self.refresh_token
def exchange_pin(self, pin):
"""Exchange one-use pin for an access_token and request_token."""
params = {'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'pin',
'pin': pin}
result = self._send_request(EXCHANGE_URL.format(self._base_url),
params=params, method='POST',
data_field=None)
self.access_token = result['access_token']
self.refresh_token = result['refresh_token']
return self.access_token, self.refresh_token
def get_album(self, id):
"""Return information about this album."""
url = self._base_url + "/3/album/{0}".format(id)
json = self._send_request(url)
return Album(json, self)
def get_at_url(self, url):
"""
Return an object representing the content at url.
Returns None if no object could be matched with the id.
Works for Album, Comment, Gallery_album, Gallery_image, Image and User.
NOTE: Imgur's documentation does not cover what urls are available.
Some urls, such as imgur.com/<ID>, can be for several different types
of object. Using a wrong but similar call, such as get_subreddit_image
on a meme image, will not cause an error, but will instead return a
subset of information, with either the remaining pieces missing or
their values set to None. This makes it hard to create a method such as
this one that attempts to deduce the object from the url. Due to these
factors, this method should be considered experimental and used
carefully.
:param url: The url where the content is located
"""
class NullDevice():
def write(self, string):
pass
def get_gallery_item(id):
"""
Special helper method to get gallery items.
The problem is that it's impossible to distinguish albums and
images from each other based on the url. And there isn't a common
url endpoint that returns either a Gallery_album or a Gallery_image
depending on what the id represents. So the only option is to
assume it's a Gallery_image and, if we get an exception, then try
Gallery_album. Gallery_image is attempted first because there are
more of them.
"""
try:
# HACK: Problem is that send_request prints the error message
# from Imgur when it encounters an error. This is nice because
# this error message is more descriptive than just the status
# code that Requests give. But since we first assume the id
# belong to an image, it means we will get an error whenever
# the id belongs to an album. The following code temporarily
# disables stdout to avoid giving a cryptic and incorrect error.
# Code for disabling stdout is from
# http://coreygoldberg.blogspot.dk/2009/05/
# python-redirect-or-turn-off-stdout-and.html
original_stdout = sys.stdout # keep a reference to STDOUT
sys.stdout = NullDevice() # redirect the real STDOUT
return self.get_gallery_image(id)
# TODO: Add better error codes so I don't have to do a catch-all
except Exception:
return self.get_gallery_album(id)
finally:
sys.stdout = original_stdout # turn STDOUT back on
if not self.is_imgur_url(url):
return None
objects = {'album': {'regex': "a/(?P<id>[\w.]*?)$",
'method': self.get_album},
'comment': {'regex': "gallery/\w*/comment/(?P<id>[\w.]*?)$",
'method': self.get_comment},
'gallery': {'regex': "(gallery|r/\w*?)/(?P<id>[\w.]*?)$",
'method': get_gallery_item},
# Valid image extensions: http://imgur.com/faq#types
# All are between 3 and 4 chars long.
'image': {'regex': "(?P<id>[\w.]*?)(\\.\w{3,4})?$",
'method': self.get_image},
'user': {'regex': "user/(?P<id>[\w.]*?)$",
'method': self.get_user}
}
parsed_url = urlparse(url)
for obj_type, values in objects.items():
regex_result = re.match('/' + values['regex'], parsed_url.path)
if regex_result is not None:
obj_id = regex_result.group('id')
initial_object = values['method'](obj_id)
if obj_type == 'image':
try:
# A better version might be to ping the url where the
# gallery_image should be with a requests.head call. If
# we get a 200 returned, then that means it exists and
# this becomes less hacky.
original_stdout = sys.stdout
sys.stdout = NullDevice()
if getattr(initial_object, 'section', None):
sub = initial_object.section
return self.get_subreddit_image(sub, obj_id)
return self.get_gallery_image(obj_id)
except Exception:
pass
finally:
sys.stdout = original_stdout
return initial_object
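# Usage sketch for get_at_url (experimental, see the docstring above; the
# url below is a placeholder):
#
#   obj = im.get_at_url("http://imgur.com/gallery/OPSTM")
#   if obj is not None:
#       print(type(obj).__name__)  # e.g. Gallery_image or Gallery_album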
def get_comment(self, id):
"""Return information about this comment."""
url = self._base_url + "/3/comment/{0}".format(id)
json = self._send_request(url)
return Comment(json, self)
def get_gallery(self, section='hot', sort='viral', window='day',
show_viral=True, limit=None):
"""
Return a list of gallery albums and gallery images.
:param section: hot | top | user - defaults to hot.
:param sort: viral | time - defaults to viral.
:param window: Change the date range of the request if the section is
"top", day | week | month | year | all, defaults to day.
:param show_viral: true | false - Show or hide viral images from the
'user' section. Defaults to true.
:param limit: The number of items to return.
"""
url = (self._base_url + "/3/gallery/{}/{}/{}/{}?showViral="
"{}".format(section, sort, window, '{}', show_viral))
resp = self._send_request(url, limit=limit)
return [_get_album_or_image(thing, self) for thing in resp]
def get_gallery_album(self, id):
"""
Return the gallery album matching the id.
Note that an album's id is different from its id as a gallery album.
This makes it possible to remove an album from the gallery and set
its privacy setting to secret, without compromising its secrecy.
"""
url = self._base_url + "/3/gallery/album/{0}".format(id)
resp = self._send_request(url)
return Gallery_album(resp, self)
def get_gallery_image(self, id):
"""
Return the gallery image matching the id.
Note that an image's id is different from its id as a gallery image.
This makes it possible to remove an image from the gallery and set
its privacy setting to secret, without compromising its secrecy.
"""
url = self._base_url + "/3/gallery/image/{0}".format(id)
resp = self._send_request(url)
return Gallery_image(resp, self)
def get_image(self, id):
"""Return a Image object representing the image with the given id."""
url = self._base_url + "/3/image/{0}".format(id)
resp = self._send_request(url)
return Image(resp, self)
def get_message(self, id):
"""
Return a Message object for given id.
:param id: The id of the message object to return.
"""
url = self._base_url + "/3/message/{0}".format(id)
resp = self._send_request(url)
return Message(resp, self)
def get_notification(self, id):
"""
Return a Notification object.
:param id: The id of the notification object to return.
"""
url = self._base_url + "/3/notification/{0}".format(id)
resp = self._send_request(url)
return Notification(resp, self)
def get_memes_gallery(self, sort='viral', window='week', limit=None):
"""
Return a list of gallery albums/images submitted to the memes gallery
The url for the memes gallery is: http://imgur.com/g/memes
:param sort: viral | time | top - defaults to viral
:param window: Change the date range of the request if the section is
"top", day | week | month | year | all, defaults to week.
:param limit: The number of items to return.
"""
url = (self._base_url + "/3/gallery/g/memes/{0}/{1}/{2}".format(
sort, window, '{}'))
resp = self._send_request(url, limit=limit)
return [_get_album_or_image(thing, self) for thing in resp]
'''
This method does not seem to return any more data than get_gallery_image.
So I'm not sure whether it needs to be included. Speaking for is that
people may expect it to be here, speaking against is that the functionality
already exists and duplication will reduce usability of the API.
07-08-2013
def get_memes_image(self, id):
"""
Return the Gallery_image with the id submitted to the memes gallery
:param id: The id of the image we want.
"""
url = self._base_url + "/3/gallery/g/memes/" % id
resp = self._send_request(url)
return Gallery_image(resp, self)
'''
def get_subreddit_gallery(self, subreddit, sort='time', window='top',
limit=None):
"""
Return a list of gallery albums/images submitted to a subreddit.
A subreddit is a subsection of the website www.reddit.com, where users
can, among other things, post images.
:param subreddit: A valid subreddit name.
:param sort: time | top - defaults to time.
:param window: Change the date range of the request if the section is
"top", day | week | month | year | all, defaults to day.
:param limit: The number of items to return.
"""
url = (self._base_url + "/3/gallery/r/{0}/{1}/{2}/{3}".format(
subreddit, sort, window, '{}'))
resp = self._send_request(url, limit=limit)
return [_get_album_or_image(thing, self) for thing in resp]
def get_subreddit_image(self, subreddit, id):
"""
Return the Gallery_image with the id submitted to subreddit gallery
:param subreddit: The subreddit the image has been submitted to.
:param id: The id of the image we want.
"""
url = self._base_url + "/3/gallery/r/{0}/{1}".format(subreddit, id)
resp = self._send_request(url)
return Gallery_image(resp, self)
def get_user(self, username):
"""
Return a User object for this username.
:param username: The name of the user we want more information about.
"""
url = self._base_url + "/3/account/{0}".format(username)
json = self._send_request(url)
return User(json, self)
def is_imgur_url(self, url):
"""Is the given url a valid Imgur url?"""
return re.match("(http://)?(www\.)?imgur\.com", url, re.I) is not None
def refresh_access_token(self):
"""
Refresh the access_token.
The self.access_token attribute will be updated with the value of the
new access_token which will also be returned.
"""
if self.client_secret is None:
raise Exception("client_secret must be set to execute "
"refresh_access_token.")
if self.refresh_token is None:
raise Exception("refresh_token must be set to execute "
"refresh_access_token.")
params = {'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token}
result = self._send_request(REFRESH_URL.format(self._base_url),
params=params, method='POST',
data_field=None)
self.access_token = result['access_token']
return self.access_token
def search_gallery(self, q):
"""Search the gallery with the given query string."""
url = self._base_url + "/3/gallery/search?q={0}".format(q)
resp = self._send_request(url)
return [_get_album_or_image(thing, self) for thing in resp]
def upload_image(self, path=None, url=None, title=None, description=None,
album=None):
"""
Upload the image at either path or url.
:param path: The path to the image you want to upload.
:param url: The url to the image you want to upload.
:param title: The title the image will have when uploaded.
:param description: The description the image will have when uploaded.
:param album: The album the image will be added to when uploaded. Can
be either an Album object or its id. Leave at None to upload
without adding to an Album; adding it later is possible.
Authentication as album owner is necessary to upload to an album
with this function.
:returns: An Image object representing the uploaded image.
"""
if bool(path) == bool(url):
raise LookupError("Either path or url must be given.")
if path:
with open(path, 'rb') as image_file:
binary_data = image_file.read()
image = b64encode(binary_data)
else:
image = url
payload = {'album_id': album, 'image': image,
'title': title, 'description': description}
resp = self._send_request(self._base_url + "/3/image",
params=payload, method='POST')
# TEMPORARY HACK:
# On 5-08-2013 I noticed Imgur now returned enough information from
# this call to fully populate the Image object. However those variables
# that matched arguments were always None, even if they had been given.
# See https://groups.google.com/forum/#!topic/imgur/F3uVb55TMGo
resp['title'] = title
resp['description'] = description
if album is not None:
resp['album'] = (Album({'id': album}, self, False) if not
isinstance(album, Album) else album)
return Image(resp, self)
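# Upload sketch (a hedged example, not original code; the file path and
# titles are placeholders):
#
#   im = pyimgur.Imgur("my_client_id")
#   uploaded = im.upload_image(path="/home/me/cat.jpg", title="My cat",
#                              description="Sleeping, as usual")
#   print(uploaded.link)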
class Message(Basic_object):
"""This corresponds to the messages users can send each other."""
def __init__(self, json_dict, imgur, has_fetched=True):
self._INFO_URL = (imgur._base_url + "/3/message/{0}".format(
json_dict['id']))
super(Message, self).__init__(json_dict, imgur, has_fetched)
'''
Maybe we cannot unblock users? That would be quite problematic if one of
the main accounts blocked the other and it couldn't be unblocked.
Perhaps this method should also be placed under User, as it's a user we
block, not the message itself.
def block(self):
pass
'''
def delete(self):
"""Delete the message."""
url = self._imgur._base_url + "/3/message/{0}".format(self.id)
return self._imgur._send_request(url, method='DELETE')
def get_thread(self):
"""Return the message thread this Message is in."""
url = (self._imgur._base_url + "/3/message/{0}/thread".format(
self.first_message.id))
resp = self._imgur._send_request(url)
return [Message(msg, self._imgur) for msg in resp]
def reply(self, body):
"""
Reply to this message.
This is a convenience method calling User.send_message. See it for more
information on usage. Note that both recipient and reply_to are given
by using this convenience method.
:param body: The body of the message.
"""
return self.author.send_message(body=body, reply_to=self.id)
'''
Testing this method would give Imgur false positives.
Discussions with Imgur will hopefully produce a way of consistently testing
this without giving Imgur problems.
Maybe this method should be placed under User as it is the user that's
being reported. On the other hand the reason for the report is sending
messages against Imgur's TOS. Comments can also be against the TOS, but
there is a separate endpoint for reporting those.
def report():
"""Report the author sending a message against the Terms of Service."""
pass
'''
class Notification(Basic_object):
"""
This corresponds to the notifications a user may receive.
A notification can come for several reasons. For instance, one may be
received if someone replies to one of your comments.
"""
def __init__(self, json_dict, imgur, has_fetched=True):
# Is never gotten lazily, so _has_fetched is always True
self._INFO_URL = (imgur._base_url + "/3/notification/{0}".format(
json_dict['id']))
super(Notification, self).__init__(json_dict, imgur, has_fetched)
def mark_as_viewed(self):
"""
Mark the notification as viewed.
Notifications cannot be marked as unviewed.
"""
url = self._imgur._base_url + "/3/notification/{0}".format(self.id)
return self._imgur._send_request(url, method='POST')
class User(Basic_object):
"""
A User on Imgur.
:ivar bio: A basic description filled out by the user, displayed in the
gallery profile page.
:ivar created: The epoch time of user account creation
:ivar id: The user id.
:ivar name: The username
:ivar reputation: Total likes - dislikes of the user's created content.
"""
def __init__(self, json_dict, imgur, has_fetched=True):
self._INFO_URL = (imgur._base_url + "/3/account/{0}".format(
json_dict['url']))
super(User, self).__init__(json_dict, imgur, has_fetched)
# Overrides __repr__ method in Basic_object
def __repr__(self):
return "<{0} {1}>".format(type(self).__name__, self.name)
def change_settings(self, bio=None, public_images=None,
messaging_enabled=None, album_privacy=None,
accepted_gallery_terms=None):
"""
Update the settings for the user.
:param bio: A basic description filled out by the user, is displayed in
the gallery profile page.
:param public_images: Set the default privacy setting of the users
images. If True images are public, if False private.
:param messaging_enabled: Set to True to enable messaging.
:param album_privacy: The default privacy level of albums created by
the user. Can be public, hidden or secret.
:param accepted_gallery_terms: The user agreement to Imgur Gallery
terms. Necessary before the user can submit to the gallery.
"""
# NOTE: album_privacy should maybe be renamed to default_privacy
# NOTE: public_images is a boolean, despite the documentation saying it
# is a string.
url = self._imgur._base_url + "/3/account/{0}/settings".format(self.name)
resp = self._imgur._send_request(url, needs_auth=True, params=locals(),
method='POST')
return resp
def delete(self):
"""Delete this user. Require being authenticated as the user."""
url = self._imgur._base_url + "/3/account/{0}".format(self.name)
return self._imgur._send_request(url, needs_auth=True, method='DELETE')
def get_albums(self, limit=None):
"""
Return a list of the user's albums.
Secret and hidden albums are only returned if this is the logged-in
user.
"""
url = (self._imgur._base_url + "/3/account/{0}/albums/{1}".format(self.name,
'{}'))
resp = self._imgur._send_request(url, limit=limit)
return [Album(alb, self._imgur, False) for alb in resp]
def get_comments(self):
"""Return the comments made by the user."""
url = self._imgur._base_url + "/3/account/{0}/comments".format(self.name)
resp = self._imgur._send_request(url)
return [Comment(com, self._imgur) for com in resp]
def get_favorites(self):
"""Return the users favorited images."""
url = self._imgur._base_url + "/3/account/{0}/favorites".format(self.name)
resp = self._imgur._send_request(url, needs_auth=True)
return [_get_album_or_image(thing, self._imgur) for thing in resp]
def get_gallery_favorites(self):
"""Get a list of the images in the gallery this user has favorited."""
url = (self._imgur._base_url + "/3/account/{0}/gallery_favorites".format(
self.name))
resp = self._imgur._send_request(url)
return [Image(img, self._imgur) for img in resp]
def get_gallery_profile(self):
"""Return the users gallery profile."""
url = (self._imgur._base_url + "/3/account/{0}/"
"gallery_profile".format(self.name))
return self._imgur._send_request(url)
def has_verified_email(self):
"""
Has the user verified that the email they have given is legit?
A verified email is required to submit to the gallery. Confirmation
happens by sending an email to the user, with the owner of that email
address verifying that they are the same as the Imgur user.
"""
url = (self._imgur._base_url + "/3/account/{0}/"
"verifyemail".format(self.name))
return self._imgur._send_request(url, needs_auth=True)
def get_images(self, limit=None):
"""Return all of the images associated with the user."""
url = (self._imgur._base_url + "/3/account/{0}/"
"images/{1}".format(self.name, '{}'))
resp = self._imgur._send_request(url, limit=limit)
return [Image(img, self._imgur) for img in resp]
def get_messages(self, new=True):
"""
Return all messages sent to this user, formatted as a notification.
:param new: False for all notifications, True for only non-viewed
notifications.
"""
url = (self._imgur._base_url + "/3/account/{0}/notifications/"
"messages".format(self.name))
result = self._imgur._send_request(url, params=locals(),
needs_auth=True)
return [Notification(msg_dict, self._imgur, has_fetched=True) for
msg_dict in result]
def get_notifications(self, new=True):
"""Return all the notifications for this user."""
url = (self._imgur._base_url + "/3/account/{0}/"
"notifications".format(self.name))
resp = self._imgur._send_request(url, params=locals(), needs_auth=True)
msgs = [Message(msg_dict, self._imgur, has_fetched=True) for msg_dict
in resp['messages']]
replies = [Comment(com_dict, self._imgur, has_fetched=True) for
com_dict in resp['replies']]
return {'messages': msgs, 'replies': replies}
def get_replies(self, new=True):
"""
Return all reply notifications for this user.
:param new: False for all notifications, True for only non-viewed
notifications.
"""
url = (self._imgur._base_url + "/3/account/{0}/"
"notifications/replies".format(self.name))
return self._imgur._send_request(url, needs_auth=True)
def get_settings(self):
"""
Returns current settings.
Only accessible if authenticated as the user.
"""
url = self._imgur._base_url + "/3/account/{0}/settings".format(self.name)
return self._imgur._send_request(url)
def get_statistics(self):
"""Return statistics about this user."""
url = self._imgur._base_url + "/3/account/{0}/stats".format(self.name)
return self._imgur._send_request(url, needs_auth=True)
def get_submissions(self, limit=None):
"""Return a list of the images a user has submitted to the gallery."""
url = (self._imgur._base_url + "/3/account/{0}/submissions/"
"{1}".format(self.name, '{}'))
resp = self._imgur._send_request(url, limit=limit)
return [_get_album_or_image(thing, self._imgur) for thing in resp]
def send_message(self, body, subject=None, reply_to=None):
"""
Send a message to this user from the logged in user.
:param body: The body of the message.
:param subject: The subject of the message. Note that if this
message is a reply, then the subject of the first message will be
used instead.
:param reply_to: Messages can either be replies to other messages or
start a new message thread. If this is None it will start a new
message thread. If it's a Message object or message_id, then the
new message will be sent as a reply to the reply_to message.
"""
url = self._imgur._base_url + "/3/message"
parent_id = reply_to.id if isinstance(reply_to, Message) else reply_to
payload = {'recipient': self.name, 'body': body, 'subject': subject,
'parent_id': parent_id}
self._imgur._send_request(url, params=payload, needs_auth=True,
method='POST')
def send_verification_email(self):
"""
Send a verification email to this user's email address.
Remember that the verification email may end up in the user's spam
folder.
"""
url = (self._imgur._base_url + "/3/account/{0}"
"/verifyemail".format(self.name))
self._imgur._send_request(url, needs_auth=True, method='POST')
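# Minimal User usage sketch (the username is a placeholder; methods that
# touch private data need an authenticated Imgur instance):
#
#   user = im.get_user("Dartmoor")
#   albums = user.get_albums(limit=5)
#   images = user.get_images(limit=10)
#   print(user.reputation)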
# Gallery_album and Gallery_image are placed at the end as they need to inherit
# from Gallery_item, Album and Image. It's thus impossible to place them
# alphabetically without errors.
class Gallery_album(Album, Gallery_item):
"""Gallery Albums are albums submitted to the gallery."""
def __init__(self, json_dict, imgur, has_fetched=True):
self._INFO_URL = (imgur._base_url + "/3/gallery/album/"
"{0}".format(json_dict['id']))
super(Gallery_album, self).__init__(json_dict, imgur, has_fetched)
class Gallery_image(Image, Gallery_item):
"""Gallery images are images submitted to the gallery."""
def __init__(self, json_dict, imgur, has_fetched=True):
self._INFO_URL = (imgur._base_url + "/3/gallery/image/"
"{0}".format(json_dict['id']))
super(Gallery_image, self).__init__(json_dict, imgur, has_fetched)
| Damgaard/PyImgur | pyimgur/__init__.py | Python | gpl-3.0 | 64,541 | ["VisIt"] | 3e525fd778436c47e0148682302a81f5b28c323e7985126742eefb857f1cc2ca |
#!/usr/bin/env python
import sys
sys.path.append('/opt/lib/python2.7/site-packages/')
import math
import numpy as np
import pylab
import nest
import nest.raster_plot
import nest.topology as tp
import logging as log
from network_params import *
from sim_params import *
from user_params import *
# Implementation of the multi-layered local cortical network model by
# Potjans, Tobias C., and Markus Diesmann. "The cell-type specific
# cortical microcircuit: relating structure and activity in a full-scale
# spiking network model." Cerebral Cortex (2014): bhs358.
# Uses user_params.py, sim_params.py, and network_params.py
# function definitions:
# - CheckParameters
# - PrepareSimulation
# - DerivedParameters
# - CreateNetworkNodes
# - WriteGIDstoFile
# - ConnectNetworkNodes
# global variables
n_layers = None
n_pops_per_layer = None
normal_rdvs = None
PSC_e = None
PSC_e_23_4 = None
PSP_i = None
PSC_i = None
PSC_ext = None
PSC_array = None
PSC_sd = None
PSC_th_sd = None
delays_sd = None
delay_th_sd = None
n_neurons_rec_spikes = None
n_neurons_rec_voltage = None
n_neurons = None
neuron_subnet_GIDs = None
spike_detector_GIDs = None
voltmeter_GIDs = None
poisson_GIDs = None
dc_GIDs = None
th_neuron_subnet_GID = None
th_poisson_GID = None
th_spike_detector_GID = None
def GetLocalNodes(subnets):
if type(subnets) is not tuple:
subnets = tuple(subnets)
return nest.GetNodes(subnets, local_only=True)
def GetGlobalNodes(subnets):
if type(subnets) is not tuple:
subnets = tuple(subnets)
return nest.GetNodes(subnets, local_only=False)
def CheckParameters():
global n_layers
global n_pops_per_layer
if neuron_model != 'iaf_psc_exp':
if nest.Rank() == 0:  # warn only once, on the master process
log.warn('Unexpected neuron type: script is tuned to "iaf_psc_exp" neurons.')
# number of layers
n_layers = len(full_scale_n_neurons)
# number of populations in each layer
n_pops_per_layer = np.shape(full_scale_n_neurons)[1]
# if np.shape(conn_probs)[0] != n_layers*n_pops_per_layer or \
# np.shape(conn_probs)[1] != n_layers*n_pops_per_layer:
# raise ValueError('conn_probs_dimensions')
if record_fraction_neurons_spikes:
if frac_rec_spikes > 1:
raise ValueError('frac_rec_spikes')
else:
if n_rec_spikes > area * min(map(min, full_scale_n_neurons)):
raise ValueError('n_rec_spikes')
if record_fraction_neurons_voltage:
if frac_rec_voltage > 1:
raise ValueError('frac_rec_voltage')
else:
if n_rec_voltage > area * min(map(min, full_scale_n_neurons)):
raise ValueError('n_rec_voltage')
def PrepareSimulation():
global normal_rdvs
nest.ResetKernel()
nest.SetKernelStatus({
'resolution': dt,
'total_num_virtual_procs': n_vp,
'communicate_allgather': allgather,
'overwrite_files': overwrite_existing_files,
'rng_seeds': range(master_seed, master_seed + n_vp), # local RNG seeds
'grng_seed': master_seed + n_vp # global RNG seed
})
if run_mode == 'production':
nest.SetKernelStatus({'data_path': output_path})
seed_offset = master_seed + n_vp
normal_rdvs = [np.random.RandomState(s) for s in range(seed_offset, seed_offset + n_vp)]
def DerivedParameters():
global PSC_e
global PSC_e_23_4
global PSP_i
global PSC_i
global PSC_ext
global PSC_array
global PSC_sd
global PSC_th_sd
global delays_sd
global delay_th_sd
global n_neurons_rec_spikes
global n_neurons_rec_voltage
global n_neurons
# compute numbers of neurons for the given surface area
n_neurons = np.array(map(lambda x: map(int, x), full_scale_n_neurons*area))
m = model_params
# compute PSC amplitude from PSP amplitude
# factor for transforming PSP amplitude to PSC amplitude
re = m['tau_m'] / m['tau_syn_ex']
de = m['tau_syn_ex'] - m['tau_m']
ri = m['tau_m'] / m['tau_syn_in']
di = m['tau_syn_in'] - m['tau_m']
PSC_e_over_PSP_e = (((m['C_m'])**(-1)*m['tau_m']*m['tau_syn_ex']/de*(re**(m['tau_m']/de)-re**(m['tau_syn_ex']/de)))**(-1))
PSC_i_over_PSP_i = (((m['C_m'])**(-1)*m['tau_m']*m['tau_syn_in']/di*(ri**(m['tau_m']/di)-ri**(m['tau_syn_in']/di)))**(-1))
PSC_e = PSC_e_over_PSP_e * PSP_e
PSC_e_23_4 = PSC_e_over_PSP_e * PSP_e_23_4
PSP_i = PSP_e * g
PSC_i = PSC_i_over_PSP_i * PSP_i
# PSC amplitude for all external input
PSC_ext = PSC_e_over_PSP_e * PSP_ext
# array of synaptic current amplitudes
PSC_array = np.tile(np.array([PSC_e, PSC_i]), (4,2,4,1))
PSC_array[0, 0, 1, 0] = PSC_e_23_4
# standard deviations of synaptic current amplitudes
PSC_sd = np.array([PSC_e, PSC_i]) * PSC_rel_sd
PSC_th_sd = PSC_ext * PSC_rel_sd
# standard deviations of delays
delays_sd = delays * delay_rel_sd
delay_th_sd = delay_th * delay_th_rel_sd
# numbers of neurons from which to record spikes and membrane potentials
if record_fraction_neurons_spikes:
n_neurons_rec_spikes = frac_rec_spikes*n_neurons
else:
n_neurons_rec_spikes = np.tile(n_rec_spikes, (n_layers, n_pops_per_layer, 1))
if record_fraction_neurons_voltage:
n_neurons_rec_voltage = frac_rec_voltage*n_neurons
else:
n_neurons_rec_voltage = np.tile(n_rec_voltage, (n_layers, n_pops_per_layer, 1))
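# Worked example for the PSP-to-PSC conversion above (a hedged sketch,
# assuming the typical values defined in network_params: tau_m = 10 ms,
# tau_syn_ex = 0.5 ms, C_m = 250 pF, PSP_e = 0.15 mV):
#   re = 10/0.5 = 20, de = 0.5 - 10 = -9.5
#   PSC_e_over_PSP_e = ((1/250.)*10*0.5/(-9.5)*(20**(10/-9.5) - 20**(0.5/-9.5)))**(-1)
#                    ~ 585.4 pA/mV
#   PSC_e = 585.4 * 0.15 ~ 87.8 pA, the excitatory weight reported by
#   Potjans & Diesmann (2014).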
def CreateNetworkNodes():
global neuron_subnet_GIDs
global spike_detector_GIDs
global voltmeter_GIDs
global poisson_GIDs
global dc_GIDs
global th_neuron_subnet_GID
global th_poisson_GID
global th_spike_detector_GID
# create and configure neurons
nest.SetDefaults(neuron_model, model_params)
# arrays of GIDs:
# neuron subnets
neuron_subnet_GIDs = np.tile(0, (n_layers, n_pops_per_layer, 1))
# spike detectors
spike_detector_GIDs = np.tile(0, (n_layers, n_pops_per_layer, 1))
# voltmeters
voltmeter_GIDs = np.tile(0, (n_layers, n_pops_per_layer, 1))
# Poisson generators
poisson_GIDs = np.tile(0, (n_layers, n_pops_per_layer, 1))
# DC generators
dc_GIDs = np.tile(0, (n_layers, n_pops_per_layer, 1))
for layer_index in xrange(n_layers):
nest.ChangeSubnet((0,)) # change to the root node
layer_subnet = nest.Create('subnet')
for population_index in xrange(n_pops_per_layer):
nest.ChangeSubnet(layer_subnet)
population_subnet = nest.Create('subnet')
nest.ChangeSubnet(population_subnet)
# create neurons
neuron_subnet = nest.Create('subnet')
nest.ChangeSubnet(neuron_subnet)
neuron_subnet_GIDs[layer_index][population_index] = neuron_subnet
nest.Create(neuron_model, n_neurons[layer_index][population_index])
# initialize membrane potentials
# Draw each local neuron's initial V_m from the RNG assigned to the
# virtual process that owns the neuron.
for n in GetLocalNodes(neuron_subnet)[0]:
vp = nest.GetStatus((n,), 'vp')[0]
nest.SetStatus((n,), {'V_m': normal_rdvs[vp].normal(Vm0_mean, Vm0_std)})
nest.ChangeSubnet(population_subnet)
# create and configure stimulus and recording devices
device_subnet = nest.Create('subnet')
nest.ChangeSubnet(device_subnet)
this_spike_detector = nest.Create('spike_detector')
# Set spike detector label for filenames. The GID of the spike
# detector and the process number are appended automatically.
nest.SetStatus(this_spike_detector, {
'label': spike_detector_label + '_' + str(layer_index) + '_' + str(population_index),
'to_file': save_cortical_spikes
})
spike_detector_GIDs[layer_index][population_index] = this_spike_detector
this_voltmeter = nest.Create('voltmeter')
nest.SetStatus(this_voltmeter, {
'label': voltmeter_label + '_' + str(layer_index) + '_' + str(population_index),
'to_file': save_voltages
})
voltmeter_GIDs[layer_index][population_index] = this_voltmeter
this_poisson_generator = nest.Create('poisson_generator')
this_K_bg = K_bg[layer_index][population_index]
nest.SetStatus(this_poisson_generator, {
'rate': this_K_bg * bg_rate
})
poisson_GIDs[layer_index][population_index] = this_poisson_generator
this_dc_generator = nest.Create('dc_generator')
nest.SetStatus(this_dc_generator, {
'amplitude': this_K_bg * dc_amplitude
})
dc_GIDs[layer_index][population_index] = this_dc_generator
# create and configure thalamic neurons (parrots) and their Poisson inputs
nest.ChangeSubnet((0,))
if n_thal > 0:
th_subnet = nest.Create('subnet')
nest.ChangeSubnet(th_subnet)
th_neuron_subnet_GID = nest.Create('subnet')
nest.ChangeSubnet(th_neuron_subnet_GID)
nest.Create('parrot_neuron')
nest.ChangeSubnet(th_subnet)
th_device_subnet = nest.Create('subnet')
nest.ChangeSubnet(th_device_subnet)
th_poisson_GID = nest.Create('poisson_generator')
nest.SetStatus(th_poisson_GID, {
'rate': th_rate,
'start': th_start,
'stop': th_start + th_duration
})
if record_thalamic_spikes:
th_spike_detector_GID = nest.Create('spike_detector')
nest.SetStatus(th_spike_detector_GID, {
'label': th_spike_detector_label,
'to_file': save_thalamic_spikes
})
def WriteGIDstoFile():
if run_mode == 'test':
f = GID_filename
if run_mode == 'production':
f = output_path + '/' + GID_filename
with open(f, 'w') as gid_file:
for n in neuron_subnet_GIDs.flatten():
GIDs = nest.GetNodes((n,))
gid_file.write(str(min(GIDs[0])) + '\t' + str(max(GIDs[0])) + '\n')
def ConnectNetworkNodes():
global neuron_subnet_GIDs
global spike_detector_GIDs
global voltmeter_GIDs
global poisson_GIDs
global dc_GIDs
global th_neuron_subnet_GID
global th_poisson_GID
global th_spike_detector_GID
# target layer
for target_layer in xrange(n_layers):
for target_pop in xrange(n_pops_per_layer):
# get neuron IDs
target_nodes = GetGlobalNodes(neuron_subnet_GIDs[target_layer][target_pop])
n_targets = n_neurons[target_layer][target_pop]
full_scale_n_targets = full_scale_n_neurons[target_layer][target_pop]
# source layer
for source_layer in xrange(n_layers):
# source population
for source_pop in xrange(n_pops_per_layer):
### local connections
# get neuron IDs
source_nodes = GetGlobalNodes(neuron_subnet_GIDs[source_layer][source_pop])
n_sources = n_neurons[source_layer][source_pop]
full_scale_n_sources = full_scale_n_neurons[source_layer][source_pop]
# get connection probability
# pick row (target) in conn_probs
r = (target_layer * n_pops_per_layer) + target_pop
# pick column (source) in conn_probs
c = (source_layer * n_pops_per_layer) + source_pop
this_conn = conn_probs[r][c]  # probability for this connection
# Compute numbers of synapses assuming binomial degree
# distributions and allowing for multapses (see Potjans
# and Diesmann 2012 Cereb Cortex Eq. 1)
if preserve_K:
prod = full_scale_n_sources * full_scale_n_targets
n_syn_temp = np.log(1.-this_conn)/np.log((prod-1.)/prod)
this_n_synapses = int((n_syn_temp * n_targets) / full_scale_n_targets)
else:
prod = n_sources * n_targets
this_n_synapses = int(np.log(1.-this_conn)/np.log((prod-1.)/prod))
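# Quick numeric check of Eq. 1 above (a hedged example, not original
# code): for a connection probability of 0.1 between two populations of
# 1000 neurons each, prod = 1e6 and
#   n_syn = log(1 - 0.1) / log((1e6 - 1) / 1e6) ~ 105,361 synapses,
# i.e. roughly 105 synapses per target neuron on average.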
if this_n_synapses > 0:
mean_weight = PSC_array[target_layer][target_pop][source_layer][source_pop]
# Create label for target and source populations
conn_label = 'layers' + str(target_layer) + '_' + 'populations' + str(target_pop) + '-' + \
'layers' + str(source_layer) + '_' + 'populations' + str(source_pop)
# fill the weight dictionary for Connect and insert it into the synapse dictionary
if mean_weight > 0:
weight_dict_exc['mu'] = mean_weight
weight_dict_exc['sigma'] = np.abs(PSC_sd[source_pop])
syn_dict['weight'] = weight_dict_exc
else:
weight_dict_inh['mu'] = mean_weight
weight_dict_inh['sigma'] = np.abs(PSC_sd[source_pop])
syn_dict['weight'] = weight_dict_inh
# fill the delay dictionary for Connect and insert it into the synapse dictionary
delay_dict['mu'] = delays[source_pop]
delay_dict['sigma'] = np.abs(delays_sd[source_pop])
syn_dict['delay'] = delay_dict
# fill the connectivity dictionary with the number of synapses to be used
conn_dict['N'] = this_n_synapses
conn_dict['rule'] = 'fixed_total_number'
# Connect the populations
nest.Connect(source_nodes[0], target_nodes[0], conn_dict, syn_dict)
if n_thal > 0:
# connections from thalamic neurons
source_nodes = GetGlobalNodes(th_neuron_subnet_GID)
this_conn = C_th[target_layer][target_pop]
# Compute numbers of synapses assuming binomial degree
# distributions and allowing for multapses (see Potjans and
# Diesmann 2012 Cereb Cortex Eq. 1)
if preserve_K:
prod = n_thal * full_scale_n_targets
n_syn_temp = np.log(1.-this_conn)/np.log((prod-1.)/prod)
# scale the synapse count to the simulated size, as for local connections
this_n_synapses = int((n_syn_temp * n_targets) / full_scale_n_targets)
else:
prod = n_thal * n_targets
this_n_synapses = int(np.log(1.-this_conn)/np.log((prod-1.)/prod))
if this_n_synapses > 0:
# create label for current target population
th_conn_label = layers[target_layer] + '_' + populations[target_pop]
# fill the weight dictionary for Connect
weight_dict_exc['mu'] = PSC_ext
weight_dict_exc['sigma'] = np.abs(PSC_th_sd)
# insert the weight dictionary into the synapse dictionary
syn_dict['weight'] = weight_dict_exc
# fill the delay dictionary for Connect
delay_dict['mu'] = delay_th
delay_dict['sigma'] = np.abs(delay_th_sd)
# insert the delay dictionary into the synapse dictionary
syn_dict['delay'] = delay_dict
conn_dict['N'] = this_n_synapses
conn_dict['rule'] = 'fixed_total_number'
nest.Connect(source_nodes[0], target_nodes[0], conn_dict, syn_dict)
# Connect devices
# record from a continuous range of IDs
# (appropriate for networks without topology)
# print tuple(spike_detector_GIDs[target_layer][target_pop])
# print target_nodes[:int(n_neurons_rec_spikes[target_layer][target_pop])][0]
nest.Connect(target_nodes[0][:int(n_neurons_rec_spikes[target_layer][target_pop])],
tuple(spike_detector_GIDs[target_layer][target_pop]),
'all_to_all')
nest.Connect(tuple(voltmeter_GIDs[target_layer][target_pop]),
target_nodes[0][:int(n_neurons_rec_voltage[target_layer][target_pop])],
'all_to_all')
nest.Connect(tuple(poisson_GIDs[target_layer][target_pop]),
target_nodes[0],
'all_to_all',
{'weight': PSC_ext, 'delay': delays[0]})
nest.Connect(tuple(dc_GIDs[target_layer][target_pop]),
target_nodes[0],
'all_to_all')
if n_thal > 0:
# Connect thalamic poisson_generator to thalamic neurons (parrots)
nest.Connect(th_poisson_GID, GetGlobalNodes(th_neuron_subnet_GID)[0])
if record_thalamic_spikes and n_thal > 0:
# Connect thalamic neurons to spike detector
nest.Connect(GetGlobalNodes(th_neuron_subnet_GID)[0], th_spike_detector_GID)
if __name__ == '__main__':
print "------------------------------------------------------"
print "Starting simulation"
print "------------------------------------------------------"
CheckParameters()
PrepareSimulation()
DerivedParameters()
CreateNetworkNodes()
WriteGIDstoFile()
ConnectNetworkNodes()
nest.Simulate(t_sim)
# for s in np.array(spike_detector_GIDs).flatten():
# nest.raster_plot.from_device((s,))
# pylab.show()
| synergetics/potjans_2014 | microcircuit.py | Python | mit | 15,865 | ["NEURON"] | 5eca087cc4484bd21cc43a5c65c3ff565babfeaad73e16a3f23d76efdcaf5de1 |
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2016 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
"""CP2K atomic wavefunctions"""
import numpy as np
from horton.gbasis.iobas import str_to_shell_types
from horton.gbasis.cext import GOBasis, fac2
__all__ = ['load_atom_cp2k']
def _get_cp2k_norm_corrections(l, alphas):
"""Compute the corrections for the normalization of the basis functions.
This correction is needed because the CP2K atom code works with non-normalized basis
functions. HORTON assumes Gaussian primitives are always normalized.
Parameters
----------
l : int
The angular momentum of the (pure) basis function. (s=0, p=1, ...)
alphas : float or np.ndarray
The exponent or exponents of the Gaussian primitives for which the correction
is to be computed.
Returns
-------
corrections : float or np.ndarray
The scale factor for the expansion coefficients of the wavefunction in
terms of primitive Gaussians. The inverse of this correction can be
applied to the contraction coefficients.
"""
expzet = 0.25*(2*l + 3)
prefac = np.sqrt(np.sqrt(np.pi)/2.0**(l + 2)*fac2(2*l + 1))
zeta = 2.0*alphas
return zeta**expzet/prefac
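# Small usage sketch (not part of the original module): for an s shell
# (l = 0) with two primitives, the correction is an array with one scale
# factor per exponent; contraction coefficients read from CP2K are divided
# by it, as done in _read_cp2k_contracted_obasis below.
#
#   import numpy as np
#   corrections = _get_cp2k_norm_corrections(0, np.array([0.5, 2.0]))
#   normalized_coeffs = np.array([0.3, 0.7]) / corrections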
def _read_cp2k_contracted_obasis(f):
"""Read a contracted basis set from an open CP2K ATOM output file.
Parameters
----------
f : file
An open readable file object.
Returns
-------
obasis : GOBasis
The orbital basis read from the file.
"""
# Load the relevant data from the file
basis_desc = []
for line in f:
if line.startswith(' *******************'):
break
elif line[3:12] == 'Functions':
shell_type = str_to_shell_types(line[1:2], pure=True)[0]
a = [] # exponents (alpha)
c = [] # contraction coefficients
basis_desc.append((shell_type, a, c))
else:
values = [float(w) for w in line.split()]
a.append(values[0]) # one exponent per line
c.append(values[1:]) # many contraction coefficients per line
# Convert the basis into HORTON format
shell_map = []
shell_types = []
nprims = []
alphas = []
con_coeffs = []
for shell_type, a, c in basis_desc:
# get correction to contraction coefficients. CP2K uses different normalization
# conventions.
corrections = _get_cp2k_norm_corrections(abs(shell_type), np.array(a))
c = np.array(c)/corrections.reshape(-1, 1)
# fill in arrays
for col in c.T:
shell_map.append(0)
shell_types.append(shell_type)
nprims.append(len(col))
alphas.extend(a)
con_coeffs.extend(col)
# Create the basis object
coordinates = np.zeros((1, 3))
shell_map = np.array(shell_map)
nprims = np.array(nprims)
shell_types = np.array(shell_types)
alphas = np.array(alphas)
con_coeffs = np.array(con_coeffs)
obasis = GOBasis(coordinates, shell_map, nprims, shell_types, alphas, con_coeffs)
return obasis
def _read_cp2k_uncontracted_obasis(f):
"""Read an uncontracted basis set from an open CP2K ATOM output file.
Parameters
----------
f : file
An open readable file object.
Returns
-------
obasis : GOBasis
The orbital basis read from the file.
"""
# Load the relevant data from the file
basis_desc = []
shell_type = None
for line in f:
if line.startswith(' *******************'):
break
elif line[3:13] == 'Exponents:':
shell_type = str_to_shell_types(line[1:2], pure=True)[0]
words = line.split()
if len(words) >= 2:
# read the exponent
alpha = float(words[-1])
basis_desc.append((shell_type, alpha))
# Convert the basis into HORTON format
shell_map = []
shell_types = []
nprims = []
alphas = []
con_coeffs = []
# fill in arrays
for shell_type, alpha in basis_desc:
correction = _get_cp2k_norm_corrections(abs(shell_type), alpha)
shell_map.append(0)
shell_types.append(shell_type)
nprims.append(1)
alphas.append(alpha)
con_coeffs.append(1.0 / correction)
# Create the basis object
centers = np.zeros((1, 3))
shell_map = np.array(shell_map)
nprims = np.array(nprims)
shell_types = np.array(shell_types)
alphas = np.array(alphas)
con_coeffs = np.array(con_coeffs)
obasis = GOBasis(centers, shell_map, nprims, shell_types, alphas, con_coeffs)
return obasis
def _read_cp2k_obasis(f):
"""Read a basis set from an open CP2K ATOM output file."""
f.next() # Skip empty line
line = f.next() # Check for contracted versus uncontracted
if line == ' ********************** Contracted Gaussian Type Orbitals '\
'**********************\n':
return _read_cp2k_contracted_obasis(f)
elif line == ' ********************* Uncontracted Gaussian Type Orbitals '\
'*********************\n':
return _read_cp2k_uncontracted_obasis(f)
else:
raise IOError('Could not find basis set in CP2K ATOM output.')
def _read_cp2k_occupations_energies(f, restricted):
"""Read orbital occupation numbers and energies from an open CP2K ATOM output file.
Parameters
----------
f : file
An open readable file object.
restricted : bool
Is wavefunction restricted or unrestricted?
Returns
-------
oe_alpha, oe_beta : list
A list with orbital properties. Each element is a tuple with the
following info: (angular_momentum l, spin component: 'alpha' or
'beta', occupation number, orbital energy).
"""
oe_alpha = []
oe_beta = []
empty = 0
while empty < 2:
line = f.next()
words = line.split()
if len(words) == 0:
empty += 1
continue
empty = 0
s = int(words[0])
l = int(words[2 - restricted])
occ = float(words[3 - restricted])
ener = float(words[4 - restricted])
if restricted or words[1] == 'alpha':
oe_alpha.append((l, s, occ, ener))
else:
oe_beta.append((l, s, occ, ener))
return oe_alpha, oe_beta
def _read_cp2k_orbital_coeffs(f, oe):
"""Read the expansion coefficients of the orbital from an open CP2K ATOM output.
Parameters
----------
f : file
An open readable file object.
oe : list
The orbital occupation numbers and energies read with
``_read_cp2k_occupations_energies``.
Returns
-------
result : dict
Key is an (l, s) pair and value is an array with orbital coefficients.
"""
coeffs = {}
f.next()
while len(coeffs) < len(oe):
line = f.next()
assert line.startswith(" ORBITAL L =")
words = line.split()
l = int(words[3])
s = int(words[6])
c = []
while True:
line = f.next()
if len(line.strip()) == 0:
break
c.append(float(line))
coeffs[(l, s)] = np.array(c)
return coeffs
def _get_norb_nel(oe):
"""Return number of orbitals and electrons.
Parameters
----------
oe : list
The orbital occupation numbers and energies read with
``_read_cp2k_occupations_energies``.
"""
norb = 0
nel = 0
for row in oe:
norb += 2*row[0] + 1
nel += row[2]
return norb, nel
def _fill_exp(exp, oe, coeffs, shell_types, restricted):
"""Fill in orbital coefficients, energies and occupation numbers in ``exp``.
Parameters
----------
exp : DenseExpansion
An object to represent the orbitals
oe : list
The orbital occupation numbers and energies read with
``_read_cp2k_occupations_energies``.
coeffs : dict
The orbital coefficients read with ``_read_cp2k_orbital_coeffs``.
shell_types : np.ndarray
The array with shell types of the GOBasis instance.
restricted : bool
Is wavefunction restricted or unrestricted?
"""
# Find the offsets for each angular momentum
offset = 0
offsets = []
ls = abs(shell_types)
for l in sorted(set(ls)):
offsets.append(offset)
offset += (2*l + 1)*(l == ls).sum()
del offset
# Fill in the coefficients
iorb = 0
for l, s, occ, ener in oe:
cs = coeffs.get((l, s))
stride = 2*l + 1
for m in xrange(-l, l+1):
im = m + l
exp.energies[iorb] = ener
exp.occupations[iorb] = occ/float((restricted + 1)*(2*l + 1))
for ic in xrange(len(cs)):
exp.coeffs[offsets[l] + stride*ic + im, iorb] = cs[ic]
iorb += 1
def load_atom_cp2k(filename, lf):
"""Load data from a CP2K ATOM computation.
Parameters
    ----------
    filename : str
        The name of the CP2K ATOM output file.
lf : LinalgFactory
A linear-algebra factory.
Returns
-------
results : dict
Contains: ``obasis``, ``exp_alpha``, ``coordinates``, ``numbers``,
``energy``, ``pseudo_numbers``. May contain: ``exp_beta``.
Notes
-----
This function assumes that the following subsections are present in the CP2K
ATOM input file, in the section ``ATOM%PRINT``:
.. code-block:: text
&PRINT
&POTENTIAL
&END POTENTIAL
&BASIS_SET
&END BASIS_SET
&ORBITALS
&END ORBITALS
&END PRINT
"""
with open(filename) as f:
# Find the element number
number = None
for line in f:
if line.startswith(' Atomic Energy Calculation'):
number = int(line[-5:-1])
break
if number is None:
raise IOError('Could not find atomic number in CP2K ATOM output: %s.' % filename)
# Go to the all-electron basis set and read it.
for line in f:
if line.startswith(' All Electron Basis'):
break
ae_obasis = _read_cp2k_obasis(f)
# Go to the pseudo basis set and read it.
for line in f:
if line.startswith(' Pseudopotential Basis'):
break
pp_obasis = _read_cp2k_obasis(f)
# Search for (un)restricted
restricted = None
for line in f:
if line.startswith(' METHOD |'):
if 'U' in line:
restricted = False
break
elif 'R' in line:
restricted = True
break
# Search for the core charge (pseudo number)
pseudo_number = None
for line in f:
if line.startswith(' Core Charge'):
pseudo_number = float(line[70:])
assert pseudo_number == int(pseudo_number)
break
elif line.startswith(' Electronic structure'):
pseudo_number = float(number)
break
if pseudo_number is None:
raise IOError('Could not find effective core charge in CP2K ATOM output:'
' %s' % filename)
# Select the correct basis
if pseudo_number == number:
obasis = ae_obasis
else:
obasis = pp_obasis
if lf.default_nbasis is not None and lf.default_nbasis != obasis.nbasis:
raise IOError('The value of lf.default_nbasis does not match nbasis '
'reported in CP2K ATOM output: %s' % filename)
lf.default_nbasis = obasis.nbasis
# Search for energy
for line in f:
if line.startswith(' Energy components [Hartree] Total Energy ::'):
energy = float(line[60:])
break
# Read orbital energies and occupations
for line in f:
if line.startswith(' Orbital energies'):
break
f.next()
oe_alpha, oe_beta = _read_cp2k_occupations_energies(f, restricted)
# Read orbital expansion coefficients
line = f.next()
if (line != " Atomic orbital expansion coefficients [Alpha]\n") and \
(line != " Atomic orbital expansion coefficients []\n"):
raise IOError('Could not find orbital coefficients in CP2K ATOM output: '
'%s' % filename)
coeffs_alpha = _read_cp2k_orbital_coeffs(f, oe_alpha)
if not restricted:
line = f.next()
if line != " Atomic orbital expansion coefficients [Beta]\n":
raise IOError('Could not find beta orbital coefficient in CP2K ATOM '
'output: %s' % filename)
coeffs_beta = _read_cp2k_orbital_coeffs(f, oe_beta)
# Turn orbital data into a HORTON orbital expansions
if restricted:
norb, nel = _get_norb_nel(oe_alpha)
assert nel % 2 == 0
exp_alpha = lf.create_expansion(obasis.nbasis, norb)
exp_beta = None
_fill_exp(exp_alpha, oe_alpha, coeffs_alpha, obasis.shell_types, restricted)
else:
norb_alpha = _get_norb_nel(oe_alpha)[0]
norb_beta = _get_norb_nel(oe_beta)[0]
assert norb_alpha == norb_beta
exp_alpha = lf.create_expansion(obasis.nbasis, norb_alpha)
exp_beta = lf.create_expansion(obasis.nbasis, norb_beta)
_fill_exp(exp_alpha, oe_alpha, coeffs_alpha, obasis.shell_types, restricted)
_fill_exp(exp_beta, oe_beta, coeffs_beta, obasis.shell_types, restricted)
result = {
'obasis': obasis,
'lf': lf,
'exp_alpha': exp_alpha,
'coordinates': obasis.centers,
'numbers': np.array([number]),
'energy': energy,
'pseudo_numbers': np.array([pseudo_number]),
}
if exp_beta is not None:
result['exp_beta'] = exp_beta
return result
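if __name__ == '__main__':
    # Minimal usage sketch: load a single-atom CP2K ATOM output file and print a
    # few of the returned fields. The file name 'atom.cp2k.out' and the
    # DenseLinalgFactory import are illustrative assumptions only.
    from horton import DenseLinalgFactory
    lf = DenseLinalgFactory()
    data = load_atom_cp2k('atom.cp2k.out', lf)
    print 'total energy [Hartree]:', data['energy']
    print 'alpha orbital energies:', data['exp_alpha'].energies
    print 'alpha occupations:     ', data['exp_alpha'].occupations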
|
crisely09/horton
|
horton/io/cp2k.py
|
Python
|
gpl-3.0
| 14,969
|
[
"CP2K",
"Gaussian"
] |
b46365eaefe5871acc9852c9d2b97f5e4fe916adfbfc3065169d50d27323e7b5
|
from typing import Any, Optional, Tuple
import numpy as np
import pytest
import tensorflow as tf
import gpflow
from gpflow import set_trainable
from gpflow.config import default_float
from gpflow.kernels import Kernel, SquaredExponential
from gpflow.likelihoods import Gaussian, Likelihood
from gpflow.models import GPR, SGPR, SVGP, VGP, BayesianModel
from gpflow.optimizers import NaturalGradient
class Setup:
N, M, D = 4, 3, 2
likelihood_variance = 0.1
rng = np.random.RandomState(42)
X = rng.randn(N, D)
Y = rng.randn(N, 1)
Z = rng.randn(M, D)
@pytest.fixture
def data() -> Tuple[tf.Tensor, tf.Tensor]:
X = tf.convert_to_tensor(Setup.X, dtype=default_float())
Y = tf.convert_to_tensor(Setup.Y, dtype=default_float())
return (X, Y)
@pytest.fixture
def inducing_variable() -> tf.Tensor:
Z = tf.convert_to_tensor(Setup.Z, dtype=default_float())
return Z
@pytest.fixture
def kernel() -> Kernel:
return SquaredExponential()
@pytest.fixture
def likelihood() -> Likelihood:
return Gaussian(variance=Setup.likelihood_variance)
@pytest.fixture
def gpr_and_vgp(
data: Tuple[tf.Tensor, tf.Tensor], kernel: Kernel, likelihood: Likelihood
) -> Tuple[GPR, VGP]:
vgp = VGP(data, kernel, likelihood)
gpr = GPR(data, kernel)
gpr.likelihood.variance.assign(likelihood.variance)
set_trainable(vgp, False)
set_trainable(vgp.q_mu, True)
set_trainable(vgp.q_sqrt, True)
return gpr, vgp
@pytest.fixture
def sgpr_and_svgp(
data: Tuple[tf.Tensor, tf.Tensor],
inducing_variable: tf.Tensor,
kernel: Kernel,
likelihood: Likelihood,
) -> Tuple[SGPR, SVGP]:
svgp = SVGP(kernel, likelihood, inducing_variable)
sgpr = SGPR(data, kernel, inducing_variable=inducing_variable)
sgpr.likelihood.variance.assign(Setup.likelihood_variance)
set_trainable(svgp, False)
set_trainable(svgp.q_mu, True)
set_trainable(svgp.q_sqrt, True)
return sgpr, svgp
def assert_different(value1: tf.Tensor, value2: tf.Tensor, rtol: float = 0.07) -> None:
""" assert relative difference > rtol """
relative_difference = (value1 - value2) / (value1 + value2)
assert np.abs(relative_difference) > rtol
def assert_gpr_vs_vgp(
m1: BayesianModel,
m2: BayesianModel,
gamma: float = 1.0,
maxiter: int = 1,
xi_transform: Optional[gpflow.optimizers.natgrad.XiTransform] = None,
) -> None:
assert maxiter >= 1
m1_ll_before = m1.training_loss()
m2_ll_before = m2.training_loss()
assert_different(m2_ll_before, m1_ll_before)
params: Tuple[Any, ...] = (m2.q_mu, m2.q_sqrt)
if xi_transform is not None:
params += (xi_transform,)
opt = NaturalGradient(gamma)
@tf.function
def minimize_step() -> None:
opt.minimize(m2.training_loss, var_list=[params]) # type: ignore
for _ in range(maxiter):
minimize_step()
m1_ll_after = m1.training_loss()
m2_ll_after = m2.training_loss()
np.testing.assert_allclose(m1_ll_after, m2_ll_after, atol=1e-4)
def assert_sgpr_vs_svgp(
m1: BayesianModel,
m2: BayesianModel,
) -> None:
data = m1.data
m1_ll_before = m1.training_loss()
m2_ll_before = m2.training_loss(data)
assert_different(m2_ll_before, m1_ll_before)
params = [(m2.q_mu, m2.q_sqrt)]
opt = NaturalGradient(1.0)
opt.minimize(m2.training_loss_closure(data), var_list=params)
m1_ll_after = m1.training_loss()
m2_ll_after = m2.training_loss(data)
np.testing.assert_allclose(m1_ll_after, m2_ll_after, atol=1e-4)
def test_vgp_vs_gpr(gpr_and_vgp: Tuple[GPR, VGP]) -> None:
"""
With a Gaussian likelihood the Gaussian variational (VGP) model should be equivalent to the
exact regression model (GPR) after a single nat grad step of size 1
"""
gpr, vgp = gpr_and_vgp
assert_gpr_vs_vgp(gpr, vgp)
def test_small_q_sqrt_handled_correctly(
gpr_and_vgp: Tuple[GPR, VGP], data: Tuple[tf.Tensor, tf.Tensor]
) -> None:
"""
This is an extra test to make sure things still work when q_sqrt is small.
This was breaking (#767)
"""
gpr, vgp = gpr_and_vgp
vgp.q_mu.assign(np.random.randn(data[0].shape[0], 1))
vgp.q_sqrt.assign(np.eye(data[0].shape[0])[None, :, :] * 1e-3)
assert_gpr_vs_vgp(gpr, vgp)
def test_svgp_vs_sgpr(sgpr_and_svgp: Tuple[SGPR, SVGP]) -> None:
"""
With a Gaussian likelihood the sparse Gaussian variational (SVGP) model
    should be equivalent to the analytically optimal sparse regression model (SGPR)
after a single nat grad step of size 1.0
"""
sgpr, svgp = sgpr_and_svgp
assert_sgpr_vs_svgp(sgpr, svgp)
class XiEta(gpflow.optimizers.XiTransform):
@staticmethod
def meanvarsqrt_to_xi(mean: tf.Tensor, varsqrt: tf.Tensor) -> tf.Tensor:
return gpflow.optimizers.natgrad.meanvarsqrt_to_expectation(mean, varsqrt)
@staticmethod
def xi_to_meanvarsqrt(xi1: tf.Tensor, xi2: tf.Tensor) -> tf.Tensor:
return gpflow.optimizers.natgrad.expectation_to_meanvarsqrt(xi1, xi2)
@staticmethod
def naturals_to_xi(nat1: tf.Tensor, nat2: tf.Tensor) -> tf.Tensor:
return gpflow.optimizers.natgrad.natural_to_expectation(nat1, nat2)
@pytest.mark.parametrize("xi_transform", [gpflow.optimizers.XiSqrtMeanVar(), XiEta()])
def test_xi_transform_vgp_vs_gpr(
gpr_and_vgp: Tuple[GPR, VGP], xi_transform: gpflow.optimizers.XiTransform
) -> None:
"""
With other transforms the solution is not given in a single step, but it should still give the same answer
after a number of smaller steps.
"""
gpr, vgp = gpr_and_vgp
assert_gpr_vs_vgp(gpr, vgp, gamma=0.01, xi_transform=xi_transform, maxiter=500)
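def _single_natgrad_step_sketch() -> None:
    """A minimal sketch (not a test): one natural-gradient step of size 1.0 on a
    VGP with a Gaussian likelihood, mirroring ``assert_gpr_vs_vgp`` above. The
    data shapes, kernel and likelihood variance are arbitrary assumptions."""
    X = np.random.randn(5, 2)
    Y = np.random.randn(5, 1)
    vgp = VGP((X, Y), SquaredExponential(), Gaussian(variance=0.1))
    # Only the variational parameters receive the natural-gradient update.
    set_trainable(vgp, False)
    set_trainable(vgp.q_mu, True)
    set_trainable(vgp.q_sqrt, True)
    opt = NaturalGradient(1.0)  # step size gamma = 1.0
    opt.minimize(vgp.training_loss, var_list=[(vgp.q_mu, vgp.q_sqrt)])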
|
GPflow/GPflow
|
tests/gpflow/optimizers/test_natural_gradient.py
|
Python
|
apache-2.0
| 5,685
|
[
"Gaussian"
] |
d5a86f6a9de252258adec9d634fa8f37ce93273ff42da05784eff23322848b6f
|
import sys,os,unittest
from ctypes import *
from lammps import lammps, LMP_STYLE_GLOBAL, LMP_TYPE_VECTOR
try:
import numpy
NUMPY_INSTALLED = True
except ImportError:
NUMPY_INSTALLED = False
# add timestep dependent force
def callback_one(lmp, ntimestep, nlocal, tag, x, f):
lmp.fix_external_set_virial_global("ext",[1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
for i in range(nlocal):
f[i][0] = float(ntimestep)
f[i][1] = float(ntimestep)
f[i][2] = float(ntimestep)
if ntimestep < 10:
lmp.fix_external_set_energy_global("ext", 0.5)
lmp.fix_external_set_vector("ext", 1, ntimestep)
lmp.fix_external_set_vector("ext", 3, 1.0)
lmp.fix_external_set_vector("ext", 4, -0.25)
else:
lmp.fix_external_set_energy_global("ext", 1.0)
lmp.fix_external_set_vector("ext", 2, ntimestep)
lmp.fix_external_set_vector("ext", 5, -1.0)
lmp.fix_external_set_vector("ext", 6, 0.25)
eatom = [0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7]
vatom = [ [0.1,0.0,0.0,0.0,0.0,0.0],
[0.0,0.2,0.0,0.0,0.0,0.0],
[0.0,0.0,0.3,0.0,0.0,0.0],
[0.0,0.0,0.0,0.4,0.0,0.0],
[0.0,0.0,0.0,0.0,0.5,0.0],
[0.0,0.0,0.0,0.0,0.0,0.6],
[0.0,0.0,0.0,0.0,-7.0,0.0],
[0.0,-8.0,0.0,0.0,0.0,0.0] ]
if ntimestep < 5:
lmp.fix_external_set_energy_peratom("ext",eatom)
lmp.fix_external_set_virial_peratom("ext",vatom)
else:
import numpy as np
eng = np.array(eatom)
vir = np.array(vatom)
lmp.numpy.fix_external_set_energy_peratom("ext",eng)
lmp.numpy.fix_external_set_virial_peratom("ext",vir)
# ------------------------------------------------------------------------
class PythonExternal(unittest.TestCase):
@unittest.skipIf(not NUMPY_INSTALLED, "NumPy is not available")
def testExternalCallback(self):
"""Test fix external from Python with pf/callback"""
machine=None
if 'LAMMPS_MACHINE_NAME' in os.environ:
machine=os.environ['LAMMPS_MACHINE_NAME']
lmp=lammps(name=machine, cmdargs=['-nocite', '-log','none', '-echo', 'screen'])
# a few commands to set up simple system
basic_system="""lattice sc 1.0
region box block -1 1 -1 1 -1 1
create_box 1 box
create_atoms 1 box
mass 1 1.0
pair_style zero 0.1
pair_coeff 1 1
velocity all set 0.1 0.0 -0.1
thermo_style custom step temp pe ke etotal press
thermo 5
fix 1 all nve
fix ext all external pf/callback 5 1
compute eatm all pe/atom fix
compute vatm all stress/atom NULL fix
compute sum all reduce sum c_eatm c_vatm[*]
thermo_style custom step temp pe ke etotal press c_sum[*]
fix_modify ext energy yes virial yes
"""
lmp.commands_string(basic_system)
        lmp.fix_external_set_vector_length("ext",6)
lmp.set_fix_external_callback("ext",callback_one,lmp)
# check setting per-atom data with python lists
lmp.command("run 0 post no")
reduce = lmp.extract_compute("sum", LMP_STYLE_GLOBAL, LMP_TYPE_VECTOR)
self.assertAlmostEqual(reduce[0],2.8,14)
self.assertAlmostEqual(reduce[1],-0.1,14)
self.assertAlmostEqual(reduce[2],7.8,14)
self.assertAlmostEqual(reduce[3],-0.3,14)
self.assertAlmostEqual(reduce[4],-0.4,14)
self.assertAlmostEqual(reduce[5],6.5,14)
self.assertAlmostEqual(reduce[6],-0.6,14)
lmp.command("run 10 post no")
self.assertAlmostEqual(lmp.get_thermo("temp"),1.0/30.0,14)
self.assertAlmostEqual(lmp.get_thermo("pe"),1.0/8.0,14)
self.assertAlmostEqual(lmp.get_thermo("press"),0.15416666666666667,14)
# check setting per-atom data numpy arrays
reduce = lmp.extract_compute("sum", LMP_STYLE_GLOBAL, LMP_TYPE_VECTOR)
self.assertAlmostEqual(reduce[0],2.8,14)
self.assertAlmostEqual(reduce[1],-0.1,14)
self.assertAlmostEqual(reduce[2],7.8,14)
self.assertAlmostEqual(reduce[3],-0.3,14)
self.assertAlmostEqual(reduce[4],-0.4,14)
self.assertAlmostEqual(reduce[5],6.5,14)
self.assertAlmostEqual(reduce[6],-0.6,14)
val = 0.0
for i in range(0,6):
val += lmp.extract_fix("ext",LMP_STYLE_GLOBAL,LMP_TYPE_VECTOR,nrow=i)
self.assertAlmostEqual(val,15.0,14)
def testExternalArray(self):
"""Test fix external from Python with pf/array"""
machine=None
if 'LAMMPS_MACHINE_NAME' in os.environ:
machine=os.environ['LAMMPS_MACHINE_NAME']
lmp=lammps(name=machine, cmdargs=['-nocite', '-log','none', '-echo', 'screen'])
# a few commands to set up simple system
basic_system="""lattice sc 1.0
region box block -1 1 -1 1 -1 1
create_box 1 box
create_atoms 1 box
mass 1 1.0
pair_style zero 0.1
pair_coeff 1 1
velocity all set 0.1 0.0 -0.1
fix 1 all nve
fix ext all external pf/array 1
fix_modify ext energy yes virial yes
thermo_style custom step temp pe ke press
thermo 5
"""
lmp.commands_string(basic_system)
        force = lmp.fix_external_get_force("ext")
        nlocal = lmp.extract_setting("nlocal")
for i in range(nlocal):
force[i][0] = 0.0
force[i][1] = 0.0
force[i][2] = 0.0
lmp.fix_external_set_energy_global("ext", 0.5)
lmp.fix_external_set_virial_global("ext",[0.5, 0.5, 0.5, 0.0, 0.0, 0.0])
lmp.command("run 5 post no")
self.assertAlmostEqual(lmp.get_thermo("temp"),4.0/525.0,14)
self.assertAlmostEqual(lmp.get_thermo("pe"),1.0/16.0,14)
self.assertAlmostEqual(lmp.get_thermo("press"),0.06916666666666667,14)
if NUMPY_INSTALLED:
npforce = lmp.numpy.fix_external_get_force("ext")
self.assertEqual(len(npforce),8)
self.assertEqual(len(npforce[0]),3)
self.assertEqual(npforce[1][1],0.0)
        force = lmp.fix_external_get_force("ext")
        nlocal = lmp.extract_setting("nlocal")
for i in range(nlocal):
force[i][0] = 6.0
force[i][1] = 6.0
force[i][2] = 6.0
lmp.fix_external_set_energy_global("ext", 1.0)
lmp.fix_external_set_virial_global("ext",[1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
lmp.command("run 5 post no")
self.assertAlmostEqual(lmp.get_thermo("temp"),1.0/30.0,14)
self.assertAlmostEqual(lmp.get_thermo("pe"),1.0/8.0,14)
self.assertAlmostEqual(lmp.get_thermo("press"),0.15416666666666667,14)
if NUMPY_INSTALLED:
npforce = lmp.numpy.fix_external_get_force("ext")
self.assertEqual(npforce[0][0],6.0)
self.assertEqual(npforce[3][1],6.0)
self.assertEqual(npforce[7][2],6.0)
##############################
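# Both test cases pick up a suffixed LAMMPS build through the
# LAMMPS_MACHINE_NAME environment variable, so a typical run looks like
# (the machine name 'mpi' is only an illustrative value):
#   LAMMPS_MACHINE_NAME=mpi python python-fix-external.py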
if __name__ == "__main__":
unittest.main()
|
akohlmey/lammps
|
unittest/python/python-fix-external.py
|
Python
|
gpl-2.0
| 7,469
|
[
"LAMMPS"
] |
2fe6fef824ebee315776deb206bbd8d3a664eab4928d278ec8d2e74040220af8
|
'''
* Copyright (C) 2017 Music Technology Group - Universitat Pompeu Fabra
*
* This file is part of jingjuSingingPhraseMatching
*
* pypYIN is free software: you can redistribute it and/or modify it under
* the terms of the GNU Affero General Public License as published by the Free
* Software Foundation (FSF), either version 3 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the Affero GNU General Public License
* version 3 along with this program. If not, see http://www.gnu.org/licenses/
*
* If you have any problem about this python version code, please contact: Rong Gong
* rong.gong@upf.edu
*
*
* If you want to refer this code, please use this article:
*
'''
"""
This .py contains the processing pipeline for two methods: HMM and HSMM.
Post-processor duration modelling is not included here but in lyricsRecognizer/resultAnalysis.py,
because it is a post-processing step applied after the state sequence decoding has been done.
The generalProcess function takes the incoming query audio files as input and segments them into phrases;
the phraseProcess function takes over these segmented phrases and does the matching.
There are some obsolete methods here, 'obsMatrix' and 'candidateSynthesizeData'.
These are previous experiments with different matching methods which didn't work.
The output ranking results are stored in path_json_dict_query_phrases.
"""
from os import path
import sys
import json
import pickle
import numpy as np
currentPath = path.dirname(path.abspath(__file__))
lyricsRecognizerPath = path.join(currentPath, 'lyricsRecognizer')
sys.path.append(lyricsRecognizerPath)
# from general.trainTestSeparation import getRecordingNamesSimi
from general.textgridParser import syllableTextgridExtraction
from general.parameters import list_N_frames,hopsize_phoneticSimilarity
from targetAudioProcessing import gmmPhoModelLoad,processFeature,obsMatrixPho
from general.filePath import *
from ParallelLRHMM import ParallelLRHMM
from makeNet import makeNet
import pyximport
pyximport.install(reload_support=True,
setup_args={'include_dirs': np.get_include()})
from ParallelLRHSMM import ParallelLRHSMM
from makeHSMMNet import makeHSMMNet
from scoreDictFilter import runDictScoreRankNFilter
from scipy.io import wavfile
# from scoreManip import scoreMSynthesize,scoreMSynthesizePho,mfccSynthesizeFromData,mfccSynthesizeFromGMM,plotMFCC
# from tailsMFCCTrain import loadMFCCTrain
# from general.dtwSankalp import dtwNd
# from targetAudioProcessing import gmmModelLoad,obsMPlot,obsMPlotPho,obsMatrix
# from fastdtw import fastdtw
# from scipy.spatial.distance import euclidean
# from operator import itemgetter
# from resultAnalysis import evalOnSinglefilePostProcessor,sumLogDurProbs,calculateMetrics
# load score json, pre-computed
with open(path.join(currentPath, '../melodicSimilarity/scores.json'),'r') as f:
dict_score = json.load(f)
dist_measures = ['euclideanDist','sankalpNdDTW121']
dm = dist_measures[1]
# load textgrid, wav paths, load melodic similarity results
if class_name == 'danAll':
textgridDataDir = textgrid_path_dan
wavDataDir = wav_path_dan
path_melodic_similarity_results = path.join(currentPath, '..', 'melodicSimilarity', 'results',
'danAll_900_0.7_pyin_roleTypeWeight')
elif class_name == 'laosheng':
textgridDataDir = textgrid_path_laosheng
wavDataDir = wav_path_laosheng
path_melodic_similarity_results = path.join(currentPath,'..','melodicSimilarity','results','900_0.7_pyin')
def phraseProcess(filename,
i,
line_list,
wavData,
gmmModel,
kerasModel,
sampleRate,
method,
proportionality_std=0):
# sub module to process each phrase
line = line_list[0]
start_frame = int(round(line[0] * sampleRate))
end_frame = int(round(line[1] * sampleRate))
line_lyrics = line[2]
wav_line = wavData[start_frame:end_frame]
wavfile.write('temp.wav', sampleRate, wav_line)
# choose feature type as mfcc or mfccBands for GMM or DNN
if am == 'gmm':
mfcc_target = processFeature('temp.wav', feature_type='mfcc')
elif am == 'cnn':
mfcc_target = processFeature('temp.wav', feature_type='mfccBands2D')
N_frame = mfcc_target.shape[0]
duration_target = (N_frame * hopsize_phoneticSimilarity) / float(sampleRate)
    # only examine the first N ranking results of melodic similarity
query_phrase_name = filename + '_' + str(i)
dict_score_100 = runDictScoreRankNFilter(dict_score,
path_melodic_similarity_results,
query_phrase_name,
N=100)
# print 'dict_score_100 len:',len(dict_score_100)
if method == 'obsMatrix':
# not used
obsM = obsMatrixPho(mfcc_target, gmmModel) # pho gmm
obsM = np.exp(obsM)
obsM = obsM / np.sum(obsM, axis=0)
print obsM
elif method == 'candidateSynthesizeData':
# not used
# choose the template which has the nearest length of the target mfcc
index_template = np.argmin(np.abs((np.array(list_N_frames) - N_frame)))
output = open('syllable_mfcc_templates/dic_mfcc_synthesized_' + str(list_N_frames[index_template]) + '.pkl', 'r')
dic_mfcc_synthesized = pickle.load(output)
output.close()
elif method == 'lyricsRecognizerHMM':
# build matching network from the score dataset
phrases, \
lyrics_net, \
mat_trans_comb, \
state_pho_comb, \
index_start, \
index_end, \
list_centroid_pho_dur = makeNet(dict_score_100)
hmm = ParallelLRHMM(lyrics_net,
mat_trans_comb,
state_pho_comb,
index_start,
index_end)
if am == 'gmm':
hmm._gmmModel(gmmModels_path)
# viterbi decoding
paths_hmm, posteri_probas = hmm._viterbiLog(observations=mfcc_target, am=am, kerasModel=kerasModel)
# best_match_lyrics = hmm._getBestMatchLyrics(path_hmm)
        # aggregate output for result analysis
list_state_dur_path_centroid_pho_durs = []
for ii_path in xrange(len(paths_hmm)):
path_ii = paths_hmm[ii_path]
state_dur_path = hmm._pathStateDur(path_ii)
centroid_pho_durs = list_centroid_pho_dur[ii_path]
centroid_pho_durs = np.array(centroid_pho_durs) / np.sum(centroid_pho_durs)
centroid_pho_durs *= duration_target
list_state_dur_path_centroid_pho_durs.append([state_dur_path, centroid_pho_durs.tolist()])
dict_query_phrase = \
{'list_state_dur_path_centroid_pho_durs': list_state_dur_path_centroid_pho_durs,
'query_phrase_name': query_phrase_name,
'lyrics_net': lyrics_net,
'posteri_probas': posteri_probas.tolist(),
'phrases': phrases,
'line_lyrics': line_lyrics
}
elif method == 'lyricsRecognizerHSMM':
# build matching network from the score dataset
phrases, \
lyrics_net, \
mat_trans_comb, \
state_pho_comb, \
index_start, \
index_end, \
list_centroid_pho_dur = makeHSMMNet(dict_score_100)
        # calculate the mean duration of each phoneme (state) in the network;
        # this mean will be used to generate a Gaussian duration distribution for each state.
mean_dur_state = []
for cpd in list_centroid_pho_dur:
cpd = np.array(cpd) / np.sum(cpd)
cpd *= duration_target
mean_dur_state += cpd.tolist()
# print mean_dur_state
hsmm = ParallelLRHSMM(lyrics_net,
mat_trans_comb,
state_pho_comb,
index_start,
index_end,
mean_dur_state,
proportionality_std)
if am == 'gmm':
hsmm._gmmModel(gmmModels_path)
# viterbi decoding
paths_hmm, posteri_probas = hsmm._viterbiHSMM(observations=mfcc_target, am=am, kerasModel=kerasModel)
# aggregate results for analysis
dict_query_phrase = \
{'query_phrase_name': query_phrase_name,
'lyrics_net': lyrics_net,
'posteri_probas': posteri_probas.tolist(),
'phrases': phrases,
'line_lyrics': line_lyrics}
return dict_query_phrase, query_phrase_name
def generalProcess(method,
proportionality_std=0,
path_json_dict_query_phrases='dummy',
am='gmm',
files=()):
##-- method conditions
if method == 'obsMatrix':
# not used
gmmModel = gmmPhoModelLoad()
else:
gmmModel = ''
pass
if am=='cnn':
kerasModel = ParallelLRHMM.kerasModel(kerasModels_path)
else:
kerasModel = ''
dict_query_phrases = {}
# files = [filename for filename in getRecordingNamesSimi('TEST',class_name)]
for filename in files:
nestedPhonemeLists, _, _ = syllableTextgridExtraction(textgridDataDir, filename, 'line', 'details')
sampleRate, wavData = wavfile.read(path.join(wavDataDir,filename+'.wav'))
for i, line_list in enumerate(nestedPhonemeLists):
print filename, i
# these phrases are not in score dataset
if (filename == 'lseh-Zi_na_ri-Hong_yang_dong-qm' and i in [4,5]) or \
(filename == 'lsxp-Huai_nan_wang-Huai_he_ying02-qm' and i in [0,1,2,3]):
continue
if filename == 'daxp-Jiao_Zhang_sheng-Hong_niang01-qm' and i in [3]:
continue
dict_query_phrase, query_phrase_name = phraseProcess(filename,
i,
line_list,
wavData,
gmmModel,
kerasModel,
sampleRate,
method,
proportionality_std)
dict_query_phrases[query_phrase_name] = dict_query_phrase
with open(path_json_dict_query_phrases,'wb') as outfile:
json.dump(dict_query_phrases,outfile)
'''
# below is reserved for experiment of obsMatrix and candidateSynthesizeData methods
list_simi = []
# best_dist = float('Inf')
# best_M = np.array([])
# best_scoreM = np.array([])
for key in dict_score:
print key
if method == 'obsMatrix':
# scoreM = scoreMSynthesize(dict_score[key],N_frame) # syllable gmm
scoreM = scoreMSynthesizePho(dict_score[key],N_frame) # pho gmm
# scoreM = scoreM[1:,:]
# scoreMaskedM = obsM*scoreM
# udist = abs(np.sum(scoreMaskedM))
# print scoreM
obsMPlotPho(obsM)
obsMPlotPho(scoreM)
udist,plen = dtwNd(scoreM.transpose(),obsM.transpose())
print udist,plen
udist = udist/plen
else:
if method == 'candidateSynthesizeData':
# mfcc synthesized from data, dim_mfcc = 12
mfcc_synthesized = dic_mfcc_synthesized[key]
else:
# mfcc synthesized from GMM, dim_mfcc = 36
mfcc_synthesized = mfccSynthesizeFromGMM(dict_score[key],mfcc_target.shape[1],N_frame)
udist,_ = fastdtw(mfcc_target,mfcc_synthesized,dist=euclidean) # not good result
# udist = dtwNd(mfcc_target,mfcc_synthesized) # bad result
print udist
# plotMFCC(mfcc_synthesized)
# plotMFCC(mfcc_target)
# print udist
lyrics = dict_score[key]['lyrics']
sdist = stringDist(line_lyrics,lyrics)
list_simi.append([key,lyrics,udist,sdist])
# if udist < best_dist:
# best_dist=udist
# best_M = scoreMaskedM
# best_scoreM = scoreM
# list_simi.append([key,lyrics,sdist])
list_simi = sorted(list_simi,key=itemgetter(2))
list_sdist = [ls[3] for ls in list_simi]
# list_sdist = [ls[2] for ls in list_simi]
order = list_sdist.index(max(list_sdist))
#
# obsMPlot(obsM)
# obsMPlot(best_M)
# print best_dist
# print list_simi[order][2]
with open('results/obsMatrixPho_cosineDist/'+filename+str(i)+'.csv','wb') as csvfile:
w = csv.writer(csvfile)
w.writerow([line_lyrics,str(order),list_simi[order][1]])
for row_simi in list_simi:
w.writerow(row_simi)
order += 1
list_rank.append(order)
print list_rank
mrr = MRR(list_rank)
top1hit = topXhit(1,list_rank)
top3hit = topXhit(3,list_rank)
top5hit = topXhit(5,list_rank)
top10hit = topXhit(10,list_rank)
top20hit = topXhit(20,list_rank)
with open('eval/'+class_name+'_'+dm+'_'+method+'_'+'mfcc_noscaling_pho'+'.csv','wb') as csvfile:
w = csv.writer(csvfile)
w.writerow(['MRR',mrr])
w.writerow(['top 1 hit',top1hit])
w.writerow(['top 3 hit',top3hit])
w.writerow(['top 5 hit',top5hit])
w.writerow(['top 10 hit',top10hit])
w.writerow(['top 20 hit',top20hit])
'''
# # run analysis and save
# for cpp in xrange(1,31,1):
# runResultsAnalysis(cpp)
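if __name__ == '__main__':
    # Hypothetical driver call: run the HSMM matcher on a single test recording
    # and dump the ranking results. The std value, the output path and the use
    # of only one file name are illustrative placeholders; a real run would pass
    # the full test set (e.g. from getRecordingNamesSimi).
    generalProcess(method='lyricsRecognizerHSMM',
                   proportionality_std=0.5,
                   path_json_dict_query_phrases='dict_query_phrases_hsmm.json',
                   am='gmm',
                   files=('lseh-Zi_na_ri-Hong_yang_dong-qm',))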
|
ronggong/jingjuSingingPhraseMatching
|
phoneticSimilarity/generalProcess.py
|
Python
|
agpl-3.0
| 14,591
|
[
"Gaussian"
] |
376ce2274d289d3ff292c8dfa1aa49bb847d037ba10383bc85da1db1ad96de75
|
# coding=utf8
#
# Copyright 2013 Dreamlab Onet.pl
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 3.0.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, visit
#
# http://www.gnu.org/licenses/lgpl.txt
#
class RmockError(Exception):
pass
class RmockStartError(RmockError):
pass
class RmockParamsError(RmockError):
pass
class InvalidFunction(RmockError):
pass
|
tikan/rmock
|
src/rmock/errors.py
|
Python
|
lgpl-3.0
| 832
|
[
"VisIt"
] |
92fccb9281de3ec5639d7a6980bf878ef03f6c8a772b9494606ea4c88950f171
|
# Module wordnet.py
#
# Original author: Oliver Steele <steele@osteele.com>
# Project Page: http://sourceforge.net/projects/pywordnet
#
# Copyright (c) 1998-2004 by Oliver Steele. Use is permitted under
# the Artistic License
# <http://www.opensource.org/licenses/artistic-license.html>
"""Utility functions to use with the wordnet module.
Usage
-----
>>> dog = N['dog'][0]
# (First 10) adjectives that are transitively SIMILAR to the main sense of 'red'
>>> closure(ADJ['red'][0], SIMILAR)[:10]
['red' in {adjective: red, reddish, ruddy, blood-red, carmine, cerise, cherry, cherry-red, crimson, ruby, ruby-red, scarlet}, {adjective: chromatic}, {adjective: amber, brownish-yellow, yellow-brown}, {adjective: amethyst}, {adjective: aureate, gilded, gilt, gold, golden}, {adjective: azure, cerulean, sky-blue, bright blue}, {adjective: blue, bluish, blueish, light-blue, dark-blue, blue-black}, {adjective: bluish green, blue-green, cyan, teal}, {adjective: blushful, rosy}, {adjective: bottle-green}]
>>> # Adjectives that are transitively SIMILAR to any of the senses of 'red'
>>> #flatten1(map(lambda sense:closure(sense, SIMILAR), ADJ['red'])) # too verbose
>>> # Hyponyms of the main sense of 'dog'(n.) that are homophonous with verbs
>>> filter(lambda sense:V.get(sense.form), flatten1(map(lambda e:e.getSenses(), hyponyms(N['dog'][0]))))
['dog' in {noun: dog, domestic dog, Canis familiaris}, 'pooch' in {noun: pooch, doggie, doggy, barker, bow-wow}, 'toy' in {noun: toy dog, toy}, 'hound' in {noun: hound, hound dog}, 'basset' in {noun: basset, basset hound}, 'cocker' in {noun: cocker spaniel, English cocker spaniel, cocker}, 'bulldog' in {noun: bulldog, English bulldog}]
>>> # Find the senses of 'raise'(v.) and 'lower'(v.) that are antonyms
>>> filter(lambda p:p[0] in p[1].pointerTargets(ANTONYM), product(V['raise'].getSenses(), V['lower'].getSenses()))
[('raise' in {verb: raise, lift, elevate, get up, bring up}, 'lower' in {verb: lower, take down, let down, get down, bring down})]
"""
from __future__ import absolute_import
from functools import reduce
__author__ = "Oliver Steele <steele@osteele.com>"
__version__ = "2.0"
from .wordnet import *
#
# Domain utilities
#
def _requireSource(entity):
if not hasattr(entity, 'pointers'):
if isinstance(entity, Word):
raise TypeError(
repr(entity) + " is not a Sense or Synset. Try " + repr(entity) + "[0] instead.")
else:
raise TypeError(repr(entity) + " is not a Sense or Synset")
def tree(source, pointerType):
"""
>>> dog = N['dog'][0]
>>> from pprint import pprint
>>> pprint(tree(dog, HYPERNYM))
['dog' in {noun: dog, domestic dog, Canis familiaris},
[{noun: canine, canid},
[{noun: carnivore},
[{noun: placental, placental mammal, eutherian, eutherian mammal},
[{noun: mammal},
[{noun: vertebrate, craniate},
[{noun: chordate},
[{noun: animal, animate being, beast, brute, creature, fauna},
[{noun: organism, being},
[{noun: living thing, animate thing},
[{noun: object, physical object}, [{noun: entity}]]]]]]]]]]]]
>>> #pprint(tree(dog, HYPONYM)) # too verbose to include here
"""
if isinstance(source, Word):
return map(lambda s, t=pointerType: tree(s, t), source.getSenses())
_requireSource(source)
return [source] + map(lambda s, t=pointerType: tree(s, t), source.pointerTargets(pointerType))
def closure(source, pointerType, accumulator=None):
"""Return the transitive closure of source under the pointerType
relationship. If source is a Word, return the union of the closures of its
senses.
>>> dog = N['dog'][0]
>>> closure(dog, HYPERNYM)
['dog' in {noun: dog, domestic dog, Canis familiaris}, {noun: canine, canid}, {noun: carnivore}, {noun: placental, placental mammal, eutherian, eutherian mammal}, {noun: mammal}, {noun: vertebrate, craniate}, {noun: chordate}, {noun: animal, animate being, beast, brute, creature, fauna}, {noun: organism, being}, {noun: living thing, animate thing}, {noun: object, physical object}, {noun: entity}]
"""
if isinstance(source, Word):
return reduce(union, map(lambda s, t=pointerType: tree(s, t), source.getSenses()))
_requireSource(source)
if accumulator is None:
accumulator = []
if source not in accumulator:
accumulator.append(source)
for target in source.pointerTargets(pointerType):
closure(target, pointerType, accumulator)
return accumulator
def hyponyms(source):
"""Return source and its hyponyms.
If source is a Word, return the union of the hyponyms of its senses.
"""
return closure(source, HYPONYM)
def hypernyms(source):
"""Return source and its hypernyms.
If source is a Word, return the union of the hypernyms of its
senses.
"""
return closure(source, HYPERNYM)
def meet(a, b, pointerType=HYPERNYM):
"""Return the meet of a and b under the pointerType relationship.
>>> meet(N['dog'][0], N['cat'][0])
{noun: carnivore}
>>> meet(N['dog'][0], N['person'][0])
{noun: organism, being}
>>> meet(N['thought'][0], N['belief'][0])
{noun: content, cognitive content, mental object}
"""
return (intersection(closure(a, pointerType), closure(b, pointerType)) + [None])[0]
#
# String Utility Functions
#
def startsWith(str, prefix):
"""Return true iff _str_ starts with _prefix_.
>>> startsWith('unclear', 'un')
1
"""
return str[:len(prefix)] == prefix
def endsWith(str, suffix):
"""Return true iff _str_ ends with _suffix_.
>>> endsWith('clearly', 'ly')
1
"""
return str[-len(suffix):] == suffix
def equalsIgnoreCase(a, b):
"""Return true iff a and b have the same lowercase representation.
>>> equalsIgnoreCase('dog', 'Dog')
1
>>> equalsIgnoreCase('dOg', 'DOG')
1
"""
# test a == b first as an optimization where they're equal
return a == b or string.lower(a) == string.lower(b)
#
# Sequence Utility Functions
#
def issequence(item):
"""Return true iff _item_ is a Sequence (a List, String, or Tuple).
>>> issequence((1,2))
1
>>> issequence([1,2])
1
>>> issequence('12')
1
>>> issequence(1)
0
"""
return type(item) in (ListType, StringType, TupleType)
def intersection(u, v):
"""Return the intersection of _u_ and _v_.
>>> intersection((1,2,3), (2,3,4))
[2, 3]
"""
w = []
for e in u:
if e in v:
w.append(e)
return w
def union(u, v):
"""Return the union of _u_ and _v_.
>>> union((1,2,3), (2,3,4))
[1, 2, 3, 4]
"""
w = list(u)
if w is u:
import copy
w = copy.copy(w)
for e in v:
if e not in w:
w.append(e)
return w
def product(u, v):
"""Return the Cartesian product of u and v.
>>> product("123", "abc")
[('1', 'a'), ('1', 'b'), ('1', 'c'), ('2', 'a'), ('2', 'b'), ('2', 'c'), ('3', 'a'), ('3', 'b'), ('3', 'c')]
"""
return flatten1(map(lambda a, v=v: map(lambda b, a=a: (a, b), v), u))
def removeDuplicates(sequence):
"""Return a copy of _sequence_ with equal items removed.
>>> removeDuplicates("this is a test")
['t', 'h', 'i', 's', ' ', 'a', 'e']
>>> removeDuplicates(map(lambda tuple:apply(meet, tuple), product(N['story'].getSenses(), N['joke'].getSenses())))
[{noun: message, content, subject matter, substance}, None, {noun: abstraction}, {noun: communication}]
"""
accumulator = []
for item in sequence:
if item not in accumulator:
accumulator.append(item)
return accumulator
#
# Tree Utility Functions
#
def flatten1(sequence):
accumulator = []
for item in sequence:
if type(item) == TupleType:
item = list(item)
if type(item) == ListType:
accumulator.extend(item)
else:
accumulator.append(item)
return accumulator
#
# WordNet utilities
#
GET_INDEX_SUBSTITUTIONS = (
(' ', '-'), ('-', ' '), ('-', ''), (' ', ''), ('.', ''))
def getIndex(form, pos='noun'):
"""Search for _form_ in the index file corresponding to _pos_.
    getIndex applies to _form_ an algorithm that replaces spaces with
    hyphens, hyphens with spaces, removes hyphens and spaces, and removes
    periods in an attempt to find a form of the
string that is an exact match for an entry in the index file
corresponding to _pos_. getWord() is called on each transformed
string until a match is found or all the different strings have been
tried. It returns a Word or None.
"""
def trySubstitutions(trySubstitutions, form, substitutions, lookup=1, dictionary=dictionaryFor(pos)):
if lookup and form in dictionary:
return dictionary[form]
elif substitutions:
(old, new) = substitutions[0]
            # apply the next substitution; only keep it if it actually changed the form
            substitute = string.replace(form, old, new)
            if substitute == form:
                substitute = None
            if substitute and substitute in dictionary:
return dictionary[substitute]
return trySubstitutions(trySubstitutions, form, substitutions[1:], lookup=0) or \
(substitute and trySubstitutions(
trySubstitutions, substitute, substitutions[1:]))
    return trySubstitutions(trySubstitutions, form, GET_INDEX_SUBSTITUTIONS)
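# For example, getIndex('bow wow') first looks up 'bow wow' itself, then tries
# 'bow-wow', 'bowwow' and so on following GET_INDEX_SUBSTITUTIONS, returning
# the first spelling found in the index (the hyphenated 'bow-wow' form appears
# in the WordNet noun data, cf. the module docstring above).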
MORPHOLOGICAL_SUBSTITUTIONS = {
NOUN:
[('s', ''),
('ses', 's'),
('ves', 'f'),
('xes', 'x'),
('zes', 'z'),
('ches', 'ch'),
('shes', 'sh'),
('men', 'man'),
('ies', 'y')],
VERB:
[('s', ''),
('ies', 'y'),
('es', 'e'),
('es', ''),
('ed', 'e'),
('ed', ''),
('ing', 'e'),
('ing', '')],
ADJECTIVE:
[('er', ''),
('est', ''),
('er', 'e'),
('est', 'e')],
ADVERB: []}
def morphy(form, pos='noun', collect=0):
"""Recursively uninflect _form_, and return the first form found in the
dictionary. If _collect_ is true, a sequence of all forms is returned,
instead of just the first one.
>>> morphy('dogs')
'dog'
>>> morphy('churches')
'church'
>>> morphy('aardwolves')
'aardwolf'
>>> morphy('abaci')
'abacus'
>>> morphy('hardrock', 'adv')
"""
from .wordnet import _normalizePOS, _dictionaryFor
pos = _normalizePOS(pos)
fname = os.path.join(WNSEARCHDIR, {
NOUN: 'noun', VERB: 'verb', ADJECTIVE: 'adj', ADVERB: 'adv'}[pos] + '.exc')
excfile = open(fname)
substitutions = MORPHOLOGICAL_SUBSTITUTIONS[pos]
def trySubstitutions(trySubstitutions, # workaround for lack of nested closures in Python < 2.1
form, # reduced form
substitutions, # remaining substitutions
lookup=1,
dictionary=_dictionaryFor(pos),
excfile=excfile,
collect=collect,
collection=[]):
import string
exceptions = binarySearchFile(excfile, form)
if exceptions:
form = exceptions[string.find(exceptions, ' ') + 1:-1]
if lookup and form in dictionary:
if collect:
collection.append(form)
else:
return form
elif substitutions:
old, new = substitutions[0]
substitutions = substitutions[1:]
substitute = None
if endsWith(form, old):
substitute = form[:-len(old)] + new
# if dictionary.has_key(substitute):
# return substitute
form = trySubstitutions(trySubstitutions, form, substitutions) or \
(substitute and trySubstitutions(
trySubstitutions, substitute, substitutions))
return (collect and collection) or form
elif collect:
return collection
return trySubstitutions(trySubstitutions, form, substitutions)
#
# Testing
#
def _test(reset=0):
import doctest
import wntools
if reset:
# This keeps doctest from complaining after a reload.
doctest.master = None
return doctest.testmod(wntools)
|
textioHQ/pattern
|
pattern_text/en/wordnet/pywordnet/wntools.py
|
Python
|
bsd-3-clause
| 12,405
|
[
"Amber"
] |
e01d58cd4e48373ef08600e26278ada1cabf3c21bfda5e821c19e88496dbbd47
|
#!/usr/bin/env python
import Pyro.core
import Pyro.naming
import string
import MySQLdb
import time
import random
import threading
try:
from phamerator import *
except:
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import alignmentDatabase
from errorHandler import *
import db_conf
import sys
try:
import hashlib
except ImportError:
import md5
import getopt
import getpass
import logger
from threading import Thread
from Pyro.EventService.Clients import Subscriber
from Pyro.protocol import getHostname
import Pyro.EventService.Clients
Pyro.config.PYRO_MAXCONNECTIONS=1000
Pyro.config.PYRO_NS_HOSTNAME='localhost'
class options:
def __init__(self, argv):
try:
opts, args = getopt.getopt(argv, "hpq:s:n:u:d:i:l:a:", ["help", "prompt", "password=", "server=", "nsname=", "user=","database=","instances=","logging=","alignment_type="])
except getopt.GetoptError:
print 'error running getopt.getopt'
self.usage()
self.argDict = {}
for opt, arg in opts:
if opt in ("-h", "--help"):
self.usage()
sys.exit()
elif opt in ("-p", "--prompt"):
self.argDict['password'] = getpass.getpass('password: ')
elif opt in ("-q", "--password"):
self.argDict['password'] = arg
elif opt in ("-s", "--server"):
self.argDict['server'] = arg
elif opt in ("-n", "--nsname"):
self.argDict['nsname'] = arg
elif opt in ("-u", "--user"):
self.argDict['user'] = arg
elif opt in ("-d", "--database"):
self.argDict['database'] = arg
elif opt in ("-i", "--instances"):
self.argDict['instances'] = arg
elif opt in ("-l", "--logging"):
self.argDict['logging'] = arg
elif opt in ("-a", "--alignment_type"):
self.argDict['alignment_type'] = arg
if not self.argDict.has_key('password'): self.argDict['password'] = ''
required_args = ('server', 'nsname', 'user', 'database', 'instances', 'logging', 'alignment_type')
for a in required_args:
if a not in self.argDict:
print "required argument '%s' is missing" % a
self.usage()
sys.exit()
def usage(self):
'''Prints program usage information'''
print """phamServer_InnoDB.py [OPTION] [ARGUMENT]
-h, --help: print this usage information
-u, --user=<username>: specify a username on the database
-p, --prompt: prompt for a password
-q, --password=<pass>: enter a password on the command line, overrides -p
-d, --database=<database name>: specify the name of the database to access
-i, --instances=<number_of_instances>: number of server instances to run (default=1)
-l, --logging={True or False}: whether to print out debugging info (default is True)
-a, --alignment_type={blast or clustalw}: this argument is required
-s, --server=<hostname>: hostname of database server, required
-n, --nsname=<nsname>: PYRO server nsname, usually localhost, required"""
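# Example invocation (host, credentials and instance count below are purely
# illustrative placeholders):
#   python phamServer_InnoDB.py -u phamuser -p -s db.example.org -n localhost \
#       -d phamerator -i 4 -l True -a blast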
class phamPublisher(Pyro.EventService.Clients.Publisher):
'''Publishes Pyro events over the network to clients, for instance when the BLAST database changes'''
def __init__(self):
Pyro.EventService.Clients.Publisher.__init__(self)
#def publish(self, channel, message):
# self.publish(channel, message)
class NameServer(Thread):
def __init__(self):
Thread.__init__(self)
self.setDaemon(1)
self.starter = Pyro.naming.NameServerStarter() # no special identification
def run(self):
print "Launching Pyro Name Server"
self.starter.start() # (hostname=Pyro.config.PYRO_NS_HOSTNAME)
def waitUntilStarted(self):
return self.starter.waitUntilStarted()
class EventServer(Thread):
def __init__(self):
Thread.__init__(self)
self.setDaemon(1)
self.starter = Pyro.EventService.Server.EventServiceStarter() # no special identification
def run(self):
print "Launching Pyro Event Server"
self.starter.start(hostname=Pyro.config.PYRO_NS_HOSTNAME)
def waitUntilStarted(self):
return self.starter.waitUntilStarted()
class phamServlet(Pyro.core.SynchronizedObjBase, errorHandler):
def __init__(self, logging, c):
Pyro.core.SynchronizedObjBase.__init__(self)
errorHandler.__init__(self)
self._logger = logger.logger(logging)
self.lastAccessed = time.time()
self.name = ''
self.c = c
#self.c.execute("SELECT id FROM scores LIMIT 1000")
try: self.c.execute("COMMIT")
except: self.show_sql_errors(self.c)
def get_last_accessed(self):
#print 'returning lastAccessed'
return self.lastAccessed
class clustalwServlet(phamServlet, Subscriber, Thread):
def __init__(self, logging, c, server, database, opts):
Thread.__init__(self)
phamServlet.__init__(self,logging, c)
Subscriber.__init__(self)
#self.setDaemon(1)
self.server, self.database = server, database
self.c = db_conf.db_conf(username=opts['user'], password=opts['password'], server=opts['server'], db=opts['database']).get_cursor()
self.subscribe("clustalw")
self._logger = logger.logger(logging)
self.publisher = phamPublisher()
def request_db_info(self):
'''phamClient needs this info to get a proper database cursor, but it also needs a valid username/password pair'''
return self.server, self.database
def event(self, event):
self._logger.log('%s --> %s' % (event.subject, event.msg))
if event.subject == 'clustalw' and event.msg == 'database has alignments available':
self._logger.log('telling the clients to get busy')
self.publisher.publish('clustalw', 'get busy')
def report_scores(self, clustalw_work_unit, results, client_host):
'''compute node reporting scores for a particular query'''
self._logger.log('%s: reporting clustalw results' % client_host)
clustalw_work_unit.add_matches(results, self.c)
def request_seqs(self, client_host):
'''compute node asking for a query sequence and optionally the database for clustalw alignments'''
self._logger.log('%s: requesting clustalw work unit' % client_host)
clustalw_work_unit = alignmentDatabase.clustalwWorkUnit(self.c)
if not clustalw_work_unit.query_id:
try:
import pynotify
if pynotify.init("Phamerator"):
n = pynotify.Notification("Phamerator Server Update", "Clustalw alignments completed", "file:///%s" % os.path.join(os.path.dirname(__file__),"pixmaps/phamerator.png"))
n.show()
else:
pass
#print "there was a problem initializing the pynotify module"
except:
pass
return clustalw_work_unit
def run(self):
self.listen()
class blastServlet(phamServlet, Subscriber, Thread):
def __init__(self, logging, c, server, database, opts):
Thread.__init__(self)
phamServlet.__init__(self, logging, c)
Subscriber.__init__(self)
self.c = db_conf.db_conf(username=opts['user'], password=opts['password'], server=opts['server'], db=opts['database']).get_cursor()
self.server, self.database = server, database
self.subscribe("fasta")
self.lastAccessed = time.time()
self.waitTime = random.randint(5,15)
self.busy = False
self._logger = logger.logger(logging)
self.status = 'avail'
def request_db_info(self):
'''phamClient needs this info to get a proper database cursor, but it also needs a valid username/password pair'''
return self.server, self.database
def disconnect(self, client):
'''cleans up after a client disconnects'''
self._logger.log(client + ' has disconnected. Rolling back changes.')
try:
self.c.execute("ROLLBACK")
self._logger.log('done.')
except: self.show_sql_errors(self.c)
self._logger.log(client + ' has disconnected. Unlocking tables.')
try:
self.c.execute("UNLOCK TABLES")
self._logger.log('done.')
except: self.show_sql_errors(self.c)
def event(self, event):
self._logger.log('%s --> %s' % (event.subject, event.msg))
if event.subject == 'fasta' and event.msg == 'update available': self.update_db()
def request_seqs(self, client_host):
'''the new method for getting seqs for BLAST that doesn't use the alignment and blast tables'''
self.lastAccessed = time.time()
self._logger.log('%s: requesting BLAST work unit' % client_host)
blastWorkUnit = alignmentDatabase.blastWorkUnit(self.c)
return blastWorkUnit
def report_scores(self, blastWorkUnit, results, client_host):
'''compute node reporting scores for a particular query'''
self._logger.log('%s: reporting BLAST results' % client_host)
self.lastAccessed = time.time()
blastWorkUnit.add_matches(results, self.c)
def run(self):
self.listen()
class checkStaleRows (Thread, errorHandler):
def __init__(self,logging, c):
Thread.__init__(self)
self.setDaemon(1)
self.logging = logging
self.c = c
def run (self):
self._logger = logger.logger(self.logging)
while 1:
self._logger.log('looking for stale clustalw alignments...')
self.c.execute("UPDATE gene SET clustalw_status = 'avail' WHERE clustalw_status = 'stale'")
self._logger.log('looking for pending alignments...')
self.c.execute("UPDATE gene SET clustalw_status = 'stale' WHERE clustalw_status = 'pending'")
self._logger.log('looking for stale blast alignments...')
self.c.execute("UPDATE gene SET blast_status = 'avail' WHERE blast_status = 'stale'")
self._logger.log('looking for pending blast alignments...')
self.c.execute("UPDATE gene SET blast_status = 'stale' WHERE blast_status = 'pending'")
self.c.execute("COMMIT")
time.sleep(60*60)
class serverSelector(Pyro.core.SynchronizedObjBase, errorHandler):
def __init__(self, daemon, logging, c, username, password, server, database, opts):
Pyro.core.SynchronizedObjBase.__init__(self)
self._logger = logger.logger(logging)
self.logging = logging
self.servers = []
self.setPyroDaemon(daemon)
self.c = c
self.username = username
self.password = password
self.server = server
self.database = database
self.opts = opts
# make some phamServlet objects that should be able to concurrently access the DB
def create_servers(self, server_instances, alignment_type, server, database):
for i in range(1,server_instances+1):
#if sys.argv[3] == 'clustalw':
if alignment_type == 'clustalw':
server = clustalwServlet(self.logging, self.c, server, database, self.opts)
#elif sys.argv[3] == 'blast':
elif alignment_type == 'blast':
                server = blastServlet(self.logging, self.c, server, database, self.opts)
                # the servlet is registered once below, after its name has been assigned
else:
self._logger.log('Command line argument error: please specify \'clustalw\' or \'blast\' as the server type')
sys.exit()
server.name = 'phamServlet'+str(i)
uri=self.daemon.connect(server, server.name)
self.servers.append(server)
server.start()
# connect the phamServlets to the Pyro name server
self._logger.log('spawning ' + str(server_instances) + ' instances of the server')
return self.servers
# assign a phamServlet to a client when it first contacts the server program
def get_server(self, platform, hostname):
try:
self.c.execute("""SELECT id FROM node WHERE hostname = '%s'""" % hostname)
except:
self.c = db_conf.db_conf(username=self.username, password=self.password, server=self.server, db=self.database).get_cursor()
try:
self.c.execute("""SELECT id FROM node WHERE hostname = '%s'""" % hostname)
except:
self.show_sql_errors(self.c)
node_id = self.c.fetchone()
if node_id:
node_id = int(node_id[0])
# if this is the first ever connection for this client, add it to the node table
if not node_id:
#try: self.c.execute("""LOCK TABLES gene WRITE, scores WRITE, node WRITE""")
#except: self.show_sql_errors(self.c)
try: self.c.execute("""INSERT INTO node (platform, hostname) VALUES ('%s', '%s')""" % (platform, hostname))
except: self.show_sql_errors(self.c)
try: self.c.execute("COMMIT")
except: self.show_sql_errors(self.c)
try: self.c.execute("""SELECT id FROM node WHERE platform = '%s' AND hostname = '%s'""" % (platform, hostname))
except: self.show_sql_errors(self.c)
#try: self.c.execute("""UNLOCK TABLES""")
#except: sql_show_errors(self.c)
node_id = self.c.fetchone()[0]
self._logger.log('registering new node id:' + str(node_id) + ' platform: ' + platform + ' hostname: ' + hostname)
# return the server that was accessed the least recently (should be the least busy one)
dict = {}
for server in self.servers:
dict[server.name] = server.get_last_accessed()
items = dict.items()
items = [(v, k) for (k, v) in items]
items.sort()
items = [(k, v) for (v, k) in items]
self._logger.log(hostname+ ': use ' + items[0][0])
return items[0][0]
class phamServer(errorHandler):
def __init__(self, daemon, server_instances, alignment_type, logging, c, username, password, server, database, opts):
self._logger = logger.logger(logging)
if Pyro.config.PYRO_MULTITHREADED: self._logger.log('Pyro server running in multithreaded mode')
self.c = c
try: self.c.execute("SET AUTOCOMMIT = 0")
except: self.show_sql_errors(self.c)
try: self.c.execute("COMMIT")
except: self.show_sql_errors(self.c)
#self.reset_stale_rows()
self.daemon = daemon
self.servers = []
self.servSel = serverSelector(self.daemon, logging, self.c, username, password, server, database, opts)
self.servers = self.servSel.create_servers(server_instances, alignment_type, server, database)
self._logger.log('Registering serverSelector.')
uri=self.daemon.connect(self.servSel, "serverSelector")
self._logger.log('Startup complete. Listening for client connections...')
def reset_stale_rows(self):
self._logger.log('Clearing stale alignments.')
try: self.c.execute("UPDATE clustalw SET score = NULL, node_id = NULL, status = 'avail' WHERE status = 'pending' OR status = 'stale'")
except: self.show_sql_errors(self.c)
try: self.c.execute("UPDATE blast SET score = NULL, node_id = NULL, status = 'avail' WHERE status = 'pending' OR status = 'stale'")
except: self.show_sql_errors(self.c)
try: self.c.execute("COMMIT")
except: self.show_sql_errors(self.c)
def shutdown(self):
self._logger.log('Disconnecting objects from the Pyro nameserver')
self.daemon.disconnect(self.servSel)
self._logger.log('...serverSelector')
for i in range(len(self.servSel.servers)):
j = self.servSel.servers.pop(0)
self._logger.log('...' + j.name)
self.daemon.disconnect(j)
for server in self.servSel.servers:
server.abort()
def update_blast_db(self):
'''listen for event that blast database needs to be updated'''
pass
def update_clustal_db(self):
'''listen for event that clustal database needs to be updated'''
pass
def main():
opts = options(sys.argv[1:]).argDict
username, password, database, server, nsname = opts['user'], opts['password'], opts['database'], opts['server'], opts['nsname']
alignment_type = opts['alignment_type']
print 'username :', username
#print 'password :', password
print 'server :', server
print 'database :', database
if opts['nsname']:
Pyro.config.PYRO_NS_HOSTNAME=opts['nsname']
nss=NameServer()
nss.start()
nss.waitUntilStarted() # wait until the NS has fully started.
ess=EventServer()
ess.start()
ess.waitUntilStarted() # wait until the ES has fully started.
server_instances = int(opts['instances'])
logging = opts['logging']
daemon=Pyro.core.Daemon(host=nsname)
ns=Pyro.naming.NameServerLocator().getNS(host=nsname)
daemon.useNameServer(ns)
_logger = logger.logger(logging)
c = db_conf.db_conf(username=username, password=password, server=opts['server'], db=database).get_cursor()
try: c.execute("SET SESSION wait_timeout=2629740;")
    except: _logger.log('could not set wait_timeout for the main database cursor')
csrCursor = db_conf.db_conf(username=username, password=password, server=opts['server'], db=database).get_cursor()
try: csrCursor.execute("SET SESSION wait_timeout=2629740;")
    except: _logger.log('could not set wait_timeout for the stale-row checker cursor')
csr = checkStaleRows(logging, csrCursor)
csr.start()
pServer = phamServer(daemon, server_instances, alignment_type, logging, c, username, password, server, database, opts)
# run the Pyro loop
try: daemon.requestLoop()
# if Cntl-C pressed, exit cleanly
except (KeyboardInterrupt, SystemExit):
pServer.shutdown()
_logger.log('waiting for all threads to exit')
if __name__ == '__main__':
main()
|
byuphamerator/phamerator-dev
|
phamerator/phamServer_InnoDB.py
|
Python
|
gpl-2.0
| 16,847
|
[
"BLAST"
] |
e9112117b38111d8c77b29813a9c331740dd4ea3a8da214f3e8d910920e062f2
|
# -*- coding: utf-8 -*-
# This file is part of AudioLazy, the signal processing Python package.
# Copyright (C) 2012-2014 Danilo de Jesus da Silva Bellini
#
# AudioLazy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Created on Wed Jul 18 2012
# danilo [dot] bellini [at] gmail [dot] com
"""
Simple audio/stream synthesis module
"""
from math import sin, pi, ceil, isinf
import collections
import random
# Audiolazy internal imports
from .lazy_stream import Stream, tostream, AbstractOperatorOverloaderMeta
from .lazy_itertools import cycle
from .lazy_filters import comb
from .lazy_compat import meta, iteritems, xrange, xzip
from .lazy_misc import rint
__all__ = ["modulo_counter", "line", "fadein", "fadeout", "attack", "ones",
"zeros", "zeroes", "adsr", "white_noise", "gauss_noise",
"TableLookupMeta", "TableLookup", "DEFAULT_TABLE_SIZE",
"sin_table", "saw_table", "sinusoid", "impulse", "karplus_strong"]
@tostream
def modulo_counter(start=0., modulo=256., step=1.):
"""
Creates a lazy endless counter stream with the given modulo, i.e., its
  values range from 0. to the given "modulo", somewhat equivalent to:\n
    Stream(itertools.count(start, step)) % modulo\n
  Yet the given step can be an iterable, and doesn't create unneeded big
  ints. All inputs can be float. Input order remembers slice/range inputs.
  All inputs can also be iterables. If any of them is an iterable, the end
  of this counter happens when there's no more data in one of those inputs
  to continue iteration.
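  Examples
  --------
  A small integer counter wrapping at 3; ``take`` grabs a finite prefix of
  the endless stream:
  >>> modulo_counter(0, 3, 1).take(8)
  [0, 1, 2, 0, 1, 2, 0, 1]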
"""
if isinstance(start, collections.Iterable):
lastp = 0.
c = 0.
if isinstance(step, collections.Iterable):
if isinstance(modulo, collections.Iterable):
for p, m, s in xzip(start, modulo, step):
c += p - lastp
c = c % m % m
yield c
c += s
lastp = p
else:
for p, s in xzip(start, step):
c += p - lastp
c = c % modulo % modulo
yield c
c += s
lastp = p
else:
if isinstance(modulo, collections.Iterable):
for p, m in xzip(start, modulo):
c += p - lastp
c = c % m % m
yield c
c += step
lastp = p
else: # Only start is iterable. This should be optimized!
if step == 0:
for p in start:
yield p % modulo % modulo
else:
steps = int(modulo / step)
if steps > 1:
n = 0
for p in start:
c += p - lastp
yield (c + n * step) % modulo % modulo
lastp = p
n += 1
if n == steps:
n = 0
c = (c + steps * step) % modulo % modulo
else:
for p in start:
c += p - lastp
c = c % modulo % modulo
yield c
c += step
lastp = p
else:
c = start
if isinstance(step, collections.Iterable):
if isinstance(modulo, collections.Iterable):
for m, s in xzip(modulo, step):
c = c % m % m
yield c
c += s
else: # Only step is iterable. This should be optimized!
for s in step:
c = c % modulo % modulo
yield c
c += s
else:
if isinstance(modulo, collections.Iterable):
for m in modulo:
c = c % m % m
yield c
c += step
else: # None is iterable
if step == 0:
c = start % modulo % modulo
while True:
yield c
else:
steps = int(modulo / step)
if steps > 1:
n = 0
while True:
yield (c + n * step) % modulo % modulo
n += 1
if n == steps:
n = 0
c = (c + steps * step) % modulo % modulo
else:
while True:
c = c % modulo % modulo
yield c
c += step
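# Editor's note: the sketch below is not part of the original AudioLazy source; it
# just illustrates modulo_counter as a wrapping phase accumulator, the same role it
# plays inside TableLookup.__call__ further down in this module.
def _modulo_counter_example():
  """Collect a few samples from a wrapping phase counter (illustrative only)."""
  # A step of pi/4 rad/sample wrapped into [0, 2*pi) covers one cycle in 8 samples.
  phases = modulo_counter(start=0., modulo=2 * pi, step=pi / 4)
  return phases.take(10)  # Stream.take gathers the first n samples into a list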
@tostream
def line(dur, begin=0., end=1., finish=False):
"""
Finite Stream with a straight line, could be used as fade in/out effects.
Parameters
----------
dur :
Duration, given in number of samples. Use the sHz function to help with
durations in seconds.
begin, end :
First and last (or stop) values to be yielded. Defaults to [0., 1.],
respectively.
finish :
    Choose whether ``end`` is the last value to be yielded or whether it
    shouldn't be yielded at all. Defaults to False, which means that ``end``
    won't be yielded. The last sample won't have the "end" amplitude unless
    finish is True, i.e., without explicitly saying "finish=True", the "end"
    input works like a "stop" range parameter, although it can [should] be a
    float. This makes it easier to concatenate several lines.
Returns
-------
A finite Stream with the linearly spaced data.
Examples
--------
  With ``finish = True``, it works just like NumPy ``np.linspace``, apart from
  the argument order and laziness:
>>> import numpy as np # This test needs Numpy
>>> np.linspace(.2, .7, 6)
array([ 0.2, 0.3, 0.4, 0.5, 0.6, 0.7])
  >>> line(6, .2, .7, finish=True)
<audiolazy.lazy_stream.Stream object at 0x...>
>>> list(line(6, .2, .7, finish=True))
[0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
>>> list(line(6, 1, 4)) # With finish = False (default)
[1.0, 1.5, 2.0, 2.5, 3.0, 3.5]
Line also works with Numpy arrays and matrices
>>> a = np.mat([[1, 2], [3, 4]])
>>> b = np.mat([[3, 2], [2, 1]])
>>> for el in line(4, a, b):
... print(el)
[[ 1. 2.]
[ 3. 4.]]
[[ 1.5 2. ]
[ 2.75 3.25]]
[[ 2. 2. ]
[ 2.5 2.5]]
[[ 2.5 2. ]
[ 2.25 1.75]]
And also with ZFilter instances:
>>> from audiolazy import z
>>> for el in line(4, z ** 2 - 5, z + 2):
... print(el)
z^2 - 5
0.75 * z^2 + 0.25 * z - 3.25
0.5 * z^2 + 0.5 * z - 1.5
0.25 * z^2 + 0.75 * z + 0.25
Note
----
  Amplitudes should commonly be float numbers between -1 and 1.
  Using line(<inputs>).append([end]) you can finish the line with one extra
  sample without worrying about the "finish" input.
See Also
--------
sHz :
Second and hertz constants from samples/second rate.
"""
m = (end - begin) / (dur - (1. if finish else 0.))
for sample in xrange(int(dur + .5)):
yield begin + sample * m
def fadein(dur):
"""
Linear fading in.
Parameters
----------
dur :
Duration, in number of samples.
Returns
-------
Stream instance yielding a line from zero to one.
"""
return line(dur)
def fadeout(dur):
"""
Linear fading out. Multiply by this one at end to finish and avoid clicks.
Parameters
----------
dur :
Duration, in number of samples.
Returns
-------
  Stream instance yielding the line. The starting amplitude is 1.0.
"""
return line(dur, 1., 0.)
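# Editor's note: illustrative sketch, not part of the original source. fadein and
# fadeout are thin wrappers over line(); peeking at a few samples makes the direction
# of each ramp explicit.
def _fade_example():
  """Show the first samples of the linear fade envelopes (illustrative only)."""
  rising = fadein(5).take(5)    # [0.0, 0.2, 0.4, 0.6, 0.8]
  falling = fadeout(5).take(5)  # [1.0, 0.8, 0.6, 0.4, 0.2]
  return rising, falling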
def attack(a, d, s):
"""
Linear ADS fading attack stream generator, useful to be multiplied with a
given stream.
Parameters
----------
a :
"Attack" time, in number of samples.
d :
"Decay" time, in number of samples.
s :
"Sustain" amplitude level (should be based on attack amplitude).
The sustain can be a Stream, if desired.
Returns
-------
Stream instance yielding an endless envelope, or a finite envelope if the
  sustain input is a finite Stream. The attack amplitude is 1.0.
"""
# Configure sustain possibilities
if isinstance(s, collections.Iterable):
it_s = iter(s)
s = next(it_s)
else:
it_s = None
# Attack and decay lines
m_a = 1. / a
m_d = (s - 1.) / d
len_a = int(a + .5)
len_d = int(d + .5)
for sample in xrange(len_a):
yield sample * m_a
for sample in xrange(len_d):
yield 1. + sample * m_d
# Sustain!
if it_s is None:
while True:
yield s
else:
for s in it_s:
yield s
@tostream
def ones(dur=None):
"""
Ones stream generator.
You may multiply your endless stream by this to enforce an end to it.
Parameters
----------
dur :
Duration, in number of samples; endless if not given.
Returns
-------
Stream that repeats "1.0" during a given time duration (if any) or
endlessly.
"""
if dur is None or (isinf(dur) and dur > 0):
while True:
yield 1.0
for x in xrange(int(.5 + dur)):
yield 1.0
@tostream
def zeros(dur=None):
"""
Zeros/zeroes stream generator.
  You may add this to your endless stream to enforce an end to it.
Parameters
----------
dur :
Duration, in number of samples; endless if not given.
Returns
-------
Stream that repeats "0.0" during a given time duration (if any) or
endlessly.
"""
if dur is None or (isinf(dur) and dur > 0):
while True:
yield 0.0
for x in xrange(int(.5 + dur)):
yield 0.0
zeroes = zeros
@tostream
def adsr(dur, a, d, s, r):
"""
Linear ADSR envelope.
Parameters
----------
dur :
Duration, in number of samples, including the release time.
a :
"Attack" time, in number of samples.
d :
"Decay" time, in number of samples.
s :
"Sustain" amplitude level (should be based on attack amplitude).
r :
"Release" time, in number of samples.
Returns
-------
Stream instance yielding a finite ADSR envelope, starting and finishing with
0.0, having peak value of 1.0.
"""
m_a = 1. / a
m_d = (s - 1.) / d
m_r = - s * 1. / r
len_a = int(a + .5)
len_d = int(d + .5)
len_r = int(r + .5)
len_s = int(dur + .5) - len_a - len_d - len_r
for sample in xrange(len_a):
yield sample * m_a
for sample in xrange(len_d):
yield 1. + sample * m_d
for sample in xrange(len_s):
yield s
for sample in xrange(len_r):
yield s + sample * m_r
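# Editor's note: illustrative sketch, not part of the original source. The durations
# below are arbitrary sample counts; multiplying a note Stream by this envelope shapes
# its amplitude through the attack, decay, sustain and release segments described above.
def _adsr_example():
  """Build a tiny ADSR envelope and return its samples (illustrative only)."""
  env = adsr(dur=40, a=10, d=10, s=.5, r=10)  # 10 + 10 + 10 + 10 samples
  return env.take(40)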
@tostream
def white_noise(dur=None, low=-1., high=1.):
"""
White noise stream generator.
Parameters
----------
dur :
Duration, in number of samples; endless if not given (or None).
low, high :
Lower and higher limits. Defaults to the [-1; 1] range.
Returns
-------
  Stream yielding uniformly distributed random numbers between ``low`` and
  ``high``.
"""
if dur is None or (isinf(dur) and dur > 0):
while True:
yield random.uniform(low, high)
for x in xrange(rint(dur)):
yield random.uniform(low, high)
@tostream
def gauss_noise(dur=None, mu=0., sigma=1.):
"""
Gaussian (normal) noise stream generator.
Parameters
----------
dur :
Duration, in number of samples; endless if not given (or None).
mu :
Distribution mean. Defaults to zero.
sigma :
Distribution standard deviation. Defaults to one.
Returns
-------
Stream yielding Gaussian-distributed random numbers.
Warning
-------
This function can yield values outside the [-1; 1] range, and you might
need to clip its results.
See Also
--------
clip:
Clips the signal up to both a lower and a higher limit.
"""
if dur is None or (isinf(dur) and dur > 0):
while True:
yield random.gauss(mu, sigma)
for x in xrange(rint(dur)):
yield random.gauss(mu, sigma)
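# Editor's note: illustrative sketch, not part of the original source. The warning
# above says gauss_noise may leave the [-1; 1] range; this clamps each sample inline
# instead of relying on the separate AudioLazy ``clip`` helper mentioned in "See Also".
def _clipped_gauss_example(dur=8):
  """Gaussian noise clamped elementwise to [-1, 1] (illustrative only)."""
  return [max(-1., min(1., sample)) for sample in gauss_noise(dur)]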
class TableLookupMeta(AbstractOperatorOverloaderMeta):
"""
Table lookup metaclass. This class overloads all operators to the
TableLookup class, applying them to the table contents, elementwise.
Table length and number of cycles should be equal for this to work.
"""
__operators__ = "+ - * / // % ** << >> & | ^ ~"
def __binary__(cls, op):
op_func = op.func
def dunder(self, other):
if isinstance(other, TableLookup):
if self.cycles != other.cycles:
raise ValueError("Incompatible number of cycles")
if len(self) != len(other):
raise ValueError("Incompatible sizes")
zip_tables = xzip(self.table, other.table)
new_table = [op_func(data1, data2) for data1, data2 in zip_tables]
return TableLookup(new_table, self.cycles)
if isinstance(other, (int, float, complex)):
new_table = [op_func(data, other) for data in self.table]
return TableLookup(new_table, self.cycles)
      raise NotImplementedError("Unknown action to be done")
return dunder
def __rbinary__(cls, op):
op_func = op.func
def dunder(self, other):
if isinstance(other, (int, float, complex)):
new_table = [op_func(other, data) for data in self.table]
return TableLookup(new_table, self.cycles)
      raise NotImplementedError("Unknown action to be done")
return dunder
def __unary__(cls, op):
op_func = op.func
def dunder(self):
new_table = [op_func(data) for data in self.table]
return TableLookup(new_table, self.cycles)
return dunder
class TableLookup(meta(metaclass=TableLookupMeta)):
"""
Table lookup synthesis class, also allowing multi-cycle tables as input.
"""
def __init__(self, table, cycles=1):
"""
Inits a table lookup. The given table should be a sequence, like a list.
The cycles input should have the number of cycles in table for frequency
calculation afterwards.
"""
self.table = table
self.cycles = cycles
@property
def table(self):
return self._table
@table.setter
def table(self, value):
self._table = value
self._len = len(value)
def __len__(self):
return self._len
def __call__(self, freq, phase=0.):
"""
Returns a wavetable lookup synthesis endless stream. Play it with the
given frequency and starting phase. Phase is given in rads, and frequency
in rad/sample. Accepts streams of numbers, as well as numbers, for both
frequency and phase inputs.
"""
total_length = len(self)
total_len_float = float(total_length)
cycle_length = total_len_float / (self.cycles * 2 * pi)
step = cycle_length * freq
part = cycle_length * phase
tbl_iter = modulo_counter(part, total_len_float, step)
tbl = self.table
#return Stream(tbl[int(idx)] for idx in tbl_iter)
return Stream(tbl[int(idx)] * (1. - (idx - int(idx))) +
tbl[int(ceil(idx)) - total_length] * (idx - int(idx))
for idx in tbl_iter)
def __getitem__(self, idx):
"""
Gets an item from the table from its index, which can possibly be a float.
The data is linearly interpolated.
"""
total_length = len(self)
tbl = self.table
return tbl[int(idx) % total_length] * (1. - (idx - int(idx))) + \
tbl[int(ceil(idx)) % total_length] * (idx - int(idx))
def __eq__(self, other):
if isinstance(other, TableLookup):
return (self.cycles == other.cycles) and (self.table == other.table)
return False
def __ne__(self, other):
return not self == other
def harmonize(self, harmonics_dict):
"""
Returns a "harmonized" table lookup instance by using a "harmonics"
dictionary with {partial: amplitude} terms, where all "partial" keys have
to be integers.
"""
data = sum(cycle(self.table[::partial+1]) * amplitude
for partial, amplitude in iteritems(harmonics_dict))
return TableLookup(data.take(len(self)), cycles=self.cycles)
def normalize(self):
"""
Returns a new table with values ranging from -1 to 1, reaching at least
one of these, unless there's no data.
"""
max_abs = max(self.table, key=abs)
if max_abs == 0:
raise ValueError("Can't normalize zeros")
return self / max_abs
# Create the instance for each default table
DEFAULT_TABLE_SIZE = 2**16
sin_table = TableLookup([sin(x * 2 * pi / DEFAULT_TABLE_SIZE)
for x in xrange(DEFAULT_TABLE_SIZE)])
saw_table = TableLookup(list(line(DEFAULT_TABLE_SIZE, -1, 1, finish=True)))
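# Editor's note: illustrative sketch, not part of the original source. It exercises
# the TableLookup API documented above: harmonize() sums scaled partials of the default
# sine table, normalize() rescales it, and calling the table with a frequency in
# rad/sample returns an endless lookup Stream.
def _table_lookup_example():
  """Synthesize a few samples from a 3-partial harmonic table (illustrative only)."""
  harmonics = {0: 1., 1: .5, 2: .25}  # {partial: amplitude}, integer keys
  rich_table = sin_table.harmonize(harmonics).normalize()
  return rich_table(pi / 32).take(6)  # frequency chosen arbitrarily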
@tostream
def sinusoid(freq, phase=0.):
"""
Sinusoid based on the optimized math.sin
"""
  # At 44100 samples/sec, 5 seconds of this leads to an error of 8e-14
  # peak to peak. That's accurate enough.
for n in modulo_counter(start=phase, modulo=2 * pi, step=freq):
yield sin(n)
@tostream
def impulse(dur=None, one=1., zero=0.):
"""
Impulse stream generator.
Parameters
----------
dur :
Duration, in number of samples; endless if not given.
Returns
-------
Stream that repeats "0.0" during a given time duration (if any) or
endlessly, but starts with one (and only one) "1.0".
"""
if dur is None or (isinf(dur) and dur > 0):
yield one
while True:
yield zero
elif dur >= .5:
num_samples = int(dur - .5)
yield one
for x in xrange(num_samples):
yield zero
def karplus_strong(freq, tau=2e4, memory=white_noise):
"""
Karplus-Strong "digitar" synthesis algorithm.
Parameters
----------
freq :
Frequency, in rad/sample.
tau :
Time decay (up to ``1/e``, or -8.686 dB), in number of samples. Defaults
to 2e4. Be careful: using the default value will make duration different
on each sample rate value. Use ``sHz`` if you need that independent from
the sample rate and in seconds unit.
memory :
Memory data for the comb filter (delayed "output" data in memory).
Defaults to the ``white_noise`` function.
Returns
-------
Stream instance with the synthesized data.
Note
----
The fractional delays are solved by exponent linearization.
See Also
--------
sHz :
Second and hertz constants from samples/second rate.
white_noise :
White noise stream generator.
"""
return comb.tau(2 * pi / freq, tau).linearize()(zeros(), memory=memory)
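# Editor's note: illustrative sketch, not part of the original source. karplus_strong
# above loops a white-noise memory buffer through a feedback comb filter; the frequency
# below (in rad/sample) is arbitrary and corresponds to a delay of about 100 samples.
def _karplus_strong_example():
  """Take the start of a plucked-string burst (illustrative only)."""
  pluck = karplus_strong(freq=2 * pi / 100, tau=2e4)
  return pluck.take(16)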
|
antiface/audiolazy
|
audiolazy/lazy_synth.py
|
Python
|
gpl-3.0
| 17,869
|
[
"Gaussian"
] |
478e5aa4761115661b7d423a5a7b517fe413e5f6515801622b982afd7d6ff599
|
import numpy as np
from itertools import izip
from multiprocessing import Pool
from scipy.linalg import inv
import emcee as mc
from pearce.mocks.kittens import TrainingBox
from collections import OrderedDict
import h5py
def lnprior(theta, param_names, param_bounds, *args):
"""
    Prior for an MCMC. Default is to assume a flat prior for all parameters, defined by the boundaries the
    emulator is built from. Returns negative infinity if a parameter is outside its bounds or is NaN.
    :param theta:
        The parameters proposed by the sampler.
    :param param_names:
        The names identifying the values in theta, needed to extract their boundaries.
    :param param_bounds:
        Dictionary of the boundaries allowed for each parameter.
    :return:
        Either 0 or -np.inf, depending on whether the params are allowed or not.
    """
for p, t in izip(param_names, theta):
low, high = param_bounds[p]
if np.isnan(t) or t < low or t > high:
return -np.inf
return 0
def lnlike(theta, param_names, param_bounds, fixed_params, r_bins, y, combined_inv_cov):
"""
:param theta:
Proposed parameters.
:param param_names:
The names of the parameters in theta
:param fixed_params:
Dictionary of parameters necessary to predict y_bar but are not being sampled over.
:param cat:
Cat object corresponding to the loaded cosmology.
:param r_bins:
The centers of the r bins y is measured in, angular or radial.
:param y:
        The measured values of the observables to compare to the emulators. Must be an iterable that contains
        predictions of each observable.
    :param combined_inv_cov:
        The inverse covariance matrices. Explicitly, the inverse of the sum of the measurement covariance matrix
        and the matrix from the emulator, both for each observable. Both are independent of emulator parameters,
        so can be precomputed. Must be an iterable with a matrix for each observable.
:return:
        The log likelihood of theta given the measurements and the emulator.
"""
cat = _cat
param_dict = dict(izip(param_names, theta))
param_dict.update(fixed_params)
cat.populate(param_dict)
pred = cat.calc_vdf(r_bins, n_cores=16).squeeze()
delta = pred - y
#print delta
return - np.dot(delta, np.dot(combined_inv_cov, delta))
def lnprob(theta, *args):
"""
    The total likelihood for an MCMC. Mostly a generic wrapper for the functions above.
:param theta:
Parameters for the proposal
:param args:
        Arguments to pass into the likelihood
:return:
        Log likelihood of theta, a float.
"""
lp = lnprior(theta, *args)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, *args)
def _random_initial_guess(param_bounds, nwalkers):
"""
Create a random initial guess for the sampler. Creates a 3-sigma gaussian ball around the center of the prior space.
    :param param_bounds:
        Ordered dictionary mapping each parameter name to its (low, high) prior boundaries.
    :param nwalkers:
        Number of walkers to initiate. Must be the same as in resume_from_previous.
:return: pos0, the initial position of each walker for the chain.
"""
pos0 = np.zeros((nwalkers, len(param_bounds)))
for idx, (pname, pbound) in enumerate(param_bounds.iteritems()):
low, high = pbound
pos0[:, idx] = np.random.randn(nwalkers) * (np.abs(high - low) / 6.0) + (low + high) / 2.0
return pos0
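# Editor's note: illustrative sketch, not part of the original script. It shows how
# _random_initial_guess seeds walkers inside the prior box; the two-parameter bounds
# dict below is made up purely for the example.
def _initial_guess_example():
    """Draw starting positions for a toy two-parameter prior (illustrative only)."""
    toy_bounds = OrderedDict([('alpha', (0.85, 1.15)), ('logMmin', (13.0, 14.0))])
    pos0 = _random_initial_guess(toy_bounds, nwalkers=4)
    return pos0.shape  # (nwalkers, n_params) == (4, 2)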
def run_mcmc_iterator(cat, param_bounds, y, cov, r_bins,fixed_params={},
pos0=None, nwalkers=100, nsteps=5000, ncores=8, return_lnprob=False):
"""
Run an MCMC using emcee and the emu. Includes some sanity checks and does some precomputation.
Also optimized to be more efficient than using emcee naively with the emulator.
This version, as opposed to run_mcmc, "yields" each step of the chain, to write to file or to print.
:param cat:
Loaded version of a cat object with a model and halocatalog loaded
:param param_names:
Names of the parameters to constrain
:param y:
data to constrain against. either one array of observables, of size (n_bins*n_obs)
:param cov:
measured covariance of y for each y. Should have the same shape as y, but square
:param r_bin_centers:
The scale bins corresponding to all y in ys
:param resume_from_previous:
String listing filename of a previous chain to resume from. Default is None, which starts a new chain.
:param fixed_params:
Any values held fixed during the emulation, default is {}
:param nwalkers:
        Number of walkers for the mcmc. Default is 100.
    :param nsteps:
        Number of steps for the mcmc. Default is 5000.
:param nburn:
Number of burn in steps, default is 20
:param ncores:
Number of cores. Default is 'all', which will use all cores available
:param return_lnprob:
        Whether to return the evaluation of lnprob on the samples along with the samples. Default is False,
which only returns samples.
:yield:
        chain, collapsed to the shape ((nsteps-nburn)*nwalkers, len(param_names))
"""
    global _cat
    _cat = cat
pool = Pool(processes=ncores)
param_names = param_bounds.keys()
num_params = len(param_names)
combined_inv_cov = inv(cov)
sampler = mc.EnsembleSampler(nwalkers, num_params, lnprob, pool=pool,
                                 args=(param_names, param_bounds, fixed_params, r_bins, y, combined_inv_cov))
if pos0 is None:
pos0 = _random_initial_guess(param_bounds, nwalkers)
for result in sampler.sample(pos0, iterations=nsteps, storechain=False):
if return_lnprob:
yield result[0], result[1]
else:
yield result[0]
#if __name__ == "__main__":
from sys import argv
output_fname = argv[1]
boxno = 12
cat = TrainingBox(boxno)
cat.load(1.0, HOD='zheng07')
hod_param_bounds = OrderedDict({'logMmin': (13.0, 14.0),
'sigma_logM': (0.05, 0.5),
'alpha': (0.85, 1.15),
'logM0': (12.5, 14.5),
'logM1': (13.5, 15.5)} )
true_point = _random_initial_guess(hod_param_bounds, 1).squeeze()
true_dict = OrderedDict(zip(hod_param_bounds.keys(), true_point))
print 'Truth', true_dict
cat.populate(true_dict)
print true_dict
r_bins = np.logspace(-1, 1.6, 19)
y = cat.calc_vdf(r_bins, n_cores=16).squeeze()
print hod_param_bounds.keys()
print true_point
print y
from sys import exit
exit(0)
cov_ys = np.zeros((25, y.shape[0]))
for i in xrange(25):
cat.populate(true_dict)
cov_ys[i] = cat.calc_vdf(r_bins, n_cores=16).squeeze()
covmat = np.cov(cov_ys, rowvar=False)
nwalkers = 20
nsteps = 5000
with h5py.File(output_fname, 'w') as f:
f.create_dataset('chain', (0, len(hod_param_bounds)),
compression = 'gzip', maxshape = (None, len(hod_param_bounds)))
f.attrs['boxno'] = boxno
f.attrs['r_bins'] = r_bins
f.attrs['true_point'] = true_point
f.attrs['hod_pnames'] = hod_param_bounds.keys()
f.attrs['cov'] = covmat
f.attrs['y'] = y
f.attrs['nwalkers'] = nwalkers
f.attrs['nsteps'] = nsteps
global _cat
_cat = cat
pool = None#Pool(processes=4)
param_names = hod_param_bounds.keys()
num_params = len(param_names)
combined_inv_cov = inv(covmat)
fixed_params = {}
sampler = mc.EnsembleSampler(nwalkers, num_params, lnprob, pool=pool,
args=(param_names, hod_param_bounds, fixed_params, r_bins, y, combined_inv_cov))
pos0 = _random_initial_guess(hod_param_bounds, nwalkers)
for step, pos in enumerate(sampler.sample(pos0, iterations=nsteps, storechain=False)):
with h5py.File(output_fname, 'a') as f:
chain_dset = f['chain']
l = len(chain_dset)
chain_dset.resize((l + nwalkers), axis=0)
chain_dset[-nwalkers:] = pos[0]
|
mclaughlin6464/pearce
|
bin/vdf_mcmc/hod_recovery_test.py
|
Python
|
mit
| 8,063
|
[
"Gaussian"
] |
ed4f96cb83cfc6218bcdee51e0b17a321f82a3b600644a06c736a45bbc0866c5
|
../../../../../../../share/pyshared/orca/scripts/apps/Banshee/__init__.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/Banshee/__init__.py
|
Python
|
gpl-3.0
| 73
|
[
"ORCA"
] |
b9b084693a0710e1c5003a711a5c0cb3a65ccf5c41bab0d234b68b9895f33af8
|
# -*- coding: utf-8 -*-
import base64
import datetime
import json
import time
import mock
from nose.tools import eq_, ok_
from nose.plugins.attrib import attr
from pyquery import PyQuery as pq
from urlparse import urlparse
from django.conf import settings
from django.contrib.sites.models import Site
from django.core import mail
from django.db.models import Q
from django.test.client import (FakePayload, encode_multipart,
BOUNDARY, CONTENT_TYPE_RE, MULTIPART_CONTENT)
from django.test.utils import override_settings
from django.http import Http404
from django.utils.encoding import smart_str
from constance import config
from jingo.helpers import urlparams
from waffle.models import Flag, Switch
from kuma.attachments.models import Attachment
from kuma.attachments.utils import make_test_file
from kuma.authkeys.models import Key
from kuma.core.cache import memcache as cache
from kuma.core.models import IPBan
from kuma.core.tests import post, get, override_constance_settings
from kuma.core.urlresolvers import reverse
from kuma.users.tests import UserTestCase, user
from ..content import get_seo_description
from ..events import EditDocumentEvent
from ..forms import MIDAIR_COLLISION
from ..models import (Document, Revision, RevisionIP, DocumentZone,
DocumentTag, DocumentDeletionLog)
from . import (doc_rev, document, new_document_data, revision,
normalize_html, create_template_test_users,
make_translation, WikiTestCase, FakeResponse)
class RedirectTests(UserTestCase, WikiTestCase):
"""Tests for the REDIRECT wiki directive"""
localizing_client = True
def test_redirect_suppression(self):
"""The document view shouldn't redirect when passed redirect=no."""
redirect, _ = doc_rev('REDIRECT <a class="redirect" '
'href="/en-US/docs/blah">smoo</a>')
url = redirect.get_absolute_url() + '?redirect=no'
response = self.client.get(url, follow=True)
self.assertContains(response, 'REDIRECT ')
def test_redirects_only_internal(self):
"""Ensures redirects cannot be used to link to other sites"""
redirect, _ = doc_rev('REDIRECT <a class="redirect" '
'href="//davidwalsh.name">DWB</a>')
url = redirect.get_absolute_url()
response = self.client.get(url, follow=True)
self.assertContains(response, 'DWB')
def test_redirects_only_internal_2(self):
"""Ensures redirects cannot be used to link to other sites"""
redirect, _ = doc_rev('REDIRECT <a class="redirect" '
'href="http://davidwalsh.name">DWB</a>')
url = redirect.get_absolute_url()
response = self.client.get(url, follow=True)
self.assertContains(response, 'DWB')
def test_self_redirect_suppression(self):
"""The document view shouldn't redirect to itself."""
slug = 'redirdoc'
html = ('REDIRECT <a class="redirect" href="/en-US/docs/%s">smoo</a>' %
slug)
doc = document(title='blah', slug=slug, html=html, save=True,
locale=settings.WIKI_DEFAULT_LANGUAGE)
revision(document=doc, content=html, is_approved=True, save=True)
response = self.client.get(doc.get_absolute_url(), follow=True)
eq_(200, response.status_code)
response_html = pq(response.content)
article_body = response_html.find('#wikiArticle').html()
self.assertHTMLEqual(html, article_body)
class LocaleRedirectTests(UserTestCase, WikiTestCase):
"""Tests for fallbacks to en-US and such for slug lookups."""
# Some of these may fail or be invalid if your WIKI_DEFAULT_LANGUAGE is de.
localizing_client = True
def test_fallback_to_translation(self):
"""If a slug isn't found in the requested locale but is in the default
locale and if there is a translation of that default-locale document to
the requested locale, the translation should be served."""
en_doc, de_doc = self._create_en_and_de_docs()
response = self.client.get(reverse('wiki.document',
args=(en_doc.slug,),
locale='de'),
follow=True)
self.assertRedirects(response, de_doc.get_absolute_url())
def test_fallback_with_query_params(self):
"""The query parameters should be passed along to the redirect."""
en_doc, de_doc = self._create_en_and_de_docs()
url = reverse('wiki.document', args=[en_doc.slug], locale='de')
response = self.client.get(url + '?x=y&x=z', follow=True)
self.assertRedirects(response, de_doc.get_absolute_url() + '?x=y&x=z')
def test_redirect_with_no_slug(self):
"""Bug 775241: Fix exception in redirect for URL with ui-locale"""
loc = settings.WIKI_DEFAULT_LANGUAGE
url = '/%s/docs/%s/' % (loc, loc)
try:
self.client.get(url, follow=True)
except Http404, e:
pass
except Exception, e:
self.fail("The only exception should be a 404, not this: %s" % e)
def _create_en_and_de_docs(self):
en = settings.WIKI_DEFAULT_LANGUAGE
en_doc = document(locale=en, slug='english-slug', save=True)
de_doc = document(locale='de', parent=en_doc, save=True)
revision(document=de_doc, is_approved=True, save=True)
return en_doc, de_doc
class ViewTests(UserTestCase, WikiTestCase):
fixtures = UserTestCase.fixtures + ['wiki/documents.json']
localizing_client = True
@attr('bug875349')
def test_json_view(self):
expected_tags = sorted(['foo', 'bar', 'baz'])
expected_review_tags = sorted(['tech', 'editorial'])
doc = Document.objects.get(pk=1)
doc.tags.set(*expected_tags)
doc.current_revision.review_tags.set(*expected_review_tags)
url = reverse('wiki.json', locale=settings.WIKI_DEFAULT_LANGUAGE)
resp = self.client.get(url, {'title': 'an article title'})
eq_(200, resp.status_code)
data = json.loads(resp.content)
eq_('article-title', data['slug'])
result_tags = sorted([str(x) for x in data['tags']])
eq_(expected_tags, result_tags)
result_review_tags = sorted([str(x) for x in data['review_tags']])
eq_(expected_review_tags, result_review_tags)
url = reverse('wiki.json_slug', args=('article-title',),
locale=settings.WIKI_DEFAULT_LANGUAGE)
Switch.objects.create(name='application_ACAO', active=True)
resp = self.client.get(url)
ok_('Access-Control-Allow-Origin' in resp)
eq_('*', resp['Access-Control-Allow-Origin'])
eq_(200, resp.status_code)
data = json.loads(resp.content)
eq_('an article title', data['title'])
ok_('translations' in data)
result_tags = sorted([str(x) for x in data['tags']])
eq_(expected_tags, result_tags)
result_review_tags = sorted([str(x) for x in data['review_tags']])
eq_(expected_review_tags, result_review_tags)
def test_history_view(self):
slug = 'history-view-test-doc'
html = 'history view test doc'
doc = document(title='History view test doc', slug=slug,
html=html, save=True,
locale=settings.WIKI_DEFAULT_LANGUAGE)
for i in xrange(1, 51):
revision(document=doc, content=html,
comment='Revision %s' % i,
is_approved=True, save=True)
url = reverse('wiki.document_revisions', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
resp = self.client.get(url)
eq_(200, resp.status_code)
all_url = urlparams(reverse('wiki.document_revisions', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE),
limit='all')
resp = self.client.get(all_url)
eq_(403, resp.status_code)
self.client.login(username='testuser', password='testpass')
resp = self.client.get(all_url)
eq_(200, resp.status_code)
def test_toc_view(self):
slug = 'toc_test_doc'
html = '<h2>Head 2</h2><h3>Head 3</h3>'
doc = document(title='blah', slug=slug, html=html, save=True,
locale=settings.WIKI_DEFAULT_LANGUAGE)
revision(document=doc, content=html, is_approved=True, save=True)
url = reverse('wiki.toc', args=[slug],
locale=settings.WIKI_DEFAULT_LANGUAGE)
Switch.objects.create(name='application_ACAO', active=True)
resp = self.client.get(url)
ok_('Access-Control-Allow-Origin' in resp)
eq_('*', resp['Access-Control-Allow-Origin'])
self.assertHTMLEqual(
resp.content, '<ol><li><a href="#Head_2" rel="internal">Head 2</a>'
'<ol><li><a href="#Head_3" rel="internal">Head 3</a>'
'</ol></li></ol>')
@attr('bug875349')
def test_children_view(self):
test_content = '<p>Test <a href="http://example.com">Summary</a></p>'
def _make_doc(title, slug, parent=None, is_redir=False):
doc = document(title=title,
slug=slug,
save=True,
is_redirect=is_redir)
if is_redir:
content = 'REDIRECT <a class="redirect" href="/en-US/blah">Blah</a>'
else:
content = test_content
revision(document=doc,
content=test_content,
summary=get_seo_description(
test_content,
strip_markup=False),
save=True)
doc.html = content
if parent:
doc.parent_topic = parent
doc.save()
return doc
root_doc = _make_doc('Root', 'Root')
child_doc_1 = _make_doc('Child 1', 'Root/Child_1', root_doc)
_make_doc('Grandchild 1', 'Root/Child_1/Grandchild_1', child_doc_1)
grandchild_doc_2 = _make_doc('Grandchild 2',
'Root/Child_1/Grandchild_2',
child_doc_1)
_make_doc('Great Grandchild 1',
'Root/Child_1/Grandchild_2/Great_Grand_Child_1',
grandchild_doc_2)
_make_doc('Child 2', 'Root/Child_2', root_doc)
_make_doc('Child 3', 'Root/Child_3', root_doc, True)
Switch.objects.create(name='application_ACAO', active=True)
for expand in (True, False):
url = reverse('wiki.get_children', args=['Root'],
locale=settings.WIKI_DEFAULT_LANGUAGE)
if expand:
url = '%s?expand' % url
resp = self.client.get(url)
ok_('Access-Control-Allow-Origin' in resp)
eq_('*', resp['Access-Control-Allow-Origin'])
json_obj = json.loads(resp.content)
# Basic structure creation testing
eq_(json_obj['slug'], 'Root')
if not expand:
ok_('summary' not in json_obj)
else:
eq_(json_obj['summary'],
'Test <a href="http://example.com">Summary</a>')
ok_('tags' in json_obj)
ok_('review_tags' in json_obj)
eq_(len(json_obj['subpages']), 2)
eq_(len(json_obj['subpages'][0]['subpages']), 2)
eq_(json_obj['subpages'][0]['subpages'][1]['title'],
'Grandchild 2')
# Depth parameter testing
def _depth_test(depth, aught):
url = reverse('wiki.get_children', args=['Root'],
locale=settings.WIKI_DEFAULT_LANGUAGE) + '?depth=' + str(depth)
resp = self.client.get(url)
json_obj = json.loads(resp.content)
eq_(len(json_obj['subpages'][0]['subpages'][1]['subpages']), aught)
_depth_test(2, 0)
_depth_test(3, 1)
_depth_test(6, 1)
# Sorting test
sort_root_doc = _make_doc('Sort Root', 'Sort_Root')
_make_doc('B Child', 'Sort_Root/B_Child', sort_root_doc)
_make_doc('A Child', 'Sort_Root/A_Child', sort_root_doc)
resp = self.client.get(reverse('wiki.get_children', args=['Sort_Root'],
locale=settings.WIKI_DEFAULT_LANGUAGE))
json_obj = json.loads(resp.content)
eq_(json_obj['subpages'][0]['title'], 'A Child')
# Test if we are serving an error json if document does not exist
no_doc_url = reverse('wiki.get_children', args=['nonexistentDocument'],
locale=settings.WIKI_DEFAULT_LANGUAGE)
resp = self.client.get(no_doc_url)
result = json.loads(resp.content)
eq_(result, {'error': 'Document does not exist.'})
def test_summary_view(self):
"""The ?summary option should restrict document view to summary"""
d, r = doc_rev("""
<p>Foo bar <a href="http://example.com">baz</a></p>
<p>Quux xyzzy</p>
""")
resp = self.client.get('%s?raw&summary' % d.get_absolute_url())
eq_(resp.content, 'Foo bar <a href="http://example.com">baz</a>')
@override_settings(CELERY_ALWAYS_EAGER=True)
@mock.patch('waffle.flag_is_active')
@mock.patch('kuma.wiki.jobs.DocumentContributorsJob.get')
def test_footer_contributors(self, get_contributors, flag_is_active):
get_contributors.return_value = [
{'id': 1, 'username': 'ringo', 'email': 'ringo@apple.co.uk'},
{'id': 2, 'username': 'john', 'email': 'lennon@apple.co.uk'},
]
flag_is_active.return_value = True
d, r = doc_rev('some content')
resp = self.client.get(d.get_absolute_url())
page = pq(resp.content)
contributors = (page.find(":contains('Contributors to this page')")
.parent())
# just checking if the contributor link is rendered
eq_(len(contributors.find('a')), 2)
def test_revision_view_bleached_content(self):
"""Bug 821988: Revision content should be cleaned with bleach"""
d, r = doc_rev("""
<a href="#" onload=alert(3)>Hahaha</a>
<svg><svg onload=alert(3);>
""")
resp = self.client.get(r.get_absolute_url())
page = pq(resp.content)
ct = page.find('#wikiArticle').html()
ok_('<svg>' not in ct)
ok_('<a href="#">Hahaha</a>' in ct)
def test_raw_css_view(self):
"""The raw source for a document can be requested"""
self.client.login(username='admin', password='testpass')
doc = document(title='Template:CustomSampleCSS',
slug='Template:CustomSampleCSS',
save=True)
revision(
save=True,
is_approved=True,
document=doc,
content="""
/* CSS here */
body {
padding: 0;
margin: 0;
}
svg:not(:root) {
display:block;
}
""")
response = self.client.get('%s?raw=true' %
reverse('wiki.document', args=[doc.slug]))
ok_('text/css' in response['Content-Type'])
class PermissionTests(UserTestCase, WikiTestCase):
localizing_client = True
def setUp(self):
"""Set up the permissions, groups, and users needed for the tests"""
super(PermissionTests, self).setUp()
self.perms, self.groups, self.users, self.superuser = (
create_template_test_users())
def test_template_revert_permission(self):
locale = 'en-US'
slug = 'Template:test-revert-perm'
doc = document(save=True, slug=slug, title=slug, locale=locale)
rev = revision(save=True, document=doc)
# Revision template should not show revert button
url = reverse('wiki.revision', args=([doc.slug, rev.id]))
resp = self.client.get(url)
ok_('Revert' not in resp.content)
# Revert POST should give permission denied to user without perm
username = self.users['none'].username
self.client.login(username=username, password='testpass')
url = reverse('wiki.revert_document',
args=([doc.slug, rev.id]))
resp = self.client.post(url, {'comment': 'test'})
eq_(403, resp.status_code)
# Revert POST should give success to user with perm
username = self.users['change'].username
self.client.login(username=username, password='testpass')
url = reverse('wiki.revert_document',
args=([doc.slug, rev.id]))
resp = self.client.post(url, {'comment': 'test'}, follow=True)
eq_(200, resp.status_code)
def test_template_permissions(self):
msg = ('edit', 'create')
for is_add in (True, False):
slug_trials = (
('test_for_%s', (
(True, self.superuser),
(True, self.users['none']),
(True, self.users['all']),
(True, self.users['add']),
(True, self.users['change']),
)),
('Template:test_for_%s', (
(True, self.superuser),
(False, self.users['none']),
(True, self.users['all']),
(is_add, self.users['add']),
(not is_add, self.users['change']),
))
)
for slug_tmpl, trials in slug_trials:
for expected, tmp_user in trials:
username = tmp_user.username
slug = slug_tmpl % username
locale = settings.WIKI_DEFAULT_LANGUAGE
Document.objects.all().filter(slug=slug).delete()
if not is_add:
doc = document(save=True, slug=slug, title=slug,
locale=locale)
revision(save=True, document=doc)
self.client.login(username=username, password='testpass')
data = new_document_data()
slug = slug_tmpl % username
data.update({"title": slug, "slug": slug})
if is_add:
url = reverse('wiki.new_document', locale=locale)
resp = self.client.post(url, data, follow=False)
else:
data['form'] = 'rev'
url = reverse('wiki.edit_document', args=(slug,),
locale=locale)
resp = self.client.post(url, data, follow=False)
if expected:
eq_(302, resp.status_code,
"%s should be able to %s %s" %
                            (username, msg[is_add], slug))
Document.objects.filter(slug=slug).delete()
else:
eq_(403, resp.status_code,
"%s should not be able to %s %s" %
                            (username, msg[is_add], slug))
class ConditionalGetTests(UserTestCase, WikiTestCase):
"""Tests for conditional GET on document view"""
localizing_client = True
def test_last_modified(self):
"""Ensure the last-modified stamp of a document is cached"""
doc, rev = doc_rev()
get_url = reverse('wiki.document',
args=[doc.slug],
locale=settings.WIKI_DEFAULT_LANGUAGE)
# There should be a last-modified date cached for this document already
cache_key = doc.last_modified_cache_key
ok_(cache.get(cache_key))
# Now, try a request, and ensure that the last-modified header is
# present.
response = self.client.get(get_url, follow=False)
ok_(response.has_header('last-modified'))
last_mod = response['last-modified']
# Try another request, using If-Modified-Since. This should be a 304
response = self.client.get(get_url, follow=False,
HTTP_IF_MODIFIED_SINCE=last_mod)
eq_(304, response.status_code)
# Finally, ensure that the last-modified was cached.
cached_last_mod = cache.get(cache_key)
eq_(doc.modified.strftime('%s'), cached_last_mod)
# Let the clock tick, so the last-modified will change on edit.
time.sleep(1.0)
# Edit the document, ensure the last-modified has been invalidated.
revision(document=doc, content="New edits", save=True)
ok_(cache.get(cache_key) != cached_last_mod)
        # The last-modified has changed, so the same If-Modified-Since now
        # yields a 200, and the last-modified in the response and cache
        # should have changed.
response = self.client.get(get_url, follow=False,
HTTP_IF_MODIFIED_SINCE=last_mod)
eq_(200, response.status_code)
ok_(last_mod != response['last-modified'])
ok_(cached_last_mod != cache.get(cache_key))
def test_deletion_clears_last_modified(self):
"""Deleting a page clears any last-modified caching"""
# Setup mostly the same as previous test, to get a doc and set
# last-modified info.
doc, rev = doc_rev()
self.url = reverse('wiki.document',
args=[doc.slug],
locale=settings.WIKI_DEFAULT_LANGUAGE)
cache_key = doc.last_modified_cache_key
last_mod = cache.get(cache_key)
ok_(last_mod) # exists already because pre-filled
self.client.get(self.url, follow=False)
ok_(cache.get(cache_key) == last_mod)
# Now delete the doc and make sure there's no longer
# last-modified data in the cache for it afterward.
doc.delete()
ok_(not cache.get(cache_key))
def test_deleted_doc_returns_404(self):
"""Requesting a deleted doc returns 404"""
doc, rev = doc_rev()
doc.delete()
DocumentDeletionLog.objects.create(locale=doc.locale, slug=doc.slug,
user=rev.creator, reason="test")
response = self.client.get(doc.get_absolute_url(), follow=False)
eq_(404, response.status_code)
class ReadOnlyTests(UserTestCase, WikiTestCase):
"""Tests readonly scenarios"""
fixtures = UserTestCase.fixtures + ['wiki/documents.json']
localizing_client = True
def setUp(self):
super(ReadOnlyTests, self).setUp()
self.d, r = doc_rev()
self.edit_url = reverse('wiki.edit_document', args=[self.d.slug])
def test_everyone(self):
""" kumaediting: everyone, kumabanned: none """
self.kumaediting_flag.everyone = True
self.kumaediting_flag.save()
self.client.login(username='testuser', password='testpass')
resp = self.client.get(self.edit_url)
eq_(200, resp.status_code)
def test_superusers_only(self):
""" kumaediting: superusers, kumabanned: none """
self.kumaediting_flag.everyone = None
self.kumaediting_flag.superusers = True
self.kumaediting_flag.save()
self.client.login(username='testuser', password='testpass')
resp = self.client.get(self.edit_url)
eq_(403, resp.status_code)
ok_('The wiki is in read-only mode.' in resp.content)
self.client.logout()
self.client.login(username='admin', password='testpass')
resp = self.client.get(self.edit_url)
eq_(200, resp.status_code)
def test_banned_users(self):
""" kumaediting: everyone, kumabanned: testuser2 """
self.kumaediting_flag.everyone = True
self.kumaediting_flag.save()
# ban testuser2
kumabanned = Flag.objects.create(name='kumabanned')
kumabanned.users = self.user_model.objects.filter(username='testuser2')
kumabanned.save()
# testuser can still access
self.client.login(username='testuser', password='testpass')
resp = self.client.get(self.edit_url)
eq_(200, resp.status_code)
self.client.logout()
# testuser2 cannot
self.client.login(username='testuser2', password='testpass')
resp = self.client.get(self.edit_url)
eq_(403, resp.status_code)
ok_('Your profile has been banned from making edits.' in resp.content)
# ban testuser01 and testuser2
kumabanned.users = self.user_model.objects.filter(
Q(username='testuser2') | Q(username='testuser01'))
kumabanned.save()
# testuser can still access
self.client.login(username='testuser', password='testpass')
resp = self.client.get(self.edit_url)
eq_(200, resp.status_code)
self.client.logout()
# testuser2 cannot access
self.client.login(username='testuser2', password='testpass')
resp = self.client.get(self.edit_url)
eq_(403, resp.status_code)
ok_('Your profile has been banned from making edits.' in resp.content)
# testuser01 cannot access
self.client.login(username='testuser01', password='testpass')
resp = self.client.get(self.edit_url)
eq_(403, resp.status_code)
ok_('Your profile has been banned from making edits.' in resp.content)
class BannedIPTests(UserTestCase, WikiTestCase):
"""Tests readonly scenarios"""
fixtures = UserTestCase.fixtures + ['wiki/documents.json']
localizing_client = True
def setUp(self):
super(BannedIPTests, self).setUp()
self.ip = '127.0.0.1'
self.ip_ban = IPBan.objects.create(ip=self.ip)
self.doc, rev = doc_rev()
self.edit_url = reverse('wiki.edit_document',
args=[self.doc.slug])
def tearDown(self):
cache.clear()
def test_banned_ip_cant_get_edit(self):
self.client.login(username='testuser', password='testpass')
response = self.client.get(self.edit_url, REMOTE_ADDR=self.ip)
eq_(403, response.status_code)
def test_banned_ip_cant_post_edit(self):
self.client.login(username='testuser', password='testpass')
response = self.client.get(self.edit_url, REMOTE_ADDR=self.ip)
eq_(403, response.status_code)
def test_banned_ip_can_still_get_articles(self):
response = self.client.get(self.doc.get_absolute_url(),
REMOTE_ADDR=self.ip)
eq_(200, response.status_code)
class KumascriptIntegrationTests(UserTestCase, WikiTestCase):
"""
Tests for usage of the kumascript service.
Note that these tests really just check whether or not the service was
used, and are not integration tests meant to exercise the real service.
"""
localizing_client = True
def setUp(self):
super(KumascriptIntegrationTests, self).setUp()
self.d, self.r = doc_rev()
self.r.content = "TEST CONTENT"
self.r.save()
self.d.tags.set('foo', 'bar', 'baz')
self.url = reverse('wiki.document',
args=(self.d.slug,),
locale=self.d.locale)
# TODO: upgrade mock to 0.8.0 so we can do this.
# self.mock_kumascript_get = (
# mock.patch('kuma.wiki.kumascript.get'))
# self.mock_kumascript_get.return_value = self.d.html
def tearDown(self):
super(KumascriptIntegrationTests, self).tearDown()
# TODO: upgrade mock to 0.8.0 so we can do this.
# self.mock_kumascript_get.stop()
@override_constance_settings(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_basic_view(self, mock_kumascript_get):
"""When kumascript timeout is non-zero, the service should be used"""
mock_kumascript_get.return_value = (self.d.html, None)
self.client.get(self.url, follow=False)
ok_(mock_kumascript_get.called,
"kumascript should have been used")
@override_constance_settings(KUMASCRIPT_TIMEOUT=0.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_disabled(self, mock_kumascript_get):
"""When disabled, the kumascript service should not be used"""
mock_kumascript_get.return_value = (self.d.html, None)
self.client.get(self.url, follow=False)
ok_(not mock_kumascript_get.called,
"kumascript not should have been used")
@override_constance_settings(KUMASCRIPT_TIMEOUT=0.0)
@mock.patch('kuma.wiki.kumascript.get')
@override_settings(CELERY_ALWAYS_EAGER=True)
def test_disabled_rendering(self, mock_kumascript_get):
"""When disabled, the kumascript service should not be used
in rendering"""
mock_kumascript_get.return_value = (self.d.html, None)
self.d.schedule_rendering('max-age=0')
ok_(not mock_kumascript_get.called,
"kumascript not should have been used")
@override_constance_settings(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_nomacros(self, mock_kumascript_get):
mock_kumascript_get.return_value = (self.d.html, None)
self.client.get('%s?nomacros' % self.url, follow=False)
ok_(not mock_kumascript_get.called,
"kumascript should not have been used")
@override_constance_settings(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_raw(self, mock_kumascript_get):
mock_kumascript_get.return_value = (self.d.html, None)
self.client.get('%s?raw' % self.url, follow=False)
ok_(not mock_kumascript_get.called,
"kumascript should not have been used")
@override_constance_settings(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_raw_macros(self, mock_kumascript_get):
mock_kumascript_get.return_value = (self.d.html, None)
        self.client.get('%s?raw&macros' % self.url, follow=False)
ok_(mock_kumascript_get.called,
"kumascript should have been used")
@override_constance_settings(KUMASCRIPT_TIMEOUT=1.0,
KUMASCRIPT_MAX_AGE=1234)
@mock.patch('requests.get')
def test_ua_max_age_zero(self, mock_requests_get):
"""Authenticated users can request a zero max-age for kumascript"""
trap = {}
def my_requests_get(url, headers=None, timeout=None):
trap['headers'] = headers
return FakeResponse(status_code=200,
headers={}, text='HELLO WORLD')
mock_requests_get.side_effect = my_requests_get
self.client.get(self.url, follow=False,
HTTP_CACHE_CONTROL='no-cache')
eq_('max-age=1234', trap['headers']['Cache-Control'])
self.client.login(username='admin', password='testpass')
self.client.get(self.url, follow=False,
HTTP_CACHE_CONTROL='no-cache')
eq_('no-cache', trap['headers']['Cache-Control'])
@override_constance_settings(KUMASCRIPT_TIMEOUT=1.0,
KUMASCRIPT_MAX_AGE=1234)
@mock.patch('requests.get')
def test_ua_no_cache(self, mock_requests_get):
"""Authenticated users can request no-cache for kumascript"""
trap = {}
def my_requests_get(url, headers=None, timeout=None):
trap['headers'] = headers
return FakeResponse(status_code=200,
headers={}, text='HELLO WORLD')
mock_requests_get.side_effect = my_requests_get
self.client.get(self.url, follow=False,
HTTP_CACHE_CONTROL='no-cache')
eq_('max-age=1234', trap['headers']['Cache-Control'])
self.client.login(username='admin', password='testpass')
self.client.get(self.url, follow=False,
HTTP_CACHE_CONTROL='no-cache')
eq_('no-cache', trap['headers']['Cache-Control'])
@override_constance_settings(KUMASCRIPT_TIMEOUT=1.0,
KUMASCRIPT_MAX_AGE=1234)
@mock.patch('requests.get')
def test_conditional_get(self, mock_requests_get):
"""Ensure conditional GET in requests to kumascript work as expected"""
expected_etag = "8675309JENNY"
expected_modified = "Wed, 14 Mar 2012 22:29:17 GMT"
expected_content = "HELLO THERE, WORLD"
trap = dict(req_cnt=0)
def my_requests_get(url, headers=None, timeout=None):
trap['req_cnt'] += 1
trap['headers'] = headers
if trap['req_cnt'] in [1, 2]:
return FakeResponse(
status_code=200, text=expected_content,
headers={
"etag": expected_etag,
"last-modified": expected_modified,
"age": 456
})
else:
return FakeResponse(
status_code=304, text='',
headers={
"etag": expected_etag,
"last-modified": expected_modified,
"age": 123
})
mock_requests_get.side_effect = my_requests_get
# First request to let the view cache etag / last-modified
response = self.client.get(self.url)
# Clear rendered_html to force another request.
self.d.rendered_html = ''
self.d.save()
# Second request to verify the view sends them back
response = self.client.get(self.url)
eq_(expected_etag, trap['headers']['If-None-Match'])
eq_(expected_modified, trap['headers']['If-Modified-Since'])
# Third request to verify content was cached and served on a 304
response = self.client.get(self.url)
ok_(expected_content in response.content)
@override_constance_settings(KUMASCRIPT_TIMEOUT=1.0,
KUMASCRIPT_MAX_AGE=600)
@mock.patch('requests.get')
def test_error_reporting(self, mock_requests_get):
"""Kumascript reports errors in HTTP headers, Kuma should display"""
# Make sure we have enough log messages to ensure there are more than
# 10 lines of Base64 in headers. This ensures that there'll be a
# failure if the view sorts FireLogger sequence number alphabetically
# instead of numerically.
expected_errors = {
"logs": [
{"level": "debug",
"message": "Message #1",
"args": ['TestError', {}, {'name': 'SomeMacro', 'token': {'args': 'arguments here'}}],
"time": "12:32:03 GMT-0400 (EDT)",
"timestamp": "1331829123101000"},
{"level": "warning",
"message": "Message #2",
"args": ['TestError', {}, {'name': 'SomeMacro2'}],
"time": "12:33:58 GMT-0400 (EDT)",
"timestamp": "1331829238052000"},
{"level": "info",
"message": "Message #3",
"args": ['TestError'],
"time": "12:34:22 GMT-0400 (EDT)",
"timestamp": "1331829262403000"},
{"level": "debug",
"message": "Message #4",
"time": "12:32:03 GMT-0400 (EDT)",
"timestamp": "1331829123101000"},
{"level": "warning",
"message": "Message #5",
"time": "12:33:58 GMT-0400 (EDT)",
"timestamp": "1331829238052000"},
{"level": "info",
"message": "Message #6",
"time": "12:34:22 GMT-0400 (EDT)",
"timestamp": "1331829262403000"},
]
}
# Pack it up, get ready to ship it out.
d_json = json.dumps(expected_errors)
d_b64 = base64.encodestring(d_json)
d_lines = [x for x in d_b64.split("\n") if x]
# Headers are case-insensitive, so let's just drive that point home
p = ['firelogger', 'FIRELOGGER', 'FireLogger']
fl_uid = 8675309
headers_out = {}
for i in range(0, len(d_lines)):
headers_out['%s-%s-%s' % (p[i % len(p)], fl_uid, i)] = d_lines[i]
# Now, trap the request from the view.
trap = {}
def my_requests_get(url, headers=None, timeout=None):
trap['headers'] = headers
return FakeResponse(
status_code=200,
text='HELLO WORLD',
headers=headers_out
)
mock_requests_get.side_effect = my_requests_get
# Finally, fire off the request to the view and ensure that the log
# messages were received and displayed on the page. But, only for a
# logged in user.
self.client.login(username='admin', password='testpass')
response = self.client.get(self.url)
eq_(trap['headers']['X-FireLogger'], '1.2')
for error in expected_errors['logs']:
ok_(error['message'] in response.content)
eq_(response.status_code, 200)
@override_constance_settings(KUMASCRIPT_TIMEOUT=1.0,
KUMASCRIPT_MAX_AGE=600)
@mock.patch('requests.post')
def test_preview_nonascii(self, mock_post):
"""POSTing non-ascii to kumascript should encode to utf8"""
content = u'Français'
trap = {}
def my_post(url, timeout=None, headers=None, data=None):
trap['data'] = data
return FakeResponse(status_code=200, headers={},
text=content.encode('utf8'))
mock_post.side_effect = my_post
self.client.login(username='admin', password='testpass')
self.client.post(reverse('wiki.preview'), {'content': content})
try:
trap['data'].decode('utf8')
except UnicodeDecodeError:
self.fail("Data wasn't posted as utf8")
class DocumentSEOTests(UserTestCase, WikiTestCase):
"""Tests for the document seo logic"""
localizing_client = True
def test_seo_title(self):
self.client.login(username='admin', password='testpass')
# Utility to make a quick doc
def _make_doc(title, aught_titles, slug):
doc = document(save=True, slug=slug, title=title,
locale=settings.WIKI_DEFAULT_LANGUAGE)
revision(save=True, document=doc)
response = self.client.get(reverse('wiki.document', args=[slug],
locale=settings.WIKI_DEFAULT_LANGUAGE))
page = pq(response.content)
ok_(page.find('title').text() in aught_titles)
# Test nested document titles
_make_doc('One', ['One | MDN'], 'one')
_make_doc('Two', ['Two - One | MDN'], 'one/two')
_make_doc('Three', ['Three - One | MDN'], 'one/two/three')
_make_doc(u'Special Φ Char',
[u'Special \u03a6 Char - One | MDN',
u'Special \xce\xa6 Char - One | MDN'],
'one/two/special_char')
# Additional tests for /Web/* changes
_make_doc('Firefox OS', ['Firefox OS | MDN'], 'firefox_os')
_make_doc('Email App', ['Email App - Firefox OS | MDN'],
'firefox_os/email_app')
_make_doc('Web', ['Web | MDN'], 'Web')
_make_doc('HTML', ['HTML | MDN'], 'Web/html')
_make_doc('Fieldset', ['Fieldset - HTML | MDN'], 'Web/html/fieldset')
_make_doc('Legend', ['Legend - HTML | MDN'],
'Web/html/fieldset/legend')
def test_seo_script(self):
self.client.login(username='admin', password='testpass')
def make_page_and_compare_seo(slug, content, aught_preview):
# Create the doc
data = new_document_data()
data.update({'title': 'blah', 'slug': slug, 'content': content})
response = self.client.post(reverse('wiki.new_document',
locale=settings.WIKI_DEFAULT_LANGUAGE),
data)
eq_(302, response.status_code)
# Connect to newly created page
response = self.client.get(reverse('wiki.document', args=[slug],
locale=settings.WIKI_DEFAULT_LANGUAGE))
page = pq(response.content)
meta_content = page.find('meta[name=description]').attr('content')
eq_(str(meta_content).decode('utf-8'),
str(aught_preview).decode('utf-8'))
# Test pages - very basic
good = 'This is the content which should be chosen, man.'
make_page_and_compare_seo('one', '<p>' + good + '</p>', good)
# No content, no seo
make_page_and_compare_seo('two', 'blahblahblahblah<br />', None)
# No summary, no seo
make_page_and_compare_seo('three', '<div><p>You cant see me</p></div>',
None)
# Warning paragraph ignored
make_page_and_compare_seo('four',
'<div class="geckoVersion">'
'<p>No no no</p></div><p>yes yes yes</p>',
'yes yes yes')
# Warning paragraph ignored, first one chosen if multiple matches
make_page_and_compare_seo('five',
'<div class="geckoVersion"><p>No no no</p>'
'</div><p>yes yes yes</p>'
'<p>ignore ignore ignore</p>',
'yes yes yes')
# Don't take legacy crumbs
make_page_and_compare_seo('six', u'<p>« CSS</p><p>I am me!</p>',
'I am me!')
# Take the seoSummary class'd element
make_page_and_compare_seo('seven',
u'<p>I could be taken</p>'
'<p class="seoSummary">I should be though</p>',
'I should be though')
# Two summaries append
make_page_and_compare_seo('eight',
u'<p>I could be taken</p>'
'<p class="seoSummary">a</p>'
'<p class="seoSummary">b</p>',
'a b')
# No brackets
make_page_and_compare_seo('nine',
u'<p>I <em>am</em> awesome.'
' <a href="blah">A link</a> is also <cool></p>',
u'I am awesome. A link is also cool')
class DocumentEditingTests(UserTestCase, WikiTestCase):
"""Tests for the document-editing view"""
localizing_client = True
def test_noindex_post(self):
self.client.login(username='admin', password='testpass')
# Go to new document page to ensure no-index header works
response = self.client.get(reverse('wiki.new_document', args=[],
locale=settings.WIKI_DEFAULT_LANGUAGE))
eq_(response['X-Robots-Tag'], 'noindex')
@attr('bug821986')
def test_editor_safety_filter(self):
"""Safety filter should be applied before rendering editor"""
self.client.login(username='admin', password='testpass')
r = revision(save=True, content="""
<svg><circle onload=confirm(3)>
""")
args = [r.document.slug]
urls = (
reverse('wiki.edit_document', args=args),
'%s?tolocale=%s' % (reverse('wiki.translate', args=args), 'fr')
)
for url in urls:
page = pq(self.client.get(url).content)
editor_src = page.find('#id_content').text()
ok_('onload' not in editor_src)
def test_create_on_404(self):
self.client.login(username='admin', password='testpass')
# Create the parent page.
d, r = doc_rev()
# Establish attribs of child page.
locale = settings.WIKI_DEFAULT_LANGUAGE
local_slug = 'Some_New_Title'
slug = '%s/%s' % (d.slug, local_slug)
url = reverse('wiki.document', args=[slug], locale=locale)
# Ensure redirect to create new page on attempt to visit non-existent
# child page.
resp = self.client.get(url)
eq_(302, resp.status_code)
ok_('docs/new' in resp['Location'])
ok_('?slug=%s' % local_slug in resp['Location'])
# Ensure real 404 for visit to non-existent page with params common to
# kumascript and raw content API.
for p_name in ('raw', 'include', 'nocreate'):
sub_url = '%s?%s=1' % (url, p_name)
resp = self.client.get(sub_url)
eq_(404, resp.status_code)
# Ensure root level documents work, not just children
response = self.client.get(reverse('wiki.document',
args=['noExist'], locale=locale))
eq_(302, response.status_code)
response = self.client.get(reverse('wiki.document',
args=['Template:NoExist'],
locale=locale))
eq_(302, response.status_code)
def test_new_document_comment(self):
"""Creating a new document with a revision comment saves the comment"""
self.client.login(username='admin', password='testpass')
comment = 'I am the revision comment'
slug = 'Test-doc-comment'
loc = settings.WIKI_DEFAULT_LANGUAGE
# Create a new doc.
data = new_document_data()
data.update({'slug': slug, 'comment': comment})
self.client.post(reverse('wiki.new_document'), data)
doc = Document.objects.get(slug=slug, locale=loc)
eq_(comment, doc.current_revision.comment)
@attr('toc')
def test_toc_initial(self):
self.client.login(username='admin', password='testpass')
resp = self.client.get(reverse('wiki.new_document'))
eq_(200, resp.status_code)
page = pq(resp.content)
toc_select = page.find('#id_toc_depth')
toc_options = toc_select.find('option')
for option in toc_options:
opt_element = pq(option)
found_selected = False
if opt_element.attr('selected'):
found_selected = True
eq_(str(Revision.TOC_DEPTH_H4), opt_element.attr('value'))
if not found_selected:
raise AssertionError("No ToC depth initially selected.")
@attr('retitle')
def test_retitling_solo_doc(self):
""" Editing just title of non-parent doc:
* Changes title
* Doesn't cause errors
* Doesn't create redirect
"""
# Not testing slug changes separately; the model tests cover those plus
# slug+title changes. If title changes work in the view, the rest
# should also.
self.client.login(username='admin', password='testpass')
new_title = 'Some New Title'
d, r = doc_rev()
old_title = d.title
data = new_document_data()
data.update({'title': new_title,
'form': 'rev'})
data['slug'] = ''
url = reverse('wiki.edit_document', args=[d.slug])
self.client.post(url, data)
eq_(new_title,
Document.objects.get(slug=d.slug, locale=d.locale).title)
try:
Document.objects.get(title=old_title)
self.fail("Should not find doc by old title after retitling.")
except Document.DoesNotExist:
pass
@attr('retitle')
def test_retitling_parent_doc(self):
""" Editing just title of parent doc:
* Changes title
* Doesn't cause errors
* Doesn't create redirect
"""
# Not testing slug changes separately; the model tests cover those plus
# slug+title changes. If title changes work in the view, the rest
# should also.
self.client.login(username='admin', password='testpass')
# create parent doc & rev along with child doc & rev
d = document(title='parent', save=True)
revision(document=d, content='parent', save=True)
d2 = document(title='child', parent_topic=d, save=True)
revision(document=d2, content='child', save=True)
old_title = d.title
new_title = 'Some New Title'
data = new_document_data()
data.update({'title': new_title,
'form': 'rev'})
data['slug'] = ''
url = reverse('wiki.edit_document', args=[d.slug])
self.client.post(url, data)
eq_(new_title,
Document.objects.get(slug=d.slug, locale=d.locale).title)
try:
Document.objects.get(title=old_title)
self.fail("Should not find doc by old title after retitling.")
except Document.DoesNotExist:
pass
def test_slug_change_ignored_for_iframe(self):
"""When the title of an article is edited in an iframe, the change is
ignored."""
self.client.login(username='admin', password='testpass')
new_slug = 'some_new_slug'
d, r = doc_rev()
old_slug = d.slug
data = new_document_data()
data.update({'title': d.title,
'slug': new_slug,
'form': 'rev'})
self.client.post('%s?iframe=1' % reverse('wiki.edit_document',
args=[d.slug]), data)
eq_(old_slug, Document.objects.get(slug=d.slug,
locale=d.locale).slug)
assert "REDIRECT" not in Document.objects.get(slug=old_slug).html
@attr('clobber')
def test_slug_collision_errors(self):
"""When an attempt is made to retitle an article and another with that
title already exists, there should be form errors"""
self.client.login(username='admin', password='testpass')
exist_slug = "existing-doc"
# Create a new doc.
data = new_document_data()
data.update({"slug": exist_slug})
resp = self.client.post(reverse('wiki.new_document'), data)
eq_(302, resp.status_code)
# Create another new doc.
data = new_document_data()
data.update({"slug": 'some-new-title'})
resp = self.client.post(reverse('wiki.new_document'), data)
eq_(302, resp.status_code)
# Now, post an update with duplicate slug
data.update({
'form': 'rev',
'slug': exist_slug
})
resp = self.client.post(reverse('wiki.edit_document',
args=['some-new-title']), data)
eq_(200, resp.status_code)
p = pq(resp.content)
ok_(p.find('.errorlist').length > 0)
ok_(p.find('.errorlist a[href="#id_slug"]').length > 0)
@attr('clobber')
def test_redirect_can_be_clobbered(self):
"""When an attempt is made to retitle an article, and another article
with that title exists but is a redirect, there should be no errors and
the redirect should be replaced."""
self.client.login(username='admin', password='testpass')
exist_title = "Existing doc"
exist_slug = "existing-doc"
changed_title = 'Changed title'
changed_slug = 'changed-title'
# Create a new doc.
data = new_document_data()
data.update({"title": exist_title, "slug": exist_slug})
resp = self.client.post(reverse('wiki.new_document'), data)
eq_(302, resp.status_code)
# Change title and slug
data.update({'form': 'rev',
'title': changed_title,
'slug': changed_slug})
resp = self.client.post(reverse('wiki.edit_document',
args=[exist_slug]),
data)
eq_(302, resp.status_code)
# Change title and slug back to originals, clobbering the redirect
data.update({'form': 'rev',
'title': exist_title,
'slug': exist_slug})
resp = self.client.post(reverse('wiki.edit_document',
args=[changed_slug]),
data)
eq_(302, resp.status_code)
def test_invalid_slug(self):
"""Slugs cannot contain "$", but can contain "/"."""
self.client.login(username='admin', password='testpass')
data = new_document_data()
data['title'] = 'valid slug'
data['slug'] = 'valid'
response = self.client.post(reverse('wiki.new_document'), data)
self.assertRedirects(response,
reverse('wiki.document', args=[data['slug']],
locale=settings.WIKI_DEFAULT_LANGUAGE))
# Slashes should not be acceptable via form input
data['title'] = 'valid with slash'
data['slug'] = 'va/lid'
response = self.client.post(reverse('wiki.new_document'), data)
self.assertContains(response, 'The slug provided is not valid.')
# Dollar sign is reserved for verbs
data['title'] = 'invalid with dollars'
data['slug'] = 'inva$lid'
response = self.client.post(reverse('wiki.new_document'), data)
self.assertContains(response, 'The slug provided is not valid.')
# Question mark is reserved for query params
data['title'] = 'invalid with questions'
data['slug'] = 'inva?lid'
response = self.client.post(reverse('wiki.new_document'), data)
self.assertContains(response, 'The slug provided is not valid.')
def test_invalid_reserved_term_slug(self):
"""Slugs should not collide with reserved URL patterns"""
self.client.login(username='admin', password='testpass')
data = new_document_data()
# TODO: This is info derived from urls.py, but unsure how to DRY it
reserved_slugs = (
'ckeditor_config.js',
'watch-ready-for-review',
'unwatch-ready-for-review',
'watch-approved',
'unwatch-approved',
'.json',
'new',
'all',
'preview-wiki-content',
'category/10',
'needs-review/technical',
'needs-review/',
'feeds/atom/all/',
'feeds/atom/needs-review/technical',
'feeds/atom/needs-review/',
'tag/tasty-pie'
)
for term in reserved_slugs:
data['title'] = 'invalid with %s' % term
data['slug'] = term
response = self.client.post(reverse('wiki.new_document'), data)
self.assertContains(response, 'The slug provided is not valid.')
def test_slug_revamp(self):
self.client.login(username='admin', password='testpass')
def _createAndRunTests(slug):
# Create some vars
locale = settings.WIKI_DEFAULT_LANGUAGE
foreign_locale = 'es'
new_doc_url = reverse('wiki.new_document')
invalid_slug = invalid_slug1 = "some/thing"
invalid_slug2 = "some?thing"
invalid_slug3 = "some thing"
child_slug = 'kiddy'
grandchild_slug = 'grandkiddy'
# Create the document data
doc_data = new_document_data()
doc_data['title'] = slug + ' Doc'
doc_data['slug'] = slug
doc_data['content'] = 'This is the content'
doc_data['is_localizable'] = True
""" NEW DOCUMENT CREATION, CHILD CREATION """
# Create the document, validate it exists
response = self.client.post(new_doc_url, doc_data)
eq_(302, response.status_code) # 302 = good, forward to new page
ok_(slug in response['Location'])
self.assertRedirects(response, reverse('wiki.document',
locale=locale, args=[slug]))
doc_url = reverse('wiki.document', locale=locale, args=[slug])
eq_(self.client.get(doc_url).status_code, 200)
doc = Document.objects.get(locale=locale, slug=slug)
eq_(doc.slug, slug)
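            # Creating a fresh document should not leave behind a redirect document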
            eq_(0, len(Document.objects.filter(title=doc_data['title'] + ' Redirect 1')))
# Create child document data
child_data = new_document_data()
child_data['title'] = slug + ' Child Doc'
child_data['slug'] = invalid_slug
child_data['content'] = 'This is the content'
child_data['is_localizable'] = True
# Attempt to create the child with invalid slug, validate it fails
def test_invalid_slug(inv_slug, url, data, doc):
data['slug'] = inv_slug
response = self.client.post(url, data)
page = pq(response.content)
eq_(200, response.status_code) # 200 = bad, invalid data
# Slug doesn't add parent
eq_(inv_slug, page.find('input[name=slug]')[0].value)
eq_(doc.get_absolute_url(),
page.find('.metadataDisplay').attr('href'))
self.assertContains(response,
'The slug provided is not valid.')
test_invalid_slug(invalid_slug1,
new_doc_url + '?parent=' + str(doc.id),
child_data, doc)
test_invalid_slug(invalid_slug2,
new_doc_url + '?parent=' + str(doc.id),
child_data, doc)
test_invalid_slug(invalid_slug3,
new_doc_url + '?parent=' + str(doc.id),
child_data, doc)
# Attempt to create the child with *valid* slug,
# should succeed and redirect
child_data['slug'] = child_slug
full_child_slug = slug + '/' + child_data['slug']
response = self.client.post(new_doc_url + '?parent=' + str(doc.id),
child_data)
eq_(302, response.status_code)
self.assertRedirects(response, reverse('wiki.document',
locale=locale,
args=[full_child_slug]))
child_doc = Document.objects.get(locale=locale,
slug=full_child_slug)
eq_(child_doc.slug, full_child_slug)
eq_(0, len(Document.objects.filter(
title=child_data['title'] + ' Redirect 1',
locale=locale)))
# Create grandchild data
grandchild_data = new_document_data()
grandchild_data['title'] = slug + ' Grandchild Doc'
grandchild_data['slug'] = invalid_slug
grandchild_data['content'] = 'This is the content'
grandchild_data['is_localizable'] = True
# Attempt to create the child with invalid slug, validate it fails
response = self.client.post(
new_doc_url + '?parent=' + str(child_doc.id), grandchild_data)
page = pq(response.content)
eq_(200, response.status_code) # 200 = bad, invalid data
# Slug doesn't add parent
eq_(invalid_slug, page.find('input[name=slug]')[0].value)
eq_(child_doc.get_absolute_url(),
page.find('.metadataDisplay').attr('href'))
self.assertContains(response, 'The slug provided is not valid.')
# Attempt to create the child with *valid* slug,
# should succeed and redirect
grandchild_data['slug'] = grandchild_slug
full_grandchild_slug = (full_child_slug
+ '/' + grandchild_data['slug'])
response = self.client.post(
new_doc_url + '?parent=' + str(child_doc.id),
grandchild_data)
eq_(302, response.status_code)
self.assertRedirects(response,
reverse('wiki.document', locale=locale,
args=[full_grandchild_slug]))
grandchild_doc = Document.objects.get(locale=locale,
slug=full_grandchild_slug)
eq_(grandchild_doc.slug, full_grandchild_slug)
missing_title = grandchild_data['title'] + ' Redirect 1'
eq_(0, len(Document.objects.filter(title=missing_title,
locale=locale)))
def _run_edit_tests(edit_slug, edit_data, edit_doc,
edit_parent_path):
"""EDIT DOCUMENT TESTING"""
# Load "Edit" page for the root doc, ensure no "/" in the slug
# Also ensure the 'parent' link is not present
response = self.client.get(reverse('wiki.edit_document',
args=[edit_doc.slug], locale=locale))
eq_(200, response.status_code)
page = pq(response.content)
eq_(edit_data['slug'], page.find('input[name=slug]')[0].value)
eq_(edit_parent_path,
page.find('.metadataDisplay').attr('href'))
# Attempt an invalid edit of the root,
# ensure the slug stays the same (i.e. no parent prepending)
def test_invalid_slug_edit(inv_slug, url, data):
data['slug'] = inv_slug
data['form'] = 'rev'
response = self.client.post(url, data)
eq_(200, response.status_code) # 200 = bad, invalid data
page = pq(response.content)
# Slug doesn't add parent
eq_(inv_slug, page.find('input[name=slug]')[0].value)
eq_(edit_parent_path,
page.find('.metadataDisplay').attr('href'))
self.assertContains(response,
'The slug provided is not valid.')
# Ensure no redirect
redirect_title = data['title'] + ' Redirect 1'
eq_(0, len(Document.objects.filter(title=redirect_title,
locale=locale)))
# Push a valid edit, without changing the slug
edit_data['slug'] = edit_slug
edit_data['form'] = 'rev'
response = self.client.post(reverse('wiki.edit_document',
args=[edit_doc.slug],
locale=locale),
edit_data)
eq_(302, response.status_code)
# Ensure no redirect
redirect_title = edit_data['title'] + ' Redirect 1'
eq_(0, len(Document.objects.filter(title=redirect_title,
locale=locale)))
self.assertRedirects(response,
reverse('wiki.document',
locale=locale,
args=[edit_doc.slug]))
def _run_translate_tests(translate_slug, translate_data,
translate_doc):
"""TRANSLATION DOCUMENT TESTING"""
foreign_url = (reverse('wiki.translate',
args=[translate_doc.slug],
locale=locale)
+ '?tolocale='
+ foreign_locale)
foreign_doc_url = reverse('wiki.document',
args=[translate_doc.slug],
locale=foreign_locale)
# Verify translate page form is populated correctly
response = self.client.get(foreign_url)
eq_(200, response.status_code)
page = pq(response.content)
eq_(translate_data['slug'],
page.find('input[name=slug]')[0].value)
# Attempt an invalid edit of the root
# ensure the slug stays the same (i.e. no parent prepending)
def test_invalid_slug_translate(inv_slug, url, data):
data['slug'] = inv_slug
data['form'] = 'both'
response = self.client.post(url, data)
eq_(200, response.status_code) # 200 = bad, invalid data
page = pq(response.content)
# Slug doesn't add parent
eq_(inv_slug, page.find('input[name=slug]')[0].value)
self.assertContains(response,
'The slug provided is not valid.')
# Ensure no redirect
eq_(0, len(Document.objects.filter(title=data['title'] +
' Redirect 1',
locale=foreign_locale)))
# Push a valid translation
translate_data['slug'] = translate_slug
translate_data['form'] = 'both'
response = self.client.post(foreign_url, translate_data)
eq_(302, response.status_code)
# Ensure no redirect
redirect_title = translate_data['title'] + ' Redirect 1'
eq_(0, len(Document.objects.filter(title=redirect_title,
locale=foreign_locale)))
self.assertRedirects(response, foreign_doc_url)
return Document.objects.get(locale=foreign_locale,
slug=translate_doc.slug)
_run_translate_tests(slug, doc_data, doc)
_run_translate_tests(child_slug, child_data, child_doc)
_run_translate_tests(grandchild_slug, grandchild_data,
grandchild_doc)
def _run_translate_edit_tests(edit_slug, edit_data, edit_doc):
"""TEST BASIC EDIT OF TRANSLATION"""
# Hit the initial URL
response = self.client.get(reverse('wiki.edit_document',
args=[edit_doc.slug],
locale=foreign_locale))
eq_(200, response.status_code)
page = pq(response.content)
eq_(edit_data['slug'], page.find('input[name=slug]')[0].value)
# Attempt an invalid edit of the root, ensure the slug stays
# the same (i.e. no parent prepending)
edit_data['slug'] = invalid_slug
edit_data['form'] = 'both'
response = self.client.post(reverse('wiki.edit_document',
args=[edit_doc.slug],
locale=foreign_locale),
edit_data)
eq_(200, response.status_code) # 200 = bad, invalid data
page = pq(response.content)
# Slug doesn't add parent
eq_(invalid_slug, page.find('input[name=slug]')[0].value)
self.assertContains(response, page.find('ul.errorlist li'
' a[href="#id_slug"]').
text())
# Ensure no redirect
eq_(0, len(Document.objects.filter(title=edit_data['title'] +
' Redirect 1',
locale=foreign_locale)))
# Push a valid edit, without changing the slug
edit_data['slug'] = edit_slug
response = self.client.post(reverse('wiki.edit_document',
args=[edit_doc.slug],
locale=foreign_locale),
edit_data)
eq_(302, response.status_code)
# Ensure no redirect
eq_(0, len(Document.objects.filter(title=edit_data['title'] +
' Redirect 1',
locale=foreign_locale)))
self.assertRedirects(response, reverse('wiki.document',
locale=foreign_locale,
args=[edit_doc.slug]))
""" TEST EDITING SLUGS AND TRANSLATIONS """
def _run_slug_edit_tests(edit_slug, edit_data, edit_doc, loc):
edit_data['slug'] = edit_data['slug'] + '_Updated'
edit_data['form'] = 'rev'
response = self.client.post(reverse('wiki.edit_document',
args=[edit_doc.slug],
locale=loc),
edit_data)
eq_(302, response.status_code)
                # HACK: the es doc's redirect is titled 'Redirigen 1', so
                # match on ' Redir' to cover both locales
# Ensure *1* redirect
eq_(1,
len(Document.objects.filter(
title__contains=edit_data['title'] + ' Redir',
locale=loc)))
self.assertRedirects(response,
reverse('wiki.document',
locale=loc,
args=[edit_doc.slug.replace(
edit_slug,
edit_data['slug'])]))
# Run all of the tests
_createAndRunTests("parent")
        # Test that slugs with the same "specific" slug but at different levels
        # in the hierarchy are validated properly upon submission
# Create base doc
parent_doc = document(title='Length',
slug='length',
is_localizable=True,
locale=settings.WIKI_DEFAULT_LANGUAGE)
parent_doc.save()
r = revision(document=parent_doc)
r.save()
# Create child, try to use same slug, should work
child_data = new_document_data()
child_data['title'] = 'Child Length'
child_data['slug'] = 'length'
child_data['content'] = 'This is the content'
child_data['is_localizable'] = True
child_url = (reverse('wiki.new_document') +
'?parent=' +
str(parent_doc.id))
response = self.client.post(child_url, child_data)
eq_(302, response.status_code)
self.assertRedirects(response,
reverse('wiki.document',
args=['length/length'],
locale=settings.WIKI_DEFAULT_LANGUAGE))
# Editing "length/length" document doesn't cause errors
child_data['form'] = 'rev'
child_data['slug'] = ''
edit_url = reverse('wiki.edit_document', args=['length/length'],
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.post(edit_url, child_data)
eq_(302, response.status_code)
self.assertRedirects(response, reverse('wiki.document',
args=['length/length'],
locale=settings.WIKI_DEFAULT_LANGUAGE))
# Creating a new translation of "length" and "length/length"
# doesn't cause errors
child_data['form'] = 'both'
child_data['slug'] = 'length'
translate_url = reverse('wiki.document', args=[child_data['slug']],
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.post(translate_url + '$translate?tolocale=es',
child_data)
eq_(302, response.status_code)
self.assertRedirects(response, reverse('wiki.document',
args=[child_data['slug']],
locale='es'))
translate_url = reverse('wiki.document', args=['length/length'],
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.post(translate_url + '$translate?tolocale=es',
child_data)
eq_(302, response.status_code)
slug = 'length/' + child_data['slug']
self.assertRedirects(response, reverse('wiki.document',
args=[slug],
locale='es'))
def test_translate_keeps_topical_parent(self):
self.client.login(username='admin', password='testpass')
en_doc, de_doc = make_translation()
en_child_doc = document(parent_topic=en_doc, slug='en-child',
save=True)
en_child_rev = revision(document=en_child_doc, save=True)
de_child_doc = document(parent_topic=de_doc, locale='de',
slug='de-child', parent=en_child_doc,
save=True)
revision(document=de_child_doc, save=True)
post_data = {}
post_data['slug'] = de_child_doc.slug
post_data['title'] = 'New title'
post_data['form'] = 'both'
post_data['content'] = 'New translation'
post_data['tolocale'] = 'de'
post_data['toc_depth'] = 0
post_data['based_on'] = en_child_rev.id
post_data['parent_id'] = en_child_doc.id
translate_url = reverse('wiki.edit_document',
args=[de_child_doc.slug],
locale='de')
self.client.post(translate_url, post_data)
de_child_doc = Document.objects.get(locale='de', slug='de-child')
eq_(en_child_doc, de_child_doc.parent)
eq_(de_doc, de_child_doc.parent_topic)
eq_('New translation', de_child_doc.current_revision.content)
def test_translate_keeps_toc_depth(self):
self.client.login(username='admin', password='testpass')
locale = settings.WIKI_DEFAULT_LANGUAGE
original_slug = 'eng-doc'
foreign_locale = 'es'
foreign_slug = 'es-doc'
en_doc = document(title='Eng Doc', slug=original_slug,
is_localizable=True, locale=locale)
en_doc.save()
r = revision(document=en_doc, toc_depth=1)
r.save()
post_data = new_document_data()
post_data['title'] = 'ES Doc'
post_data['slug'] = foreign_slug
post_data['content'] = 'This is the content'
post_data['is_localizable'] = True
post_data['form'] = 'both'
post_data['toc_depth'] = r.toc_depth
translate_url = reverse('wiki.document', args=[original_slug],
locale=settings.WIKI_DEFAULT_LANGUAGE)
translate_url += '$translate?tolocale=' + foreign_locale
response = self.client.post(translate_url, post_data)
self.assertRedirects(response, reverse('wiki.document',
args=[foreign_slug],
locale=foreign_locale))
es_d = Document.objects.get(locale=foreign_locale, slug=foreign_slug)
eq_(r.toc_depth, es_d.current_revision.toc_depth)
@override_constance_settings(KUMASCRIPT_TIMEOUT=1.0)
def test_translate_rebuilds_source_json(self):
self.client.login(username='admin', password='testpass')
# Create an English original and a Spanish translation.
en_slug = 'en-doc'
es_locale = 'es'
es_slug = 'es-doc'
en_doc = document(title='EN Doc',
slug=en_slug,
is_localizable=True,
locale=settings.WIKI_DEFAULT_LANGUAGE)
en_doc.save()
en_doc.render()
en_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=en_slug)
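        # The rendered English document should expose parseable JSON metadata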
json.loads(en_doc.json)
r = revision(document=en_doc)
r.save()
translation_data = new_document_data()
translation_data['title'] = 'ES Doc'
translation_data['slug'] = es_slug
translation_data['content'] = 'This is the content'
translation_data['is_localizable'] = False
translation_data['form'] = 'both'
translate_url = reverse('wiki.document', args=[en_slug],
locale=settings.WIKI_DEFAULT_LANGUAGE)
translate_url += '$translate?tolocale=' + es_locale
response = self.client.post(translate_url, translation_data)
# Sanity to make sure the translate succeeded.
self.assertRedirects(response, reverse('wiki.document',
args=[es_slug],
locale=es_locale))
es_doc = Document.objects.get(locale=es_locale,
slug=es_slug)
es_doc.render()
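        # Rendering the translation should rebuild the English document's JSON
        # so that it lists the new translation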
new_en_json = json.loads(Document.objects.get(pk=en_doc.pk).json)
ok_('translations' in new_en_json)
ok_(translation_data['title'] in [t['title'] for t in
new_en_json['translations']])
es_translation_json = [t for t in new_en_json['translations'] if
t['title'] == translation_data['title']][0]
eq_(es_translation_json['last_edit'],
es_doc.current_revision.created.isoformat())
def test_slug_translate(self):
"""Editing a translated doc keeps the correct slug"""
self.client.login(username='admin', password='testpass')
# Settings
original_slug = 'eng-doc'
child_slug = 'child-eng-doc'
foreign_locale = 'es'
foreign_slug = 'es-doc'
foreign_child_slug = 'child-es-doc'
# Create the one-level English Doc
en_doc = document(title='Eng Doc',
slug=original_slug,
is_localizable=True,
locale=settings.WIKI_DEFAULT_LANGUAGE)
en_doc.save()
r = revision(document=en_doc)
r.save()
# Translate to ES
parent_data = new_document_data()
parent_data['title'] = 'ES Doc'
parent_data['slug'] = foreign_slug
parent_data['content'] = 'This is the content'
parent_data['is_localizable'] = True
parent_data['form'] = 'both'
translate_url = reverse('wiki.document', args=[original_slug],
locale=settings.WIKI_DEFAULT_LANGUAGE)
translate_url += '$translate?tolocale=' + foreign_locale
response = self.client.post(translate_url, parent_data)
self.assertRedirects(response, reverse('wiki.document',
args=[foreign_slug],
locale=foreign_locale))
        # Go to edit the translation, ensure the slug is correct
response = self.client.get(reverse('wiki.edit_document',
args=[foreign_slug],
locale=foreign_locale))
page = pq(response.content)
eq_(page.find('input[name=slug]')[0].value, foreign_slug)
# Create an English child now
en_doc = document(title='Child Eng Doc',
slug=original_slug + '/' + child_slug,
is_localizable=True,
locale=settings.WIKI_DEFAULT_LANGUAGE,
parent_topic=en_doc)
en_doc.save()
r = revision(document=en_doc)
r.save()
# Translate to ES
child_data = new_document_data()
child_data['title'] = 'ES Child Doc'
child_data['slug'] = foreign_child_slug
child_data['content'] = 'This is the content'
child_data['is_localizable'] = True
child_data['form'] = 'both'
translate_url = reverse('wiki.document',
args=[original_slug + '/' + child_slug],
locale=settings.WIKI_DEFAULT_LANGUAGE)
translate_url += '$translate?tolocale=' + foreign_locale
response = self.client.post(translate_url, child_data)
slug = foreign_slug + '/' + child_data['slug']
self.assertRedirects(response, reverse('wiki.document',
args=[slug],
locale=foreign_locale))
def test_clone(self):
self.client.login(username='admin', password='testpass')
slug = None
title = None
content = '<p>Hello!</p>'
test_revision = revision(save=True, title=title, slug=slug,
content=content)
document = test_revision.document
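        # Loading the new-document form with ?clone=<doc id> should prefill the
        # content while leaving title and slug empty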
response = self.client.get(reverse('wiki.new_document',
args=[],
locale=settings.WIKI_DEFAULT_LANGUAGE) + '?clone=' + str(document.id))
page = pq(response.content)
eq_(page.find('input[name=title]')[0].value, title)
eq_(page.find('input[name=slug]')[0].value, slug)
self.assertHTMLEqual(page.find('textarea[name=content]')[0].value, content)
def test_localized_based_on(self):
"""Editing a localized article 'based on' an older revision of the
localization is OK."""
self.client.login(username='admin', password='testpass')
en_r = revision(save=True)
fr_d = document(parent=en_r.document, locale='fr', save=True)
fr_r = revision(document=fr_d, based_on=en_r, save=True)
url = reverse('wiki.new_revision_based_on',
locale='fr', args=(fr_d.slug, fr_r.pk,))
response = self.client.get(url)
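        # The 'based on' field should point back at the English source revision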
input = pq(response.content)('#id_based_on')[0]
eq_(int(input.value), en_r.pk)
def test_restore_translation_source(self):
"""Edit a localized article without an English parent allows user to
set translation parent."""
# Create english doc
self.client.login(username='admin', password='testpass')
data = new_document_data()
self.client.post(reverse('wiki.new_document'), data)
en_d = Document.objects.get(locale=data['locale'], slug=data['slug'])
# Create french doc
data.update({'locale': 'fr',
'title': 'A Tést Articlé',
'content': "C'ést bon."})
self.client.post(reverse('wiki.new_document', locale='fr'), data)
fr_d = Document.objects.get(locale=data['locale'], slug=data['slug'])
# Check edit doc page for choose parent box
url = reverse('wiki.edit_document', args=[fr_d.slug], locale='fr')
response = self.client.get(url)
ok_(pq(response.content)('li.metadata-choose-parent'))
# Set the parent
data.update({'form': 'rev', 'parent_id': en_d.id})
resp = self.client.post(url, data)
eq_(302, resp.status_code)
ok_('fr/docs/a-test-article' in resp['Location'])
# Check the languages drop-down
resp = self.client.get(resp['Location'])
translations = pq(resp.content)('ul#translations li')
ok_('A Test Article' in translations.html())
ok_('English (US)' in translations.text())
def test_translation_source(self):
"""Allow users to change "translation source" settings"""
self.client.login(username='admin', password='testpass')
data = new_document_data()
self.client.post(reverse('wiki.new_document'), data)
parent = Document.objects.get(locale=data['locale'], slug=data['slug'])
data.update({'title': 'Another Test Article',
'content': "Yahoooo!",
'parent_id': parent.id})
self.client.post(reverse('wiki.new_document'), data)
child = Document.objects.get(locale=data['locale'], slug=data['slug'])
url = reverse('wiki.edit_document', args=[child.slug])
response = self.client.get(url)
content = pq(response.content)
ok_(content('li.metadata-choose-parent'))
ok_(str(parent.id) in content.html())
@attr('tags')
@mock.patch.object(Site.objects, 'get_current')
def test_document_tags(self, get_current):
"""Document tags can be edited through revisions"""
data = new_document_data()
locale = data['locale']
slug = data['slug']
path = slug
ts1 = ('JavaScript', 'AJAX', 'DOM')
ts2 = ('XML', 'JSON')
get_current.return_value.domain = 'su.mo.com'
self.client.login(username='admin', password='testpass')
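        # Helper: assert which tags are attached to the document and which show
        # up on the document view, tag listings, and feeds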
def assert_tag_state(yes_tags, no_tags):
# Ensure the tags are found for the Documents
doc = Document.objects.get(locale=locale, slug=slug)
doc_tags = [x.name for x in doc.tags.all()]
for t in yes_tags:
ok_(t in doc_tags)
for t in no_tags:
ok_(t not in doc_tags)
# Ensure the tags are found in the Document view
response = self.client.get(reverse('wiki.document',
args=[doc.slug]), data)
page = pq(response.content)
            for t in yes_tags:
                eq_(1, page.find('.tags li a:contains("%s")' % t).length,
                    '%s should appear in document view tags' % t)
            for t in no_tags:
                eq_(0, page.find('.tags li a:contains("%s")' % t).length,
                    '%s should NOT appear in document view tags' % t)
# Check for the document slug (title in feeds) in the tag listing
for t in yes_tags:
response = self.client.get(reverse('wiki.tag', args=[t]))
self.assertContains(response, doc.slug, msg_prefix=t)
response = self.client.get(reverse('wiki.feeds.recent_documents',
args=['atom', t]))
self.assertContains(response, doc.title)
for t in no_tags:
response = self.client.get(reverse('wiki.tag', args=[t]))
ok_(doc.slug not in response.content.decode('utf-8'))
response = self.client.get(reverse('wiki.feeds.recent_documents',
args=['atom', t]))
self.assertNotContains(response, doc.title)
# Create a new doc with tags
data.update({'slug': slug, 'tags': ','.join(ts1)})
self.client.post(reverse('wiki.new_document'), data)
assert_tag_state(ts1, ts2)
# Now, update the tags.
data.update({'form': 'rev', 'tags': ', '.join(ts2)})
self.client.post(reverse('wiki.edit_document',
args=[path]), data)
assert_tag_state(ts2, ts1)
@attr('review_tags')
@mock.patch.object(Site.objects, 'get_current')
def test_review_tags(self, get_current):
"""Review tags can be managed on document revisions"""
get_current.return_value.domain = 'su.mo.com'
self.client.login(username='admin', password='testpass')
# Create a new doc with one review tag
data = new_document_data()
data.update({'review_tags': ['technical']})
response = self.client.post(reverse('wiki.new_document'), data)
# Ensure there's now a doc with that expected tag in its newest
# revision
doc = Document.objects.get(slug="a-test-article")
rev = doc.revisions.order_by('-id').all()[0]
review_tags = [x.name for x in rev.review_tags.all()]
eq_(['technical'], review_tags)
# Now, post an update with two tags
data.update({
'form': 'rev',
'review_tags': ['editorial', 'technical'],
})
response = self.client.post(reverse('wiki.edit_document',
args=[doc.slug]), data)
# Ensure the doc's newest revision has both tags.
doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug="a-test-article")
rev = doc.revisions.order_by('-id').all()[0]
review_tags = [x.name for x in rev.review_tags.all()]
review_tags.sort()
eq_(['editorial', 'technical'], review_tags)
# Now, ensure that warning boxes appear for the review tags.
response = self.client.get(reverse('wiki.document',
args=[doc.slug]), data)
page = pq(response.content)
eq_(2, page.find('.warning.warning-review').length)
# Ensure the page appears on the listing pages
response = self.client.get(reverse('wiki.list_review'))
eq_(1, pq(response.content).find("ul.document-list li a:contains('%s')" %
doc.title).length)
response = self.client.get(reverse('wiki.list_review_tag',
args=('technical',)))
eq_(1, pq(response.content).find("ul.document-list li a:contains('%s')" %
doc.title).length)
response = self.client.get(reverse('wiki.list_review_tag',
args=('editorial',)))
eq_(1, pq(response.content).find("ul.document-list li a:contains('%s')" %
doc.title).length)
# Also, ensure that the page appears in the proper feeds
# HACK: Too lazy to parse the XML. Lazy lazy.
response = self.client.get(reverse('wiki.feeds.list_review',
args=('atom',)))
ok_('<entry><title>%s</title>' % doc.title in response.content)
response = self.client.get(reverse('wiki.feeds.list_review_tag',
args=('atom', 'technical', )))
ok_('<entry><title>%s</title>' % doc.title in response.content)
response = self.client.get(reverse('wiki.feeds.list_review_tag',
args=('atom', 'editorial', )))
ok_('<entry><title>%s</title>' % doc.title in response.content)
# Post an edit that removes one of the tags.
data.update({
'form': 'rev',
'review_tags': ['editorial', ]
})
response = self.client.post(reverse('wiki.edit_document',
args=[doc.slug]), data)
# Ensure only one of the tags' warning boxes appears, now.
response = self.client.get(reverse('wiki.document',
args=[doc.slug]), data)
page = pq(response.content)
eq_(1, page.find('.warning.warning-review').length)
# Ensure the page appears on the listing pages
response = self.client.get(reverse('wiki.list_review'))
eq_(1, pq(response.content).find("ul.document-list li a:contains('%s')" %
doc.title).length)
response = self.client.get(reverse('wiki.list_review_tag',
args=('technical',)))
eq_(0, pq(response.content).find("ul.document-list li a:contains('%s')" %
doc.title).length)
response = self.client.get(reverse('wiki.list_review_tag',
args=('editorial',)))
eq_(1, pq(response.content).find("ul.document-list li a:contains('%s')" %
doc.title).length)
# Also, ensure that the page appears in the proper feeds
# HACK: Too lazy to parse the XML. Lazy lazy.
response = self.client.get(reverse('wiki.feeds.list_review',
args=('atom',)))
ok_('<entry><title>%s</title>' % doc.title in response.content)
response = self.client.get(reverse('wiki.feeds.list_review_tag',
args=('atom', 'technical', )))
ok_('<entry><title>%s</title>' % doc.title not in response.content)
response = self.client.get(reverse('wiki.feeds.list_review_tag',
args=('atom', 'editorial', )))
ok_('<entry><title>%s</title>' % doc.title in response.content)
@attr('review-tags')
def test_quick_review(self):
"""Test the quick-review button."""
self.client.login(username='admin', password='testpass')
test_data = [
{
'params': {'approve_technical': 1},
'expected_tags': ['editorial'],
'name': 'technical',
'message_contains': ['Technical review completed.']
},
{
'params': {'approve_editorial': 1},
'expected_tags': ['technical'],
'name': 'editorial',
'message_contains': ['Editorial review completed.']
},
{
'params': {
'approve_technical': 1,
'approve_editorial': 1
},
'expected_tags': [],
'name': 'editorial-technical',
'message_contains': [
'Technical review completed.',
'Editorial review completed.',
]
}
]
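        # For each case: create a doc carrying both review tags, submit the
        # quick-review form, and expect the approved tags to be cleared and
        # noted in the revision summary and comment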
for data_dict in test_data:
slug = 'test-quick-review-%s' % data_dict['name']
data = new_document_data()
data.update({'review_tags': ['editorial', 'technical'],
'slug': slug})
resp = self.client.post(reverse('wiki.new_document'), data)
doc = Document.objects.get(slug=slug)
rev = doc.revisions.order_by('-id').all()[0]
review_url = reverse('wiki.quick_review',
args=[doc.slug])
params = dict(data_dict['params'], revision_id=rev.id)
resp = self.client.post(review_url, params)
eq_(302, resp.status_code)
doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=slug)
rev = doc.revisions.order_by('-id').all()[0]
review_tags = [x.name for x in rev.review_tags.all()]
review_tags.sort()
for expected_str in data_dict['message_contains']:
ok_(expected_str in rev.summary)
ok_(expected_str in rev.comment)
eq_(data_dict['expected_tags'], review_tags)
@attr('midair')
def test_edit_midair_collision(self):
self.client.login(username='admin', password='testpass')
# Post a new document.
data = new_document_data()
resp = self.client.post(reverse('wiki.new_document'), data)
doc = Document.objects.get(slug=data['slug'])
# Edit #1 starts...
resp = self.client.get(reverse('wiki.edit_document',
args=[doc.slug]))
page = pq(resp.content)
rev_id1 = page.find('input[name="current_rev"]').attr('value')
# Edit #2 starts...
resp = self.client.get(reverse('wiki.edit_document',
args=[doc.slug]))
page = pq(resp.content)
rev_id2 = page.find('input[name="current_rev"]').attr('value')
# Edit #2 submits successfully
data.update({
'form': 'rev',
'content': 'This edit got there first',
'current_rev': rev_id2
})
resp = self.client.post(reverse('wiki.edit_document',
args=[doc.slug]), data)
eq_(302, resp.status_code)
# Edit #1 submits, but receives a mid-aired notification
data.update({
'form': 'rev',
'content': 'This edit gets mid-aired',
'current_rev': rev_id1
})
resp = self.client.post(reverse('wiki.edit_document',
args=[doc.slug]), data)
eq_(200, resp.status_code)
ok_(unicode(MIDAIR_COLLISION).encode('utf-8') in resp.content,
"Midair collision message should appear")
@attr('toc')
def test_toc_toggle_off(self):
"""Toggling of table of contents in revisions"""
self.client.login(username='admin', password='testpass')
d, _ = doc_rev()
data = new_document_data()
ok_(Document.objects.get(slug=d.slug, locale=d.locale).show_toc)
data['form'] = 'rev'
data['toc_depth'] = 0
data['slug'] = d.slug
data['title'] = d.title
self.client.post(reverse('wiki.edit_document',
args=[d.slug]),
data)
doc = Document.objects.get(slug=d.slug, locale=d.locale)
eq_(0, doc.current_revision.toc_depth)
@attr('toc')
def test_toc_toggle_on(self):
"""Toggling of table of contents in revisions"""
self.client.login(username='admin', password='testpass')
d, r = doc_rev()
new_r = revision(document=d, content=r.content, toc_depth=0,
is_approved=True)
new_r.save()
ok_(not Document.objects.get(slug=d.slug, locale=d.locale).show_toc)
data = new_document_data()
data['form'] = 'rev'
data['slug'] = d.slug
data['title'] = d.title
self.client.post(reverse('wiki.edit_document',
args=[d.slug]),
data)
ok_(Document.objects.get(slug=d.slug, locale=d.locale).show_toc)
def test_parent_topic(self):
"""Selection of a parent topic when creating a document."""
self.client.login(username='admin', password='testpass')
d = document(title='HTML8')
d.save()
r = revision(document=d)
r.save()
data = new_document_data()
data['title'] = 'Replicated local storage'
data['parent_topic'] = d.id
resp = self.client.post(reverse('wiki.new_document'), data)
eq_(302, resp.status_code)
ok_(d.children.count() == 1)
ok_(d.children.all()[0].title == 'Replicated local storage')
def test_repair_breadcrumbs(self):
english_top = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
title='English top',
save=True)
english_mid = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
title='English mid',
parent_topic=english_top,
save=True)
english_bottom = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
title='English bottom',
parent_topic=english_mid,
save=True)
french_top = document(locale='fr',
title='French top',
parent=english_top,
save=True)
french_mid = document(locale='fr',
title='French mid',
parent=english_mid,
parent_topic=english_mid,
save=True)
french_bottom = document(locale='fr',
title='French bottom',
parent=english_bottom,
parent_topic=english_bottom,
save=True)
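        # french_bottom's topical parent currently points at the English doc;
        # the repair view should re-point the breadcrumb chain at the French
        # translations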
self.client.login(username='admin', password='testpass')
resp = self.client.get(reverse('wiki.repair_breadcrumbs',
args=[french_bottom.slug],
locale='fr'))
eq_(302, resp.status_code)
ok_(french_bottom.get_absolute_url() in resp['Location'])
french_bottom_fixed = Document.objects.get(locale='fr',
title=french_bottom.title)
eq_(french_mid.id, french_bottom_fixed.parent_topic.id)
eq_(french_top.id, french_bottom_fixed.parent_topic.parent_topic.id)
def test_translate_on_edit(self):
d1 = document(title="Doc1", locale=settings.WIKI_DEFAULT_LANGUAGE,
save=True)
revision(document=d1, save=True)
d2 = document(title="TransDoc1", locale='de', parent=d1, save=True)
revision(document=d2, save=True)
self.client.login(username='admin', password='testpass')
url = reverse('wiki.edit_document', args=(d2.slug,), locale=d2.locale)
resp = self.client.get(url)
eq_(200, resp.status_code)
def test_discard_location(self):
"""Testing that the 'discard' HREF goes to the correct place when it's
explicitely and implicitely set"""
self.client.login(username='admin', password='testpass')
def _create_doc(slug, locale):
doc = document(slug=slug, is_localizable=True, locale=locale)
doc.save()
r = revision(document=doc)
r.save()
return doc
# Test that the 'discard' button on an edit goes to the original page
doc = _create_doc('testdiscarddoc', settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.get(reverse('wiki.edit_document',
args=[doc.slug], locale=doc.locale))
eq_(pq(response.content).find('.btn-discard').attr('href'),
reverse('wiki.document', args=[doc.slug], locale=doc.locale))
        # Test that the 'discard' button on a new translation goes
        # to the en-US page
response = self.client.get(reverse('wiki.translate',
args=[doc.slug], locale=doc.locale) + '?tolocale=es')
eq_(pq(response.content).find('.btn-discard').attr('href'),
reverse('wiki.document', args=[doc.slug], locale=doc.locale))
# Test that the 'discard' button on an existing translation goes
# to the 'es' page
foreign_doc = _create_doc('testdiscarddoc', 'es')
response = self.client.get(reverse('wiki.edit_document',
args=[foreign_doc.slug],
locale=foreign_doc.locale))
eq_(pq(response.content).find('.btn-discard').attr('href'),
reverse('wiki.document', args=[foreign_doc.slug],
locale=foreign_doc.locale))
# Test new
response = self.client.get(reverse('wiki.new_document',
locale=settings.WIKI_DEFAULT_LANGUAGE))
eq_(pq(response.content).find('.btn-discard').attr('href'),
reverse('wiki.new_document',
locale=settings.WIKI_DEFAULT_LANGUAGE))
@override_constance_settings(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_revert(self, mock_kumascript_get):
self.client.login(username='admin', password='testpass')
mock_kumascript_get.return_value = (
'lorem ipsum dolor sit amet', None)
data = new_document_data()
data['title'] = 'A Test Article For Reverting'
data['slug'] = 'test-article-for-reverting'
response = self.client.post(reverse('wiki.new_document'), data)
doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug='test-article-for-reverting')
rev = doc.revisions.order_by('-id').all()[0]
data['content'] = 'Not lorem ipsum anymore'
data['comment'] = 'Nobody likes Latin anyway'
response = self.client.post(reverse('wiki.edit_document',
args=[doc.slug]), data)
mock_kumascript_get.called = False
response = self.client.post(reverse('wiki.revert_document',
args=[doc.slug, rev.id]),
{'revert': True, 'comment': 'Blah blah'})
ok_(mock_kumascript_get.called,
"kumascript should have been used")
ok_(302 == response.status_code)
rev = doc.revisions.order_by('-id').all()[0]
ok_('lorem ipsum dolor sit amet' == rev.content)
ok_('Blah blah' in rev.comment)
mock_kumascript_get.called = False
rev = doc.revisions.order_by('-id').all()[1]
response = self.client.post(reverse('wiki.revert_document',
args=[doc.slug, rev.id]),
{'revert': True})
ok_(302 == response.status_code)
rev = doc.revisions.order_by('-id').all()[0]
ok_(': ' not in rev.comment)
ok_(mock_kumascript_get.called,
"kumascript should have been used")
def test_store_revision_ip(self):
self.client.login(username='testuser', password='testpass')
data = new_document_data()
slug = 'test-article-for-storing-revision-ip'
data.update({'title': 'A Test Article For Storing Revision IP',
'slug': slug})
self.client.post(reverse('wiki.new_document'), data)
doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=slug)
data.update({'form': 'rev',
'content': 'This revision should NOT record IP',
'comment': 'This revision should NOT record IP'})
self.client.post(reverse('wiki.edit_document', args=[doc.slug]),
data)
eq_(0, RevisionIP.objects.all().count())
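        # With the 'store_revision_ips' switch enabled, subsequent edits should
        # record the client IP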
Switch.objects.create(name='store_revision_ips', active=True)
data.update({'content': 'Store the IP address for the revision.',
'comment': 'Store the IP address for the revision.'})
self.client.post(reverse('wiki.edit_document', args=[doc.slug]),
data)
eq_(1, RevisionIP.objects.all().count())
rev = doc.revisions.order_by('-id').all()[0]
rev_ip = RevisionIP.objects.get(revision=rev)
eq_('127.0.0.1', rev_ip.ip)
@mock.patch.object(Site.objects, 'get_current')
def test_email_for_first_edits(self, get_current):
get_current.return_value.domain = 'dev.mo.org'
self.client.login(username='testuser', password='testpass')
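        # A user's first edit (including creating a document) should send one
        # notification email; later edits by the same user should not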
data = new_document_data()
        slug = 'test-article-for-first-edit-emails'
data.update({'title': 'A Test Article For First Edit Emails',
'slug': slug})
self.client.post(reverse('wiki.new_document'), data)
eq_(1, len(mail.outbox))
doc = Document.objects.get(
locale=settings.WIKI_DEFAULT_LANGUAGE, slug=slug)
data.update({'form': 'rev',
'content': 'This edit should not send an email',
'comment': 'This edit should not send an email'})
self.client.post(reverse('wiki.edit_document',
args=[doc.slug]),
data)
eq_(1, len(mail.outbox))
self.client.login(username='admin', password='testpass')
data.update({'content': 'Admin first edit should send an email',
'comment': 'Admin first edit should send an email'})
self.client.post(reverse('wiki.edit_document',
args=[doc.slug]),
data)
eq_(2, len(mail.outbox))
def _check_message_for_headers(message, username):
ok_("%s made their first edit" % username in message.subject)
eq_({'X-Kuma-Document-Url': "https://dev.mo.org%s" % doc.get_absolute_url(),
'X-Kuma-Editor-Username': username}, message.extra_headers)
testuser_message = mail.outbox[0]
admin_message = mail.outbox[1]
_check_message_for_headers(testuser_message, 'testuser')
_check_message_for_headers(admin_message, 'admin')
class DocumentWatchTests(UserTestCase, WikiTestCase):
"""Tests for un/subscribing to document edit notifications."""
localizing_client = True
def setUp(self):
super(DocumentWatchTests, self).setUp()
self.document, self.r = doc_rev()
self.client.login(username='testuser', password='testpass')
def test_watch_GET_405(self):
"""Watch document with HTTP GET results in 405."""
response = get(self.client, 'wiki.subscribe_document',
args=[self.document.slug])
eq_(405, response.status_code)
def test_unwatch_GET_405(self):
"""Unwatch document with HTTP GET results in 405."""
response = get(self.client, 'wiki.subscribe_document',
args=[self.document.slug])
eq_(405, response.status_code)
def test_watch_unwatch(self):
"""Watch and unwatch a document."""
user = self.user_model.objects.get(username='testuser')
# Subscribe
response = post(self.client, 'wiki.subscribe_document', args=[self.document.slug])
eq_(200, response.status_code)
assert EditDocumentEvent.is_notifying(user, self.document), \
'Watch was not created'
# Unsubscribe
response = post(self.client, 'wiki.subscribe_document', args=[self.document.slug])
eq_(200, response.status_code)
assert not EditDocumentEvent.is_notifying(user, self.document), \
'Watch was not destroyed'
class SectionEditingResourceTests(UserTestCase, WikiTestCase):
localizing_client = True
def test_raw_source(self):
"""The raw source for a document can be requested"""
self.client.login(username='admin', password='testpass')
d, r = doc_rev("""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
expected = """
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
"""
Switch.objects.create(name='application_ACAO', active=True)
response = self.client.get('%s?raw=true' %
reverse('wiki.document', args=[d.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
ok_('Access-Control-Allow-Origin' in response)
eq_('*', response['Access-Control-Allow-Origin'])
eq_(normalize_html(expected),
normalize_html(response.content))
@attr('bug821986')
def test_raw_editor_safety_filter(self):
"""Safety filter should be applied before rendering editor"""
self.client.login(username='admin', password='testpass')
d, r = doc_rev("""
<p onload=alert(3)>FOO</p>
<svg><circle onload=confirm(3)>HI THERE</circle></svg>
""")
response = self.client.get('%s?raw=true' %
reverse('wiki.document', args=[d.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
ok_('<p onload=' not in response.content)
ok_('<circle onload=' not in response.content)
def test_raw_with_editing_links_source(self):
"""The raw source for a document can be requested, with section editing
links"""
self.client.login(username='admin', password='testpass')
d, r = doc_rev("""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
expected = """
<h1 id="s1"><a class="edit-section" data-section-id="s1" data-section-src-url="/en-US/docs/%(slug)s?raw=true&section=s1" href="/en-US/docs/%(slug)s$edit?section=s1&edit_links=true" title="Edit section">Edit</a>s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2"><a class="edit-section" data-section-id="s2" data-section-src-url="/en-US/docs/%(slug)s?raw=true&section=s2" href="/en-US/docs/%(slug)s$edit?section=s2&edit_links=true" title="Edit section">Edit</a>s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3"><a class="edit-section" data-section-id="s3" data-section-src-url="/en-US/docs/%(slug)s?raw=true&section=s3" href="/en-US/docs/%(slug)s$edit?section=s3&edit_links=true" title="Edit section">Edit</a>s3</h1>
<p>test</p>
<p>test</p>
""" % {'slug': d.slug}
response = self.client.get('%s?raw=true&edit_links=true' %
reverse('wiki.document', args=[d.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
eq_(normalize_html(expected),
normalize_html(response.content))
def test_raw_section_source(self):
"""The raw source for a document section can be requested"""
self.client.login(username='admin', password='testpass')
d, r = doc_rev("""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
expected = """
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
"""
response = self.client.get('%s?section=s2&raw=true' %
reverse('wiki.document',
args=[d.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
eq_(normalize_html(expected),
normalize_html(response.content))
@attr('midair')
@attr('rawsection')
def test_raw_section_edit(self):
self.client.login(username='admin', password='testpass')
d, r = doc_rev("""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
replace = """
<h1 id="s2">s2</h1>
<p>replace</p>
"""
expected = """
<h1 id="s2">s2</h1>
<p>replace</p>
"""
response = self.client.post('%s?section=s2&raw=true' %
reverse('wiki.edit_document',
args=[d.slug]),
{"form": "rev",
"slug": d.slug,
"content": replace},
follow=True,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
eq_(normalize_html(expected),
normalize_html(response.content))
expected = """
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>replace</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
"""
response = self.client.get('%s?raw=true' %
reverse('wiki.document',
args=[d.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
eq_(normalize_html(expected),
normalize_html(response.content))
@attr('midair')
def test_midair_section_merge(self):
"""If a page was changed while someone was editing, but the changes
didn't affect the specific section being edited, then ignore the midair
warning"""
self.client.login(username='admin', password='testpass')
doc, rev = doc_rev("""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
replace_1 = """
<h1 id="replace1">replace1</h1>
<p>replace</p>
"""
replace_2 = """
<h1 id="replace2">replace2</h1>
<p>replace</p>
"""
expected = """
<h1 id="replace1">replace1</h1>
<p>replace</p>
<h1 id="replace2">replace2</h1>
<p>replace</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
"""
data = {
'form': 'rev',
'content': rev.content,
'slug': ''
}
# Edit #1 starts...
resp = self.client.get('%s?section=s1' %
reverse('wiki.edit_document',
args=[doc.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
page = pq(resp.content)
rev_id1 = page.find('input[name="current_rev"]').attr('value')
# Edit #2 starts...
resp = self.client.get('%s?section=s2' %
reverse('wiki.edit_document',
args=[doc.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
page = pq(resp.content)
rev_id2 = page.find('input[name="current_rev"]').attr('value')
# Edit #2 submits successfully
data.update({
'form': 'rev',
'content': replace_2,
'current_rev': rev_id2,
'slug': doc.slug
})
resp = self.client.post('%s?section=s2&raw=true' %
reverse('wiki.edit_document',
args=[doc.slug]),
data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
eq_(302, resp.status_code)
# Edit #1 submits, but since it's a different section, there's no
# mid-air collision
data.update({
'form': 'rev',
'content': replace_1,
'current_rev': rev_id1
})
resp = self.client.post('%s?section=s1&raw=true' %
reverse('wiki.edit_document', args=[doc.slug]),
data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# No conflict, but we should get a 205 Reset as an indication that the
# page needs a refresh.
eq_(205, resp.status_code)
# Finally, make sure that all the edits landed
response = self.client.get('%s?raw=true' %
reverse('wiki.document',
args=[doc.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
eq_(normalize_html(expected),
normalize_html(response.content))
# Also, ensure that the revision is slipped into the headers
eq_(unicode(Document.objects.get(slug=doc.slug, locale=doc.locale)
.current_revision.id),
unicode(response['x-kuma-revision']))
@attr('midair')
def test_midair_section_collision(self):
"""If both a revision and the edited section has changed, then a
section edit is a collision."""
self.client.login(username='admin', password='testpass')
doc, rev = doc_rev("""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
replace_1 = """
<h1 id="s2">replace</h1>
<p>replace</p>
"""
replace_2 = """
<h1 id="s2">first replace</h1>
<p>first replace</p>
"""
data = {
'form': 'rev',
'content': rev.content
}
# Edit #1 starts...
resp = self.client.get('%s?section=s2' %
reverse('wiki.edit_document',
args=[doc.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
page = pq(resp.content)
rev_id1 = page.find('input[name="current_rev"]').attr('value')
# Edit #2 starts...
resp = self.client.get('%s?section=s2' %
reverse('wiki.edit_document',
args=[doc.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
page = pq(resp.content)
rev_id2 = page.find('input[name="current_rev"]').attr('value')
# Edit #2 submits successfully
data.update({
'form': 'rev',
'content': replace_2,
'slug': doc.slug,
'current_rev': rev_id2
})
resp = self.client.post('%s?section=s2&raw=true' %
reverse('wiki.edit_document',
args=[doc.slug]),
data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
eq_(302, resp.status_code)
# Edit #1 submits, but since it's the same section, there's a collision
data.update({
'form': 'rev',
'content': replace_1,
'current_rev': rev_id1
})
resp = self.client.post('%s?section=s2&raw=true' %
reverse('wiki.edit_document',
args=[doc.slug]),
data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# With the raw API, we should get a 409 Conflict on collision.
eq_(409, resp.status_code)
def test_raw_include_option(self):
doc_src = u"""
<div class="noinclude">{{ XULRefAttr() }}</div>
<dl>
<dt>{{ XULAttr("maxlength") }}</dt>
<dd>Type: <em>integer</em></dd>
<dd>Przykłady 例 예제 示例</dd>
</dl>
<div class="noinclude">
<p>{{ languages( { "ja": "ja/XUL/Attribute/maxlength" } ) }}</p>
</div>
"""
doc, rev = doc_rev(doc_src)
expected = u"""
<dl>
<dt>{{ XULAttr("maxlength") }}</dt>
<dd>Type: <em>integer</em></dd>
<dd>Przykłady 例 예제 示例</dd>
</dl>
"""
resp = self.client.get('%s?raw&include' %
reverse('wiki.document', args=[doc.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
eq_(normalize_html(expected),
normalize_html(resp.content.decode('utf-8')))
def test_section_edit_toc(self):
"""show_toc is preserved in section editing."""
self.client.login(username='admin', password='testpass')
doc, rev = doc_rev("""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
rev.toc_depth = 1
rev.save()
replace = """
<h1 id="s2">s2</h1>
<p>replace</p>
"""
self.client.post('%s?section=s2&raw=true' %
reverse('wiki.edit_document', args=[doc.slug]),
{"form": "rev", "slug": doc.slug, "content": replace},
follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
changed = Document.objects.get(pk=doc.id).current_revision
ok_(rev.id != changed.id)
eq_(1, changed.toc_depth)
def test_section_edit_review_tags(self):
"""review tags are preserved in section editing."""
self.client.login(username='admin', password='testpass')
doc, rev = doc_rev("""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
tags_to_save = ['bar', 'foo']
rev.save()
rev.review_tags.set(*tags_to_save)
replace = """
<h1 id="s2">s2</h1>
<p>replace</p>
"""
self.client.post('%s?section=s2&raw=true' %
reverse('wiki.edit_document', args=[doc.slug]),
{"form": "rev", "slug": doc.slug, "content": replace},
follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
changed = Document.objects.get(pk=doc.id).current_revision
ok_(rev.id != changed.id)
eq_(set(tags_to_save),
set([t.name for t in changed.review_tags.all()]))
class MindTouchRedirectTests(UserTestCase, WikiTestCase):
"""
Test that we appropriately redirect old-style MindTouch URLs to
new-style kuma URLs.
"""
# A note on these tests: we could try to use assertRedirects on
# these, but for the most part we're just constructing a URL
# similar enough to the wiki app's own built-in redirects that
# it'll pick up the request and do what we want with it. But it
# may end up issuing its own redirects, which are tricky to sort
# out from the ones the legacy MindTouch handling will emit, so
# instead we just test that A) we did issue a redirect and B) the
# URL we constructed is enough for the document views to go on.
localizing_client = True
server_prefix = 'http://testserver/%s/docs' % settings.WIKI_DEFAULT_LANGUAGE
namespace_urls = (
# One for each namespace.
{'mindtouch': '/Help:Foo',
'kuma': '%s/Help:Foo' % server_prefix},
{'mindtouch': '/Help_talk:Foo',
'kuma': '%s/Help_talk:Foo' % server_prefix},
{'mindtouch': '/Project:En/MDC_editor_guide',
'kuma': '%s/Project:MDC_editor_guide' % server_prefix},
{'mindtouch': '/Project_talk:En/MDC_style_guide',
'kuma': '%s/Project_talk:MDC_style_guide' % server_prefix},
{'mindtouch': '/Special:Foo',
'kuma': '%s/Special:Foo' % server_prefix},
{'mindtouch': '/Talk:en/Foo',
'kuma': '%s/Talk:Foo' % server_prefix},
{'mindtouch': '/Template:Foo',
'kuma': '%s/Template:Foo' % server_prefix},
{'mindtouch': '/User:Foo',
'kuma': '%s/User:Foo' % server_prefix},
)
documents = (
{'title': 'XHTML', 'mt_locale': 'cn', 'kuma_locale': 'zh-CN',
'expected': '/zh-CN/docs/XHTML'},
{'title': 'JavaScript', 'mt_locale': 'zh_cn', 'kuma_locale': 'zh-CN',
'expected': '/zh-CN/docs/JavaScript'},
{'title': 'XHTML6', 'mt_locale': 'zh_tw', 'kuma_locale': 'zh-CN',
'expected': '/zh-TW/docs/XHTML6'},
{'title': 'HTML7', 'mt_locale': 'fr', 'kuma_locale': 'fr',
'expected': '/fr/docs/HTML7'},
)
def test_namespace_urls(self):
new_doc = document()
new_doc.title = 'User:Foo'
new_doc.slug = 'User:Foo'
new_doc.save()
for namespace_test in self.namespace_urls:
resp = self.client.get(namespace_test['mindtouch'], follow=False)
eq_(301, resp.status_code)
eq_(namespace_test['kuma'], resp['Location'])
def test_trailing_slash(self):
d = document()
d.locale = 'zh-CN'
d.slug = 'foofoo'
d.title = 'FooFoo'
d.save()
mt_url = '/cn/%s/' % (d.slug,)
resp = self.client.get(mt_url)
eq_(301, resp.status_code)
eq_('http://testserver%s' % d.get_absolute_url(), resp['Location'])
def test_document_urls(self):
for doc in self.documents:
d = document()
d.title = doc['title']
d.slug = doc['title']
d.locale = doc['kuma_locale']
d.save()
mt_url = '/%s' % '/'.join([doc['mt_locale'], doc['title']])
resp = self.client.get(mt_url)
eq_(301, resp.status_code)
eq_('http://testserver%s' % doc['expected'], resp['Location'])
def test_view_param(self):
d = document()
d.locale = settings.WIKI_DEFAULT_LANGUAGE
d.slug = 'HTML/HTML5'
d.title = 'HTML 5'
d.save()
mt_url = '/en-US/%s?view=edit' % (d.slug,)
resp = self.client.get(mt_url)
eq_(301, resp.status_code)
expected_url = 'http://testserver%s$edit' % d.get_absolute_url()
eq_(expected_url, resp['Location'])
class AutosuggestDocumentsTests(WikiTestCase):
"""
Test that we're properly filtering out the Redirects from the document list
"""
localizing_client = True
def test_autosuggest_no_term(self):
url = reverse('wiki.autosuggest_documents',
locale=settings.WIKI_DEFAULT_LANGUAGE)
resp = self.client.get(url)
eq_(400, resp.status_code)
def test_document_redirects(self):
# All contain "e", so that will be the search term
invalid_documents = (
{
'title': 'Something Redirect 8',
'html': 'REDIRECT <a class="redirect" href="/blah">Something Redirect</a>',
'is_redirect': 1
},
)
valid_documents = (
{'title': 'e 6', 'html': '<p>Blah text Redirect'},
{'title': 'e 7', 'html': 'AppleTalk'},
{'title': 'Response.Redirect'},
)
for doc in invalid_documents + valid_documents:
d = document()
d.title = doc['title']
if 'html' in doc:
d.html = doc['html']
if 'slug' in doc:
d.slug = doc['slug']
if 'is_redirect' in doc:
d.is_redirect = 1
d.save()
url = reverse('wiki.autosuggest_documents',
locale=settings.WIKI_DEFAULT_LANGUAGE) + '?term=e'
Switch.objects.create(name='application_ACAO', active=True)
resp = self.client.get(url)
ok_('Access-Control-Allow-Origin' in resp)
eq_('*', resp['Access-Control-Allow-Origin'])
eq_(200, resp.status_code)
data = json.loads(resp.content)
eq_(len(data), len(valid_documents))
# Ensure that the valid docs found are all in the valid list
for d in data:
found = False
for v in valid_documents:
if v['title'] in d['title']:
found = True
break
eq_(True, found)
def test_list_no_redirects(self):
Document.objects.all().delete()
invalid_documents = [
{
'title': 'Something Redirect 8',
'slug': 'xx',
'html': 'REDIRECT <a class="redirect" href="%s">yo</a>' % settings.SITE_URL
},
{
'title': 'My Template',
'slug': 'Template:Something',
'html': 'blah',
},
]
valid_documents = [
{'title': 'A Doc', 'slug': 'blah', 'html': 'Blah blah blah'}
]
for doc in invalid_documents + valid_documents:
document(save=True, slug=doc['slug'],
title=doc['title'], html=doc['html'])
resp = self.client.get(reverse('wiki.all_documents',
locale=settings.WIKI_DEFAULT_LANGUAGE))
eq_(len(valid_documents), len(pq(resp.content).find('.document-list li')))
class CodeSampleViewTests(UserTestCase, WikiTestCase):
localizing_client = True
@override_constance_settings(
KUMA_WIKI_IFRAME_ALLOWED_HOSTS='^https?\:\/\/testserver')
def test_code_sample_1(self):
"""The raw source for a document can be requested"""
d, r = doc_rev("""
<p>This is a page. Deal with it.</p>
<div id="sample1" class="code-sample">
<pre class="brush: html">Some HTML</pre>
<pre class="brush: css">.some-css { color: red; }</pre>
<pre class="brush: js">window.alert("HI THERE")</pre>
</div>
<p>test</p>
""")
expecteds = (
'<style type="text/css">.some-css { color: red; }</style>',
'Some HTML',
'<script type="text/javascript">window.alert("HI THERE")</script>',
)
Switch.objects.create(name='application_ACAO', active=True)
response = self.client.get(reverse('wiki.code_sample',
args=[d.slug, 'sample1']),
HTTP_HOST='testserver')
ok_('Access-Control-Allow-Origin' in response)
eq_('*', response['Access-Control-Allow-Origin'])
eq_(200, response.status_code)
normalized = normalize_html(response.content)
# Content checks
ok_('<!DOCTYPE html>' in response.content)
for item in expecteds:
ok_(item in normalized)
@override_constance_settings(
KUMA_WIKI_IFRAME_ALLOWED_HOSTS='^https?\:\/\/sampleserver')
def test_code_sample_host_restriction(self):
d, r = doc_rev("""
<p>This is a page. Deal with it.</p>
<div id="sample1" class="code-sample">
<pre class="brush: html">Some HTML</pre>
<pre class="brush: css">.some-css { color: red; }</pre>
<pre class="brush: js">window.alert("HI THERE")</pre>
</div>
<p>test</p>
""")
response = self.client.get(reverse('wiki.code_sample',
args=[d.slug, 'sample1']),
HTTP_HOST='testserver')
eq_(403, response.status_code)
response = self.client.get(reverse('wiki.code_sample',
args=[d.slug, 'sample1']),
HTTP_HOST='sampleserver')
eq_(200, response.status_code)
@override_constance_settings(
KUMA_WIKI_IFRAME_ALLOWED_HOSTS='^https?\:\/\/sampleserver')
def test_code_sample_iframe_embed(self):
slug = 'test-code-embed'
embed_url = ('https://sampleserver/%s/docs/%s$samples/sample1' %
(settings.WIKI_DEFAULT_LANGUAGE, slug))
doc_src = """
<p>This is a page. Deal with it.</p>
<div id="sample1" class="code-sample">
<pre class="brush: html">Some HTML</pre>
<pre class="brush: css">.some-css { color: red; }</pre>
<pre class="brush: js">window.alert("HI THERE")</pre>
</div>
<iframe id="if1" src="%(embed_url)s"></iframe>
<iframe id="if2" src="http://testserver"></iframe>
<iframe id="if3" src="https://some.alien.site.com"></iframe>
<p>test</p>
""" % dict(embed_url=embed_url)
slug = 'test-code-doc'
d, r = doc_rev()
revision(save=True, document=d, title="Test code doc", slug=slug,
content=doc_src)
response = self.client.get(reverse('wiki.document', args=(d.slug,)))
eq_(200, response.status_code)
page = pq(response.content)
if1 = page.find('#if1')
eq_(if1.length, 1)
eq_(if1.attr('src'), embed_url)
if2 = page.find('#if2')
eq_(if2.length, 1)
eq_(if2.attr('src'), '')
if3 = page.find('#if3')
eq_(if3.length, 1)
eq_(if3.attr('src'), '')
class CodeSampleViewFileServingTests(UserTestCase, WikiTestCase):
@override_constance_settings(
KUMA_WIKI_IFRAME_ALLOWED_HOSTS='^https?\:\/\/testserver',
WIKI_ATTACHMENT_ALLOWED_TYPES='text/plain')
@override_settings(ATTACHMENT_HOST='testserver')
def test_code_sample_file_serving(self):
self.client.login(username='admin', password='testpass')
# first let's upload a file
file_for_upload = make_test_file(content='Something something unique')
post_data = {
'title': 'An uploaded file',
'description': 'A unique experience for your file serving needs.',
'comment': 'Yadda yadda yadda',
'file': file_for_upload,
}
response = self.client.post(reverse('attachments.new_attachment'),
data=post_data)
eq_(response.status_code, 302)
# then build the document and revision we need to test
attachment = Attachment.objects.get(title='An uploaded file')
filename = attachment.current_revision.filename()
url_css = 'url("files/%(attachment_id)s/%(filename)s")' % {
'attachment_id': attachment.id,
'filename': filename,
}
doc, rev = doc_rev("""
<p>This is a page. Deal with it.</p>
<div id="sample1" class="code-sample">
<pre class="brush: html">Some HTML</pre>
<pre class="brush: css">.some-css { background: %s }</pre>
<pre class="brush: js">window.alert("HI THERE")</pre>
</div>
<p>test</p>
""" % url_css)
# then see if the code sample view has successfully found the sample
response = self.client.get(reverse('wiki.code_sample',
args=[doc.slug, 'sample1'],
locale='en-US'))
eq_(response.status_code, 200)
normalized = normalize_html(response.content)
ok_(url_css in normalized)
# and then check that the code sample file serving view redirects
# to the attachment's main file serving URL
response = self.client.get(reverse('wiki.raw_code_sample_file',
args=[doc.slug,
'sample1',
attachment.id,
filename],
locale='en-US'))
eq_(response.status_code, 302)
eq_(response['Location'], attachment.get_file_url())
class DeferredRenderingViewTests(UserTestCase, WikiTestCase):
"""Tests for the deferred rendering system and interaction with views"""
localizing_client = True
def setUp(self):
super(DeferredRenderingViewTests, self).setUp()
self.rendered_content = 'HELLO RENDERED CONTENT'
self.raw_content = 'THIS IS RAW CONTENT'
self.d, self.r = doc_rev(self.raw_content)
# Disable TOC, makes content inspection easier.
self.r.toc_depth = 0
self.r.save()
self.d.html = self.raw_content
self.d.rendered_html = self.rendered_content
self.d.save()
self.url = reverse('wiki.document',
args=(self.d.slug,),
locale=self.d.locale)
config.KUMASCRIPT_TIMEOUT = 5.0
config.KUMASCRIPT_MAX_AGE = 600
def tearDown(self):
super(DeferredRenderingViewTests, self).tearDown()
config.KUMASCRIPT_TIMEOUT = 0
config.KUMASCRIPT_MAX_AGE = 0
@mock.patch('kuma.wiki.kumascript.get')
def test_rendered_content(self, mock_kumascript_get):
"""Document view should serve up rendered content when available"""
mock_kumascript_get.return_value = (self.rendered_content, None)
resp = self.client.get(self.url, follow=False)
p = pq(resp.content)
txt = p.find('#wikiArticle').text()
ok_(self.rendered_content in txt)
ok_(self.raw_content not in txt)
eq_(0, p.find('#doc-rendering-in-progress').length)
eq_(0, p.find('#doc-render-raw-fallback').length)
def test_rendering_in_progress_warning(self):
"""Document view should serve up rendered content when available"""
# Make the document look like there's a rendering in progress.
self.d.render_started_at = datetime.datetime.now()
self.d.save()
resp = self.client.get(self.url, follow=False)
p = pq(resp.content)
txt = p.find('#wikiArticle').text()
# Even though a rendering looks like it's in progress, ensure the
# last-known render is displayed.
ok_(self.rendered_content in txt)
ok_(self.raw_content not in txt)
eq_(0, p.find('#doc-rendering-in-progress').length)
# Only for logged-in users, ensure the render-in-progress warning is
# displayed.
self.client.login(username='testuser', password='testpass')
resp = self.client.get(self.url, follow=False)
p = pq(resp.content)
eq_(1, p.find('#doc-rendering-in-progress').length)
@mock.patch('kuma.wiki.kumascript.get')
def test_raw_content_during_initial_render(self, mock_kumascript_get):
"""Raw content should be displayed during a document's initial
deferred rendering"""
mock_kumascript_get.return_value = (self.rendered_content, None)
# Make the document look like there's no rendered content, but that a
# rendering is in progress.
self.d.html = self.raw_content
self.d.rendered_html = ''
self.d.render_started_at = datetime.datetime.now()
self.d.save()
# Now, ensure that raw content is shown in the view.
resp = self.client.get(self.url, follow=False)
p = pq(resp.content)
txt = p.find('#wikiArticle').text()
ok_(self.rendered_content not in txt)
ok_(self.raw_content in txt)
eq_(0, p.find('#doc-render-raw-fallback').length)
# Only for logged-in users, ensure that a warning is displayed about
# the fallback
self.client.login(username='testuser', password='testpass')
resp = self.client.get(self.url, follow=False)
p = pq(resp.content)
eq_(1, p.find('#doc-render-raw-fallback').length)
@attr('schedule_rendering')
@mock.patch.object(Document, 'schedule_rendering')
@mock.patch('kuma.wiki.kumascript.get')
def test_schedule_rendering(self, mock_kumascript_get,
mock_document_schedule_rendering):
mock_kumascript_get.return_value = (self.rendered_content, None)
self.client.login(username='testuser', password='testpass')
data = new_document_data()
data.update({
'form': 'rev',
'content': 'This is an update',
})
edit_url = reverse('wiki.edit_document', args=[self.d.slug])
resp = self.client.post(edit_url, data)
eq_(302, resp.status_code)
ok_(mock_document_schedule_rendering.called)
mock_document_schedule_rendering.reset_mock()
data.update({
'form': 'both',
'content': 'This is a translation',
})
translate_url = (reverse('wiki.translate', args=[data['slug']],
locale=settings.WIKI_DEFAULT_LANGUAGE) + '?tolocale=fr')
response = self.client.post(translate_url, data)
eq_(302, response.status_code)
ok_(mock_document_schedule_rendering.called)
@mock.patch('kuma.wiki.kumascript.get')
@mock.patch('requests.post')
def test_alternate_bleach_whitelist(self, mock_requests_post,
mock_kumascript_get):
# Some test content with contentious tags.
test_content = """
<p id="foo">
<a style="position: absolute; border: 1px;" href="http://example.com">This is a test</a>
<textarea name="foo"></textarea>
</p>
"""
# Expected result filtered through old/current Bleach rules
expected_content_old = """
<p id="foo">
<a style="position: absolute; border: 1px;" href="http://example.com">This is a test</a>
<textarea name="foo"></textarea>
</p>
"""
# Expected result filtered through alternate whitelist
expected_content_new = """
<p id="foo">
<a style="border: 1px;" href="http://example.com">This is a test</a>
<textarea name="foo"></textarea>
</p>
"""
# Set up an alternate set of whitelists...
config.BLEACH_ALLOWED_TAGS = json.dumps([
"a", "p"
])
config.BLEACH_ALLOWED_ATTRIBUTES = json.dumps({
"a": ['href', 'style'],
"p": ['id']
})
config.BLEACH_ALLOWED_STYLES = json.dumps([
"border"
])
config.KUMASCRIPT_TIMEOUT = 100
# Rig up a mocked response from KumaScript GET method
mock_kumascript_get.return_value = (test_content, None)
# Rig up a mocked response from KumaScript POST service
# Digging a little deeper into the stack, so that the rest of
# kumascript.post processing happens.
from StringIO import StringIO
m_resp = mock.Mock()
m_resp.status_code = 200
m_resp.text = test_content
m_resp.read = StringIO(test_content).read
mock_requests_post.return_value = m_resp
d, r = doc_rev(test_content)
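# Each trial is (logged_in, extra_query_param, expected_html): the alternate
# Bleach whitelist should only kick in for logged-in users passing &bleach_new.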
trials = (
(False, '', expected_content_old),
(False, '&bleach_new', expected_content_old),
(True, '', expected_content_old),
(True, '&bleach_new', expected_content_new),
)
for trial in trials:
do_login, param, expected = trial
if do_login:
self.client.login(username='testuser', password='testpass')
else:
self.client.logout()
url = ('%s?raw&macros%s' % (
reverse('wiki.document', args=(d.slug,), locale=d.locale),
param))
resp = self.client.get(url, follow=True)
eq_(normalize_html(expected),
normalize_html(resp.content),
"Should match? %s %s %s %s" %
(do_login, param, expected, resp.content))
class APITests(UserTestCase, WikiTestCase):
localizing_client = True
def setUp(self):
super(APITests, self).setUp()
self.username = 'tester23'
self.password = 'trustno1'
self.email = 'tester23@example.com'
self.user = user(username=self.username,
email=self.email,
password=self.password,
save=True)
self.key = Key(user=self.user, description='Test Key 1')
self.secret = self.key.generate_secret()
self.key_id = self.key.key
self.key.save()
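# Build the HTTP Basic Authorization header from the key id and secret
# (base64 of "id:secret"); the PUT API tests below authenticate with it.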
auth = '%s:%s' % (self.key_id, self.secret)
self.basic_auth = 'Basic %s' % base64.encodestring(auth)
self.d, self.r = doc_rev("""
<h3 id="S1">Section 1</h3>
<p>This is a page. Deal with it.</p>
<h3 id="S2">Section 2</h3>
<p>This is a page. Deal with it.</p>
<h3 id="S3">Section 3</h3>
<p>This is a page. Deal with it.</p>
""")
self.r.tags = "foo, bar, baz"
self.r.review_tags.set('technical', 'editorial')
self.url = self.d.get_absolute_url()
def tearDown(self):
super(APITests, self).tearDown()
Document.objects.filter(current_revision__creator=self.user).delete()
Revision.objects.filter(creator=self.user).delete()
Key.objects.filter(user=self.user).delete()
self.user.delete()
def test_put_existing(self):
"""PUT API should allow overwrite of existing document content"""
data = dict(
summary="Look, I made an edit!",
content="""
<p>This is an edit to the page. We've dealt with it.</p>
""",
)
# No auth key leads to a 403 Forbidden
resp = self._put(self.url, data)
eq_(403, resp.status_code)
# But, this should work, given a proper auth key
resp = self._put(self.url, data,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(205, resp.status_code)
# Verify the edit happened.
curr_d = Document.objects.get(pk=self.d.pk)
eq_(normalize_html(data['content'].strip()),
normalize_html(Document.objects.get(pk=self.d.pk).html))
# Also, verify that this resulted in a new revision.
curr_r = curr_d.current_revision
ok_(self.r.pk != curr_r.pk)
eq_(data['summary'], curr_r.summary)
r_tags = ','.join(sorted(t.name for t in curr_r.review_tags.all()))
eq_('editorial,technical', r_tags)
def test_put_section_edit(self):
"""PUT API should allow overwrite of a specific section of an existing
document"""
data = dict(
content="""
<h3 id="S2">Section 2</h3>
<p>This is an edit to the page. We've dealt with it.</p>
""",
# Along with the section, let's piggyback in some other metadata
# edits just for good measure. They're not tied to section edit
# though.
title="Hahah this is a new title!",
tags="hello,quux,xyzzy",
review_tags="technical",
)
resp = self._put('%s?section=S2' % self.url, data,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(205, resp.status_code)
expected = """
<h3 id="S1">Section 1</h3>
<p>This is a page. Deal with it.</p>
<h3 id="S2">Section 2</h3>
<p>This is an edit to the page. We've dealt with it.</p>
<h3 id="S3">Section 3</h3>
<p>This is a page. Deal with it.</p>
"""
# Verify the section edit happened.
curr_d = Document.objects.get(pk=self.d.pk)
eq_(normalize_html(expected.strip()),
normalize_html(curr_d.html))
eq_(data['title'], curr_d.title)
d_tags = ','.join(sorted(t.name for t in curr_d.tags.all()))
eq_(data['tags'], d_tags)
# Also, verify that this resulted in a new revision.
curr_r = curr_d.current_revision
ok_(self.r.pk != curr_r.pk)
r_tags = ','.join(sorted(t.name for t in curr_r.review_tags.all()))
eq_(data['review_tags'], r_tags)
def test_put_new_root(self):
"""PUT API should allow creation of a document whose path would place
it at the root of the topic hierarchy."""
slug = 'new-root-doc'
url = reverse('wiki.document', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
data = dict(
title="This is the title of a new page",
content="""
<p>This is a new page, hooray!</p>
""",
tags="hello,quux,xyzzy",
review_tags="technical",
)
resp = self._put(url, data,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(201, resp.status_code)
def test_put_new_child(self):
"""PUT API should allow creation of a document whose path would make it
a child of an existing parent."""
data = dict(
title="This is the title of a new page",
content="""
<p>This is a new page, hooray!</p>
""",
tags="hello,quux,xyzzy",
review_tags="technical",
)
# This first attempt should fail; the proposed parent does not exist.
url = '%s/nonexistent/newchild' % self.url
resp = self._put(url, data,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(404, resp.status_code)
# TODO: I suppose we could rework this part to create the chain of
# missing parents with stub content, but currently this demands
# that API users do that themselves.
# Now, fill in the parent gap...
p_doc = document(slug='%s/nonexistent' % self.d.slug,
locale=settings.WIKI_DEFAULT_LANGUAGE,
parent_topic=self.d)
p_doc.save()
p_rev = revision(document=p_doc,
slug='%s/nonexistent' % self.d.slug,
title='I EXIST NOW', save=True)
p_rev.save()
# The creation should work, now.
resp = self._put(url, data,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(201, resp.status_code)
new_slug = '%s/nonexistent/newchild' % self.d.slug
new_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=new_slug)
eq_(p_doc.pk, new_doc.parent_topic.pk)
def test_put_unsupported_content_type(self):
"""PUT API should complain with a 400 Bad Request on an unsupported
content type submission"""
slug = 'new-root-doc'
url = reverse('wiki.document', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
data = "I don't even know what this content is."
resp = self._put(url, json.dumps(data),
content_type='x-super-happy-fun-text',
HTTP_AUTHORIZATION=self.basic_auth)
eq_(400, resp.status_code)
def test_put_json(self):
"""PUT API should handle application/json requests"""
slug = 'new-root-json-doc'
url = reverse('wiki.document', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
data = dict(
title="This is the title of a new page",
content="""
<p>This is a new page, hooray!</p>
""",
tags="hello,quux,xyzzy",
review_tags="technical",
)
resp = self._put(url, json.dumps(data),
content_type='application/json',
HTTP_AUTHORIZATION=self.basic_auth)
eq_(201, resp.status_code)
new_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=slug)
eq_(data['title'], new_doc.title)
eq_(normalize_html(data['content']), normalize_html(new_doc.html))
def test_put_simple_html(self):
"""PUT API should handle text/html requests"""
slug = 'new-root-html-doc-1'
url = reverse('wiki.document', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
html = """
<p>This is a new page, hooray!</p>
"""
resp = self._put(url, html, content_type='text/html',
HTTP_AUTHORIZATION=self.basic_auth)
eq_(201, resp.status_code)
new_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=slug)
eq_(normalize_html(html), normalize_html(new_doc.html))
def test_put_complex_html(self):
"""PUT API should handle text/html requests with complex HTML documents
and extract document fields from the markup"""
slug = 'new-root-html-doc-2'
url = reverse('wiki.document', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
data = dict(
title='This is a complex document',
content="""
<p>This is a new page, hooray!</p>
""",
)
html = """
<html>
<head>
<title>%(title)s</title>
</head>
<body>%(content)s</body>
</html>
""" % data
resp = self._put(url, html, content_type='text/html',
HTTP_AUTHORIZATION=self.basic_auth)
eq_(201, resp.status_code)
new_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=slug)
eq_(data['title'], new_doc.title)
eq_(normalize_html(data['content']), normalize_html(new_doc.html))
# TODO: Anything else useful to extract from HTML?
# Extract tags from head metadata?
def test_put_track_authkey(self):
"""Revisions modified by PUT API should track the auth key used"""
slug = 'new-root-doc'
url = reverse('wiki.document', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
data = dict(
title="This is the title of a new page",
content="""
<p>This is a new page, hooray!</p>
""",
tags="hello,quux,xyzzy",
review_tags="technical",
)
resp = self._put(url, data, HTTP_AUTHORIZATION=self.basic_auth)
eq_(201, resp.status_code)
last_log = self.key.history.order_by('-pk').all()[0]
eq_('created', last_log.action)
data['title'] = 'New title for old page'
resp = self._put(url, data, HTTP_AUTHORIZATION=self.basic_auth)
eq_(205, resp.status_code)
last_log = self.key.history.order_by('-pk').all()[0]
eq_('updated', last_log.action)
def test_put_etag_conflict(self):
"""A PUT request with an if-match header throws a 412 Precondition
Failed if the underlying document has been changed."""
resp = self.client.get(self.url)
orig_etag = resp['ETag']
content1 = """
<h2 id="s1">Section 1</h2>
<p>New section 1</p>
<h2 id="s2">Section 2</h2>
<p>New section 2</p>
"""
# First update should work.
resp = self._put(self.url, dict(content=content1),
HTTP_IF_MATCH=orig_etag,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(205, resp.status_code)
# Get the new etag, ensure it doesn't match the original.
resp = self.client.get(self.url)
new_etag = resp['ETag']
ok_(orig_etag != new_etag)
# But, the ETag should have changed, so this update shouldn't work.
# Using the old ETag suggests a mid-air edit collision happened.
resp = self._put(self.url, dict(content=content1),
HTTP_IF_MATCH=orig_etag,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(412, resp.status_code)
# Just for good measure, switching to the new ETag should work
resp = self._put(self.url, dict(content=content1),
HTTP_IF_MATCH=new_etag,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(205, resp.status_code)
def _put(self, path, data={}, content_type=MULTIPART_CONTENT,
follow=False, **extra):
"""django.test.client.put() does the wrong thing, here. This does
better, based on post()."""
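# Encode the payload according to the content type, then issue a raw PUT by
# driving the test client's low-level request() with a WSGI-style environ.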
if content_type is MULTIPART_CONTENT:
post_data = encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
post_data = smart_str(data, encoding=charset)
parsed = urlparse(path)
params = {
'CONTENT_LENGTH': len(post_data),
'CONTENT_TYPE': content_type,
'PATH_INFO': self.client._get_path(parsed),
'QUERY_STRING': parsed[4],
'REQUEST_METHOD': 'PUT',
'wsgi.input': FakePayload(post_data),
}
params.update(extra)
response = self.client.request(**params)
if follow:
response = self.client._handle_redirects(response, **extra)
return response
class PageMoveTests(UserTestCase, WikiTestCase):
localizing_client = True
def setUp(self):
super(PageMoveTests, self).setUp()
page_move_flag = Flag.objects.create(name='page_move')
page_move_flag.users = self.user_model.objects.filter(is_superuser=True)
page_move_flag.save()
def test_move_conflict(self):
parent = revision(title='Test page move views',
slug='test-page-move-views',
is_approved=True,
save=True)
parent_doc = parent.document
child = revision(title='Child of page-move view test',
slug='page-move/test-views',
is_approved=True,
save=True)
child_doc = child.document
child_doc.parent_topic = parent.document
child_doc.save()
revision(title='Conflict for page-move view',
slug='moved/test-page-move-views/test-views',
is_approved=True,
save=True)
data = {'slug': 'moved/test-page-move-views'}
self.client.login(username='admin', password='testpass')
resp = self.client.post(reverse('wiki.move',
args=(parent_doc.slug,),
locale=parent_doc.locale),
data=data)
eq_(200, resp.status_code)
class DocumentZoneTests(UserTestCase, WikiTestCase):
localizing_client = True
def setUp(self):
super(DocumentZoneTests, self).setUp()
root_rev = revision(title='ZoneRoot', slug='ZoneRoot',
content='This is the Zone Root',
is_approved=True, save=True)
self.root_doc = root_rev.document
middle_rev = revision(title='middlePage', slug='middlePage',
content='This is a middlepage',
is_approved=True, save=True)
self.middle_doc = middle_rev.document
self.middle_doc.parent_topic = self.root_doc
self.middle_doc.save()
sub_rev = revision(title='SubPage', slug='SubPage',
content='This is a subpage',
is_approved=True, save=True)
self.sub_doc = sub_rev.document
self.sub_doc.parent_topic = self.middle_doc
self.sub_doc.save()
self.root_zone = DocumentZone(document=self.root_doc)
self.root_zone.styles = """
article { color: blue; }
"""
self.root_zone.save()
self.middle_zone = DocumentZone(document=self.middle_doc)
self.middle_zone.styles = """
article { font-weight: bold; }
"""
self.middle_zone.save()
def test_zone_styles(self):
"""Ensure CSS styles for a zone can be fetched"""
url = reverse('wiki.styles', args=(self.root_doc.slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.get(url, follow=True)
eq_(self.root_zone.styles, response.content)
url = reverse('wiki.styles', args=(self.middle_doc.slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.get(url, follow=True)
eq_(self.middle_zone.styles, response.content)
url = reverse('wiki.styles', args=(self.sub_doc.slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.get(url, follow=True)
eq_(404, response.status_code)
def test_zone_styles_links(self):
"""Ensure link to zone style appears in child document views"""
url = reverse('wiki.document', args=(self.sub_doc.slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.get(url, follow=True)
styles_url = reverse('wiki.styles', args=(self.root_doc.slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
root_expected = ('<link rel="stylesheet" type="text/css" href="%s"' %
styles_url)
ok_(root_expected in response.content)
styles_url = reverse('wiki.styles', args=(self.middle_doc.slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
middle_expected = ('<link rel="stylesheet" type="text/css" href="%s"' %
styles_url)
ok_(middle_expected in response.content)
class ListDocumentTests(UserTestCase, WikiTestCase):
"""Tests for list_documents view"""
localizing_client = True
fixtures = UserTestCase.fixtures + ['wiki/documents.json']
def test_case_insensitive_tags(self):
"""
Bug 976071 - Tags should be case insensitive
https://bugzil.la/976071
"""
lower_tag = DocumentTag.objects.create(name='foo', slug='foo')
lower_tag.save()
doc = Document.objects.get(pk=1)
doc.tags.set(lower_tag)
response = self.client.get(reverse('wiki.tag', args=['foo']))
ok_(doc.slug in response.content.decode('utf-8'))
response = self.client.get(reverse('wiki.tag', args=['Foo']))
ok_(doc.slug in response.content.decode('utf-8'))
|
cindyyu/kuma
|
kuma/wiki/tests/test_views.py
|
Python
|
mpl-2.0
| 167,438
|
[
"VisIt"
] |
752a693ca68f6bd83a3e48ec2d2c880b9b41919fa64508bb9ac622aeef9db74a
|
"""
Module holds JMX handlers implementations
Copyright 2017 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
from distutils.version import LooseVersion
from lxml import etree
from bzt import TaurusInternalException, TaurusConfigError
from bzt.engine import Scenario
from bzt.jmx import JMX
from bzt.jmx.base import cond_int
from bzt.jmx.threadgroups import ThreadGroup, ConcurrencyThreadGroup, ThreadGroupHandler
from bzt.requests_model import RequestVisitor, has_variable_pattern, HierarchicRequestParser
from bzt.utils import iteritems, numeric_types
from bzt.utils import BetterDict, dehumanize_time, ensure_is_dict, load_class, guess_delimiter
class RequestCompiler(RequestVisitor):
def __init__(self, jmx_builder):
super(RequestCompiler, self).__init__()
self.jmx_builder = jmx_builder
def visit_mqttrequest(self, request):
return self.jmx_builder.compile_request(request)
def visit_hierarchichttprequest(self, request):
return self.jmx_builder.compile_request(request)
def visit_ifblock(self, block):
return self.jmx_builder.compile_if_block(block)
def visit_onceblock(self, block):
return self.jmx_builder.compile_once_block(block)
def visit_loopblock(self, block):
return self.jmx_builder.compile_loop_block(block)
def visit_whileblock(self, block):
return self.jmx_builder.compile_while_block(block)
def visit_foreachblock(self, block):
return self.jmx_builder.compile_foreach_block(block)
def visit_transactionblock(self, block):
return self.jmx_builder.compile_transaction_block(block)
def visit_includescenarioblock(self, block):
scenario_name = block.scenario_name
if scenario_name in self.path:
msg = "Mutual recursion detected in include-scenario blocks (scenario %s)"
raise TaurusConfigError(msg % scenario_name)
self.record_path(scenario_name)
return self.jmx_builder.compile_include_scenario_block(block)
def visit_actionblock(self, block):
return self.jmx_builder.compile_action_block(block)
def visit_setvariables(self, block):
return self.jmx_builder.compile_set_variables_block(block)
class LoadSettingsProcessor(object):
TG = ThreadGroup.__name__
CTG = ConcurrencyThreadGroup.__name__
def __init__(self, executor):
self.log = executor.log.getChild(self.__class__.__name__)
self.load = executor.get_specific_load()
self.raw_load = executor.get_raw_load()
self.log.debug("Load: %s", self.load)
self.force_ctg = executor.settings.get("force-ctg", True)
self.tg = self._detect_thread_group(executor)
self.tg_handler = ThreadGroupHandler(self.log)
def _detect_thread_group(self, executor):
"""
Detect preferred thread group
:param executor:
:return:
"""
tg = self.TG
if not self.force_ctg:
return tg
msg = 'Thread group detection: %s, regular ThreadGroup will be used'
if not self.load.duration:
self.log.debug(msg, 'duration not found')
elif self.load.iterations:
self.log.debug(msg, 'iterations are found')
elif not executor.tool:
msg = 'You must set executor tool (%s) for choosing of ConcurrencyThreadGroup'
raise TaurusInternalException(msg % executor.tool_name)
elif not executor.tool.ctg_plugin_installed():
self.log.warning(msg % 'plugin for ConcurrencyThreadGroup not found')
else:
tg = self.CTG
return tg
def modify(self, jmx, is_jmx_generated=False):
if not (self.raw_load.iterations or self.raw_load.concurrency or self.load.duration):
self.log.debug('No iterations/concurrency/duration found, thread group modification is skipped')
return
# IMPORTANT: fix the group order here, since converting an element's type changes the order in which groups are retrieved
groups = list(self.tg_handler.groups(jmx))
# user concurrency is a JMeter variable; write it to the thread group as-is
if isinstance(self.load.concurrency, str):
target_list = [(group, self.load.concurrency) for group in groups]
else: # concurrency is numeric or empty
raw = self.load.concurrency is None # keep the existing concurrency if self.load.concurrency is omitted
concurrency_list = []
for group in groups:
concurrency = group.get_concurrency(raw=raw)
if concurrency is None:
concurrency = 1
concurrency_list.append(concurrency)
if not raw: # divide numeric concurrency
self._divide_concurrency(concurrency_list)
target_list = zip(groups, concurrency_list)
for group, concurrency in target_list:
iterations = None
existed_tg = (not is_jmx_generated) and (group.gtype == self.TG)
if not self.force_ctg and existed_tg:
iterations = group.get_iterations()
self.tg_handler.convert(source=group, target_gtype=self.tg, load=self.load,
concurrency=concurrency, iterations=iterations)
if self.load.throughput:
self._add_shaper(jmx)
if self.tg == self.TG and self.load.steps:
self.log.warning("Stepping ramp-up isn't supported for regular ThreadGroup")
def _divide_concurrency(self, concurrency_list):
"""
calculate target concurrency for every thread group
"""
total_old_concurrency = sum(concurrency_list)
for idx, concurrency in enumerate(concurrency_list):
if total_old_concurrency and concurrency_list[idx] != 0:
part_of_load = 1.0 * self.load.concurrency * concurrency / total_old_concurrency
concurrency_list[idx] = int(round(part_of_load))
if concurrency_list[idx] == 0:
concurrency_list[idx] = 1
else:
concurrency_list[idx] = 0
total_new_concurrency = sum(concurrency_list)
leftover = self.load.concurrency - total_new_concurrency
if leftover < 0:
msg = "Had to add %s more threads to maintain thread group proportion"
self.log.warning(msg, -leftover)
elif leftover > 0:
msg = "%s threads left undistributed due to thread group proportion"
self.log.warning(msg, leftover)
def _add_shaper(self, jmx):
"""
Add shaper
:param jmx: JMX
:return:
"""
if not self.load.duration:
self.log.warning("You must set 'ramp-up' and/or 'hold-for' when using 'throughput' option")
return
etree_shaper = jmx.get_rps_shaper()
if self.load.ramp_up:
if isinstance(self.load.throughput, numeric_types) and self.load.duration:
start_rps = self.load.throughput / float(self.load.duration)
start_rps = max(start_rps, 0.001) # avoid zeroing
start_rps = min(start_rps, 1.0) # avoid starting too fast
else:
start_rps = 1
if not self.load.steps:
jmx.add_rps_shaper_schedule(etree_shaper, start_rps, self.load.throughput, self.load.ramp_up)
else:
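# Stepped mode: raise the throughput in equal increments, one step per
# self.load.steps, spreading the ramp-up time evenly across the steps.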
step_h = self.load.throughput / self.load.steps
step_w = float(self.load.ramp_up) / self.load.steps
accum_time = 0
for step in range(1, self.load.steps + 1):
jmx.add_rps_shaper_schedule(etree_shaper, step_h * step, step_h * step,
step_w * step - accum_time)
accum_time += cond_int(step_w * step - accum_time)
if self.load.hold:
jmx.add_rps_shaper_schedule(etree_shaper, self.load.throughput, self.load.throughput, self.load.hold)
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree_shaper)
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree.Element("hashTree"))
class ProtocolHandler(object):
def __init__(self, sys_props, engine):
super(ProtocolHandler, self).__init__()
self.system_props = sys_props
self.engine = engine
def get_toplevel_elements(self, scenario):
return []
def get_sampler_pair(self, request):
return None, None
@staticmethod
def safe_time(any_time):
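# Convert a humanized time value (e.g. '5s') into whole milliseconds; if it
# can't be parsed, return the original value unchanged.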
try:
smart_time = int(1000 * dehumanize_time(any_time))
except TaurusInternalException:
smart_time = any_time
return smart_time
class JMeterScenarioBuilder(JMX):
"""
Helper to build JMeter test plan from Scenario
:type protocol_handlers: dict[str,ProtocolHandler]
"""
def __init__(self, executor, original=None):
"""
:type executor: ScenarioExecutor
:type original: JMX
"""
super(JMeterScenarioBuilder, self).__init__(original)
self.executor = executor
self.scenario = executor.get_scenario()
self.engine = executor.engine
self.system_props = BetterDict()
self.request_compiler = None
self.default_protocol = self.executor.settings.get('default-protocol', 'http')
self.protocol_handlers = {}
for protocol, cls_name in iteritems(self.executor.settings.get("protocol-handlers")):
cls_obj = load_class(cls_name)
instance = cls_obj(self.system_props, self.engine)
self.protocol_handlers[protocol] = instance
self.FIELD_KEYSTORE_CONFIG = 'keystore-config'
@staticmethod
def _get_timer(req):
think_time = req.get_think_time(full=True)
if not think_time:
return []
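# A list-valued think time is treated as [timer_type, mean, deviation] and is
# mapped below onto JMeter's uniform, gaussian, or poisson timers.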
if not isinstance(think_time, list): # constant
return JMX.get_constant_timer(delay=ProtocolHandler.safe_time(think_time))
mean = ProtocolHandler.safe_time(think_time[1])
dev = ProtocolHandler.safe_time(think_time[2])
if think_time[0] == "uniform":
return JMX.get_uniform_timer(maximum=dev * 2, offset=mean - dev)
elif think_time[0] == "gaussian":
return JMX.get_gaussian_timer(dev=dev, offset=mean)
elif think_time[0] == "poisson":
return JMX.get_poisson_timer(lam=mean - dev, delay=dev)
else:
raise TaurusConfigError("Wrong timer type: %s" % think_time[0])
def __add_extractors(self, children, req):
self.__add_boundary_ext(children, req)
self.__add_regexp_ext(children, req)
self.__add_json_ext(children, req)
self.__add_jquery_ext(children, req)
self.__add_xpath_ext(children, req)
def __add_boundary_ext(self, children, req):
extractors = req.config.get("extract-boundary")
for varname, cfg in iteritems(extractors):
subj = cfg.get('subject', 'body')
left = cfg.get('left', TaurusConfigError("Left boundary is missing for boundary extractor %s" % varname))
right = cfg.get('right', TaurusConfigError("Right boundary is missing for boundary extractor %s" % varname))
match_no = cfg.get('match-no', 1)
defvalue = cfg.get('default', 'NOT_FOUND')
scope = cfg.get("scope", None)
from_var = cfg.get("from-variable", None)
extractor = JMX._get_boundary_extractor(varname, subj, left, right, match_no, defvalue, scope, from_var)
children.append(extractor)
children.append(etree.Element("hashTree"))
def __add_regexp_ext(self, children, req):
extractors = req.config.get("extract-regexp")
for varname in extractors:
cfg = ensure_is_dict(extractors, varname, "regexp")
scope = cfg.get("scope", None)
from_var = cfg.get("from-variable", None)
extractor = JMX._get_extractor(varname, cfg.get('subject', 'body'), cfg['regexp'], cfg.get('template', 1),
cfg.get('match-no', 1), cfg.get('default', 'NOT_FOUND'), scope, from_var)
children.append(extractor)
children.append(etree.Element("hashTree"))
def __add_json_ext(self, children, req):
jextractors = req.config.get("extract-jsonpath")
for varname in jextractors:
cfg = ensure_is_dict(jextractors, varname, "jsonpath")
if LooseVersion(str(self.executor.settings.get("version"))) < LooseVersion("3.0"):
extractor = JMX._get_json_extractor(varname,
cfg["jsonpath"],
cfg.get("default", "NOT_FOUND"),
cfg.get("from-variable", None))
else:
extractor = JMX._get_internal_json_extractor(varname,
cfg["jsonpath"],
cfg.get("default", "NOT_FOUND"),
cfg.get("scope", None),
cfg.get("from-variable", None),
cfg.get("match-no", "0"),
cfg.get("concat", False))
children.append(extractor)
children.append(etree.Element("hashTree"))
def __add_jquery_ext(self, children, req):
css_jquery_extors = req.config.get("extract-css-jquery")
for varname in css_jquery_extors:
cfg = ensure_is_dict(css_jquery_extors, varname, "expression")
extractor = self._get_jquerycss_extractor(varname,
cfg['expression'],
cfg.get('attribute', ""),
cfg.get('match-no', 0),
cfg.get('default', 'NOT_FOUND'),
cfg.get("scope", None),
cfg.get("from-variable", None))
children.append(extractor)
children.append(etree.Element("hashTree"))
def __add_xpath_ext(self, children, req):
xpath_extractors = req.config.get("extract-xpath")
for varname in xpath_extractors:
cfg = ensure_is_dict(xpath_extractors, varname, "xpath")
children.append(JMX._get_xpath_extractor(varname,
cfg['xpath'],
cfg.get('default', 'NOT_FOUND'),
cfg.get('validate-xml', False),
cfg.get('ignore-whitespace', True),
cfg.get("match-no", "-1"),
cfg.get('use-namespaces', False),
cfg.get('use-tolerant-parser', False),
cfg.get("scope", None),
cfg.get("from-variable", None)))
children.append(etree.Element("hashTree"))
@staticmethod
def __add_assertions(children, req):
assertions = req.config.get("assert", [])
for idx, assertion in enumerate(assertions):
assertion = ensure_is_dict(assertions, idx, "contains")
if not isinstance(assertion['contains'], list):
assertion['contains'] = [assertion['contains']]
children.append(JMX._get_resp_assertion(assertion.get("subject", Scenario.FIELD_BODY),
assertion['contains'],
assertion.get('regexp', True),
assertion.get('not', False),
assertion.get('assume-success', False)))
children.append(etree.Element("hashTree"))
jpath_assertions = req.config.get("assert-jsonpath", [])
for idx, assertion in enumerate(jpath_assertions):
assertion = ensure_is_dict(jpath_assertions, idx, "jsonpath")
exc = TaurusConfigError('JSON Path not found in assertion: %s' % assertion)
component = JMX._get_json_path_assertion(assertion.get('jsonpath', exc),
assertion.get('expected-value', ''),
assertion.get('validate', False),
assertion.get('expect-null', False),
assertion.get('invert', False),
assertion.get('regexp', True))
children.append(component)
children.append(etree.Element("hashTree"))
xpath_assertions = req.config.get("assert-xpath", [])
for idx, assertion in enumerate(xpath_assertions):
assertion = ensure_is_dict(xpath_assertions, idx, "xpath")
exc = TaurusConfigError('XPath not found in assertion: %s' % assertion)
component = JMX._get_xpath_assertion(assertion.get('xpath', exc),
assertion.get('validate-xml', False),
assertion.get('ignore-whitespace', True),
assertion.get('use-tolerant-parser', False),
assertion.get('invert', False))
children.append(component)
children.append(etree.Element("hashTree"))
@staticmethod
def __add_jsr_elements(children, req, get_from_config=True):
"""
:type children: etree.Element
:type req: Request
"""
jsrs = []
if get_from_config:
jsrs = req.config.get("jsr223", [])
else:
jsrs = req.get("jsr223", [])
if not isinstance(jsrs, list):
jsrs = [jsrs]
for idx, _ in enumerate(jsrs):
jsr = ensure_is_dict(jsrs, idx, sub_key='script-text')
lang = jsr.get("language", "groovy")
script_file = jsr.get("script-file", None)
script_text = jsr.get("script-text", None)
if not script_file and not script_text:
raise TaurusConfigError("jsr223 element must specify one of 'script-file' or 'script-text'")
parameters = jsr.get("parameters", "")
execute = jsr.get("execute", "after")
cache_key = str(jsr.get("compile-cache", True)).lower()
children.append(JMX._get_jsr223_element(lang, script_file, parameters, execute, script_text, cache_key))
children.append(etree.Element("hashTree"))
def __gen_requests(self, scenario):
http_protocol = scenario.data.get('protocol', 'http') == 'http'
requests = scenario.get_requests(parser=HierarchicRequestParser, require_url=http_protocol)
elements = []
for compiled in self.compile_requests(requests):
elements.extend(compiled)
return elements
def compile_scenario(self, scenario):
elements = []
for _, protocol in iteritems(self.protocol_handlers):
elements.extend(protocol.get_toplevel_elements(scenario))
elements.extend(self.__gen_authorization(scenario))
elements.extend(self.__gen_keystore_config(scenario))
elements.extend(self.__gen_data_sources(scenario))
elements.extend(self.__gen_requests(scenario))
self.__add_jsr_elements(elements, scenario, False)
return elements
def compile_request(self, request):
"""
:type request: HierarchicHTTPRequest
:return:
"""
sampler = children = None
protocol_name = request.priority_option('protocol', default=self.default_protocol)
if protocol_name in self.protocol_handlers:
protocol = self.protocol_handlers[protocol_name]
sampler, children = protocol.get_sampler_pair(request)
if sampler is None:
self.log.warning("Problematic request: %s", request.config)
raise TaurusInternalException("Unable to handle request, please review missing options")
children.extend(self._get_timer(request))
self.__add_assertions(children, request)
self.__add_extractors(children, request)
self.__add_jsr_elements(children, request)
return [sampler, children]
def compile_if_block(self, block):
elements = []
if_controller = JMX._get_if_controller(block.condition)
then_children = etree.Element("hashTree")
for compiled in self.compile_requests(block.then_clause):
for element in compiled:
then_children.append(element)
elements.extend([if_controller, then_children])
if block.else_clause:
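# JMeter's If Controller has no native else branch, so the else clause is
# emitted as a second If Controller guarded by the negated condition.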
inverted_condition = "!(" + block.condition + ")"
else_controller = JMX._get_if_controller(inverted_condition)
else_children = etree.Element("hashTree")
for compiled in self.compile_requests(block.else_clause):
for element in compiled:
else_children.append(element)
elements.extend([else_controller, else_children])
return elements
def compile_once_block(self, block):
elements = []
once_controller = JMX._get_once_controller()
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([once_controller, children])
return elements
def compile_loop_block(self, block):
elements = []
loop_controller = JMX._get_loop_controller(block.loops)
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([loop_controller, children])
return elements
def compile_while_block(self, block):
elements = []
controller = JMX._get_while_controller(block.condition)
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([controller, children])
return elements
def compile_foreach_block(self, block):
"""
:type block: ForEachBlock
"""
elements = []
controller = JMX._get_foreach_controller(block.input_var, block.loop_var)
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([controller, children])
return elements
def compile_transaction_block(self, block):
elements = []
controller = JMX._get_transaction_controller(block.label,
block.priority_option('force-parent-sample', False),
block.include_timers)
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([controller, children])
return elements
def compile_include_scenario_block(self, block):
elements = []
controller = JMX._get_simple_controller(block.scenario_name)
children = etree.Element("hashTree")
scenario = self.executor.get_scenario(name=block.scenario_name)
for element in self.compile_scenario(scenario):
children.append(element)
elements.extend([controller, children])
return elements
def compile_action_block(self, block):
"""
:type block: ActionBlock
:return:
"""
actions = {
'stop': 0,
'pause': 1,
'stop-now': 2,
'continue': 3,
}
targets = {'current-thread': 0, 'all-threads': 2}
action = actions[block.action]
target = targets[block.target]
duration = 0
if block.duration is not None:
duration = int(block.duration * 1000)
test_action = JMX._get_action_block(action, target, duration)
children = etree.Element("hashTree")
self.__add_jsr_elements(children, block)
return [test_action, children]
@staticmethod
def compile_set_variables_block(block):
set_var_action = JMX.get_set_var_action(block.mapping)
hashtree = etree.Element("hashTree")
return [set_var_action, hashtree]
def compile_requests(self, requests):
if self.request_compiler is None:
self.request_compiler = RequestCompiler(self)
compiled = []
for request in requests:
compiled.append(self.request_compiler.visit(request))
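# Clear the visitor's recorded scenario path (the include-scenario recursion
# guard, presumably) so state doesn't leak between compiled requests.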
self.request_compiler.clear_path_cache()
return compiled
def __generate(self):
"""
Generate the test plan
"""
thread_group = JMX.get_thread_group(testname=self.executor.label)
thread_group_ht = etree.Element("hashTree", type="tg")
# NOTE: set realistic dns-cache and JVM prop by default?
self.request_compiler = RequestCompiler(self)
for element in self.compile_scenario(self.scenario):
thread_group_ht.append(element)
results_tree = self._get_results_tree()
results_tree_ht = etree.Element("hashTree")
self.append(self.TEST_PLAN_SEL, thread_group)
self.append(self.TEST_PLAN_SEL, thread_group_ht)
self.append(self.TEST_PLAN_SEL, results_tree)
self.append(self.TEST_PLAN_SEL, results_tree_ht)
def save(self, filename):
"""
Generate test plan and save
:type filename: str
"""
# NOTE: bad design; calling save() repeatedly will append duplicate generated elements
self.__generate()
super(JMeterScenarioBuilder, self).save(filename)
@staticmethod
def __gen_authorization(scenario):
"""
Generates HTTP Authorization Manager
"""
elements = []
authorizations = scenario.get("authorization")
if authorizations:
clear_flag = False
if isinstance(authorizations, dict):
if "clear" in authorizations or "list" in authorizations: # full form
clear_flag = authorizations.get("clear", False)
authorizations = authorizations.get("list", [])
else:
authorizations = [authorizations] # short form
if not isinstance(authorizations, list):
raise TaurusConfigError("Wrong authorization format: %s" % authorizations)
auth_manager = JMX.get_auth_manager(authorizations, clear_flag)
elements.append(auth_manager)
elements.append(etree.Element("hashTree"))
return elements
def __gen_data_sources(self, scenario):
elements = []
for source in scenario.get_data_sources():
source_path = source["path"]
delimiter = source.get("delimiter")
if has_variable_pattern(source_path):
msg = "Path to CSV contains JMeter variable/function, can't check for file existence: %s"
self.log.warning(msg, source_path)
if not delimiter:
delimiter = ','
self.log.warning("Can't detect CSV dialect, default delimiter will be '%s'", delimiter)
else:
source_path = self.executor.engine.find_file(source_path)
if not os.path.isfile(source_path):
raise TaurusConfigError("data-sources path not found: %s" % source_path)
if not delimiter:
delimiter = guess_delimiter(source_path)
if source.get("random-order"):
config = JMX._get_csv_config_random(source_path, delimiter, source.get("loop", True),
source.get("variable-names", ""))
else:
config = JMX._get_csv_config(source_path, delimiter, source.get("loop", True),
source.get("variable-names", ""), source.get("quoted", False))
elements.append(config)
elements.append(etree.Element("hashTree"))
return elements
def __gen_keystore_config(self, scenario):
elements = []
keystore_config = scenario.get(self.FIELD_KEYSTORE_CONFIG)
if keystore_config:
variable_name = keystore_config["variable-name"]
start_index = keystore_config["start-index"]
end_index = keystore_config["end-index"]
preload = keystore_config["preload"]
config = JMX.get_keystore_config_elements(variable_name, start_index, end_index, preload)
elements.append(config)
elements.append(etree.Element("hashTree"))
return elements
|
greyfenrir/taurus
|
bzt/jmx/tools.py
|
Python
|
apache-2.0
| 30,351
|
[
"Gaussian",
"VisIt"
] |
f3eed78b3814bfdd0062a09e1a10f114d06934d2270c9655d7938b8444c82b5c
|
from __future__ import print_function
# Copyright 2008, 2009
# CAMd (see accompanying license files for details).
from __future__ import print_function
import sys
from optparse import OptionParser
import ase.gui.i18n
from gettext import gettext as _
# Grrr, older versions (pre-python2.7) of optparse have a bug
# which prevents non-ascii descriptions. How do we circumvent this?
# For now, we have to keep the command line option descriptions in English.
def build_parser():
parser = OptionParser(usage='%prog [options] [file[, file2, ...]]',
version='%prog 0.1',
description='See the online manual ' +
'(https://wiki.fysik.dtu.dk/ase/ase/gui/gui.html) ' +
'for more information.')
parser.add_option('-n', '--image-number',
default=':', metavar='NUMBER',
help='Pick image(s) from trajectory. NUMBER can be a '
'single number (use a negative number to count from '
'the back) or a range: start:stop:step, where the '
'":step" part can be left out - default values are '
'0:nimages:1.')
parser.add_option('-u', '--show-unit-cell', type='int',
default=1, metavar='I',
help="0: Don't show unit cell. 1: Show unit cell. "
'2: Show all of unit cell.')
parser.add_option('-r', '--repeat',
default='1',
help='Repeat unit cell. Use "-r 2" or "-r 2,3,1".')
parser.add_option('-R', '--rotations', default='',
help='Examples: "-R -90x", "-R 90z,-30x".')
parser.add_option('-o', '--output', metavar='FILE',
help='Write configurations to FILE.')
parser.add_option('-g', '--graph',
# TRANSLATORS: EXPR abbreviates 'expression'
metavar='EXPR',
help='Plot x,y1,y2,... graph from configurations or '
'write data to stdout in terminal mode. Use the '
'symbols: i, s, d, fmax, e, ekin, A, R, E and F. See '
'https://wiki.fysik.dtu.dk/ase/ase/gui/gui.html'
'#plotting-data for more details.')
parser.add_option('-t', '--terminal',
action='store_true',
default=False,
help='Run in terminal window - no GUI.')
parser.add_option('--aneb',
action='store_true',
default=False,
help='Read ANEB data.')
parser.add_option('--interpolate',
type='int', metavar='N',
help='Interpolate N images between 2 given images.')
parser.add_option('-b', '--bonds',
action='store_true',
default=False,
help='Draw bonds between atoms.')
parser.add_option('-s', '--scale', dest='radii_scale', metavar='FLOAT',
default=None, type=float,
help='Scale covalent radii.')
parser.add_option('-v', '--verbose', action='store_true',
help='Verbose mode.')
return parser
def main():
parser = build_parser()
opt, args = parser.parse_args()
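# If ase isn't importable, assume we're running from a source checkout and add
# the package's parent directory to sys.path before importing the GUI modules.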
try:
import ase
except ImportError:
from os.path import dirname, join, pardir
sys.path.append(join(dirname(__file__), pardir))
from ase.gui.images import Images
from ase.atoms import Atoms
def run(opt, args):
images = Images()
if opt.aneb:
opt.image_number = '-1'
if len(args) > 0:
from ase.io import string2index
try:
images.read(args, string2index(opt.image_number))
except IOError as e:
if len(e.args) == 1:
parser.error(e.args[0])
else:
parser.error(e.args[1] + ': ' + e.filename)
else:
images.initialize([Atoms()])
if opt.interpolate:
images.interpolate(opt.interpolate)
if opt.aneb:
images.aneb()
if opt.repeat != '1':
r = opt.repeat.split(',')
if len(r) == 1:
r = 3 * r
images.repeat_images([int(c) for c in r])
if opt.radii_scale:
images.set_radii(opt.radii_scale)
if opt.output is not None:
images.write(opt.output, rotations=opt.rotations,
show_unit_cell=opt.show_unit_cell)
opt.terminal = True
if opt.terminal:
if opt.graph is not None:
data = images.graph(opt.graph)
for line in data.T:
for x in line:
print(x, end=' ')
print()
else:
from ase.gui.gui import GUI
import ase.gui.gtkexcepthook
gui = GUI(images, opt.rotations, opt.show_unit_cell, opt.bonds)
gui.run(opt.graph)
try:
run(opt, args)
except KeyboardInterrupt:
pass
except Exception as x:
if opt.verbose:
raise
else:
print('{0}: {1}'.format(x.__class__.__name__, x), file=sys.stderr)
print(_('To get a full traceback, use: ase-gui --verbose'),
file=sys.stderr)
|
suttond/MODOI
|
ase/gui/ag.py
|
Python
|
lgpl-3.0
| 5,523
|
[
"ASE"
] |
5a3486c814ba853eab75eb81387f0a62c613ad2c09b8c382a79e4f1c00a2864b
|
# -*- coding: utf-8 -*-
"""
Generate centreline and write it out in legacy .vtk format.
"""
import os
import sys
# Run in current directory.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Import path for the CentrelineGenerator script.
importPath = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../util'))
if not importPath in sys.path:
sys.path.insert(1, importPath)
del importPath
import CentrelineGenerator
# A centreline for a mesh with 8064 cores. 42x64.
CentrelineGenerator.segmentList = [10.92,[(10.92,60),None,None], [(10.92,120),None,None]]
CentrelineGenerator.radiusBase = 2.0371832715762603
CentrelineGenerator.outputFileName = "c8064Centreline.vtk"
CentrelineGenerator.sphereRadius = None
def main():
# CentrelineGenerator.GenerateCentreline(CentrelineGenerator.BuildDecreasingRadiiScalars)
CentrelineGenerator.GenerateCentreline()
if __name__ == '__main__':
print "Starting", os.path.basename(__file__)
main()
print "Exiting", os.path.basename(__file__)
|
BlueFern/DBiharMesher
|
meshes/c8064Smooth/Generate8064Centreline.py
|
Python
|
gpl-2.0
| 1,020
|
[
"VTK"
] |
dfac248af049118743b38d34428d31de63568aa411887c8e7ea0a0430aad27fc
|
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal,
assert_array_almost_equal)
from skimage.filters._gabor import gabor_kernel, gabor_filter, _sigma_prefactor
def test_gabor_kernel_size():
sigma_x = 5
sigma_y = 10
# Sizes cut off at +/- three sigma + 1 for the center
size_x = sigma_x * 6 + 1
size_y = sigma_y * 6 + 1
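    # e.g. sigma_x = 5 -> 5 * 6 + 1 = 31 columns, sigma_y = 10 -> 61 rows,
    # so the theta=0 kernel below is expected to have shape (61, 31)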
kernel = gabor_kernel(0, theta=0, sigma_x=sigma_x, sigma_y=sigma_y)
assert_equal(kernel.shape, (size_y, size_x))
kernel = gabor_kernel(0, theta=np.pi/2, sigma_x=sigma_x, sigma_y=sigma_y)
assert_equal(kernel.shape, (size_x, size_y))
def test_gabor_kernel_bandwidth():
kernel = gabor_kernel(1, bandwidth=1)
assert_equal(kernel.shape, (5, 5))
kernel = gabor_kernel(1, bandwidth=0.5)
assert_equal(kernel.shape, (9, 9))
kernel = gabor_kernel(0.5, bandwidth=1)
assert_equal(kernel.shape, (9, 9))
def test_sigma_prefactor():
assert_almost_equal(_sigma_prefactor(1), 0.56, 2)
assert_almost_equal(_sigma_prefactor(0.5), 1.09, 2)
def test_gabor_kernel_sum():
for sigma_x in range(1, 10, 2):
for sigma_y in range(1, 10, 2):
for frequency in range(0, 10, 2):
kernel = gabor_kernel(frequency+0.1, theta=0,
sigma_x=sigma_x, sigma_y=sigma_y)
# make sure gaussian distribution is covered nearly 100%
assert_almost_equal(np.abs(kernel).sum(), 1, 2)
def test_gabor_kernel_theta():
for sigma_x in range(1, 10, 2):
for sigma_y in range(1, 10, 2):
for frequency in range(0, 10, 2):
for theta in range(0, 10, 2):
kernel0 = gabor_kernel(frequency+0.1, theta=theta,
sigma_x=sigma_x, sigma_y=sigma_y)
kernel180 = gabor_kernel(frequency, theta=theta+np.pi,
sigma_x=sigma_x, sigma_y=sigma_y)
assert_array_almost_equal(np.abs(kernel0),
np.abs(kernel180))
def test_gabor_filter():
Y, X = np.mgrid[:40, :40]
frequencies = (0.1, 0.3)
wave_images = [np.sin(2 * np.pi * X * f) for f in frequencies]
def match_score(image, frequency):
gabor_responses = gabor_filter(image, frequency)
return np.mean(np.hypot(*gabor_responses))
# Gabor scores: diagonals are frequency-matched, off-diagonals are not.
responses = np.array([[match_score(image, f) for f in frequencies]
for image in wave_images])
assert responses[0, 0] > responses[0, 1]
assert responses[1, 1] > responses[0, 1]
assert responses[0, 0] > responses[1, 0]
assert responses[1, 1] > responses[1, 0]
if __name__ == "__main__":
from numpy import testing
testing.run_module_suite()
|
michaelpacer/scikit-image
|
skimage/filters/tests/test_gabor.py
|
Python
|
bsd-3-clause
| 2,901
|
[
"Gaussian"
] |
b4dbe5ccc3e53d9c56950dc84a831a1428a11a6180dda5987e71a90c79a1eb04
|
import os
import numpy as np
import pandas as pd
import xarray as xr
from . import randint, randn, requires_dask
try:
import dask
import dask.multiprocessing
except ImportError:
pass
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
class IOSingleNetCDF:
"""
A few examples that benchmark reading/writing a single netCDF file with
xarray
"""
timeout = 300.0
repeat = 1
number = 5
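    # These class attributes follow airspeed velocity (asv) conventions:
    # 'timeout' caps each benchmark run, while 'repeat' and 'number' control
    # how many times the 'time_*' methods defined below are executed.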
def make_ds(self):
# single Dataset
self.ds = xr.Dataset()
self.nt = 1000
self.nx = 90
self.ny = 45
self.block_chunks = {
"time": self.nt / 4,
"lon": self.nx / 3,
"lat": self.ny / 3,
}
self.time_chunks = {"time": int(self.nt / 36)}
times = pd.date_range("1970-01-01", periods=self.nt, freq="D")
lons = xr.DataArray(
np.linspace(0, 360, self.nx),
dims=("lon",),
attrs={"units": "degrees east", "long_name": "longitude"},
)
lats = xr.DataArray(
np.linspace(-90, 90, self.ny),
dims=("lat",),
attrs={"units": "degrees north", "long_name": "latitude"},
)
self.ds["foo"] = xr.DataArray(
randn((self.nt, self.nx, self.ny), frac_nan=0.2),
coords={"lon": lons, "lat": lats, "time": times},
dims=("time", "lon", "lat"),
name="foo",
encoding=None,
attrs={"units": "foo units", "description": "a description"},
)
self.ds["bar"] = xr.DataArray(
randn((self.nt, self.nx, self.ny), frac_nan=0.2),
coords={"lon": lons, "lat": lats, "time": times},
dims=("time", "lon", "lat"),
name="bar",
encoding=None,
attrs={"units": "bar units", "description": "a description"},
)
self.ds["baz"] = xr.DataArray(
randn((self.nx, self.ny), frac_nan=0.2).astype(np.float32),
coords={"lon": lons, "lat": lats},
dims=("lon", "lat"),
name="baz",
encoding=None,
attrs={"units": "baz units", "description": "a description"},
)
self.ds.attrs = {"history": "created for xarray benchmarking"}
self.oinds = {
"time": randint(0, self.nt, 120),
"lon": randint(0, self.nx, 20),
"lat": randint(0, self.ny, 10),
}
self.vinds = {
"time": xr.DataArray(randint(0, self.nt, 120), dims="x"),
"lon": xr.DataArray(randint(0, self.nx, 120), dims="x"),
"lat": slice(3, 20),
}
class IOWriteSingleNetCDF3(IOSingleNetCDF):
def setup(self):
self.format = "NETCDF3_64BIT"
self.make_ds()
def time_write_dataset_netcdf4(self):
self.ds.to_netcdf("test_netcdf4_write.nc", engine="netcdf4", format=self.format)
def time_write_dataset_scipy(self):
self.ds.to_netcdf("test_scipy_write.nc", engine="scipy", format=self.format)
class IOReadSingleNetCDF4(IOSingleNetCDF):
def setup(self):
self.make_ds()
self.filepath = "test_single_file.nc4.nc"
self.format = "NETCDF4"
self.ds.to_netcdf(self.filepath, format=self.format)
def time_load_dataset_netcdf4(self):
xr.open_dataset(self.filepath, engine="netcdf4").load()
def time_orthogonal_indexing(self):
ds = xr.open_dataset(self.filepath, engine="netcdf4")
ds = ds.isel(**self.oinds).load()
def time_vectorized_indexing(self):
ds = xr.open_dataset(self.filepath, engine="netcdf4")
ds = ds.isel(**self.vinds).load()
class IOReadSingleNetCDF3(IOReadSingleNetCDF4):
def setup(self):
self.make_ds()
self.filepath = "test_single_file.nc3.nc"
self.format = "NETCDF3_64BIT"
self.ds.to_netcdf(self.filepath, format=self.format)
def time_load_dataset_scipy(self):
xr.open_dataset(self.filepath, engine="scipy").load()
def time_orthogonal_indexing(self):
ds = xr.open_dataset(self.filepath, engine="scipy")
ds = ds.isel(**self.oinds).load()
def time_vectorized_indexing(self):
ds = xr.open_dataset(self.filepath, engine="scipy")
ds = ds.isel(**self.vinds).load()
class IOReadSingleNetCDF4Dask(IOSingleNetCDF):
def setup(self):
requires_dask()
self.make_ds()
self.filepath = "test_single_file.nc4.nc"
self.format = "NETCDF4"
self.ds.to_netcdf(self.filepath, format=self.format)
def time_load_dataset_netcdf4_with_block_chunks(self):
xr.open_dataset(
self.filepath, engine="netcdf4", chunks=self.block_chunks
).load()
def time_load_dataset_netcdf4_with_block_chunks_oindexing(self):
ds = xr.open_dataset(self.filepath, engine="netcdf4", chunks=self.block_chunks)
ds = ds.isel(**self.oinds).load()
def time_load_dataset_netcdf4_with_block_chunks_vindexing(self):
ds = xr.open_dataset(self.filepath, engine="netcdf4", chunks=self.block_chunks)
ds = ds.isel(**self.vinds).load()
def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_dataset(
self.filepath, engine="netcdf4", chunks=self.block_chunks
).load()
def time_load_dataset_netcdf4_with_time_chunks(self):
xr.open_dataset(self.filepath, engine="netcdf4", chunks=self.time_chunks).load()
def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_dataset(
self.filepath, engine="netcdf4", chunks=self.time_chunks
).load()
class IOReadSingleNetCDF3Dask(IOReadSingleNetCDF4Dask):
def setup(self):
requires_dask()
self.make_ds()
self.filepath = "test_single_file.nc3.nc"
self.format = "NETCDF3_64BIT"
self.ds.to_netcdf(self.filepath, format=self.format)
def time_load_dataset_scipy_with_block_chunks(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_dataset(
self.filepath, engine="scipy", chunks=self.block_chunks
).load()
def time_load_dataset_scipy_with_block_chunks_oindexing(self):
ds = xr.open_dataset(self.filepath, engine="scipy", chunks=self.block_chunks)
ds = ds.isel(**self.oinds).load()
def time_load_dataset_scipy_with_block_chunks_vindexing(self):
ds = xr.open_dataset(self.filepath, engine="scipy", chunks=self.block_chunks)
ds = ds.isel(**self.vinds).load()
def time_load_dataset_scipy_with_time_chunks(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_dataset(
self.filepath, engine="scipy", chunks=self.time_chunks
).load()
class IOMultipleNetCDF:
"""
A few examples that benchmark reading/writing multiple netCDF files with
xarray
"""
timeout = 300.0
repeat = 1
number = 5
def make_ds(self, nfiles=10):
# multiple Dataset
self.ds = xr.Dataset()
self.nt = 1000
self.nx = 90
self.ny = 45
self.nfiles = nfiles
self.block_chunks = {
"time": self.nt / 4,
"lon": self.nx / 3,
"lat": self.ny / 3,
}
self.time_chunks = {"time": int(self.nt / 36)}
self.time_vars = np.split(
pd.date_range("1970-01-01", periods=self.nt, freq="D"), self.nfiles
)
self.ds_list = []
self.filenames_list = []
for i, times in enumerate(self.time_vars):
ds = xr.Dataset()
nt = len(times)
lons = xr.DataArray(
np.linspace(0, 360, self.nx),
dims=("lon",),
attrs={"units": "degrees east", "long_name": "longitude"},
)
lats = xr.DataArray(
np.linspace(-90, 90, self.ny),
dims=("lat",),
attrs={"units": "degrees north", "long_name": "latitude"},
)
ds["foo"] = xr.DataArray(
randn((nt, self.nx, self.ny), frac_nan=0.2),
coords={"lon": lons, "lat": lats, "time": times},
dims=("time", "lon", "lat"),
name="foo",
encoding=None,
attrs={"units": "foo units", "description": "a description"},
)
ds["bar"] = xr.DataArray(
randn((nt, self.nx, self.ny), frac_nan=0.2),
coords={"lon": lons, "lat": lats, "time": times},
dims=("time", "lon", "lat"),
name="bar",
encoding=None,
attrs={"units": "bar units", "description": "a description"},
)
ds["baz"] = xr.DataArray(
randn((self.nx, self.ny), frac_nan=0.2).astype(np.float32),
coords={"lon": lons, "lat": lats},
dims=("lon", "lat"),
name="baz",
encoding=None,
attrs={"units": "baz units", "description": "a description"},
)
ds.attrs = {"history": "created for xarray benchmarking"}
self.ds_list.append(ds)
self.filenames_list.append("test_netcdf_%i.nc" % i)
class IOWriteMultipleNetCDF3(IOMultipleNetCDF):
def setup(self):
self.make_ds()
self.format = "NETCDF3_64BIT"
def time_write_dataset_netcdf4(self):
xr.save_mfdataset(
self.ds_list, self.filenames_list, engine="netcdf4", format=self.format
)
def time_write_dataset_scipy(self):
xr.save_mfdataset(
self.ds_list, self.filenames_list, engine="scipy", format=self.format
)
class IOReadMultipleNetCDF4(IOMultipleNetCDF):
def setup(self):
requires_dask()
self.make_ds()
self.format = "NETCDF4"
xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format)
def time_load_dataset_netcdf4(self):
xr.open_mfdataset(self.filenames_list, engine="netcdf4").load()
def time_open_dataset_netcdf4(self):
xr.open_mfdataset(self.filenames_list, engine="netcdf4")
class IOReadMultipleNetCDF3(IOReadMultipleNetCDF4):
def setup(self):
requires_dask()
self.make_ds()
self.format = "NETCDF3_64BIT"
xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format)
def time_load_dataset_scipy(self):
xr.open_mfdataset(self.filenames_list, engine="scipy").load()
def time_open_dataset_scipy(self):
xr.open_mfdataset(self.filenames_list, engine="scipy")
class IOReadMultipleNetCDF4Dask(IOMultipleNetCDF):
def setup(self):
requires_dask()
self.make_ds()
self.format = "NETCDF4"
xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format)
def time_load_dataset_netcdf4_with_block_chunks(self):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.block_chunks
).load()
def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.block_chunks
).load()
def time_load_dataset_netcdf4_with_time_chunks(self):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.time_chunks
).load()
def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.time_chunks
).load()
def time_open_dataset_netcdf4_with_block_chunks(self):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.block_chunks
)
def time_open_dataset_netcdf4_with_block_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.block_chunks
)
def time_open_dataset_netcdf4_with_time_chunks(self):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.time_chunks
)
def time_open_dataset_netcdf4_with_time_chunks_multiprocessing(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(
self.filenames_list, engine="netcdf4", chunks=self.time_chunks
)
class IOReadMultipleNetCDF3Dask(IOReadMultipleNetCDF4Dask):
def setup(self):
requires_dask()
self.make_ds()
self.format = "NETCDF3_64BIT"
xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format)
def time_load_dataset_scipy_with_block_chunks(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(
self.filenames_list, engine="scipy", chunks=self.block_chunks
).load()
def time_load_dataset_scipy_with_time_chunks(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(
self.filenames_list, engine="scipy", chunks=self.time_chunks
).load()
def time_open_dataset_scipy_with_block_chunks(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(
self.filenames_list, engine="scipy", chunks=self.block_chunks
)
def time_open_dataset_scipy_with_time_chunks(self):
with dask.config.set(scheduler="multiprocessing"):
xr.open_mfdataset(
self.filenames_list, engine="scipy", chunks=self.time_chunks
)
def create_delayed_write():
import dask.array as da
vals = da.random.random(300, chunks=(1,))
ds = xr.Dataset({"vals": (["a"], vals)})
return ds.to_netcdf("file.nc", engine="netcdf4", compute=False)
class IOWriteNetCDFDask:
timeout = 60
repeat = 1
number = 5
def setup(self):
requires_dask()
self.write = create_delayed_write()
def time_write(self):
self.write.compute()
class IOWriteNetCDFDaskDistributed:
def setup(self):
try:
import distributed
except ImportError:
raise NotImplementedError()
self.client = distributed.Client()
self.write = create_delayed_write()
def cleanup(self):
self.client.shutdown()
def time_write(self):
self.write.compute()
|
shoyer/xarray
|
asv_bench/benchmarks/dataset_io.py
|
Python
|
apache-2.0
| 14,824
|
[
"NetCDF"
] |
70e84ffbedfa9cf7d752dc6c8ba1103bf215ae81d01251eb1b557c3232a04ed9
|
from setuptools import setup, find_packages
import bioframes
setup(
name = bioframes.__projectname__,
version = bioframes.__release__,
packages = find_packages(),
author = bioframes.__authors__,
author_email = bioframes.__authoremails__,
description = bioframes.__description__,
license = "GPLv2",
keywords = bioframes.__keywords__,
install_requires = [
'pandas',
'numpy',
'schema',
'pyparsing',
'biopython',
'pyvcf',
'toolz',
'sh',
'mock'
],
)
|
VDBWRAIR/bioframes
|
setup.py
|
Python
|
gpl-2.0
| 552
|
[
"Biopython"
] |
e463cedc1f689566bc08dc4db8e57f5a55262a65ebff9782eddf27ffdd7fde1c
|
import json
def make_original_json():
JSON_DATA_FILE = 'last_backup.json'
json_data = json.load(open(JSON_DATA_FILE))
data = {}
for d in json_data:
model = d['model'].split('.')[1]
pk = d['pk']
fields = d['fields']
fields['pk'] = pk
if model == 'client':
fields.update({
'messages':[],
'visits':[],
'notes':[],
'delivery':[],
})
#save fields to data array
try:
data[model][pk] = fields
except KeyError as e:
data[model] = {pk:fields}
    for pk,fields in data['interaction'].items():
        if pk in data['message']:
            data['message'][pk].update(fields)
    for pk,fields in data['message'].items():
        data['client'][fields['client_id']]['messages'].append(fields)
    for pk,fields in data['visit'].items():
        data['client'][fields['client_id']]['visits'].append(fields)
    for pk,fields in data['note'].items():
        data['client'][fields['client_id']]['notes'].append(fields)
    for pk,fields in data['pregnancyevent'].items():
        data['client'][fields['client']]['delivery'].append(fields)
json.dump(data['client'],open('tmp.json','w'),indent=2)
def make_small_json():
ids = [88,170,96,4,69,132,68,184,12,261,16,155,25,49,202,30,140,83,143,75]
original = json.load(open('db.json'))
small = {i:original[str(i)] for i in ids}
json.dump(small,open('small.json','w'),indent=2)
make_small_json()
|
I-TECH-UW/mwachx
|
tools/make_json.py
|
Python
|
apache-2.0
| 1,621
|
[
"VisIt"
] |
7ee7ee96f2cca7110de48ca277c215779d94927335a69f325f592d2c8392e741
|
# If this code is slow, I think some simple improvements can be made to greatly increase speed.
# For example, allocate all memory at the start of the function.
import sys
import numpy as np
from numpy.linalg import det
from scipy.signal import butter, lfilter
from copy import deepcopy
ANG = 1/1.889725989
#####################################
# Holy shit this function is inefficient.
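# read_cube parses the cube layout used here: two header lines (comment and
# type), one line with the atom count and grid origin, three lines with the
# number of voxels and the voxel vector along each axis, one line per atom
# (name, charge, x, y, z -- the charge field is skipped below), and finally
# the volumetric data with z varying fastest.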
def read_cube(inpf,qwalk_patch=False):
if type(inpf)==str: inpf = open(inpf,'r')
cube={}
cube['comment']=inpf.readline().replace('\n','')
cube['type']=inpf.readline().replace('\n','')
spl=inpf.readline().split()
#cube['natoms']=int(spl[0])
cube['natoms']=round(float(spl[0]))
if qwalk_patch:
cube['origin']=np.zeros(3)
else:
cube['origin']=np.array(spl[1:],dtype=float)
cube['ints']=np.array([0,0,0])
cube['latvec']=np.zeros((3,3))
for i in range(0,3):
spl=inpf.readline().split()
cube['ints'][i]=int(spl[0])
cube['latvec'][i,:]=np.array(spl[1:],dtype=float)
natoms=cube['natoms']
cube['atomname']=[]
cube['atomxyz']=np.zeros((natoms,3))
for i in range(0,natoms):
spl=inpf.readline().split()
cube['atomname'].append(spl[0])
cube['atomxyz'][i,:]=np.array(spl[2:],dtype=float)
cube['data']=np.zeros(cube['ints'])
vector=[]
while True:
spl=inpf.readline().split()
if len(spl) < 1:
break
vector.extend(map(float,spl))
count=0
for x in range(0,cube['ints'][0]):
for y in range(0,cube['ints'][1]):
for z in range(0,cube['ints'][2]):
cube['data'][x,y,z]=vector[count]
count+=1
#if count >= nread:
# break;
#if count>=nread:
# break
#if count >= nread:
# break
return cube
#####################################
def write_cube(cube, outf):
if type(outf)==str: outf = open(outf,'w')
outf.write(cube['comment']+'\n')
outf.write(cube['type']+'\n')
outf.write(str(cube['natoms'])+" {} {} {}".format(*cube['origin']))
outf.write("\n")
for i in range(0,3):
outf.write("%i "%cube['ints'][i])
outf.write(" % 20.16e % 20.16e % 20.16e \n"%(cube['latvec'][i,0],cube['latvec'][i,1],cube['latvec'][i,2]))
natoms=cube['natoms']
for i in range(0,natoms):
outf.write("%s 0.0 "%cube['atomname'][i])
outf.write(" % 20.16e % 20.16e % 20.16e \n"%(cube['atomxyz'][i,0],cube['atomxyz'][i,1],cube['atomxyz'][i,2]))
count=0
for x in range(0,cube['ints'][0]):
for y in range(0,cube['ints'][1]):
for z in range(0,cube['ints'][2]):
outf.write("% 20.16e "%cube['data'][x,y,z])
count+=1
if count%5==0:
outf.write('\n')
outf.write('\n')
#####################################
def write_xsf(cube,outf):
if type(outf)==str: outf=open(outf,'w')
outf.write("CRYSTAL\n")
outf.write("PRIMVEC\n")
natoms=cube['natoms']
for i in range(0,3):
npts=cube['ints'][i]
outf.write(" %g %g %g \n"%(npts*cube['latvec'][i,0]*ANG,npts*cube['latvec'][i,1]*ANG,npts*cube['latvec'][i,2]*ANG))
outf.write("PRIMCOORD\n")
outf.write("%i 1\n"%natoms)
for i in range(0,natoms):
outf.write("%s "%cube['atomname'][i])
outf.write(" %g %g %g \n"%(cube['atomxyz'][i,0]*ANG,cube['atomxyz'][i,1]*ANG,cube['atomxyz'][i,2]*ANG))
outf.write("BEGIN_BLOCK_DATAGRID_3D\n cube_file_conversion \n")
outf.write("BEGIN_DATAGRID_3D\n")
outf.write("%i %i %i\n"%(cube['ints'][0],cube['ints'][1],cube['ints'][2]))
outf.write("0.0 0.0 0.0\n")
for i in range(0,3):
npts=cube['ints'][i]
outf.write(" %g %g %g \n"%(npts*cube['latvec'][i,0]*ANG,npts*cube['latvec'][i,1]*ANG,npts*cube['latvec'][i,2]*ANG))
count=0
for z in range(0,cube['ints'][2]):
for y in range(0,cube['ints'][1]):
for x in range(0,cube['ints'][0]):
outf.write("%g "%cube['data'][x,y,z])
count+=1
if count%5==0:
outf.write('\n')
outf.write('\n')
outf.write("END_DATAGRID_3D\n")
outf.write("END_BLOCK_DATAGRID_3D\n")
#####################################
def integrate(cube):
"""Numerically integrate the density.
    Approximates integral by simple sum."""
vol=abs(det(cube['latvec']))
return np.sum(cube['data'])*vol
#####################################
def integrate_abs(cube):
"""Numerically integrate the absolute value of the density.
    Approximates integral by simple sum."""
vol=abs(det(cube['latvec']))
return np.sum(abs(cube['data']))*vol
#####################################
def cabs(cube):
"""Take absolute value of cube data."""
newcube = deepcopy(cube)
newcube['data']=abs(newcube['data'])
return newcube
#####################################
def normalize_abs(cube,Nelec=1):
"""Normalize the density so the integral over all space yeilds Nelec.
Appoximates integral by simple sum."""
vol=abs(det(cube['latvec']))
norm=np.sum(abs(cube['data']))*vol
cube['data']*=(float(Nelec)/norm)
return cube
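# A minimal usage sketch (the file name is illustrative): after normalize_abs,
# the absolute integral should come back as roughly Nelec.
#
#     cube = read_cube('density.cube')
#     cube = normalize_abs(cube, Nelec=8)
#     print(integrate_abs(cube))   # ~8.0, up to grid discretisation error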
#####################################
def freq_cutoff(cube,freq_cutoff=0.90):
""" Cutoff frequencies of the signal with size freq_cutoff * the
maximum or higher, in place."""
# Frequency distribution is fastest in middle, slowest at end, and positive in
# first half.
max_val = cube['data'].max()
fft = np.fft.fftn(cube['data'])
endfreqs = np.array([int(round(s*freq_cutoff/2.)) for s in fft.shape])
print("Cutting off ",)
for si,s in enumerate(fft.shape): print("%d "%(s - 2*int(round(endfreqs[si]))),)
print("frequencies, out of ",)
print("%d %d %d"%fft.shape)
for dim in range(fft.ndim):
fft = fft.swapaxes(dim,fft.ndim-1)
for d1 in fft:
for d2 in d1:
d2[endfreqs[dim]:-endfreqs[dim]+1] = 0.2*d2[endfreqs[dim]:-endfreqs[dim]+1]
fft = fft.swapaxes(dim,fft.ndim-1)
cube['data'] = np.fft.ifftn(fft)
if abs(cube['data'].imag).max() > 1e-16:
print("Warning, inverting FFT may not be completely real!")
cube['data'] = cube['data'].real
cube['data'] *= (max_val / cube['data'].max())
#####################################
def butter_cutoff(cube,crit_freq=1,order=4):
"""
Simple wrapper for scipy routines to perform Butterworth low pass filter.
crit_freq = point where gain drops to 1/sqrt(2) of passband. 1 is defined as
the Nyquist frequency.
order = Order of Butterworth function, which controls steepness.
"""
b,a = butter(order, crit_freq)
cube['data'] = lfilter(b,a,cube['data'])
#cube['data'] = lfilter(b,a,cube['data'],0)
#cube['data'] = lfilter(b,a,cube['data'],1)
#cube['data'] = lfilter(b,a,cube['data'],2)
#####################################
def gaussian_averager(cube,sigma=3,nbr_dist=1,repeat=1):
""" Average each point in the cube file with blob_range neighbors in each
direction, weighted by a Gaussian with SD sigma."""
nd = nbr_dist
total_steps = 0.
for ii in range(-nd,nd+1):
for jj in range(-(nd-abs(ii)),(nd-abs(ii))+1):
for kk in range(-(nd-abs(ii)-abs(jj)),(nd-abs(ii)-abs(jj))+1):
total_steps += 1
total_steps *= repeat
done_steps = 0.
for iteration in range(repeat):
new = np.zeros(cube['data'].shape)
def wf(x):
return np.exp(-x**2/(2*sigma**2))
for ii in range(-nd,nd+1):
wi = wf(ii)
for jj in range(-(nd-abs(ii)),(nd-abs(ii))+1):
wj = wf(jj)
for kk in range(-(nd-abs(ii)-abs(jj)),(nd-abs(ii)-abs(jj))+1):
wk = wf(kk)
print("Finished {:5.2%}. ".format(done_steps/total_steps))
for i in range(cube['data'].shape[0]):
ip = (i+ii)%cube['data'].shape[0]
for j in range(cube['data'].shape[1]):
jp = (j+jj)%cube['data'].shape[1]
for k in range(cube['data'].shape[2]):
kp = (k+kk)%cube['data'].shape[2]
new[i,j,k] += cube['data'][ip,jp,kp]*wi*wj*wk
done_steps += 1
cube['data'] = new
#####################################
def sub_cubes(poscube,negcube):
"""Subtract two cube files.
Note: you may need to normalize these appropriately first."""
subcube = deepcopy(poscube)
subcube['data'] -= negcube['data']
return subcube
#####################################
def add_cubes(cube1,cube2,N1=1,N2=1):
"""Add two cube files.
Note: you may need to normalize these appropriately first."""
addcube = deepcopy(cube1)
addcube['data'] += cube2['data']
#addcube['data'] /= abs(addcube['data']).sum()
return addcube
#####################################
def mul_cubes(cube1,cube2,N1=1,N2=1):
"""Multiply two cube files, pointwise.
Note: you may need to normalize these appropriately first."""
mulcube = deepcopy(cube1)
mulcube['data'] *= cube2['data']
#mulcube['data'] /= abs(mulcube['data']).sum()
return mulcube
#####################################
# Used for interpolation scheme
def nearest(point,cube):
"""Find the value in the cube file located closest to point."""
a = np.array([ np.dot(cube['latvec'][i],point)/np.dot(cube['latvec'][i],cube['latvec'][i])
for i in range(3) ]).round()
#print a % cube['ints']
return cube['data'][tuple(map(int,a % cube['ints']))]
#####################################
# Used for interpolation scheme
def linear(point,cube):
"""Compute the linear extrapolation to the point using closest available
points."""
latvec = cube['latvec']
ints = cube['ints']
# point in the basis of latvec.
pnb = np.array([ np.dot(latvec[i],point)/np.dot(latvec[i],latvec[i])
for i in range(3) ])
# Round up and down to get points on the lattice.
neighbors = [(ax,ay,az)
for ax in map(int,[np.floor(pnb[0]),np.ceil(pnb[0])])
for ay in map(int,[np.floor(pnb[1]),np.ceil(pnb[1])])
for az in map(int,[np.floor(pnb[2]),np.ceil(pnb[2])])]
vals = np.array([cube['data'][tuple(n)] for n in (neighbors%ints)])
vals = vals.reshape(2,2,2)
tvol = abs(det(latvec))
vols = np.array([abs(det((n-pnb)*latvec)) for n in neighbors]).reshape(2,2,2)
wght = np.zeros(vals.shape)
# Weights for average are the subvolumes of opposing corner.
for i in range(2):
for j in range(2):
for k in range(2):
wght[i,j,k] = vols[1-i,1-j,1-k]/tvol
return np.sum(wght*vals)
#####################################
def interp_cube(cube, pos, res=(10,10), method='nearest', atrad=0.0):
"""Interpolate cube in plane defined by three points, pos, with res points, using
method to interpolate, and ensuring atrad radius around each atom is included.
By design, tries to put the longer axis on the x axis. To change this, you'd
need to switch pvm and pvo."""
latvec = cube['latvec']
ints = cube['ints']
# Classify points to show all atoms while minimizing extra space.
pidx = (np.array((pos[1]-pos[2], pos[2]-pos[0], pos[0]-pos[1]))**2).sum(axis=1).argsort()
orig = pos[pidx[ 1]]
pvm = pos[pidx[-1]] - orig # Main plotting axis (static).
pvo = pos[pidx[ 0]] - orig # Orthogonal plotting axis.
    # Idea is that normally you'd want to orthogonalize the shortest axis.
# Moving the axis is what creates extra space in the plot.
# Orthogonalize orth. axis, since this is how most plots are.
pvo -= np.sum(pvo*pvm)/np.sum(pvm**2) * pvm
# Make it a square plot.
#pvo *= (sum(pvm**2)/sum(pvo**2))**.5
# Add buffer to contain all of atom. Extra space in each plot dir.
buffm = atrad/(np.sum(pvm**2))**.5 * pvm
buffo = atrad/(np.sum(pvo**2))**.5 * pvo
pvm += 2*buffm
pvo += 2*buffo
e1 = pvm / float(res[0]-1)
e2 = pvo / float(res[1]-1)
# Domain of output.
odom = np.array( [ i*e1 + j*e2
for i in range(res[0])
for j in range(res[1]) ])
odom += orig - buffm - buffo
#odom.shape = (cumprod(odom.shape[:2])[-1], odom.shape[2])
# Wrap at zone boundaries.
basis = latvec * ints
for j in range(len(odom)):
odom[j] = np.sum( [(np.dot(basis[i],odom[j])/np.dot(basis[i],basis[i]))%1*basis[i]
for i in range(3)], axis=1 )
# Compute the closest point of the atoms to the plane, and how far away they
# are.
atpos = [a - orig + buffm + buffo for a in cube['atomxyz']]
# includes periodic images of neighboring cells.
atpos += [a-b for b in basis for a in atpos] + [a+b for b in basis for a in atpos]
acoor = [( np.dot(pvm,a) / np.dot(pvm,pvm)**.5,
np.dot(pvo,a) / np.dot(pvo,pvo)**.5 )
for a in atpos]
adist = [np.dot( np.cross(pvm,pvo)/(np.dot(pvm,pvm)*np.dot(pvo,pvo))**.5, a )
for a in atpos]
X = np.linspace(0,np.sum(pvm**2)**.5,res[0])
Y = np.linspace(0,np.sum(pvo**2)**.5,res[1])
if method=='nearest':
Z = np.array([nearest(point,cube) for point in odom]).reshape(res)
elif method=='linear':
Z = np.array([linear(point,cube) for point in odom]).reshape(res)
else:
print('Interpolation method is not implemented yet.')
return {'points':(X,Y), 'data':Z, 'acoor':np.array(acoor), 'adist':np.array(adist)}
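# A minimal plotting sketch (file name and matplotlib usage are illustrative,
# not part of this module): slice the density through the first three atoms
# and colour the in-plane atom positions by their distance from the plane.
#
#     import matplotlib.pyplot as plt
#     cube = read_cube('density.cube')
#     plane = [cube['atomxyz'][i] for i in (0, 1, 2)]
#     sl = interp_cube(cube, plane, res=(50, 50), method='linear', atrad=1.0)
#     X, Y = sl['points']
#     plt.pcolormesh(X, Y, sl['data'].T)
#     plt.scatter(sl['acoor'][:, 0], sl['acoor'][:, 1], c=sl['adist'])
#     plt.show()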
if __name__=="__main__":
implemented = ['add','sub','xsf','integrate_abs']
if len(sys.argv) < 2:
raise AssertionError("""
Usage: python cube.py <operation> <list of input cubes>
Operations: {}.""".format(implemented))
if sys.argv[1] == "add":
if len(sys.argv) != 4:
raise AssertionError("Add needs two input cubes.")
outfn = input("Output cube: ")
with open(outfn,'w') as outf:
write_cube(
add_cubes(
read_cube(open(sys.argv[2],'r')),
read_cube(open(sys.argv[3],'r'))
),
outf
)
elif sys.argv[1] == "sub":
if len(sys.argv) != 4:
raise AssertionError("Subtract needs two input cubes.")
outfn = input("Output cube: ")
with open(outfn,'w') as outf:
write_cube(
sub_cubes(
read_cube(open(sys.argv[2],'r')),
read_cube(open(sys.argv[3],'r'))
),
outf
)
elif sys.argv[1] == "xsf":
if len(sys.argv) != 3:
raise AssertionError("Converter needs exactly one input cube.")
outfn = input("Output xsf: ")
with open(outfn,'w') as outf:
write_xsf(
read_cube(open(sys.argv[2],'r')),
outf
)
elif sys.argv[1] == "integrate_abs":
if len(sys.argv) != 3:
raise AssertionError("Integration needs exactly one input cube.")
print("Abs. integral: ",integrate_abs(read_cube(open(sys.argv[2],'r'))))
else:
raise AssertionError("""
Sorry, '{}' keyword isn't implemented.
Implemented operations are {}.
It's probably trivial to add this operation yourself, you should do it and
push the result!""".format(sys.argv[1],implemented)
)
|
bbusemeyer/busempyer
|
busempyer/cubetools.py
|
Python
|
gpl-2.0
| 14,521
|
[
"CRYSTAL",
"Gaussian"
] |
0e9bf7cbb429660f6d06c26d251f3a8bb57d96629e5947493a3328634ac5cca5
|
""" DIRACAccountingCommand
The DIRACAccountingCommand class is a command class to
interrogate the DIRAC Accounting.
"""
# FIXME: NOT Usable ATM
# missing doNew, doCache, doMaster
from datetime import datetime, timedelta
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.JEncode import strToIntDict
from DIRAC.AccountingSystem.Client.ReportsClient import ReportsClient
from DIRAC.ResourceStatusSystem.Command.Command import Command
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
################################################################################
################################################################################
class DIRACAccountingCommand(Command):
def __init__(self, args=None, clients=None):
super(DIRACAccountingCommand, self).__init__(args, clients)
if "ReportsClient" in self.apis:
self.rClient = self.apis["ReportsClient"]
else:
self.rClient = ReportsClient()
def doCommand(self):
"""
Returns jobs accounting info for sites in the last 24h
Args:
- args[0]: string - should be a ValidElement
- args[1]: string - should be the name of the ValidElement
- args[2]: string - should be 'Job' or 'Pilot' or 'DataOperation'
or 'WMSHistory' (??)
- args[3]: string - should be the plot to generate (e.g. CPUEfficiency)
- args[4]: dictionary - e.g. {'Format': 'LastHours', 'hours': 24}
- args[5]: string - should be the grouping
- args[6]: dictionary - optional conditions
"""
granularity = self.args[0]
name = self.args[1]
accounting = self.args[2]
plot = self.args[3]
period = self.args[4]
grouping = self.args[5]
if period["Format"] == "LastHours":
fromT = datetime.utcnow() - timedelta(hours=period["hours"])
toT = datetime.utcnow()
elif period["Format"] == "Periods":
# TODO
pass
if self.args[6] is not None:
conditions = self.args[6]
else:
conditions = {}
if accounting == "Job" or accounting == "Pilot":
if granularity == "Resource":
conditions["GridCE"] = [name]
elif granularity == "Service":
conditions["Site"] = [name.split("@").pop()]
elif granularity == "Site":
conditions["Site"] = [name]
else:
return S_ERROR("%s is not a valid granularity" % granularity)
elif accounting == "DataOperation":
conditions["Destination"] = [name]
return self.rClient.getReport(accounting, plot, fromT, toT, conditions, grouping)
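# Illustrative positional args for DIRACAccountingCommand, matching the layout
# documented in doCommand above (the site name is a placeholder):
#
#     args = ('Site', 'LCG.CERN.ch', 'Job', 'CPUEfficiency',
#             {'Format': 'LastHours', 'hours': 24}, 'Site', None)
#     result = DIRACAccountingCommand(args).doCommand()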
################################################################################
################################################################################
class TransferQualityCommand(Command):
def __init__(self, args=None, clients=None):
super(TransferQualityCommand, self).__init__(args, clients)
if "ReportsClient" in self.apis:
self.rClient = self.apis["ReportsClient"]
else:
self.rClient = ReportsClient()
def doCommand(self):
"""
Return getQuality from DIRAC's accounting ReportsClient
`args`: a tuple
- args[0]: string: should be a ValidElement
- args[1]: string should be the name of the ValidElement
- args[2]: optional dateTime object: a "from" date
- args[3]: optional dateTime object: a "to" date
:returns:
{'Result': None | a float between 0.0 and 100.0}
"""
if "fromDate" not in self.args:
fromDate = datetime.utcnow() - timedelta(hours=2)
else:
fromDate = self.args["fromDate"]
if "toDate" not in self.args:
toDate = datetime.utcnow()
else:
toDate = self.args["toDate"]
if "name" not in self.args:
return S_ERROR("name not specified")
name = self.args["name"]
results = self.rClient.getReport(
"DataOperation",
"Quality",
fromDate,
toDate,
{"OperationType": "putAndRegister", "Destination": [name]},
"Channel",
)
if not results["OK"]:
return results
pr_q_d = {channel: strToIntDict(value) for channel, value in results["Value"]["data"].items()}
# FIXME: WHAT the hell is this doing ?
values = []
if len(pr_q_d) == 1:
for k in pr_q_d:
for n in pr_q_d[k].values():
values.append(n)
res = sum(values) / len(values) # FIXME: should convert to int?
else:
for n in pr_q_d["Total"].values():
values.append(n)
res = sum(values) / len(values) # FIXME: should convert to int?
return S_OK(res)
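# Illustrative args dict for TransferQualityCommand (the SE name is a
# placeholder); fromDate/toDate are optional and default to the last 2 hours:
#
#     args = {'name': 'CERN-USER',
#             'fromDate': datetime.utcnow() - timedelta(days=1),
#             'toDate': datetime.utcnow()}
#     result = TransferQualityCommand(args).doCommand()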
################################################################################
################################################################################
#
# class TransferQualityCached_Command(Command):
#
# __APIs__ = [ 'ResourceManagementClient' ]
#
# def doCommand(self):
# """
# Returns transfer quality as it is cached
#
# :attr:`args`:
# - args[0]: string: should be a ValidElement
#
# - args[1]: string should be the name of the ValidElement
#
# :returns:
# {'Result': None | a float between 0.0 and 100.0}
# """
#
# super(TransferQualityCached_Command, self).doCommand()
# self.apis = initAPIs( self.__APIs__, self.apis )
#
# name = self.args[1]
#
# try:
# res = self.apis[ 'ResourceManagementClient' ].getCachedResult(name, 'TransferQualityEverySEs', 'TQ', 'NULL')
# if res == []:
# return {'Result':None}
# except:
# gLogger.exception("Exception when calling ResourceManagementClient for %s" %(name))
# return {'Result':'Unknown'}
#
# return {'Result':float(res[0])}
#
# doCommand.__doc__ = Command.doCommand.__doc__ + doCommand.__doc__
#
################################################################################
################################################################################
class CachedPlotCommand(Command):
def __init__(self, args=None, clients=None):
super(CachedPlotCommand, self).__init__(args, clients)
if "ResourceManagementClient" in self.apis:
self.rmClient = self.apis["ResourceManagementClient"]
else:
self.rmClient = ResourceManagementClient()
def doCommand(self):
"""
Returns transfer quality plot as it is cached in the accounting cache.
:attr:`args`:
- args[0]: string - should be a ValidElement
- args[1]: string - should be the name of the ValidElement
- args[2]: string - should be the plot type
- args[3]: string - should be the plot name
:returns:
a plot
"""
if "element" not in self.args:
return S_ERROR("element no specified")
element = self.args["element"]
if "name" not in self.args:
return S_ERROR("Name no specified")
name = self.args["name"]
if "plotType" not in self.args:
return S_ERROR("plotType no specified")
plotType = self.args["plotType"]
if "plotName" not in self.args:
return S_ERROR("plotName no specified")
plotName = self.args["plotName"]
        # FIXME: we no longer have Service granularity!
if element == "Service":
name = name.split("@")[1]
meta = {"columns": "Result"}
results = self.rmClient.selectAccountingCache(name=name, plotType=plotType, plotName=plotName, meta=meta)
if not results["OK"]:
return results
results = results["Value"]
if results == []:
results = {"data": {}, "granularity": 900}
else:
# FIXME: WTH is an eval doing here !!!!
results = eval(results[0])
return results
################################################################################
################################################################################
class TransferQualityFromCachedPlotCommand(Command):
def __init__(self, args=None, clients=None):
super(TransferQualityFromCachedPlotCommand, self).__init__(args, clients)
if "ResourceManagementClient" in self.apis:
self.rmClient = self.apis["ResourceManagementClient"]
else:
self.rmClient = ResourceManagementClient()
def doCommand(self):
"""
Returns transfer quality from the plot cached in the accounting cache.
:attr:`args`:
- args[0]: string: should be a ValidElement
- args[1]: string should be the name of the ValidElement
:returns:
{'Result': None | a float between 0.0 and 100.0}
"""
if "name" not in self.args:
return S_ERROR("Name no specified")
name = self.args["name"]
if "plotType" not in self.args:
return S_ERROR("plotType no specified")
plotType = self.args["plotType"]
if "plotName" not in self.args:
return S_ERROR("plotName no specified")
plotName = self.args["plotName"]
meta = {"columns": "Result"}
results = self.rmClient.selectAccountingCache(name=name, plotType=plotType, plotName=plotName, meta=meta)
if not results["OK"]:
return results
results = results["Value"]
if results == []:
results = None
else:
# FIXME: remove the eval from here !!
results = eval(results[0][0])
num, den = 0, 0
se = list(results["data"])[0]
num = num + len(results["data"][se])
den = den + sum(results["data"][se].values())
meanQuality = den / num
results = meanQuality
return S_OK(results)
|
ic-hep/DIRAC
|
src/DIRAC/ResourceStatusSystem/Command/DIRACAccountingCommand.py
|
Python
|
gpl-3.0
| 10,229
|
[
"DIRAC"
] |
1ec1501d15d097cf25c68774b49dbf4047c891197efd940621dd5eb9e511b61b
|
# coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import os.path
import random
import re
import traceback
from .common import InfoExtractor, SearchInfoExtractor
from ..compat import (
compat_chr,
compat_HTTPError,
compat_parse_qs,
compat_str,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
)
from ..jsinterp import JSInterpreter
from ..utils import (
ExtractorError,
clean_html,
dict_get,
float_or_none,
int_or_none,
mimetype2ext,
parse_codecs,
parse_duration,
qualities,
remove_start,
smuggle_url,
str_or_none,
str_to_int,
try_get,
unescapeHTML,
unified_strdate,
unsmuggle_url,
update_url_query,
url_or_none,
urlencode_postdata,
urljoin,
)
def parse_qs(url):
return compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
_LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
_CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
_TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'
_NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
_PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM)'
def _login(self):
"""
Attempt to log in to YouTube.
True is returned if successful or skipped.
False is returned if login failed.
If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
"""
username, password = self._get_login_info()
# No authentication to be performed
if username is None:
if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return True
login_page = self._download_webpage(
self._LOGIN_URL, None,
note='Downloading login page',
errnote='unable to fetch login page', fatal=False)
if login_page is False:
return
login_form = self._hidden_inputs(login_page)
def req(url, f_req, note, errnote):
data = login_form.copy()
data.update({
'pstMsg': 1,
'checkConnection': 'youtube',
'checkedDomains': 'youtube',
'hl': 'en',
'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
'f.req': json.dumps(f_req),
'flowName': 'GlifWebSignIn',
'flowEntry': 'ServiceLogin',
# TODO: reverse actual botguard identifier generation algo
'bgRequest': '["identifier",""]',
})
return self._download_json(
url, None, note=note, errnote=errnote,
transform_source=lambda s: re.sub(r'^[^[]*', '', s),
fatal=False,
data=urlencode_postdata(data), headers={
'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
'Google-Accounts-XSRF': 1,
})
def warn(message):
self._downloader.report_warning(message)
lookup_req = [
username,
None, [], None, 'US', None, None, 2, False, True,
[
None, None,
[2, 1, None, 1,
'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
None, [], 4],
1, [None, None, []], None, None, None, True
],
username,
]
lookup_results = req(
self._LOOKUP_URL, lookup_req,
'Looking up account info', 'Unable to look up account info')
if lookup_results is False:
return False
user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
if not user_hash:
warn('Unable to extract user hash')
return False
challenge_req = [
user_hash,
None, 1, None, [1, None, None, None, [password, None, True]],
[
None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
1, [None, None, []], None, None, None, True
]]
challenge_results = req(
self._CHALLENGE_URL, challenge_req,
'Logging in', 'Unable to log in')
if challenge_results is False:
return
login_res = try_get(challenge_results, lambda x: x[0][5], list)
if login_res:
login_msg = try_get(login_res, lambda x: x[5], compat_str)
warn(
'Unable to login: %s' % 'Invalid password'
if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg)
return False
res = try_get(challenge_results, lambda x: x[0][-1], list)
if not res:
warn('Unable to extract result entry')
return False
login_challenge = try_get(res, lambda x: x[0][0], list)
if login_challenge:
challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
if challenge_str == 'TWO_STEP_VERIFICATION':
# SEND_SUCCESS - TFA code has been successfully sent to phone
# QUOTA_EXCEEDED - reached the limit of TFA codes
status = try_get(login_challenge, lambda x: x[5], compat_str)
if status == 'QUOTA_EXCEEDED':
warn('Exceeded the limit of TFA codes, try later')
return False
tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
if not tl:
warn('Unable to extract TL')
return False
tfa_code = self._get_tfa_info('2-step verification code')
if not tfa_code:
warn(
'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
'(Note that only TOTP (Google Authenticator App) codes work at this time.)')
return False
tfa_code = remove_start(tfa_code, 'G-')
tfa_req = [
user_hash, None, 2, None,
[
9, None, None, None, None, None, None, None,
[None, tfa_code, True, 2]
]]
tfa_results = req(
self._TFA_URL.format(tl), tfa_req,
'Submitting TFA code', 'Unable to submit TFA code')
if tfa_results is False:
return False
tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
if tfa_res:
tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
warn(
'Unable to finish TFA: %s' % 'Invalid TFA code'
if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg)
return False
check_cookie_url = try_get(
tfa_results, lambda x: x[0][-1][2], compat_str)
else:
CHALLENGES = {
'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
}
challenge = CHALLENGES.get(
challenge_str,
'%s returned error %s.' % (self.IE_NAME, challenge_str))
warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge)
return False
else:
check_cookie_url = try_get(res, lambda x: x[2], compat_str)
if not check_cookie_url:
warn('Unable to extract CheckCookie URL')
return False
check_cookie_results = self._download_webpage(
check_cookie_url, None, 'Checking cookie', fatal=False)
if check_cookie_results is False:
return False
if 'https://myaccount.google.com/' not in check_cookie_results:
warn('Unable to log in')
return False
return True
def _initialize_consent(self):
cookies = self._get_cookies('https://www.youtube.com/')
if cookies.get('__Secure-3PSID'):
return
consent_id = None
consent = cookies.get('CONSENT')
if consent:
if 'YES' in consent.value:
return
consent_id = self._search_regex(
r'PENDING\+(\d+)', consent.value, 'consent', default=None)
if not consent_id:
consent_id = random.randint(100, 999)
self._set_cookie('.youtube.com', 'CONSENT', 'YES+cb.20210328-17-p0.en+FX+%s' % consent_id)
def _real_initialize(self):
self._initialize_consent()
if self._downloader is None:
return
if not self._login():
return
_DEFAULT_API_DATA = {
'context': {
'client': {
'clientName': 'WEB',
'clientVersion': '2.20201021.03.00',
}
},
}
_YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
_YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
_YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)'
def _call_api(self, ep, query, video_id, fatal=True):
data = self._DEFAULT_API_DATA.copy()
data.update(query)
return self._download_json(
'https://www.youtube.com/youtubei/v1/%s' % ep, video_id=video_id,
note='Downloading API JSON', errnote='Unable to download API page',
data=json.dumps(data).encode('utf8'), fatal=fatal,
headers={'content-type': 'application/json'},
query={'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'})
def _extract_yt_initial_data(self, video_id, webpage):
return self._parse_json(
self._search_regex(
(r'%s\s*%s' % (self._YT_INITIAL_DATA_RE, self._YT_INITIAL_BOUNDARY_RE),
self._YT_INITIAL_DATA_RE), webpage, 'yt initial data'),
video_id)
def _extract_ytcfg(self, video_id, webpage):
return self._parse_json(
self._search_regex(
r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
default='{}'), video_id, fatal=False) or {}
def _extract_video(self, renderer):
video_id = renderer['videoId']
title = try_get(
renderer,
(lambda x: x['title']['runs'][0]['text'],
lambda x: x['title']['simpleText']), compat_str)
description = try_get(
renderer, lambda x: x['descriptionSnippet']['runs'][0]['text'],
compat_str)
duration = parse_duration(try_get(
renderer, lambda x: x['lengthText']['simpleText'], compat_str))
view_count_text = try_get(
renderer, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
view_count = str_to_int(self._search_regex(
r'^([\d,]+)', re.sub(r'\s', '', view_count_text),
'view count', default=None))
uploader = try_get(
renderer,
(lambda x: x['ownerText']['runs'][0]['text'],
lambda x: x['shortBylineText']['runs'][0]['text']), compat_str)
return {
'_type': 'url',
'ie_key': YoutubeIE.ie_key(),
'id': video_id,
'url': video_id,
'title': title,
'description': description,
'duration': duration,
'view_count': view_count,
'uploader': uploader,
}
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com'
_INVIDIOUS_SITES = (
# invidious-redirect websites
r'(?:www\.)?redirect\.invidious\.io',
r'(?:(?:www|dev)\.)?invidio\.us',
# Invidious instances taken from https://github.com/iv-org/documentation/blob/master/Invidious-Instances.md
r'(?:(?:www|no)\.)?invidiou\.sh',
r'(?:(?:www|fi)\.)?invidious\.snopyta\.org',
r'(?:www\.)?invidious\.kabi\.tk',
r'(?:www\.)?invidious\.13ad\.de',
r'(?:www\.)?invidious\.mastodon\.host',
r'(?:www\.)?invidious\.zapashcanon\.fr',
r'(?:www\.)?(?:invidious(?:-us)?|piped)\.kavin\.rocks',
r'(?:www\.)?invidious\.tinfoil-hat\.net',
r'(?:www\.)?invidious\.himiko\.cloud',
r'(?:www\.)?invidious\.reallyancient\.tech',
r'(?:www\.)?invidious\.tube',
r'(?:www\.)?invidiou\.site',
r'(?:www\.)?invidious\.site',
r'(?:www\.)?invidious\.xyz',
r'(?:www\.)?invidious\.nixnet\.xyz',
r'(?:www\.)?invidious\.048596\.xyz',
r'(?:www\.)?invidious\.drycat\.fr',
r'(?:www\.)?inv\.skyn3t\.in',
r'(?:www\.)?tube\.poal\.co',
r'(?:www\.)?tube\.connect\.cafe',
r'(?:www\.)?vid\.wxzm\.sx',
r'(?:www\.)?vid\.mint\.lgbt',
r'(?:www\.)?vid\.puffyan\.us',
r'(?:www\.)?yewtu\.be',
r'(?:www\.)?yt\.elukerio\.org',
r'(?:www\.)?yt\.lelux\.fi',
r'(?:www\.)?invidious\.ggc-project\.de',
r'(?:www\.)?yt\.maisputain\.ovh',
r'(?:www\.)?ytprivate\.com',
r'(?:www\.)?invidious\.13ad\.de',
r'(?:www\.)?invidious\.toot\.koeln',
r'(?:www\.)?invidious\.fdn\.fr',
r'(?:www\.)?watch\.nettohikari\.com',
r'(?:www\.)?invidious\.namazso\.eu',
r'(?:www\.)?invidious\.silkky\.cloud',
r'(?:www\.)?invidious\.exonip\.de',
r'(?:www\.)?invidious\.riverside\.rocks',
r'(?:www\.)?invidious\.blamefran\.net',
r'(?:www\.)?invidious\.moomoo\.de',
r'(?:www\.)?ytb\.trom\.tf',
r'(?:www\.)?yt\.cyberhost\.uk',
r'(?:www\.)?kgg2m7yk5aybusll\.onion',
r'(?:www\.)?qklhadlycap4cnod\.onion',
r'(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion',
r'(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion',
r'(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion',
r'(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion',
r'(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p',
r'(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion',
r'(?:www\.)?w6ijuptxiku4xpnnaetxvnkc5vqcdu7mgns2u77qefoixi63vbvnpnqd\.onion',
r'(?:www\.)?kbjggqkzv65ivcqj6bumvp337z6264huv5kpkwuv6gu5yjiskvan7fad\.onion',
r'(?:www\.)?grwp24hodrefzvjjuccrkw3mjq4tzhaaq32amf33dzpmuxe7ilepcmad\.onion',
r'(?:www\.)?hpniueoejy4opn7bc4ftgazyqjoeqwlvh2uiku2xqku6zpoa4bf5ruid\.onion',
)
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com|
(?:www\.)?deturl\.com/www\.youtube\.com|
(?:www\.)?pwnyoutube\.com|
(?:www\.)?hooktube\.com|
(?:www\.)?yourepeat\.com|
tube\.majestyc\.net|
%(invidious)s|
youtube\.googleapis\.com)/ # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY)
v=
)
))
|(?:
youtu\.be| # just youtu.be/xxxx
vid\.plus| # or vid.plus/xxxx
zwearz\.com/watch| # or zwearz.com/watch/xxxx
%(invidious)s
)/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
(?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?(1).+)? # if we found the ID, everything can follow
$""" % {
'invidious': '|'.join(_INVIDIOUS_SITES),
}
_PLAYER_INFO_RE = (
r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/player',
r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.js$',
)
_SUBTITLE_FORMATS = ('srv1', 'srv2', 'srv3', 'ttml', 'vtt')
_GEO_BYPASS = False
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'dislike_count': int,
'start_time': 1,
'end_time': 9,
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
'age_limit': 18,
},
'skip': 'Private video',
},
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=yZIXLfi8CZQ',
'note': 'Use the first video ID in the URL',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
'skip': 'format 141 not served anymore',
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
'duration': 244,
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
'abr': 129.495,
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# Controversy video
{
'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
'info_dict': {
'id': 'T4XJQO3qol8',
'ext': 'mp4',
'duration': 219,
'upload_date': '20100909',
'uploader': 'Amazing Atheist',
'uploader_id': 'TheAmazingAtheist',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
'title': 'Burning Everyone\'s Koran',
'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms \r\n\r\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
}
},
# Normal age-gate video (No vevo, embed allowed), available via embed page
{
'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'duration': 142,
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
'upload_date': '20140605',
'age_limit': 18,
},
},
{
# Age-gated video only available with authentication (unavailable
# via embed page workaround)
'url': 'XgnwCQzjau8',
'only_matching': True,
},
# video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
# YouTube Red ad is not captured for creator
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'duration': 266,
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
'creator': 'deadmau5',
'description': 'md5:6cbcd3a92ce1bc676fc4d6ab4ace2336',
'uploader': 'deadmau5',
'title': 'Deadmau5 - Some Chords (HD)',
'alt_title': 'Some Chords',
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'duration': 6085,
'upload_date': '20150827',
'uploader_id': 'olympic',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympic',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'duration': 85,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫ᄋᄅ',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
},
},
# url_encoded_fmt_stream_map is empty string
{
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
'uploader_id': 'spbelect',
'uploader': 'Наблюдатели Петербурга',
},
'params': {
'skip_download': 'requires avconv',
},
'skip': 'This live event has ended.',
},
# Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
{
'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
'info_dict': {
'id': 'FIl7x6_3R5Y',
'ext': 'webm',
'title': 'md5:7b81415841e02ecd4313668cde88737a',
'description': 'md5:116377fd2963b81ec4ce64b542173306',
'duration': 220,
'upload_date': '20150625',
'uploader_id': 'dorappi2000',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
'uploader': 'dorappi2000',
'formats': 'mincount:31',
},
'skip': 'not actual anymore',
},
# DASH manifest with segment_list
{
'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
'md5': '8ce563a1d667b599d21064e982ab9e31',
'info_dict': {
'id': 'CsmdDsKjzN8',
'ext': 'mp4',
'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
'uploader': 'Airtek',
'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '135', # bestvideo
},
'skip': 'This live event has ended.',
},
{
# Multifeed videos (multiple cameras), URL is for Main Camera
'url': 'https://www.youtube.com/watch?v=jvGDaLqkpTg',
'info_dict': {
'id': 'jvGDaLqkpTg',
'title': 'Tom Clancy Free Weekend Rainbow Whatever',
'description': 'md5:e03b909557865076822aa169218d6a5d',
},
'playlist': [{
'info_dict': {
'id': 'jvGDaLqkpTg',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Main Camera)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10643,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}, {
'info_dict': {
'id': '3AKt1R1aDnw',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 2)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10991,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}, {
'info_dict': {
'id': 'RtAMM00gpVc',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 3)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10995,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}, {
'info_dict': {
'id': '6N2fdlP3C5U',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 4)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10990,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}],
'params': {
'skip_download': True,
},
},
{
# Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
'info_dict': {
'id': 'gVfLd0zydlo',
'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
},
'playlist_count': 2,
'skip': 'Not multifeed anymore',
},
{
'url': 'https://vid.plus/FlRa-iH7PGw',
'only_matching': True,
},
{
'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
'only_matching': True,
},
{
# Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
# Also tests cut-off URL expansion in video description (see
# https://github.com/ytdl-org/youtube-dl/issues/1892,
# https://github.com/ytdl-org/youtube-dl/issues/8164)
'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
'info_dict': {
'id': 'lsguqyKfVQg',
'ext': 'mp4',
'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
'alt_title': 'Dark Walk - Position Music',
'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
'duration': 133,
'upload_date': '20151119',
'uploader_id': 'IronSoulElf',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
'uploader': 'IronSoulElf',
'creator': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
'track': 'Dark Walk - Position Music',
'artist': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
},
'params': {
'skip_download': True,
},
},
{
# Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
'only_matching': True,
},
{
# Video with yt:stretch=17:0
'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
'info_dict': {
'id': 'Q39EVAstoRM',
'ext': 'mp4',
'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
'description': 'md5:ee18a25c350637c8faff806845bddee9',
'upload_date': '20151107',
'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
'uploader': 'CH GAMER DROID',
},
'params': {
'skip_download': True,
},
'skip': 'This video does not exist.',
},
{
# Video with incomplete 'yt:stretch=16:'
'url': 'https://www.youtube.com/watch?v=FRhJzUSJbGI',
'only_matching': True,
},
{
# Video licensed under Creative Commons
'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
'info_dict': {
'id': 'M4gD1WSo5mA',
'ext': 'mp4',
'title': 'md5:e41008789470fc2533a3252216f1c1d1',
'description': 'md5:a677553cf0840649b731a3024aeff4cc',
'duration': 721,
'upload_date': '20150127',
'uploader_id': 'BerkmanCenter',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
'uploader': 'The Berkman Klein Center for Internet & Society',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
# Channel-like uploader_url
'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
'info_dict': {
'id': 'eQcmzGIKrzg',
'ext': 'mp4',
'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
'description': 'md5:13a2503d7b5904ef4b223aa101628f39',
'duration': 4060,
'upload_date': '20151119',
'uploader': 'Bernie Sanders',
'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
'only_matching': True,
},
{
# YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
'only_matching': True,
},
{
# Rental video preview
'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
'info_dict': {
'id': 'uGpuVWrhIzE',
'ext': 'mp4',
'title': 'Piku - Trailer',
'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
'upload_date': '20150811',
'uploader': 'FlixMatrix',
'uploader_id': 'FlixMatrixKaravan',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
'license': 'Standard YouTube License',
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# YouTube Red video with episode data
'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
'info_dict': {
'id': 'iqKdEhx-dD4',
'ext': 'mp4',
'title': 'Isolation - Mind Field (Ep 1)',
'description': 'md5:f540112edec5d09fc8cc752d3d4ba3cd',
'duration': 2085,
'upload_date': '20170118',
'uploader': 'Vsauce',
'uploader_id': 'Vsauce',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
'series': 'Mind Field',
'season_number': 1,
'episode_number': 1,
},
'params': {
'skip_download': True,
},
'expected_warnings': [
'Skipping DASH manifest',
],
},
{
# The following content has been identified by the YouTube community
# as inappropriate or offensive to some audiences.
'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
'info_dict': {
'id': '6SJNVb0GnPI',
'ext': 'mp4',
'title': 'Race Differences in Intelligence',
'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
'duration': 965,
'upload_date': '20140124',
'uploader': 'New Century Foundation',
'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
},
'params': {
'skip_download': True,
},
'skip': 'This video has been removed for violating YouTube\'s policy on hate speech.',
},
{
# itag 212
'url': '1t24XAntNCY',
'only_matching': True,
},
{
# geo restricted to JP
'url': 'sJL6WA-aGkQ',
'only_matching': True,
},
{
'url': 'https://invidio.us/watch?v=BaW_jenozKc',
'only_matching': True,
},
{
'url': 'https://redirect.invidious.io/watch?v=BaW_jenozKc',
'only_matching': True,
},
{
# from https://nitter.pussthecat.org/YouTube/status/1360363141947944964#m
'url': 'https://redirect.invidious.io/Yh0AhrY9GjA',
'only_matching': True,
},
{
# DRM protected
'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
'only_matching': True,
},
{
# Video with unsupported adaptive stream type formats
'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
'info_dict': {
'id': 'Z4Vy8R84T1U',
'ext': 'mp4',
'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'duration': 433,
'upload_date': '20130923',
'uploader': 'Amelia Putri Harwita',
'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
'formats': 'maxcount:10',
},
'params': {
'skip_download': True,
'youtube_include_dash_manifest': False,
},
'skip': 'not actual anymore',
},
{
# Youtube Music Auto-generated description
'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
'info_dict': {
'id': 'MgNrAu2pzNs',
'ext': 'mp4',
'title': 'Voyeur Girl',
'description': 'md5:7ae382a65843d6df2685993e90a8628f',
'upload_date': '20190312',
'uploader': 'Stephen - Topic',
'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
'artist': 'Stephen',
'track': 'Voyeur Girl',
'album': 'it\'s too much love to know my dear',
'release_date': '20190313',
'release_year': 2019,
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
'only_matching': True,
},
{
# invalid -> valid video id redirection
'url': 'DJztXj2GPfl',
'info_dict': {
'id': 'DJztXj2GPfk',
'ext': 'mp4',
'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
'description': 'md5:bf577a41da97918e94fa9798d9228825',
'upload_date': '20090125',
'uploader': 'Prochorowka',
'uploader_id': 'Prochorowka',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
'artist': 'Panjabi MC',
'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
'album': 'Beware of the Boys (Mundian To Bach Ke)',
},
'params': {
'skip_download': True,
},
'skip': 'Video unavailable',
},
{
# empty description results in an empty string
'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
'info_dict': {
'id': 'x41yOUIvK2k',
'ext': 'mp4',
'title': 'IMG 3456',
'description': '',
'upload_date': '20170613',
'uploader_id': 'ElevageOrVert',
'uploader': 'ElevageOrVert',
},
'params': {
'skip_download': True,
},
},
{
# with '};' inside yt initial data (see [1])
# see [2] for an example with '};' inside ytInitialPlayerResponse
# 1. https://github.com/ytdl-org/youtube-dl/issues/27093
# 2. https://github.com/ytdl-org/youtube-dl/issues/27216
'url': 'https://www.youtube.com/watch?v=CHqg6qOn4no',
'info_dict': {
'id': 'CHqg6qOn4no',
'ext': 'mp4',
'title': 'Part 77 Sort a list of simple types in c#',
'description': 'md5:b8746fa52e10cdbf47997903f13b20dc',
'upload_date': '20130831',
'uploader_id': 'kudvenkat',
'uploader': 'kudvenkat',
},
'params': {
'skip_download': True,
},
},
{
# another example of '};' in ytInitialData
'url': 'https://www.youtube.com/watch?v=gVfgbahppCY',
'only_matching': True,
},
{
'url': 'https://www.youtube.com/watch_popup?v=63RmMXCd_bQ',
'only_matching': True,
},
{
# https://github.com/ytdl-org/youtube-dl/pull/28094
'url': 'OtqTfy26tG0',
'info_dict': {
'id': 'OtqTfy26tG0',
'ext': 'mp4',
'title': 'Burn Out',
'description': 'md5:8d07b84dcbcbfb34bc12a56d968b6131',
'upload_date': '20141120',
'uploader': 'The Cinematic Orchestra - Topic',
'uploader_id': 'UCIzsJBIyo8hhpFm1NK0uLgw',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCIzsJBIyo8hhpFm1NK0uLgw',
'artist': 'The Cinematic Orchestra',
'track': 'Burn Out',
'album': 'Every Day',
                'release_date': None,
'release_year': None,
},
'params': {
'skip_download': True,
},
},
{
# controversial video, only works with bpctr when authenticated with cookies
'url': 'https://www.youtube.com/watch?v=nGC3D_FkCmg',
'only_matching': True,
},
{
# restricted location, https://github.com/ytdl-org/youtube-dl/issues/28685
'url': 'cBvYw8_A0vQ',
'info_dict': {
'id': 'cBvYw8_A0vQ',
'ext': 'mp4',
'title': '4K Ueno Okachimachi Street Scenes 上野御徒町歩き',
'description': 'md5:ea770e474b7cd6722b4c95b833c03630',
'upload_date': '20201120',
'uploader': 'Walk around Japan',
'uploader_id': 'UC3o_t8PzBmXf5S9b7GLx1Mw',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UC3o_t8PzBmXf5S9b7GLx1Mw',
},
'params': {
'skip_download': True,
},
},
]
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
'17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
'18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
'36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# 3D videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
# Apple HTTP Live Streaming
'91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
'212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
# Dash mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
'256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
'328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
# Dash webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
# itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
# Dash webm audio
'171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
'172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
# Dash webm audio with opus inside
'249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
'250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
'251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
# av01 video only formats sometimes served with "unknown" codecs
'394': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'395': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'396': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
}
@classmethod
def suitable(cls, url):
# Hack for lazy extractors until more generic solution is implemented
# (see #28780)
from .youtube import parse_qs
qs = parse_qs(url)
if qs.get('list', [None])[0]:
return False
return super(YoutubeIE, cls).suitable(url)
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._code_cache = {}
self._player_cache = {}
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
@classmethod
def _extract_player_info(cls, player_url):
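        # Try each known player URL pattern in turn and return the player id
        # captured by the first one that matches.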
for player_re in cls._PLAYER_INFO_RE:
id_m = re.search(player_re, player_url)
if id_m:
break
else:
raise ExtractorError('Cannot identify player %r' % player_url)
return id_m.group('id')
def _extract_signature_function(self, video_id, player_url, example_sig):
player_id = self._extract_player_info(player_url)
# Read from filesystem cache
func_id = 'js_%s_%s' % (
player_id, self._signature_cache_id(example_sig))
assert os.path.basename(func_id) == func_id
cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
if cache_spec is not None:
return lambda s: ''.join(s[i] for i in cache_spec)
if player_id not in self._code_cache:
self._code_cache[player_id] = self._download_webpage(
player_url, video_id,
note='Downloading player ' + player_id,
errnote='Download of %s failed' % player_url)
code = self._code_cache[player_id]
res = self._parse_sig_js(code)
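        # Derive a position mapping by running the JS function on a probe string
        # whose characters encode their own indices; the code point of each output
        # character then reveals which input position it was taken from.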
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = res(test_string)
cache_spec = [ord(c) for c in cache_res]
self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
return res
def _print_sig_code(self, func, example_sig):
def gen_sig_code(idxs):
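            # Turn the flat list of source indices into compact Python slice
            # expressions, collapsing runs with a constant step of +1/-1.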
def _genslice(start, end, step):
starts = '' if start == 0 else str(start)
ends = (':%d' % (end + step)) if end + step >= 0 else ':'
steps = '' if step == 1 else (':%d' % step)
return 's[%s%s%s]' % (starts, ends, steps)
step = None
            # Quell pyflakes warnings - start will be set when step is set
start = '(Never used)'
for i, prev in zip(idxs[1:], idxs[:-1]):
if step is not None:
if i - prev == step:
continue
yield _genslice(start, prev, step)
step = None
continue
if i - prev in [-1, 1]:
step = i - prev
start = prev
continue
else:
yield 's[%d]' % prev
if step is None:
yield 's[%d]' % i
else:
yield _genslice(start, i, step)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
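        # Locate the name of the signature-scrambling function in the player JS
        # and wrap it in a callable backed by JSInterpreter.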
funcname = self._search_regex(
(r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bm=(?P<sig>[a-zA-Z0-9$]{2})\(decodeURIComponent\(h\.s\)\)',
r'\bc&&\(c=(?P<sig>[a-zA-Z0-9$]{2})\(decodeURIComponent\(c\)\)',
r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\);[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\)',
r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
# Obsolete patterns
r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
jscode, 'Initial JS player signature function name', group='sig')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
if player_url.startswith('//'):
player_url = 'https:' + player_url
elif not re.match(r'https?://', player_url):
player_url = compat_urlparse.urljoin(
'https://www.youtube.com', player_url)
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
if self._downloader.params.get('youtube_print_sig_code'):
self._print_sig_code(func, s)
return func(s)
except Exception as e:
tb = traceback.format_exc()
raise ExtractorError(
'Signature extraction failed: ' + tb, cause=e)
def _mark_watched(self, video_id, player_response):
playback_url = url_or_none(try_get(
player_response,
lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']))
if not playback_url:
return
parsed_playback_url = compat_urlparse.urlparse(playback_url)
qs = compat_urlparse.parse_qs(parsed_playback_url.query)
        # The cpn generation algorithm is reverse engineered from base.js;
        # in practice it works even with a dummy cpn.
CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
qs.update({
'ver': ['2'],
'cpn': [cpn],
})
playback_url = compat_urlparse.urlunparse(
parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
self._download_webpage(
playback_url, video_id, 'Marking watched',
'Unable to mark watched', fatal=False)
@staticmethod
def _extract_urls(webpage):
# Embedded YouTube player
entries = [
unescapeHTML(mobj.group('url'))
for mobj in re.finditer(r'''(?x)
(?:
<iframe[^>]+?src=|
data-video-url=|
<embed[^>]+?src=|
embedSWF\(?:\s*|
<object[^>]+data=|
new\s+SWFObject\(
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
\1''', webpage)]
# lazyYT YouTube embed
entries.extend(list(map(
unescapeHTML,
re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
# Wordpress "YouTube Video Importer" plugin
matches = re.findall(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
entries.extend(m[-1] for m in matches)
return entries
@staticmethod
def _extract_url(webpage):
urls = YoutubeIE._extract_urls(webpage)
return urls[0] if urls else None
@classmethod
def extract_id(cls, url):
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
video_id = mobj.group(2)
return video_id
def _extract_chapters_from_json(self, data, video_id, duration):
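        # Chapter data lives deep inside the player overlay renderers; each
        # entry carries a start time in milliseconds and a title.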
chapters_list = try_get(
data,
lambda x: x['playerOverlays']
['playerOverlayRenderer']
['decoratedPlayerBarRenderer']
['decoratedPlayerBarRenderer']
['playerBar']
['chapteredPlayerBarRenderer']
['chapters'],
list)
if not chapters_list:
return
def chapter_time(chapter):
return float_or_none(
try_get(
chapter,
lambda x: x['chapterRenderer']['timeRangeStartMillis'],
int),
scale=1000)
chapters = []
for next_num, chapter in enumerate(chapters_list, start=1):
start_time = chapter_time(chapter)
if start_time is None:
continue
end_time = (chapter_time(chapters_list[next_num])
if next_num < len(chapters_list) else duration)
if end_time is None:
continue
title = try_get(
chapter, lambda x: x['chapterRenderer']['title']['simpleText'],
compat_str)
chapters.append({
'start_time': start_time,
'end_time': end_time,
'title': title,
})
return chapters
def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
return self._parse_json(self._search_regex(
(r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
regex), webpage, name, default='{}'), video_id, fatal=False)
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
video_id = self._match_id(url)
base_url = self.http_scheme() + '//www.youtube.com/'
webpage_url = base_url + 'watch?v=' + video_id
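        # bpctr and has_verified are appended to skip the content warning
        # interstitial shown for videos flagged as sensitive or controversial.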
webpage = self._download_webpage(
webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)
player_response = None
if webpage:
player_response = self._extract_yt_initial_variable(
webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE,
video_id, 'initial player response')
if not player_response:
player_response = self._call_api(
'player', {'videoId': video_id}, video_id)
playability_status = player_response.get('playabilityStatus') or {}
if playability_status.get('reason') == 'Sign in to confirm your age':
video_info = self._download_webpage(
base_url + 'get_video_info', video_id,
'Refetching age-gated info webpage',
'unable to download video info webpage', query={
'video_id': video_id,
'eurl': 'https://youtube.googleapis.com/v/' + video_id,
'html5': 1,
# See https://github.com/ytdl-org/youtube-dl/issues/29333#issuecomment-864049544
'c': 'TVHTML5',
'cver': '6.20180913',
}, fatal=False)
if video_info:
pr = self._parse_json(
try_get(
compat_parse_qs(video_info),
lambda x: x['player_response'][0], compat_str) or '{}',
video_id, fatal=False)
if pr and isinstance(pr, dict):
player_response = pr
trailer_video_id = try_get(
playability_status,
lambda x: x['errorScreen']['playerLegacyDesktopYpcTrailerRenderer']['trailerVideoId'],
compat_str)
if trailer_video_id:
return self.url_result(
trailer_video_id, self.ie_key(), trailer_video_id)
def get_text(x):
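            # YouTube text objects are either {'simpleText': ...} or
            # {'runs': [{'text': ...}, ...]}; flatten both to a plain string.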
if not x:
return
text = x.get('simpleText')
if text and isinstance(text, compat_str):
return text
runs = x.get('runs')
if not isinstance(runs, list):
return
return ''.join([r['text'] for r in runs if isinstance(r.get('text'), compat_str)])
search_meta = (
lambda x: self._html_search_meta(x, webpage, default=None)) \
if webpage else lambda x: None
video_details = player_response.get('videoDetails') or {}
microformat = try_get(
player_response,
lambda x: x['microformat']['playerMicroformatRenderer'],
dict) or {}
video_title = video_details.get('title') \
or get_text(microformat.get('title')) \
or search_meta(['og:title', 'twitter:title', 'title'])
video_description = video_details.get('shortDescription')
if not smuggled_data.get('force_singlefeed', False):
if not self._downloader.params.get('noplaylist'):
multifeed_metadata_list = try_get(
player_response,
lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'],
compat_str)
if multifeed_metadata_list:
entries = []
feed_ids = []
for feed in multifeed_metadata_list.split(','):
                        # Unquoting must happen before splitting on commas since the
                        # textual fields may themselves contain commas (see
                        # https://github.com/ytdl-org/youtube-dl/issues/8536)
feed_data = compat_parse_qs(
compat_urllib_parse_unquote_plus(feed))
def feed_entry(name):
return try_get(
feed_data, lambda x: x[name][0], compat_str)
feed_id = feed_entry('id')
if not feed_id:
continue
feed_title = feed_entry('title')
title = video_title
if feed_title:
title += ' (%s)' % feed_title
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
'url': smuggle_url(
base_url + 'watch?v=' + feed_data['id'][0],
{'force_singlefeed': True}),
'title': title,
})
feed_ids.append(feed_id)
self.to_screen(
'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
% (', '.join(feed_ids), video_id))
return self.playlist_result(
entries, video_id, video_title, video_description)
else:
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
formats = []
itags = []
itag_qualities = {}
player_url = None
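        # Map YouTube's textual quality labels to sortable integers so formats
        # can be ranked consistently later on.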
q = qualities(['tiny', 'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres'])
streaming_data = player_response.get('streamingData') or {}
streaming_formats = streaming_data.get('formats') or []
streaming_formats.extend(streaming_data.get('adaptiveFormats') or [])
for fmt in streaming_formats:
if fmt.get('targetDurationSec') or fmt.get('drmFamilies'):
continue
itag = str_or_none(fmt.get('itag'))
quality = fmt.get('quality')
if itag and quality:
itag_qualities[itag] = quality
            # FORMAT_STREAM_TYPE_OTF (otf=1) requires downloading the init fragment
            # (adding `&sq=0` to the URL) and parsing the emsg box to determine the
            # number of fragments that would subsequently be requested with `&sq=N`
if fmt.get('type') == 'FORMAT_STREAM_TYPE_OTF':
continue
fmt_url = fmt.get('url')
if not fmt_url:
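                # No direct URL: the stream URL and the scrambled signature are
                # packed into the signatureCipher query string instead.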
sc = compat_parse_qs(fmt.get('signatureCipher'))
fmt_url = url_or_none(try_get(sc, lambda x: x['url'][0]))
encrypted_sig = try_get(sc, lambda x: x['s'][0])
if not (sc and fmt_url and encrypted_sig):
continue
if not player_url:
if not webpage:
continue
player_url = self._search_regex(
r'"(?:PLAYER_JS_URL|jsUrl)"\s*:\s*"([^"]+)"',
webpage, 'player URL', fatal=False)
if not player_url:
continue
signature = self._decrypt_signature(sc['s'][0], video_id, player_url)
sp = try_get(sc, lambda x: x['sp'][0]) or 'signature'
fmt_url += '&' + sp + '=' + signature
if itag:
itags.append(itag)
tbr = float_or_none(
fmt.get('averageBitrate') or fmt.get('bitrate'), 1000)
dct = {
'asr': int_or_none(fmt.get('audioSampleRate')),
'filesize': int_or_none(fmt.get('contentLength')),
'format_id': itag,
'format_note': fmt.get('qualityLabel') or quality,
'fps': int_or_none(fmt.get('fps')),
'height': int_or_none(fmt.get('height')),
'quality': q(quality),
'tbr': tbr,
'url': fmt_url,
'width': fmt.get('width'),
}
mimetype = fmt.get('mimeType')
if mimetype:
mobj = re.match(
r'((?:[^/]+)/(?:[^;]+))(?:;\s*codecs="([^"]+)")?', mimetype)
if mobj:
dct['ext'] = mimetype2ext(mobj.group(1))
dct.update(parse_codecs(mobj.group(2)))
no_audio = dct.get('acodec') == 'none'
no_video = dct.get('vcodec') == 'none'
if no_audio:
dct['vbr'] = tbr
if no_video:
dct['abr'] = tbr
if no_audio or no_video:
dct['downloader_options'] = {
# Youtube throttles chunks >~10M
'http_chunk_size': 10485760,
}
if dct.get('ext'):
dct['container'] = dct['ext'] + '_dash'
formats.append(dct)
hls_manifest_url = streaming_data.get('hlsManifestUrl')
if hls_manifest_url:
for f in self._extract_m3u8_formats(
hls_manifest_url, video_id, 'mp4', fatal=False):
itag = self._search_regex(
r'/itag/(\d+)', f['url'], 'itag', default=None)
if itag:
f['format_id'] = itag
formats.append(f)
if self._downloader.params.get('youtube_include_dash_manifest', True):
dash_manifest_url = streaming_data.get('dashManifestUrl')
if dash_manifest_url:
for f in self._extract_mpd_formats(
dash_manifest_url, video_id, fatal=False):
itag = f['format_id']
if itag in itags:
continue
if itag in itag_qualities:
f['quality'] = q(itag_qualities[itag])
filesize = int_or_none(self._search_regex(
r'/clen/(\d+)', f.get('fragment_base_url')
or f['url'], 'file size', default=None))
if filesize:
f['filesize'] = filesize
formats.append(f)
if not formats:
if streaming_data.get('licenseInfos'):
raise ExtractorError(
'This video is DRM protected.', expected=True)
pemr = try_get(
playability_status,
lambda x: x['errorScreen']['playerErrorMessageRenderer'],
dict) or {}
reason = get_text(pemr.get('reason')) or playability_status.get('reason')
subreason = pemr.get('subreason')
if subreason:
subreason = clean_html(get_text(subreason))
if subreason == 'The uploader has not made this video available in your country.':
countries = microformat.get('availableCountries')
if not countries:
regions_allowed = search_meta('regionsAllowed')
countries = regions_allowed.split(',') if regions_allowed else None
self.raise_geo_restricted(
subreason, countries)
reason += '\n' + subreason
if reason:
raise ExtractorError(reason, expected=True)
self._sort_formats(formats)
keywords = video_details.get('keywords') or []
if not keywords and webpage:
keywords = [
unescapeHTML(m.group('content'))
for m in re.finditer(self._meta_regex('og:video:tag'), webpage)]
for keyword in keywords:
if keyword.startswith('yt:stretch='):
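                # yt:stretch=W:H marks anamorphic videos; record the intended
                # display aspect ratio so the picture can be unstretched.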
mobj = re.search(r'(\d+)\s*:\s*(\d+)', keyword)
if mobj:
# NB: float is intentional for forcing float division
w, h = (float(v) for v in mobj.groups())
if w > 0 and h > 0:
ratio = w / h
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
break
thumbnails = []
for container in (video_details, microformat):
for thumbnail in (try_get(
container,
lambda x: x['thumbnail']['thumbnails'], list) or []):
thumbnail_url = thumbnail.get('url')
if not thumbnail_url:
continue
thumbnails.append({
'height': int_or_none(thumbnail.get('height')),
'url': thumbnail_url,
'width': int_or_none(thumbnail.get('width')),
})
if thumbnails:
break
else:
thumbnail = search_meta(['og:image', 'twitter:image'])
if thumbnail:
thumbnails = [{'url': thumbnail}]
category = microformat.get('category') or search_meta('genre')
channel_id = video_details.get('channelId') \
or microformat.get('externalChannelId') \
or search_meta('channelId')
duration = int_or_none(
video_details.get('lengthSeconds')
or microformat.get('lengthSeconds')) \
or parse_duration(search_meta('duration'))
is_live = video_details.get('isLive')
owner_profile_url = microformat.get('ownerProfileUrl')
info = {
'id': video_id,
'title': self._live_title(video_title) if is_live else video_title,
'formats': formats,
'thumbnails': thumbnails,
'description': video_description,
'upload_date': unified_strdate(
microformat.get('uploadDate')
or search_meta('uploadDate')),
'uploader': video_details['author'],
'uploader_id': self._search_regex(r'/(?:channel|user)/([^/?&#]+)', owner_profile_url, 'uploader id') if owner_profile_url else None,
'uploader_url': owner_profile_url,
'channel_id': channel_id,
'channel_url': 'https://www.youtube.com/channel/' + channel_id if channel_id else None,
'duration': duration,
'view_count': int_or_none(
video_details.get('viewCount')
or microformat.get('viewCount')
or search_meta('interactionCount')),
'average_rating': float_or_none(video_details.get('averageRating')),
'age_limit': 18 if (
microformat.get('isFamilySafe') is False
or search_meta('isFamilyFriendly') == 'false'
or search_meta('og:restrictions:age') == '18+') else 0,
'webpage_url': webpage_url,
'categories': [category] if category else None,
'tags': keywords,
'is_live': is_live,
}
pctr = try_get(
player_response,
lambda x: x['captions']['playerCaptionsTracklistRenderer'], dict)
if pctr:
def process_language(container, base_url, lang_code, query):
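                # Build one subtitle entry per supported format for the given
                # language by reusing the caption base URL with a different fmt.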
lang_subs = []
for fmt in self._SUBTITLE_FORMATS:
query.update({
'fmt': fmt,
})
lang_subs.append({
'ext': fmt,
'url': update_url_query(base_url, query),
})
container[lang_code] = lang_subs
subtitles = {}
for caption_track in (pctr.get('captionTracks') or []):
base_url = caption_track.get('baseUrl')
if not base_url:
continue
if caption_track.get('kind') != 'asr':
lang_code = caption_track.get('languageCode')
if not lang_code:
continue
process_language(
subtitles, base_url, lang_code, {})
continue
automatic_captions = {}
for translation_language in (pctr.get('translationLanguages') or []):
translation_language_code = translation_language.get('languageCode')
if not translation_language_code:
continue
process_language(
automatic_captions, base_url, translation_language_code,
{'tlang': translation_language_code})
info['automatic_captions'] = automatic_captions
info['subtitles'] = subtitles
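        # Honour start/end hints (t=, start=, end=) from the URL query or
        # fragment so the requested section can be seeked to.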
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
for k, v in query.items():
for d_k, s_ks in [('start', ('start', 't')), ('end', ('end',))]:
d_k += '_time'
if d_k not in info and k in s_ks:
info[d_k] = parse_duration(query[k][0])
if video_description:
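            # YouTube Music auto-generated descriptions follow a fixed layout
            # (track · artist, album, ℗ year, release date, "Auto-generated by
            # YouTube"); the regex below picks the music metadata out of it.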
mobj = re.search(r'(?s)(?P<track>[^·\n]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?.+\nAuto-generated by YouTube\.\s*$', video_description)
if mobj:
release_year = mobj.group('release_year')
release_date = mobj.group('release_date')
if release_date:
release_date = release_date.replace('-', '')
if not release_year:
release_year = release_date[:4]
info.update({
                    'album': mobj.group('album').strip(),
'artist': mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·')),
'track': mobj.group('track').strip(),
'release_date': release_date,
'release_year': int_or_none(release_year),
})
initial_data = None
if webpage:
initial_data = self._extract_yt_initial_variable(
webpage, self._YT_INITIAL_DATA_RE, video_id,
'yt initial data')
if not initial_data:
initial_data = self._call_api(
'next', {'videoId': video_id}, video_id, fatal=False)
if initial_data:
chapters = self._extract_chapters_from_json(
initial_data, video_id, duration)
if not chapters:
                for engagement_panel in (initial_data.get('engagementPanels') or []):
                    contents = try_get(
                        engagement_panel, lambda x: x['engagementPanelSectionListRenderer']['content']['macroMarkersListRenderer']['contents'],
list)
if not contents:
continue
def chapter_time(mmlir):
return parse_duration(
get_text(mmlir.get('timeDescription')))
chapters = []
for next_num, content in enumerate(contents, start=1):
mmlir = content.get('macroMarkersListItemRenderer') or {}
start_time = chapter_time(mmlir)
end_time = chapter_time(try_get(
contents, lambda x: x[next_num]['macroMarkersListItemRenderer'])) \
if next_num < len(contents) else duration
if start_time is None or end_time is None:
continue
chapters.append({
'start_time': start_time,
'end_time': end_time,
'title': get_text(mmlir.get('title')),
})
if chapters:
break
if chapters:
info['chapters'] = chapters
contents = try_get(
initial_data,
lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'],
list) or []
for content in contents:
vpir = content.get('videoPrimaryInfoRenderer')
if vpir:
stl = vpir.get('superTitleLink')
if stl:
stl = get_text(stl)
if try_get(
vpir,
lambda x: x['superTitleIcon']['iconType']) == 'LOCATION_PIN':
info['location'] = stl
else:
mobj = re.search(r'(.+?)\s*S(\d+)\s*•\s*E(\d+)', stl)
if mobj:
info.update({
'series': mobj.group(1),
'season_number': int(mobj.group(2)),
'episode_number': int(mobj.group(3)),
})
for tlb in (try_get(
vpir,
lambda x: x['videoActions']['menuRenderer']['topLevelButtons'],
list) or []):
tbr = tlb.get('toggleButtonRenderer') or {}
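                    # Like/dislike counts are only exposed via the toggle
                    # buttons' accessibility labels, which come in two known
                    # phrasings matched below.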
for getter, regex in [(
lambda x: x['defaultText']['accessibility']['accessibilityData'],
r'(?P<count>[\d,]+)\s*(?P<type>(?:dis)?like)'), ([
lambda x: x['accessibility'],
lambda x: x['accessibilityData']['accessibilityData'],
], r'(?P<type>(?:dis)?like) this video along with (?P<count>[\d,]+) other people')]:
label = (try_get(tbr, getter, dict) or {}).get('label')
if label:
mobj = re.match(regex, label)
if mobj:
info[mobj.group('type') + '_count'] = str_to_int(mobj.group('count'))
break
sbr_tooltip = try_get(
vpir, lambda x: x['sentimentBar']['sentimentBarRenderer']['tooltip'])
if sbr_tooltip:
like_count, dislike_count = sbr_tooltip.split(' / ')
info.update({
'like_count': str_to_int(like_count),
'dislike_count': str_to_int(dislike_count),
})
vsir = content.get('videoSecondaryInfoRenderer')
if vsir:
info['channel'] = get_text(try_get(
vsir,
lambda x: x['owner']['videoOwnerRenderer']['title'],
dict))
rows = try_get(
vsir,
lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'],
list) or []
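                # A divider line between metadata rows indicates that several
                # songs are credited, in which case per-track fields would be
                # ambiguous and are skipped below.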
multiple_songs = False
for row in rows:
if try_get(row, lambda x: x['metadataRowRenderer']['hasDividerLine']) is True:
multiple_songs = True
break
for row in rows:
mrr = row.get('metadataRowRenderer') or {}
mrr_title = mrr.get('title')
if not mrr_title:
continue
mrr_title = get_text(mrr['title'])
mrr_contents_text = get_text(mrr['contents'][0])
if mrr_title == 'License':
info['license'] = mrr_contents_text
elif not multiple_songs:
if mrr_title == 'Album':
info['album'] = mrr_contents_text
elif mrr_title == 'Artist':
info['artist'] = mrr_contents_text
elif mrr_title == 'Song':
info['track'] = mrr_contents_text
for s_k, d_k in [('artist', 'creator'), ('track', 'alt_title')]:
v = info.get(s_k)
if v:
info[d_k] = v
self.mark_watched(video_id, player_response)
return info
class YoutubeTabIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com tab'
_VALID_URL = r'''(?x)
https?://
(?:\w+\.)?
(?:
youtube(?:kids)?\.com|
invidio\.us
)/
(?:
(?:channel|c|user|feed|hashtag)/|
(?:playlist|watch)\?.*?\blist=|
(?!(?:watch|embed|v|e)\b)
)
(?P<id>[^/?\#&]+)
'''
IE_NAME = 'youtube:tab'
_TESTS = [{
# playlists, multipage
'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
'playlist_mincount': 94,
'info_dict': {
'id': 'UCqj7Cz7revf5maW9g5pgNcg',
'title': 'Игорь Клейнер - Playlists',
'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
},
}, {
# playlists, multipage, different order
'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
'playlist_mincount': 94,
'info_dict': {
'id': 'UCqj7Cz7revf5maW9g5pgNcg',
'title': 'Игорь Клейнер - Playlists',
'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
},
}, {
# playlists, series
'url': 'https://www.youtube.com/c/3blue1brown/playlists?view=50&sort=dd&shelf_id=3',
'playlist_mincount': 5,
'info_dict': {
'id': 'UCYO_jab_esuFRV4b17AJtAw',
'title': '3Blue1Brown - Playlists',
'description': 'md5:e1384e8a133307dd10edee76e875d62f',
},
}, {
# playlists, singlepage
'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
'playlist_mincount': 4,
'info_dict': {
'id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
'title': 'ThirstForScience - Playlists',
'description': 'md5:609399d937ea957b0f53cbffb747a14c',
}
}, {
'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
'only_matching': True,
}, {
# basic, single video playlist
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'title': 'youtube-dl public playlist',
},
'playlist_count': 1,
}, {
# empty playlist
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'title': 'youtube-dl empty playlist',
},
'playlist_count': 0,
}, {
# Home tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Home',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
},
'playlist_mincount': 2,
}, {
# Videos tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Videos',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
},
'playlist_mincount': 975,
}, {
# Videos tab, sorted by popular
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Videos',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
},
'playlist_mincount': 199,
}, {
# Playlists tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Playlists',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
},
'playlist_mincount': 17,
}, {
# Community tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Community',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
},
'playlist_mincount': 18,
}, {
# Channels tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Channels',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
},
'playlist_mincount': 138,
}, {
'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
}, {
'url': 'https://www.youtubekids.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
}, {
'url': 'https://music.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
}, {
'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'info_dict': {
'title': '29C3: Not my department',
'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'uploader': 'Christiaan008',
'uploader_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
},
'playlist_count': 96,
}, {
'note': 'Large playlist',
'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
'info_dict': {
'title': 'Uploads from Cauchemar',
'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
'uploader': 'Cauchemar',
'uploader_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
},
'playlist_mincount': 1123,
}, {
# even larger playlist, 8832 videos
'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
'only_matching': True,
}, {
'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
'info_dict': {
'title': 'Uploads from Interstellar Movie',
'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
'uploader': 'Interstellar Movie',
'uploader_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
},
'playlist_mincount': 21,
}, {
# https://github.com/ytdl-org/youtube-dl/issues/21844
'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'info_dict': {
'title': 'Data Analysis with Dr Mike Pound',
'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'uploader_id': 'UC9-y-6csu5WGm29I7JiwpnA',
'uploader': 'Computerphile',
},
'playlist_mincount': 11,
}, {
'url': 'https://invidio.us/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'only_matching': True,
}, {
# Playlist URL that does not actually serve a playlist
'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
'info_dict': {
'id': 'FqZTN594JQw',
'ext': 'webm',
'title': "Smiley's People 01 detective, Adventure Series, Action",
'uploader': 'STREEM',
'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
'upload_date': '20150526',
'license': 'Standard YouTube License',
'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
'categories': ['People & Blogs'],
'tags': list,
'view_count': int,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
'add_ie': [YoutubeIE.ie_key()],
}, {
'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live',
'info_dict': {
'id': '9Auq9mYxFEE',
'ext': 'mp4',
'title': 'Watch Sky News live',
'uploader': 'Sky News',
'uploader_id': 'skynews',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/skynews',
'upload_date': '20191102',
'description': 'md5:78de4e1c2359d0ea3ed829678e38b662',
'categories': ['News & Politics'],
'tags': list,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.youtube.com/user/TheYoungTurks/live',
'info_dict': {
'id': 'a48o2S1cPoo',
'ext': 'mp4',
'title': 'The Young Turks - Live Main Show',
'uploader': 'The Young Turks',
'uploader_id': 'TheYoungTurks',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
'upload_date': '20150715',
'license': 'Standard YouTube License',
'description': 'md5:438179573adcdff3c97ebb1ee632b891',
'categories': ['News & Politics'],
'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
'only_matching': True,
}, {
'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/trending',
'only_matching': True,
}, {
# needs auth
'url': 'https://www.youtube.com/feed/library',
'only_matching': True,
}, {
# needs auth
'url': 'https://www.youtube.com/feed/history',
'only_matching': True,
}, {
# needs auth
'url': 'https://www.youtube.com/feed/subscriptions',
'only_matching': True,
}, {
# needs auth
'url': 'https://www.youtube.com/feed/watch_later',
'only_matching': True,
}, {
# no longer available?
'url': 'https://www.youtube.com/feed/recommended',
'only_matching': True,
}, {
# inline playlist whose continuations do not always work
'url': 'https://www.youtube.com/watch?v=UC6u0Tct-Fo&list=PL36D642111D65BE7C',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/course',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/zsecurity',
'only_matching': True,
}, {
'url': 'http://www.youtube.com/NASAgovVideo/videos',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/TheYoungTurks/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/hashtag/cctv9',
'info_dict': {
'id': 'cctv9',
'title': '#cctv9',
},
'playlist_mincount': 350,
}, {
'url': 'https://www.youtube.com/watch?list=PLW4dVinRY435CBE_JD3t-0SRXKfnZHS1P&feature=youtu.be&v=M9cJMXmQ_ZU',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return False if YoutubeIE.suitable(url) else super(
YoutubeTabIE, cls).suitable(url)
def _extract_channel_id(self, webpage):
channel_id = self._html_search_meta(
'channelId', webpage, 'channel id', default=None)
if channel_id:
return channel_id
channel_url = self._html_search_meta(
('og:url', 'al:ios:url', 'al:android:url', 'al:web:url',
'twitter:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad',
'twitter:app:url:googleplay'), webpage, 'channel url')
return self._search_regex(
r'https?://(?:www\.)?youtube\.com/channel/([^/?#&]+)',
channel_url, 'channel id')
@staticmethod
def _extract_grid_item_renderer(item):
assert isinstance(item, dict)
for key, renderer in item.items():
if not key.startswith('grid') or not key.endswith('Renderer'):
continue
if not isinstance(renderer, dict):
continue
return renderer
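# For illustration only: the keys matched here typically look like
# 'gridVideoRenderer', 'gridPlaylistRenderer' or 'gridChannelRenderer',
# i.e. anything of the form grid*Renderer whose value is a dict.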
def _grid_entries(self, grid_renderer):
for item in grid_renderer['items']:
if not isinstance(item, dict):
continue
renderer = self._extract_grid_item_renderer(item)
if not isinstance(renderer, dict):
continue
title = try_get(
renderer, (lambda x: x['title']['runs'][0]['text'],
lambda x: x['title']['simpleText']), compat_str)
# playlist
playlist_id = renderer.get('playlistId')
if playlist_id:
yield self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id,
ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
video_title=title)
continue
# video
video_id = renderer.get('videoId')
if video_id:
yield self._extract_video(renderer)
continue
# channel
channel_id = renderer.get('channelId')
if channel_id:
title = try_get(
renderer, lambda x: x['title']['simpleText'], compat_str)
yield self.url_result(
'https://www.youtube.com/channel/%s' % channel_id,
ie=YoutubeTabIE.ie_key(), video_title=title)
continue
# generic endpoint URL support
ep_url = urljoin('https://www.youtube.com/', try_get(
renderer, lambda x: x['navigationEndpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str))
if ep_url:
for ie in (YoutubeTabIE, YoutubePlaylistIE, YoutubeIE):
if ie.suitable(ep_url):
yield self.url_result(
ep_url, ie=ie.ie_key(), video_id=ie._match_id(ep_url), video_title=title)
break
def _shelf_entries_from_content(self, shelf_renderer):
content = shelf_renderer.get('content')
if not isinstance(content, dict):
return
renderer = content.get('gridRenderer')
if renderer:
# TODO: add support for nested playlists so that each shelf is processed
# as a separate playlist
# TODO: this includes only the first N items
for entry in self._grid_entries(renderer):
yield entry
renderer = content.get('horizontalListRenderer')
if renderer:
# TODO
pass
def _shelf_entries(self, shelf_renderer, skip_channels=False):
ep = try_get(
shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str)
shelf_url = urljoin('https://www.youtube.com', ep)
if shelf_url:
# Skip links to other channels; note that checking for
# endpoint.commandMetadata.webCommandMetadata.webPageType == WEB_PAGE_TYPE_CHANNEL
# will not work
if skip_channels and '/channels?' in shelf_url:
return
title = try_get(
shelf_renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
yield self.url_result(shelf_url, video_title=title)
# Shelf may not contain shelf URL, fallback to extraction from content
for entry in self._shelf_entries_from_content(shelf_renderer):
yield entry
def _playlist_entries(self, video_list_renderer):
for content in video_list_renderer['contents']:
if not isinstance(content, dict):
continue
renderer = content.get('playlistVideoRenderer') or content.get('playlistPanelVideoRenderer')
if not isinstance(renderer, dict):
continue
video_id = renderer.get('videoId')
if not video_id:
continue
yield self._extract_video(renderer)
def _video_entry(self, video_renderer):
video_id = video_renderer.get('videoId')
if video_id:
return self._extract_video(video_renderer)
def _post_thread_entries(self, post_thread_renderer):
post_renderer = try_get(
post_thread_renderer, lambda x: x['post']['backstagePostRenderer'], dict)
if not post_renderer:
return
# video attachment
video_renderer = try_get(
post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict)
video_id = None
if video_renderer:
entry = self._video_entry(video_renderer)
if entry:
yield entry
# inline video links
runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
for run in runs:
if not isinstance(run, dict):
continue
ep_url = try_get(
run, lambda x: x['navigationEndpoint']['urlEndpoint']['url'], compat_str)
if not ep_url:
continue
if not YoutubeIE.suitable(ep_url):
continue
ep_video_id = YoutubeIE._match_id(ep_url)
if video_id == ep_video_id:
continue
yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=video_id)
def _post_thread_continuation_entries(self, post_thread_continuation):
contents = post_thread_continuation.get('contents')
if not isinstance(contents, list):
return
for content in contents:
renderer = content.get('backstagePostThreadRenderer')
if not isinstance(renderer, dict):
continue
for entry in self._post_thread_entries(renderer):
yield entry
def _rich_grid_entries(self, contents):
for content in contents:
video_renderer = try_get(content, lambda x: x['richItemRenderer']['content']['videoRenderer'], dict)
if video_renderer:
entry = self._video_entry(video_renderer)
if entry:
yield entry
@staticmethod
def _build_continuation_query(continuation, ctp=None):
query = {
'ctoken': continuation,
'continuation': continuation,
}
if ctp:
query['itct'] = ctp
return query
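# For illustration, a continuation query built above looks roughly like this
# (token values are made up):
#   {'ctoken': '4qmFsgI...', 'continuation': '4qmFsgI...', 'itct': 'CBQQ7z...'}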
@staticmethod
def _extract_next_continuation_data(renderer):
next_continuation = try_get(
renderer, lambda x: x['continuations'][0]['nextContinuationData'], dict)
if not next_continuation:
return
continuation = next_continuation.get('continuation')
if not continuation:
return
ctp = next_continuation.get('clickTrackingParams')
return YoutubeTabIE._build_continuation_query(continuation, ctp)
@classmethod
def _extract_continuation(cls, renderer):
next_continuation = cls._extract_next_continuation_data(renderer)
if next_continuation:
return next_continuation
contents = []
for key in ('contents', 'items'):
contents.extend(try_get(renderer, lambda x: x[key], list) or [])
for content in contents:
if not isinstance(content, dict):
continue
continuation_ep = try_get(
content, lambda x: x['continuationItemRenderer']['continuationEndpoint'],
dict)
if not continuation_ep:
continue
continuation = try_get(
continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
if not continuation:
continue
ctp = continuation_ep.get('clickTrackingParams')
return YoutubeTabIE._build_continuation_query(continuation, ctp)
def _entries(self, tab, item_id, webpage):
tab_content = try_get(tab, lambda x: x['content'], dict)
if not tab_content:
return
slr_renderer = try_get(tab_content, lambda x: x['sectionListRenderer'], dict)
if slr_renderer:
is_channels_tab = tab.get('title') == 'Channels'
continuation = None
slr_contents = try_get(slr_renderer, lambda x: x['contents'], list) or []
for slr_content in slr_contents:
if not isinstance(slr_content, dict):
continue
is_renderer = try_get(slr_content, lambda x: x['itemSectionRenderer'], dict)
if not is_renderer:
continue
isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
for isr_content in isr_contents:
if not isinstance(isr_content, dict):
continue
renderer = isr_content.get('playlistVideoListRenderer')
if renderer:
for entry in self._playlist_entries(renderer):
yield entry
continuation = self._extract_continuation(renderer)
continue
renderer = isr_content.get('gridRenderer')
if renderer:
for entry in self._grid_entries(renderer):
yield entry
continuation = self._extract_continuation(renderer)
continue
renderer = isr_content.get('shelfRenderer')
if renderer:
for entry in self._shelf_entries(renderer, not is_channels_tab):
yield entry
continue
renderer = isr_content.get('backstagePostThreadRenderer')
if renderer:
for entry in self._post_thread_entries(renderer):
yield entry
continuation = self._extract_continuation(renderer)
continue
renderer = isr_content.get('videoRenderer')
if renderer:
entry = self._video_entry(renderer)
if entry:
yield entry
if not continuation:
continuation = self._extract_continuation(is_renderer)
if not continuation:
continuation = self._extract_continuation(slr_renderer)
else:
rich_grid_renderer = tab_content.get('richGridRenderer')
if not rich_grid_renderer:
return
for entry in self._rich_grid_entries(rich_grid_renderer.get('contents') or []):
yield entry
continuation = self._extract_continuation(rich_grid_renderer)
ytcfg = self._extract_ytcfg(item_id, webpage)
client_version = try_get(
ytcfg, lambda x: x['INNERTUBE_CLIENT_VERSION'], compat_str) or '2.20210407.08.00'
headers = {
'x-youtube-client-name': '1',
'x-youtube-client-version': client_version,
'content-type': 'application/json',
}
context = try_get(ytcfg, lambda x: x['INNERTUBE_CONTEXT'], dict) or {
'client': {
'clientName': 'WEB',
'clientVersion': client_version,
}
}
visitor_data = try_get(context, lambda x: x['client']['visitorData'], compat_str)
identity_token = self._extract_identity_token(ytcfg, webpage)
if identity_token:
headers['x-youtube-identity-token'] = identity_token
data = {
'context': context,
}
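# The JSON body POSTed to the browse endpoint below ends up shaped roughly
# like this (values illustrative):
#   {"context": {"client": {"clientName": "WEB", "clientVersion": "2.2021..."}},
#    "continuation": "<ctoken>",
#    "clickTracking": {"clickTrackingParams": "<itct>"}}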
for page_num in itertools.count(1):
if not continuation:
break
if visitor_data:
headers['x-goog-visitor-id'] = visitor_data
data['continuation'] = continuation['continuation']
data['clickTracking'] = {
'clickTrackingParams': continuation['itct']
}
count = 0
retries = 3
while count <= retries:
try:
# Downloading page may result in intermittent 5xx HTTP error
# that is usually worked around with a retry
response = self._download_json(
'https://www.youtube.com/youtubei/v1/browse?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
None, 'Downloading page %d%s' % (page_num, ' (retry #%d)' % count if count else ''),
headers=headers, data=json.dumps(data).encode('utf8'))
break
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503):
count += 1
if count <= retries:
continue
raise
if not response:
break
visitor_data = try_get(
response, lambda x: x['responseContext']['visitorData'], compat_str) or visitor_data
continuation_contents = try_get(
response, lambda x: x['continuationContents'], dict)
if continuation_contents:
continuation_renderer = continuation_contents.get('playlistVideoListContinuation')
if continuation_renderer:
for entry in self._playlist_entries(continuation_renderer):
yield entry
continuation = self._extract_continuation(continuation_renderer)
continue
continuation_renderer = continuation_contents.get('gridContinuation')
if continuation_renderer:
for entry in self._grid_entries(continuation_renderer):
yield entry
continuation = self._extract_continuation(continuation_renderer)
continue
continuation_renderer = continuation_contents.get('itemSectionContinuation')
if continuation_renderer:
for entry in self._post_thread_continuation_entries(continuation_renderer):
yield entry
continuation = self._extract_continuation(continuation_renderer)
continue
on_response_received = dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints'))
continuation_items = try_get(
on_response_received, lambda x: x[0]['appendContinuationItemsAction']['continuationItems'], list)
if continuation_items:
continuation_item = continuation_items[0]
if not isinstance(continuation_item, dict):
continue
renderer = self._extract_grid_item_renderer(continuation_item)
if renderer:
grid_renderer = {'items': continuation_items}
for entry in self._grid_entries(grid_renderer):
yield entry
continuation = self._extract_continuation(grid_renderer)
continue
renderer = continuation_item.get('playlistVideoRenderer') or continuation_item.get('itemSectionRenderer')
if renderer:
video_list_renderer = {'contents': continuation_items}
for entry in self._playlist_entries(video_list_renderer):
yield entry
continuation = self._extract_continuation(video_list_renderer)
continue
renderer = continuation_item.get('backstagePostThreadRenderer')
if renderer:
continuation_renderer = {'contents': continuation_items}
for entry in self._post_thread_continuation_entries(continuation_renderer):
yield entry
continuation = self._extract_continuation(continuation_renderer)
continue
renderer = continuation_item.get('richItemRenderer')
if renderer:
for entry in self._rich_grid_entries(continuation_items):
yield entry
continuation = self._extract_continuation({'contents': continuation_items})
continue
break
@staticmethod
def _extract_selected_tab(tabs):
for tab in tabs:
if try_get(tab, lambda x: x['tabRenderer']['selected'], bool):
return tab['tabRenderer']
else:
raise ExtractorError('Unable to find selected tab')
@staticmethod
def _extract_uploader(data):
uploader = {}
sidebar_renderer = try_get(
data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list)
if sidebar_renderer:
for item in sidebar_renderer:
if not isinstance(item, dict):
continue
renderer = item.get('playlistSidebarSecondaryInfoRenderer')
if not isinstance(renderer, dict):
continue
owner = try_get(
renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict)
if owner:
uploader['uploader'] = owner.get('text')
uploader['uploader_id'] = try_get(
owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], compat_str)
uploader['uploader_url'] = urljoin(
'https://www.youtube.com/',
try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
return uploader
@staticmethod
def _extract_alert(data):
alerts = []
for alert in try_get(data, lambda x: x['alerts'], list) or []:
if not isinstance(alert, dict):
continue
alert_text = try_get(
alert, lambda x: x['alertRenderer']['text'], dict)
if not alert_text:
continue
text = try_get(
alert_text,
(lambda x: x['simpleText'], lambda x: x['runs'][0]['text']),
compat_str)
if text:
alerts.append(text)
return '\n'.join(alerts)
def _extract_from_tabs(self, item_id, webpage, data, tabs):
selected_tab = self._extract_selected_tab(tabs)
renderer = try_get(
data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
playlist_id = item_id
title = description = None
if renderer:
channel_title = renderer.get('title') or item_id
tab_title = selected_tab.get('title')
title = channel_title or item_id
if tab_title:
title += ' - %s' % tab_title
description = renderer.get('description')
playlist_id = renderer.get('externalId')
else:
renderer = try_get(
data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
if renderer:
title = renderer.get('title')
else:
renderer = try_get(
data, lambda x: x['header']['hashtagHeaderRenderer'], dict)
if renderer:
title = try_get(renderer, lambda x: x['hashtag']['simpleText'])
playlist = self.playlist_result(
self._entries(selected_tab, item_id, webpage),
playlist_id=playlist_id, playlist_title=title,
playlist_description=description)
playlist.update(self._extract_uploader(data))
return playlist
def _extract_from_playlist(self, item_id, url, data, playlist):
title = playlist.get('title') or try_get(
data, lambda x: x['titleText']['simpleText'], compat_str)
playlist_id = playlist.get('playlistId') or item_id
# Inline playlist rendition continuation does not always work
# on YouTube's side, so we delegate to regular tab-based playlist URL
# processing whenever possible.
playlist_url = urljoin(url, try_get(
playlist, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str))
if playlist_url and playlist_url != url:
return self.url_result(
playlist_url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
video_title=title)
return self.playlist_result(
self._playlist_entries(playlist), playlist_id=playlist_id,
playlist_title=title)
def _extract_identity_token(self, ytcfg, webpage):
if ytcfg:
token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
if token:
return token
return self._search_regex(
r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
'identity token', default=None)
def _real_extract(self, url):
item_id = self._match_id(url)
url = compat_urlparse.urlunparse(
compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
# Handle both video/playlist URLs
qs = parse_qs(url)
video_id = qs.get('v', [None])[0]
playlist_id = qs.get('list', [None])[0]
if video_id and playlist_id:
if self._downloader.params.get('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
webpage = self._download_webpage(url, item_id)
data = self._extract_yt_initial_data(item_id, webpage)
tabs = try_get(
data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
if tabs:
return self._extract_from_tabs(item_id, webpage, data, tabs)
playlist = try_get(
data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
if playlist:
return self._extract_from_playlist(item_id, url, data, playlist)
# Fall back to video extraction if no playlist-like page is recognized.
# First check for the current video, then try the v attribute of the URL query.
video_id = try_get(
data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
compat_str) or video_id
if video_id:
return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
# Capture and output alerts
alert = self._extract_alert(data)
if alert:
raise ExtractorError(alert, expected=True)
# Failed to recognize
raise ExtractorError('Unable to recognize tab page')
class YoutubePlaylistIE(InfoExtractor):
IE_DESC = 'YouTube.com playlists'
_VALID_URL = r'''(?x)(?:
(?:https?://)?
(?:\w+\.)?
(?:
(?:
youtube(?:kids)?\.com|
invidio\.us
)
/.*?\?.*?\blist=
)?
(?P<id>%(playlist_id)s)
)''' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
IE_NAME = 'youtube:playlist'
_TESTS = [{
'note': 'issue #673',
'url': 'PLBB231211A4F62143',
'info_dict': {
'title': '[OLD]Team Fortress 2 (Class-based LP)',
'id': 'PLBB231211A4F62143',
'uploader': 'Wickydoo',
'uploader_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
},
'playlist_mincount': 29,
}, {
'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
'info_dict': {
'title': 'YDL_safe_search',
'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
},
'playlist_count': 2,
'skip': 'This playlist is private',
}, {
'note': 'embedded',
'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'playlist_count': 4,
'info_dict': {
'title': 'JODA15',
'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'uploader': 'milan',
'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
}
}, {
'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'playlist_mincount': 982,
'info_dict': {
'title': '2018 Chinese New Singles (11/6 updated)',
'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'uploader': 'LBK',
'uploader_id': 'UC21nz3_MesPLqtDqwdvnoxA',
}
}, {
'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
'only_matching': True,
}, {
# music album playlist
'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
if YoutubeTabIE.suitable(url):
return False
# Hack for lazy extractors until more generic solution is implemented
# (see #28780)
from .youtube import parse_qs
qs = parse_qs(url)
if qs.get('v', [None])[0]:
return False
return super(YoutubePlaylistIE, cls).suitable(url)
def _real_extract(self, url):
playlist_id = self._match_id(url)
qs = parse_qs(url)
if not qs:
qs = {'list': playlist_id}
return self.url_result(
update_url_query('https://www.youtube.com/playlist', qs),
ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
class YoutubeYtBeIE(InfoExtractor):
_VALID_URL = r'https?://youtu\.be/(?P<id>[0-9A-Za-z_-]{11})/*?.*?\blist=(?P<playlist_id>%(playlist_id)s)' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_TESTS = [{
'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
'info_dict': {
'id': 'yeWKywCrFtk',
'ext': 'mp4',
'title': 'Small Scale Baler and Braiding Rugs',
'uploader': 'Backus-Page House Museum',
'uploader_id': 'backuspagemuseum',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
'upload_date': '20161008',
'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
'categories': ['Nonprofits & Activism'],
'tags': list,
'like_count': int,
'dislike_count': int,
},
'params': {
'noplaylist': True,
'skip_download': True,
},
}, {
'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
playlist_id = mobj.group('playlist_id')
return self.url_result(
update_url_query('https://www.youtube.com/watch', {
'v': video_id,
'list': playlist_id,
'feature': 'youtu.be',
}), ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
class YoutubeYtUserIE(InfoExtractor):
_VALID_URL = r'ytuser:(?P<id>.+)'
_TESTS = [{
'url': 'ytuser:phihag',
'only_matching': True,
}]
def _real_extract(self, url):
user_id = self._match_id(url)
return self.url_result(
'https://www.youtube.com/user/%s' % user_id,
ie=YoutubeTabIE.ie_key(), video_id=user_id)
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube:favorites'
IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
_LOGIN_REQUIRED = True
_TESTS = [{
'url': ':ytfav',
'only_matching': True,
}, {
'url': ':ytfavorites',
'only_matching': True,
}]
def _real_extract(self, url):
return self.url_result(
'https://www.youtube.com/playlist?list=LL',
ie=YoutubeTabIE.ie_key())
class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com searches'
# there doesn't appear to be a real limit, for example if you search for
# 'python' you get more than 8.000.000 results
_MAX_RESULTS = float('inf')
IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
_SEARCH_PARAMS = None
_TESTS = []
def _entries(self, query, n):
data = {
'context': {
'client': {
'clientName': 'WEB',
'clientVersion': '2.20201021.03.00',
}
},
'query': query,
}
if self._SEARCH_PARAMS:
data['params'] = self._SEARCH_PARAMS
total = 0
for page_num in itertools.count(1):
search = self._download_json(
'https://www.youtube.com/youtubei/v1/search?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
video_id='query "%s"' % query,
note='Downloading page %s' % page_num,
errnote='Unable to download API page', fatal=False,
data=json.dumps(data).encode('utf8'),
headers={'content-type': 'application/json'})
if not search:
break
slr_contents = try_get(
search,
(lambda x: x['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer']['contents'],
lambda x: x['onResponseReceivedCommands'][0]['appendContinuationItemsAction']['continuationItems']),
list)
if not slr_contents:
break
for slr_content in slr_contents:
isr_contents = try_get(
slr_content,
lambda x: x['itemSectionRenderer']['contents'],
list)
if not isr_contents:
continue
for content in isr_contents:
if not isinstance(content, dict):
continue
video = content.get('videoRenderer')
if not isinstance(video, dict):
continue
video_id = video.get('videoId')
if not video_id:
continue
yield self._extract_video(video)
total += 1
if total == n:
return
token = try_get(
slr_contents,
lambda x: x[-1]['continuationItemRenderer']['continuationEndpoint']['continuationCommand']['token'],
compat_str)
if not token:
break
data['continuation'] = token
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
return self.playlist_result(self._entries(query, n), query)
class YoutubeSearchDateIE(YoutubeSearchIE):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_SEARCH_KEY = 'ytsearchdate'
IE_DESC = 'YouTube.com searches, newest videos first'
_SEARCH_PARAMS = 'CAI%3D'
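# 'CAI%3D' is the URL-encoded form of the base64 blob 'CAI=', which the search
# endpoint appears to interpret as "sort by upload date" (the same value the
# website puts in its sp= query parameter).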
r"""
class YoutubeSearchURLIE(YoutubeSearchIE):
IE_DESC = 'YouTube.com search URLs'
IE_NAME = 'youtube:search_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
_TESTS = [{
'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
'playlist_mincount': 5,
'info_dict': {
'title': 'youtube-dl test video',
}
}, {
'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
query = compat_urllib_parse_unquote_plus(mobj.group('query'))
webpage = self._download_webpage(url, query)
return self.playlist_result(self._process_page(webpage), playlist_title=query)
"""
class YoutubeFeedsInfoExtractor(YoutubeTabIE):
"""
Base class for feed extractors
Subclasses must define the _FEED_NAME property.
"""
_LOGIN_REQUIRED = True
@property
def IE_NAME(self):
return 'youtube:%s' % self._FEED_NAME
def _real_initialize(self):
self._login()
def _real_extract(self, url):
return self.url_result(
'https://www.youtube.com/feed/%s' % self._FEED_NAME,
ie=YoutubeTabIE.ie_key())
class YoutubeWatchLaterIE(InfoExtractor):
IE_NAME = 'youtube:watchlater'
IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
_VALID_URL = r':ytwatchlater'
_TESTS = [{
'url': ':ytwatchlater',
'only_matching': True,
}]
def _real_extract(self, url):
return self.url_result(
'https://www.youtube.com/playlist?list=WL', ie=YoutubeTabIE.ie_key())
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
_VALID_URL = r':ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
_TESTS = [{
'url': ':ytrec',
'only_matching': True,
}, {
'url': ':ytrecommended',
'only_matching': True,
}]
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
_VALID_URL = r':ytsubs(?:criptions)?'
_FEED_NAME = 'subscriptions'
_TESTS = [{
'url': ':ytsubs',
'only_matching': True,
}, {
'url': ':ytsubscriptions',
'only_matching': True,
}]
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
_VALID_URL = r':ythistory'
_FEED_NAME = 'history'
_TESTS = [{
'url': ':ythistory',
'only_matching': True,
}]
class YoutubeTruncatedURLIE(InfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
_VALID_URL = r'''(?x)
(?:https?://)?
(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
(?:watch\?(?:
feature=[a-z_]+|
annotation_id=annotation_[^&]+|
x-yt-cl=[0-9]+|
hl=[^&]*|
t=[0-9]+
)?
|
attribution_link\?a=[^&]+
)
$
'''
_TESTS = [{
'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?feature=foo',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?hl=en-GB',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?t=2372',
'only_matching': True,
}]
def _real_extract(self, url):
raise ExtractorError(
'Did you forget to quote the URL? Remember that & is a meta '
'character in most shells, so you want to put the URL in quotes, '
'like youtube-dl '
'"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
' or simply youtube-dl BaW_jenozKc .',
expected=True)
class YoutubeTruncatedIDIE(InfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
raise ExtractorError(
'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
expected=True)
| rg3/youtube-dl | youtube_dl/extractor/youtube.py | Python | unlicense | 142,967 | ["ADF"] | 450e3a31e37bbf846ab8390311f2cead3edaf26dc03b05ed0939c3c347e406b4 |
# mooseplots.py ---
#
# Filename: mooseplots.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Mon Jul 5 21:35:09 2010 (+0530)
# Version:
# Last-Updated: Fri Jun 17 12:39:59 2011 (+0530)
# By: Subhasis Ray
# Update #: 615
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
# Class to handle plotting in MOOSE GUI
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
from PyQt4 import QtGui
from PyQt4.Qt import Qt
from PyQt4 import QtCore
import PyQt4.Qwt5 as Qwt
from PyQt4.Qwt5.anynumpy import *
import config
import moose
import os
class MoosePlot(Qwt.QwtPlot):
"""Handler for plots in MOOSE gui"""
plot_index = 0
colors = [ Qt.red,
Qt.blue,
Qt.darkYellow,
Qt.green,
Qt.magenta,
Qt.darkCyan,
Qt.black,
Qt.cyan,
Qt.darkRed,
Qt.darkGreen,
Qt.yellow,
Qt.darkMagenta,
Qt.gray,
Qt.darkBlue,
Qt.lightGray ]
def __init__(self, *args):
Qwt.QwtPlot.__init__(self, *args)
self.plotNo = MoosePlot.plot_index
MoosePlot.plot_index = MoosePlot.plot_index + 1
self.setAcceptDrops(True)
self.curveIndex = 0
self.setCanvasBackground(Qt.white)
self.alignScales()
self.xmin = 100.1
self.curveTableMap = {} # curve -> moose table
self.tableCurveMap = {} # moose table -> curve
self.overlay = False
legend = Qwt.QwtLegend()
legend.setItemMode(Qwt.QwtLegend.CheckableItem)
self.insertLegend(legend, Qwt.QwtPlot.RightLegend)
# self.setTitle('Plot %d' % (self.plotNo))
mY = Qwt.QwtPlotMarker()
mY.setLabelAlignment(Qt.AlignRight | Qt.AlignTop)
mY.setLineStyle(Qwt.QwtPlotMarker.HLine)
mY.setYValue(0.0)
mY.attach(self)
xtitle = Qwt.QwtText('Time (s)')
ytitle = Qwt.QwtText('Value')
if self.parent():
xtitle.setFont(self.parent().font())
ytitle.setFont(self.parent().font())
else:
xtitle.setFont(QtGui.QFont("Helvetica", 18))
ytitle.setFont(QtGui.QFont("Helvetica", 18))
self.setAxisTitle(Qwt.QwtPlot.xBottom, xtitle)
self.setAxisTitle(Qwt.QwtPlot.yLeft, ytitle)
self.zoomer = Qwt.QwtPlotZoomer(Qwt.QwtPlot.xBottom,
Qwt.QwtPlot.yLeft,
Qwt.QwtPicker.DragSelection,
Qwt.QwtPicker.AlwaysOn,
self.canvas())
self.zoomer.setRubberBandPen(QtGui.QPen(Qt.black))
self.zoomer.setTrackerPen(QtGui.QPen(Qt.black))
self.mooseHandler = None
QtCore.QObject.connect(self, QtCore.SIGNAL("legendClicked(QwtPlotItem *)"), self.plotItemClicked)
def clearZoomStack(self):
"""Auto scale and clear the zoom stack
"""
self.setAxisAutoScale(Qwt.QwtPlot.xBottom)
self.setAxisAutoScale(Qwt.QwtPlot.yLeft)
self.replot()
self.zoomer.setZoomBase()
def reconfigureSelectedCurves(self, pen, symbol, style, attribute):
"""Reconfigure the selected curves to use pen for line and
symbol for marking the data points."""
print 'Reconfiguring selected plots'
for item in self.itemList():
widget = self.legend().find(item)
if isinstance(widget, Qwt.QwtLegendItem) and widget.isChecked():
item.setPen(pen)
item.setSymbol(symbol)
item.setStyle(style)
item.setCurveAttribute(attribute)
self.replot()
def fitSelectedPlots(self):
for item in self.itemList():
widget = self.legend().find(item)
if isinstance(widget, Qwt.QwtLegendItem) and widget.isChecked():
item.setCurveAttribute(Qwt.QwtPlotCurve.Fitted)
self.replot()
def showSelectedCurves(self, on):
for item in self.itemList():
widget = self.legend().find(item)
if isinstance(widget, Qwt.QwtLegendItem) and widget.isChecked():
item.setVisible(on)
self.replot()
def showAllCurves(self):
for item in self.itemList():
if isinstance(item, Qwt.QwtPlotCurve):
print item
item.setVisible(True)
self.replot()
def setLineStyleSelectedCurves(self, style=Qwt.QwtPlotCurve.NoCurve):
for item in self.itemList():
widget = self.legend().find(item)
if isinstance(widget, Qwt.QwtLegendItem) and widget.isChecked():
item.setStyle(style)
self.replot()
def setSymbol(self,
symbolStyle=None,
brushColor=None, brushStyle=None,
penColor=None, penWidth=None, penStyle=None,
symbolHeight=None, symbolWidth=None):
"""Set the symbol used in plotting.
This function gives flexible access to all the symbol properties of
the currently selected curves. If any parameter is left unspecified,
the existing value of that property of the symbol is maintained.
TODO: create a little plot-configuration widget to manipulate each
property of the selected curves visually. That should replace setSymbol and setLineStyle.
"""
for item in self.itemList():
widget = self.legend().find(item)
if isinstance(widget, Qwt.QwtLegendItem) and widget.isChecked():
oldSymbol = item.symbol()
if symbolStyle is None:
symbolStyle = oldSymbol.style()
if brushColor is None:
brushColor = oldSymbol.brush().color()
if brushStyle is None:
brushStyle = oldSymbol.brush().style()
if penColor is None:
penColor = oldSymbol.pen().color()
if penWidth is None:
penWidth = oldSymbol.pen().width()
if penStyle is None:
penStyle = oldSymbol.pen().style()
if symbolHeight is None:
symbolHeight = oldSymbol.size().height()
if symbolWidth is None:
symbolWidth = oldSymbol.size().width()
brush = QtGui.QBrush(brushColor, brushStyle)
pen = QtGui.QPen(penColor, penWidth, penStyle)
symbol = Qwt.QwtSymbol(symbolStyle, brush, pen, QtCore.QSize(symbolWidth, symbolHeight))
item.setSymbol(symbol)
self.replot()
def alignScales(self):
self.canvas().setFrameStyle(QtGui.QFrame.Box | QtGui.QFrame.Plain)
self.canvas().setLineWidth(1)
for ii in range(Qwt.QwtPlot.axisCnt):
scaleWidget = self.axisWidget(ii)
if scaleWidget:
scaleWidget.setMargin(0)
scaleDraw = self.axisScaleDraw(ii)
if scaleDraw:
scaleDraw.enableComponent(Qwt.QwtAbstractScaleDraw.Backbone, False)
def updatePlot(self, currentTime):
config.LOGGER.debug('update: %g' % (currentTime))
if currentTime > self.xmin:
self.xmin = currentTime
for curve, table in self.curveTableMap.items():
tabLen = len(table)
if tabLen == 0:
continue
ydata = array(table)
xdata = linspace(0, currentTime, tabLen)
#~ harsha: for Genesis the first element had some invalid number which, when plotted, gave a different result, so it is eliminated
#~ curve.setData(xdata, ydata)
curve.setData(xdata[2:tabLen:1],ydata[2:tabLen:1])
self.clearZoomStack()
def addTable(self, table, curve_name=None):
try:
curve = self.tableCurveMap[table]
except KeyError:
print 'Adding table ', table.path
if curve_name is None:
curve_name = table.name
curve = Qwt.QwtPlotCurve(curve_name)
curve.setPen(MoosePlot.colors[self.curveIndex])
self.curveIndex = (self.curveIndex + 1) % len(MoosePlot.colors)
self.curveTableMap[curve] = table
self.tableCurveMap[table] = curve
curve.attach(self)
if len(table) > 0:
yy = array(table)
xx = linspace(0.0, self.xmin, len(yy))
curve.setData(xx, yy)
def removeTable(self, table):
try:
curve = self.tableCurveMap.pop(table)
curve.detach()
self.curveTableMap.pop(curve)
except KeyError:
pass
def setOverlay(self, overlay):
self.overlay = overlay
def reset(self):
if not self.overlay:
self.updatePlot(0)
return
table_list = []
try:
while self.tableCurveMap:
(table, curve) = self.tableCurveMap.popitem()
self.curveTableMap.pop(curve)
table_list.append(table)
except KeyError:
pass
for table in table_list:
self.addTable(table)
def detachItems(self):
self.tableCurveMap.clear()
self.curveTableMap.clear()
Qwt.QwtPlotDict.detachItems(self)
def plotItemClicked(self,item):
if(item.isVisible):
''' Initially item.isVisible is true for every item '''
item.setVisible(not item.isVisible)
item.isVisible = False
item.setItemAttribute(Qwt.QwtPlotItem.AutoScale,False);
else:
'''If item.isVisible was made false (i.e. hidden), it is made true again here'''
item.setVisible(not item.isVisible)
item.isVisible = True
item.setItemAttribute(Qwt.QwtPlotItem.AutoScale,True);
self.replot()
def dragEnterEvent(self, event):
event.accept()
def dropEvent(self, event):
"""Overrides QWidget's method to accept drops of fields from
ObjectEditor.
"""
source = event.source()
# Should check that source is objectEditor - right now we don't have
# any other source for Plot, so don't bother.
model = source.model()
index = source.currentIndex()
if index.isValid():
# This is horrible code as I am peeping into the
# ObjectEditor's internals, but I don't have the time or
# patience to implement Drag objects for ObjectEditor.
fieldName = model.fields[index.row()]
fieldPath = model.mooseObject.path + '/' + fieldName
# Till now this file was decoupled from MooseHandler. Now
# I am going to break that for the sake of getting the job
# done quick and dirty.
# This is terrible code ... I would have been ashamed of
# this unless it was a brainless typing session to meet
# the immediate needs. I hope some day somebody with the
# time and skill to do "Software Engineering" will clean
# this mishmash of dependencies.
# -- Subha
#edit_chait
# table = self.mooseHandler.addFieldTable(fieldPath)
# tokens = fieldPath.split('/')
# if len(tokens) < 2:
# raise IndexError('Field path should have at least two components. Got %d' % (len(tokens)))
# self.addTable(table, tokens[-2] + '_' + tokens[-1])
self.emit(QtCore.SIGNAL('draggedAField(const QString&,const QString&)'),fieldPath,self.objectName())
# This also breaks the capability to move a plot from one
# plot window to another.
model.updatePlotField(index, self.objectName())
def savePlotData(self, directory=''):
for table in self.tableCurveMap.keys():
filename = os.path.join(directory, table.name + '.plot')
print 'Saving', filename
table.dumpFile(filename)
class MoosePlotWindow(QtGui.QMdiSubWindow):
"""This is to customize MDI sub window for our purpose.
In particular, we don't want anything to be deleted when the window is closed.
"""
def __init__(self, *args):
QtGui.QMdiSubWindow.__init__(self, *args)
def closeEvent(self, event):
self.emit(QtCore.SIGNAL('subWindowClosed()'))
self.hide()
import sys
if __name__ == '__main__':
app = QtGui.QApplication([])
testComp = moose.Compartment('c')
testTable = moose.Table('t')
testTable.stepMode = 3
testTable.connect('inputRequest', testComp, 'Vm')
testPulse = moose.PulseGen('p')
testPulse.firstDelay = 50e-3
testPulse.firstWidth = 40e-3
testPulse.firstLevel = 1e-9
testPulse.connect('outputSrc', testComp, 'injectMsg')
context = moose.PyMooseBase.getContext()
simdt = 1e-4/4
context.setClock(0, simdt)
context.setClock(1, simdt)
context.setClock(2, simdt)
stop = 1000 # stop every 1000 steps
simtime = 500e-3
context.reset()
plot = MoosePlot()
plot.addTable(testTable)
plot.show()
sys.exit(app.exec_())
#
# mooseplots.py ends here
| BhallaLab/moose-thalamocortical | pymoose/gui/qt/mooseplot.py | Python | lgpl-2.1 | 14,089 | ["MOOSE"] | 62e39ba18a63db411270803db5a4a35b7dd9627ebe3074224fee09dd8c029f63 |
# Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# plotview tool
oneline = "Plot multiple vectors from a data set"
docstr = """
p = plotview(d,pl) create GUI for viewing plots
d = Pizza.py object that contains vectors (log, vec)
pl = Pizza.py plotting object (gnu, matlab)
p.select(2) select one plot as current (1-N)
p.yes(3) toggle one plot's visibility
p.no(3)
only one plot is selected at a time
multiple plots can be visible at the same time
select is the same as clicking on the left-side radio-button
yes/no is the same as clicking on the right-side checkbox
p.x = "Time" which vector is X vector (1st vec by default)
p.file("pressure") filename prefix for saving a plot
p.save() save currently selected plot to file.eps
"""
# History
# 8/05, Matt Jones (BYU): original version
# ToDo list
# option to plot all N vectors against linear index?
# Variables
# source = source of vector data
# plot = plotting object
# nplots = # of plots (not including 1st vector)
# names = names of plots (from vector source)
# radiovar = index of clicked radio button (0 = none, 1-N)
# checkbuttons = list of check button objects
# checkvars = list of status of check buttons
# checkold = list of status of check buttons before click
# Imports and external programs
import sys, re, glob, time
from Tkinter import *
# Class definition
class plotview:
# --------------------------------------------------------------------
def __init__(self,source,plot):
self.source = source
self.plot = plot
# create GUI
from __main__ import tkroot
root = Toplevel(tkroot)
root.title('Pizza.py plotview tool')
self.frame1 = Frame(root)
self.frame2 = Frame(root)
self.frame3 = Frame(root)
Button(self.frame1,text="Print As:",command=self.save).pack(side=TOP)
self.entry = Entry(self.frame1,width=16)
self.entry.insert(0,"tmp")
self.entry.pack(side=TOP)
Label(self.frame2,text="Select").pack(side=LEFT)
Label(self.frame2,text = "Display").pack(side=RIGHT)
self.nplots = source.nvec
self.names = source.names
self.x = self.names[0]
self.radiovar = IntVar()
self.checkbuttons = []
self.checkvars = []
self.checkold = []
# for each vector (not including 1st)
# create a plot and title it
# create a line in GUI with selection and check button
for i in range(self.nplots):
self.plot.select(i+1)
self.plot.xtitle(self.x)
self.plot.ytitle(self.names[i])
self.plot.title(self.names[i])
b = BooleanVar()
b.set(0)
self.checkvars.append(b)
self.checkold.append(0)
line = Frame(self.frame3)
rtitle = "%d %s" % (i+1,self.names[i])
Radiobutton(line, text=rtitle, value=i+1, variable=self.radiovar,
command=self.radioselect).pack(side=LEFT)
cbutton = Checkbutton(line, variable=b, command=self.check)
cbutton.pack(side=RIGHT)
self.checkbuttons.append(cbutton)
line.pack(side=TOP,fill=X)
self.radiovar.set(0)
self.frame1.pack(side=TOP)
self.frame2.pack(side=TOP,fill=X)
self.frame3.pack(side=TOP,fill=X)
# --------------------------------------------------------------------
# set radio button and checkbox
def select(self,n):
self.plot.select(n)
self.radiovar.set(n)
self.yes(n)
# --------------------------------------------------------------------
# only invoke if currently unset
def yes(self,n):
if not self.checkvars[n-1].get(): self.checkbuttons[n-1].invoke()
# --------------------------------------------------------------------
# only invoke if currently set
def no(self,n):
if self.checkvars[n-1].get(): self.checkbuttons[n-1].invoke()
# --------------------------------------------------------------------
def file(self,newtext):
oldtext = self.entry.get()
self.entry.delete(0,len(oldtext))
self.entry.insert(0,newtext)
# --------------------------------------------------------------------
def save(self):
n = self.radiovar.get()
if n == 0: raise StandardError,"no plot selected"
name = self.entry.get()
self.plot.save(name)
# --------------------------------------------------------------------
# called when any radio selection button is clicked
def radioselect(self):
self.select(self.radiovar.get())
# --------------------------------------------------------------------
# called when any checkbox is clicked
# draws or hides plot
# loop is to find which checkbox changed status
# grab x,y data to plot out of source object
def check(self):
for i in range(self.nplots):
if int(self.checkvars[i].get()) != self.checkold[i]:
if self.checkvars[i].get():
self.radiovar.set(i+1)
self.plot.select(i+1)
self.plot.xtitle(self.x)
x,y = self.source.get(self.x,self.names[i])
self.plot.plot(x,y)
else:
if self.radiovar.get() == i+1: self.radiovar.set(0)
self.plot.hide(i+1)
self.checkold[i] = int(self.checkvars[i].get())
# --------------------------------------------------------------------
# called by lammps() tool to update all visible plots with new data
def refresh(self):
for i in range(self.nplots):
if self.checkvars[i].get():
self.plot.select(i+1)
self.plot.xtitle(self.x)
x,y = self.source.get(self.x,self.names[i])
self.plot.plot(x,y)
| sn-amber/mylpp | src/plotview.py | Python | gpl-2.0 | 5,880 | ["LAMMPS"] | 9f1684ac32f44ae7da1b5aed4d3bd15299c9d888c3be8a2664fe5ade73a07988 |
#!/usr/bin/env python3
# Version 1.1
# Author Alexis Blanchet-Cohen
# Date: 09/06/2014
import argparse
import glob
import os
import os.path
import pandas
import subprocess
import util
# Read the command line arguments.
parser = argparse.ArgumentParser(description="Generates GATK RealignerTargetCreator scripts.")
parser.add_argument("-s", "--scriptsDirectory", help="Scripts directory. DEFAULT=realignertargetcreator", default="realignertargetcreator")
parser.add_argument("-i", "--inputDirectory", help="Input directory with BAM files. DEFAULT=../results/bwa", default="../results/bwa")
parser.add_argument("-o", "--outputDirectory", help="Output directory with realigned BAM files. DEFAULT=../results/bwa", default="../results/bwa")
parser.add_argument("-q", "--submitJobsToQueue", help="Submit jobs to queue immediately.", choices=["yes", "no", "y", "n"], default="no")
args = parser.parse_args()
# If not in the main scripts directory, cd to the main scripts directory, if it exists.
util.cdMainScriptsDirectory()
# Process the command line arguments.
inputDirectory = os.path.abspath(args.inputDirectory)
scriptsDirectory = os.path.abspath(args.scriptsDirectory)
# Read configuration files
config = util.readConfigurationFiles()
header = config.getboolean("server", "PBS_header")
toolsFolder = config.get("server", "toolsFolder")
genome = config.get("project", "genome")
genomeFolder = config.get(genome, "genomeFolder")
genomeFile = config.get(genome, "genomeFile")
xmx = config.get("realignertargetcreator", "xmx")
# Get samples
samples = util.getsamples(lanes=True)
# Create scripts directory, if it does not exist yet, and cd to it.
if not os.path.exists(scriptsDirectory):
os.mkdir(scriptsDirectory)
os.chdir(scriptsDirectory)
# Write the script
for sample in samples:
scriptName = "realignertargetcreator_" + sample + "_.sh"
script = open(scriptName, "w")
if header:
util.writeHeader(script, config, "realignertargetcreator")
# Write script
script.write("java -Xmx" + xmx + " \\\n")
script.write("-jar " + os.path.join(toolsFolder, "GenomeAnalysisTK.jar") + " \\\n")
script.write("--analysis_type RealignerTargetCreator" + " \\\n")
script.write("--reference_sequence " + genomeFile + " \\\n")
script.write("--input_file " + os.path.join(inputDirectory, sample, sample + "_deduplicated.bam") + " \\\n")
script.write("--known " + os.path.join(genomeFolder, "1000G_phase1.indels.b37.vcf") + " \\\n")
script.write("--known " + os.path.join(genomeFolder, "Mills_and_1000G_gold_standard.indels.b37.vcf") + " \\\n")
script.write("--out target_intervals.list" + " \\\n")
script.write("&> " + scriptName + ".log")
script.close()
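# Each generated script ends up containing one GATK call roughly of this
# shape (placeholders in angle brackets come from the configuration files):
#   java -Xmx<xmx> \
#   -jar <toolsFolder>/GenomeAnalysisTK.jar \
#   --analysis_type RealignerTargetCreator \
#   --reference_sequence <genomeFile> \
#   --input_file ../results/bwa/<sample>/<sample>_deduplicated.bam \
#   --known <genomeFolder>/1000G_phase1.indels.b37.vcf \
#   --known <genomeFolder>/Mills_and_1000G_gold_standard.indels.b37.vcf \
#   --out target_intervals.list \
#   &> realignertargetcreator_<sample>_.sh.log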
if (args.submitJobsToQueue.lower() == "yes") | (args.submitJobsToQueue.lower() == "y"):
subprocess.call("submitJobs.py", shell=True)
| blancha/abcngspipelines | exomeseq/realignertargetcreator.py | Python | gpl-3.0 | 2,858 | ["BWA"] | 60317ccdb3cd49cc6b94fad1515a18c93137b8d02191cc86dbca6a50758b6587 |
"""
Inline-optimized Sheet classes
$Id$
"""
__version__='$Revision$'
import param
from topo.base.cf import MaskedCFIter
from topo.base.projection import NeighborhoodMask
from topo.misc.inlinec import inline,provide_unoptimized_equivalent,c_header
from topo.sheet.lissom import LISSOM
from topo.sheet.basic import compute_joint_norm_totals
def compute_joint_norm_totals_opt(projlist,active_units_mask):
"""
Compute norm_total for each CF in each projections from a
group to be normalized jointly. The same assumptions are
made as in the original function.
"""
# Assumes that all Projections in the list have the same r,c size
length = len(projlist)
assert length>=1
proj = projlist[0]
iterator = MaskedCFIter(proj,active_units_mask=active_units_mask)
num_cfs = len(proj.flatcfs)
active_units_mask = iterator.get_active_units_mask()
sheet_mask = iterator.get_sheet_mask()
code = c_header + """
npfloat *x = active_units_mask;
npfloat *m = sheet_mask;
for (int r=0; r<num_cfs; ++r) {
double load = *x++;
double msk = *m++;
if (msk!=0 && load != 0) {
double nt = 0;
for(int p=0; p<length; p++) {
PyObject *proj = PyList_GetItem(projlist,p);
PyObject *cfs = PyObject_GetAttrString(proj,"flatcfs");
PyObject *cf = PyList_GetItem(cfs,r);
PyObject *o = PyObject_GetAttrString(cf,"norm_total");
nt += PyFloat_AsDouble(o);
Py_DECREF(cfs);
Py_DECREF(o);
}
for(int p=0; p<length; p++) {
PyObject *proj = PyList_GetItem(projlist,p);
PyObject *cfs = PyObject_GetAttrString(proj,"flatcfs");
PyObject *cf = PyList_GetItem(cfs,r);
PyObject *total_obj = PyFloat_FromDouble(nt); //(new ref)
PyObject_SetAttrString(cf,"_norm_total",total_obj);
PyObject_SetAttrString(cf,"_has_norm_total",Py_True);
Py_DECREF(cfs);
Py_DECREF(total_obj);
}
}
}
"""
inline(code, ['projlist','active_units_mask','sheet_mask','num_cfs','length'],
local_dict=locals())
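# Rough pure-Python picture of what the inline C above does (sketch only;
# both masks are treated as flat arrays indexed by the CF number r):
#   for r in range(num_cfs):
#       if sheet_mask[r] != 0 and active_units_mask[r] != 0:
#           nt = sum(p.flatcfs[r].norm_total for p in projlist)
#           for p in projlist:
#               p.flatcfs[r]._norm_total = nt
#               p.flatcfs[r]._has_norm_total = True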
provide_unoptimized_equivalent("compute_joint_norm_totals_opt",
"compute_joint_norm_totals",locals())
# CEBALERT: not tested
class LISSOM_Opt(LISSOM):
"""
Faster but potentially unsafe optimized version of LISSOM.
Adds a NeighborhoodMask that skips computation for neurons
sufficiently distant from all those activated in the first few
steps of settling. This is safe only if activity bubbles reliably
shrink after the first few steps; otherwise the results will
differ from LISSOM.
Typically useful only for standard LISSOM simulations with
localized (e.g. Gaussian) inputs and that shrink the lateral
excitatory radius, which results in small patches of activity in
an otherwise inactive sheet.
Also overrides the function
JointNormalizingCFSheet.__compute_joint_norm_totals with
C-optimized code for LISSOM sheets.
"""
joint_norm_fn = param.Callable(default=compute_joint_norm_totals_opt)
def __init__(self,**params):
super(LISSOM_Opt,self).__init__(**params)
# CEBALERT: this wipes out any user-specified sheet mask.
self.mask = NeighborhoodMask_Opt(threshold = 0.00001,radius = 0.05,sheet = self)
provide_unoptimized_equivalent("LISSOM_Opt","LISSOM",locals())
class NeighborhoodMask_Opt(NeighborhoodMask):
def calculate(self):
rows,cols = self.data.shape
ignore1,matradius = self.sheet.sheet2matrixidx(self.radius,0)
ignore2,x = self.sheet.sheet2matrixidx(0,0)
matradius = int(abs(matradius -x))
thr = self.threshold
activity = self.sheet.activity
mask = self.data
code = c_header + """
#define min(x,y) (x<y?x:y)
#define max(x,y) (x>y?x:y)
npfloat *X = mask;
npfloat *A = activity;
for (int r=0; r<rows; ++r) {
for (int l=0; l<cols; ++l) {
int lbx = max(0,r-matradius);
int lby = max(0,l-matradius);
int hbx = min(r+matradius+1,rows);
int hby = min(l+matradius+1,cols);
*X = 0.0;
int breakFlag = 0;
for(int k=lbx;k<hbx;k++)
{
for(int l=lby;l<hby;l++)
{
npfloat *a = A+k*rows + l;
if(*a > thr)
{
*X = 1.0;
//JAALERT HACK. Want to jump out both nested loops!!!
breakFlag = 1;
break;
}
}
if(breakFlag)break;
}
X++;
}
}
"""
inline(code, ['thr','activity','matradius','mask','rows','cols'], local_dict=locals())
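# In numpy terms the C code above computes, roughly:
#   mask[r, c] = 1.0 if any activity value above `thr` exists in the square
#   window of half-width `matradius` centred on (r, c), else 0.0.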
provide_unoptimized_equivalent("NeighborhoodMask_Opt","NeighborhoodMask",locals())
| jesuscript/topo-mpi | topo/sheet/optimized.py | Python | bsd-3-clause | 5,510 | ["Gaussian"] | 72ca9cbdc8d2dddd72558d3c2c6f380cd33a57f61b06b607384785426d4b9054 |
########################################################################
# $HeadURL$
########################################################################
""" NotificationDB class is a front-end to the Notifications database
"""
__RCSID__ = "$Id$"
import time
import types
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.Mail import Mail
from DIRAC.ConfigurationSystem.Client.PathFinder import getDatabaseSection
from DIRAC.Core.Base.DB import DB
from DIRAC.Core.Utilities import DEncode
from DIRAC.Core.Security import CS
class NotificationDB( DB ):
def __init__( self, maxQueueSize = 10 ):
DB.__init__( self, 'NotificationDB', 'Framework/NotificationDB', maxQueueSize )
result = self.__initializeDB()
if not result[ 'OK' ]:
self.log.fatal( "Cannot initialize DB!", result[ 'Message' ] )
self.__alarmQueryFields = [ 'alarmid', 'author', 'creationtime', 'modtime', 'subject',
'status', 'priority', 'notifications', 'body', 'assignee', 'alarmkey' ]
self.__alarmLogFields = [ 'timestamp', 'author', 'comment', 'modifications' ]
self.__notificationQueryFields = ( 'id', 'user', 'seen', 'message', 'timestamp' )
self.__newAlarmMandatoryFields = [ 'author', 'subject', 'status', 'notifications', 'body', 'assignee', 'priority' ]
self.__updateAlarmIdentificationFields = [ 'id', 'alarmKey' ]
self.__updateAlarmMandatoryFields = [ 'author' ]
self.__updateAlarmAtLeastOneField = [ 'comment', 'modifications' ]
self.__updateAlarmModificableFields = [ 'status', 'assignee', 'priority' ]
self.__validAlarmStatus = [ 'Open', 'OnGoing', 'Closed', 'Testing' ]
self.__validAlarmNotifications = [ 'Web', 'Mail', 'SMS' ]
self.__validAlarmPriorities = [ 'Low', 'Medium', 'High', 'Extreme' ]
def __initializeDB( self ):
retVal = self._query( "show tables" )
if not retVal[ 'OK' ]:
return retVal
tablesInDB = [ t[0] for t in retVal[ 'Value' ] ]
tablesToCreate = {}
if 'ntf_Alarms' not in tablesInDB:
tablesToCreate[ 'ntf_Alarms' ] = { 'Fields' : { 'AlarmId' : 'INTEGER UNSIGNED AUTO_INCREMENT NOT NULL',
'AlarmKey' : 'VARCHAR(32) NOT NULL',
'Author' : 'VARCHAR(64) NOT NULL',
'CreationTime' : 'DATETIME NOT NULL',
'ModTime' : 'DATETIME NOT NULL',
'Subject' : 'VARCHAR(255) NOT NULL',
'Status' : 'VARCHAR(64) NOT NULL',
'Priority' : 'VARCHAR(32) NOT NULL',
'Body' : 'BLOB',
'Assignee' : 'VARCHAR(64) NOT NULL',
'Notifications' : 'VARCHAR(128) NOT NULL'
},
'PrimaryKey' : 'AlarmId',
'Indexes' : { 'Status' : [ 'Status' ],
'Assignee' : [ 'Assignee' ] }
}
if 'ntf_AssigneeGroups' not in tablesInDB:
tablesToCreate[ 'ntf_AssigneeGroups' ] = { 'Fields' : { 'AssigneeGroup' : 'VARCHAR(64) NOT NULL',
'User' : 'VARCHAR(64) NOT NULL',
},
'Indexes' : { 'ag' : [ 'AssigneeGroup' ] }
}
if 'ntf_AlarmLog' not in tablesInDB:
tablesToCreate[ 'ntf_AlarmLog' ] = { 'Fields' : { 'AlarmId' : 'INTEGER UNSIGNED NOT NULL',
'Timestamp' : 'DATETIME NOT NULL',
'Author' : 'VARCHAR(64) NOT NULL',
'Comment' : 'BLOB',
'Modifications' : 'VARCHAR(255)',
},
'Indexes' : { 'AlarmID' : [ 'AlarmId' ] }
}
if 'ntf_AlarmFollowers' not in tablesInDB:
tablesToCreate[ 'ntf_AlarmFollowers' ] = { 'Fields' : { 'AlarmId' : 'INTEGER UNSIGNED NOT NULL',
'User' : 'VARCHAR(64) NOT NULL',
'Mail' : 'TINYINT(1) DEFAULT 0',
'Notification' : 'TINYINT(1) DEFAULT 1',
'SMS' : 'TINYINT(1) DEFAULT 0',
},
'Indexes' : { 'AlarmID' : [ 'AlarmId' ] }
}
if 'ntf_Notifications' not in tablesInDB:
tablesToCreate[ 'ntf_Notifications' ] = { 'Fields' : { 'Id' : 'INTEGER UNSIGNED AUTO_INCREMENT NOT NULL',
'User' : 'VARCHAR(64) NOT NULL',
'Message' : 'BLOB NOT NULL',
'Seen' : 'TINYINT(1) NOT NULL DEFAULT 0',
'Expiration' : 'DATETIME',
'Timestamp' : 'DATETIME',
'DeferToMail' : 'TINYINT(1) NOT NULL DEFAULT 1',
},
'PrimaryKey' : 'Id',
}
if tablesToCreate:
result = self._createTables( tablesToCreate )
if result['OK'] and result['Value']:
self.log.info( "NotificationDB: created tables %s" % result['Value'] )
return result
return S_OK()
def __checkAlarmField( self, name, value ):
name = name.lower()
if name == 'status':
if value not in self.__validAlarmStatus:
return S_ERROR( "Status %s is invalid. Valid ones are: %s" % ( value, self.__validAlarmStatus ) )
elif name == 'priority':
if value not in self.__validAlarmPriorities:
return S_ERROR( "Type %s is invalid. Valid ones are: %s" % ( value, self.__validAlarmPriorities ) )
elif name == 'assignee':
result = self.getUserAsignees( value )
if not result[ 'OK' ]:
return result
if not result[ 'Value' ]:
return S_ERROR( "%s is not a known assignee" % value )
return result
return S_OK()
def newAlarm( self, alarmDef ):
""" Create a new alarm record
"""
followers = ""
for field in self.__newAlarmMandatoryFields:
if field not in alarmDef:
return S_ERROR( "Oops. Missing %s" % field )
result = self.__checkAlarmField( field, alarmDef[ field ] )
if not result[ 'OK' ]:
return result
if field == 'assignee':
followers = result[ 'Value' ]
author = alarmDef[ 'author' ]
if author not in followers:
followers.append( author )
sqlFieldsName = []
sqlFieldsValue = []
for field in self.__newAlarmMandatoryFields:
if field == 'notifications':
notifications = {}
for type in self.__validAlarmNotifications:
if type in alarmDef[ field ]:
notifications[ type ] = 1
else:
notifications[ type ] = 0
val = DEncode.encode( notifications )
else:
val = alarmDef[ field ]
#Add to the list of fields to add
sqlFieldsName.append( field )
result = self._escapeString( val )
if result['OK']:
sqlFieldsValue.append( result['Value'] )
else:
return S_ERROR( 'Failed to escape value %s' % val )
sqlFieldsName.extend( [ 'CreationTime', 'ModTime' ] )
sqlFieldsValue.extend( [ 'UTC_TIMESTAMP()', 'UTC_TIMESTAMP()' ] )
#Get the defined alarmkey and generate a random one if not defined
if 'alarmKey' in alarmDef:
result = self._escapeString( alarmDef[ 'alarmKey' ] )
if result['OK']:
alarmKey = result['Value']
else:
        return S_ERROR( 'Failed to escape value %s for key AlarmKey' % alarmDef[ 'alarmKey' ] )
gLogger.info( "Checking there are no alarms with key %s" % alarmKey )
result = self._query( "SELECT AlarmId FROM `ntf_Alarms` WHERE AlarmKey=%s" % alarmKey )
if not result[ 'OK' ]:
return result
if result[ 'Value' ]:
return S_ERROR( "Oops, alarm with id %s has the same alarm key!" % result[ 'Value' ][0][0] )
else:
alarmKey = str( time.time() )[-31:]
sqlFieldsName.append( 'AlarmKey' )
sqlFieldsValue.append( alarmKey )
sqlInsert = "INSERT INTO `ntf_Alarms` (%s) VALUES (%s)" % ( ",".join( sqlFieldsName ),
",".join( sqlFieldsValue ) )
result = self._update( sqlInsert )
if not result['OK']:
return result
alarmId = result[ 'lastRowId' ]
for follower in followers:
result = self.modifyFollowerForAlarm( alarmId, follower, notifications )
if not result[ 'OK' ]:
varMsg = "\nFollower: %s\nAlarm: %s\nError: %s" % ( follower, alarmId, result['Message'] )
self.log.error( "Couldn't set follower for alarm", varMsg )
self.__notifyAlarm( alarmId )
return S_OK( alarmId )
def deleteAlarmsByAlarmKey( self, alarmKeyList ):
alarmsIdList = []
for alarmKey in alarmKeyList:
result = self.__getAlarmIdFromKey( alarmKey )
if not result[ 'OK' ]:
return result
alarmId = result[ 'Value' ]
alarmsIdList.append( alarmId )
self.log.info( "Trying to delete alarms with:\n alamKey %s\n alarmId %s" % ( alarmKeyList, alarmsIdList ) )
return self.deleteAlarmsByAlarmId( alarmsIdList )
def deleteAlarmsByAlarmId( self, alarmIdList ):
self.log.info( "Trying to delete alarms with ids %s" % alarmIdList )
try:
alarmId = int( alarmIdList )
alarmIdList = [ alarmId ]
except:
pass
try:
alarmIdList = [ int( alarmId ) for alarmId in alarmIdList ]
except:
self.log.error( "At least one alarmId is not a number", str( alarmIdList ) )
return S_ERROR( "At least one alarmId is not a number: %s" % str( alarmIdList ) )
tablesToCheck = ( "ntf_AlarmLog", "ntf_AlarmFollowers", "ntf_Alarms" )
alamsSQLList = ",".join( [ "%d" % alarmId for alarmId in alarmIdList ] )
for tableName in tablesToCheck:
delSql = "DELETE FROM `%s` WHERE AlarmId in ( %s )" % ( tableName, alamsSQLList )
result = self._update( delSql )
if not result[ 'OK' ]:
self.log.error( "Could not delete alarm", "from table %s: %s" % ( tableName, result[ 'Message' ] ) )
return S_OK()
def __processUpdateAlarmModifications( self, modifications ):
if type( modifications ) != types.DictType:
return S_ERROR( "Modifications must be a dictionary" )
updateFields = []
followers = []
for field in modifications:
if field not in self.__updateAlarmModificableFields:
return S_ERROR( "%s is not a valid modificable field" % field )
value = modifications[ field ]
result = self.__checkAlarmField( field , value )
if not result[ 'OK' ]:
return result
if field == 'assignee':
followers = result[ 'Value' ]
result = self._escapeString( modifications[ field ] )
if not result[ 'OK' ]:
return result
updateFields.append( "%s=%s" % ( field, result[ 'Value' ] ) )
return S_OK( ( ", ".join( updateFields ), DEncode.encode( modifications ), followers ) )
def __getAlarmIdFromKey( self, alarmKey ):
result = self._escapeString( alarmKey )
if not result[ 'OK' ]:
return S_ERROR( "Cannot escape alarmKey %s" % alarmKey )
alarmKey = result[ 'Value' ]
sqlQuery = "SELECT AlarmId FROM `ntf_Alarms` WHERE AlarmKey=%s" % alarmKey
result = self._query( sqlQuery )
if result[ 'OK' ]:
result[ 'Value' ] = result[ 'Value' ][0][0]
return result
def updateAlarm( self, updateReq ):
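    # Editorial sketch of an update request (example values are hypothetical;
    # the accepted keys come from the __updateAlarm* lists defined in __init__):
    #   updateReq = { 'alarmKey' : 'disk-full-storage01', 'author' : 'jdoe',
    #                 'comment' : 'Cleaned up /tmp',
    #                 'modifications' : { 'status' : 'Closed' } }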
#Discover alarm identification
idOK = False
for field in self.__updateAlarmIdentificationFields:
if field in updateReq:
idOK = True
if not idOK:
return S_ERROR( "Need at least one field to identify which alarm to update! %s" % self.__updateAlarmIdentificationFields )
if 'alarmKey' in updateReq:
alarmKey = updateReq[ 'alarmKey' ]
result = self.__getAlarmIdFromKey( alarmKey )
if not result[ 'OK' ]:
self.log.error( "Could not get alarm id for key", " %s: %s" % ( alarmKey, result[ 'Value' ] ) )
return result
updateReq[ 'id' ] = result[ 'Value' ]
self.log.info( "Retrieving alarm key %s maps to id %s" % ( alarmKey, updateReq[ 'id' ] ) )
#Check fields
for field in self.__updateAlarmMandatoryFields:
if field not in updateReq:
return S_ERROR( "Oops. Missing %s" % field )
validReq = False
for field in self.__updateAlarmAtLeastOneField:
if field in updateReq:
validReq = True
if not validReq:
return S_OK( "Requirement needs at least one of %s" % " ".join( self.__updateAlarmAtLeastOneField ) )
author = updateReq[ 'author' ]
followers = [ author ]
if author not in CS.getAllUsers():
return S_ERROR( "%s is not a known user" % author )
result = self._escapeString( author )
if not result[ 'OK' ]:
return result
author = result[ 'Value' ]
try:
alarmId = int( updateReq[ 'id' ] )
except:
return S_ERROR( "Oops, Alarm id is not valid! (bad boy...)" )
result = self._query( "SELECT AlarmId FROM `ntf_Alarms` WHERE AlarmId=%d" % alarmId )
if not result[ 'OK' ]:
return result
if not result[ 'Value' ]:
return S_ERROR( "Alarm %s does not exist!" % alarmId )
sqlFields = [ 'AlarmId', 'Author', 'Timestamp' ]
sqlValues = [ "%d" % alarmId, author, 'UTC_TIMESTAMP()' ]
rawComment = ""
if 'comment' in updateReq:
rawComment = updateReq[ 'comment' ]
result = self._escapeString( rawComment )
if not result[ 'OK' ]:
return result
sqlFields.append( "Comment" )
sqlValues.append( result[ 'Value' ] )
modifications = False
if 'modifications' in updateReq:
modifications = updateReq[ 'modifications' ]
result = self.__processUpdateAlarmModifications( modifications )
if not result[ 'OK' ]:
return result
alarmModsSQL, encodedMods, newFollowers = result[ 'Value' ]
sqlFields.append( "Modifications" )
result = self._escapeString( encodedMods )
if not result[ 'OK' ]:
return result
sqlValues.append( result[ 'Value' ] )
if newFollowers:
followers.extend( newFollowers )
logSQL = "INSERT INTO `ntf_AlarmLog` (%s) VALUES (%s)" % ( ",".join( sqlFields ), ",".join( sqlValues ) )
result = self._update( logSQL )
if not result[ 'OK' ]:
return result
modSQL = "ModTime=UTC_TIMESTAMP()"
if modifications:
modSQL = "%s, %s" % ( modSQL, alarmModsSQL )
updateSQL = "UPDATE `ntf_Alarms` SET %s WHERE AlarmId=%d" % ( modSQL, alarmId )
result = self._update( updateSQL )
if not result[ 'OK' ]:
return result
#Get notifications config
sqlQuery = "SELECT Notifications FROM `ntf_Alarms` WHERE AlarmId=%s" % alarmId
result = self._query( sqlQuery )
if not result[ 'OK' ] or not result[ 'Value' ]:
self.log.error( "Could not retrieve default notifications for alarm %s" % alarmId )
return S_OK( alarmId )
notificationsDict = DEncode.decode( result[ 'Value' ][0][0] )[0]
for v in self.__validAlarmNotifications:
if v not in notificationsDict:
notificationsDict[ v ] = 0
for follower in followers:
result = self.modifyFollowerForAlarm( alarmId, follower, notificationsDict, overwrite = False )
if not result[ 'OK' ]:
varMsg = "\nFollower: %s\nAlarm: %s\nError: %s" % ( follower, alarmId, result['Message'] )
self.log.error( "Couldn't set follower for alarm", varMsg )
return self.__notifyAlarm( alarmId )
def __notifyAlarm( self, alarmId ):
result = self.getSubscribersForAlarm( alarmId )
if not result[ 'OK' ]:
return result
subscribers = result[ 'Value' ]
needLongText = False
if subscribers[ 'mail' ]:
needLongText = True
result = self.getAlarmInfo( alarmId )
if not result[ 'OK' ]:
return result
alarmInfo = result[ 'Value' ]
result = self.getAlarmLog( alarmId )
if not result[ 'OK' ]:
return result
alarmLog = result[ 'Value' ]
if subscribers[ 'notification' ]:
msg = self.__generateAlarmInfoMessage( alarmInfo )
logMsg = self.__generateAlarmLogMessage( alarmLog, True )
if logMsg:
msg = "%s\n\n%s\nLast modification:\n%s" % ( msg, "*"*30, logMsg )
for user in subscribers[ 'notification' ]:
self.addNotificationForUser( user, msg, 86400, deferToMail = True )
if subscribers[ 'mail' ]:
msg = self.__generateAlarmInfoMessage( alarmInfo )
logMsg = self.__generateAlarmLogMessage( alarmLog )
if logMsg:
msg = "%s\n\n%s\nAlarm Log:\n%s" % ( msg, "*"*30, logMsg )
subject = "Update on alarm %s" % alarmId
else:
subject = "New alarm %s" % alarmId
for user in subscribers[ 'mail' ]:
self.__sendMailToUser( user, subject, msg )
if subscribers[ 'sms' ]:
#TODO
pass
return S_OK()
def __generateAlarmLogMessage( self, alarmLog, showOnlyLast = False ):
if len( alarmLog[ 'Records' ] ) == 0:
return ""
records = alarmLog[ 'Records' ]
if showOnlyLast:
logToShow = [-1]
else:
logToShow = range( len( records ) - 1, -1, -1 )
finalMessage = []
for id in logToShow:
rec = records[ id ]
data = {}
for i in range( len( alarmLog[ 'ParameterNames' ] ) ):
if rec[i]:
data[ alarmLog[ 'ParameterNames' ][i] ] = rec[i]
#[ 'timestamp', 'author', 'comment', 'modifications' ]
msg = [ " Entry by : %s" % data[ 'author' ] ]
msg.append( " On : %s" % data[ 'timestamp' ].strftime( "%Y/%m/%d %H:%M:%S" ) )
if 'modifications' in data:
mods = data[ 'modifications' ]
keys = mods.keys()
keys.sort()
        msg.append( "  Modifications:" )
for key in keys:
msg.append( " %s -> %s" % ( key, mods[ key ] ) )
if 'comment' in data:
msg.append( " Comment:\n\n%s" % data[ 'comment' ] )
finalMessage.append( "\n".join( msg ) )
return "\n\n===============\n".join( finalMessage )
def __generateAlarmInfoMessage( self, alarmInfo ):
#[ 'alarmid', 'author', 'creationtime', 'modtime', 'subject', 'status', 'type', 'body', 'assignee' ]
msg = " Alarm %6d\n" % alarmInfo[ 'alarmid' ]
msg += " Author : %s\n" % alarmInfo[ 'author' ]
msg += " Subject : %s\n" % alarmInfo[ 'subject' ]
msg += " Status : %s\n" % alarmInfo[ 'status' ]
msg += " Priority : %s\n" % alarmInfo[ 'priority' ]
msg += " Assignee : %s\n" % alarmInfo[ 'assignee' ]
msg += " Creation date : %s UTC\n" % alarmInfo[ 'creationtime' ].strftime( "%Y/%m/%d %H:%M:%S" )
msg += " Last modificaiton : %s UTC\n" % alarmInfo[ 'modtime' ].strftime( "%Y/%m/%d %H:%M:%S" )
msg += " Body:\n\n%s" % alarmInfo[ 'body' ]
return msg
def __sendMailToUser( self, user, subject, message ):
address = gConfig.getValue( "/Registry/Users/%s/Email" % user, "" )
if not address:
self.log.error( "User does not have an email registered", user )
return S_ERROR( "User %s does not have an email registered" % user )
self.log.info( "Sending mail (%s) to user %s at %s" % ( subject, user, address ) )
m = Mail()
m._subject = "[DIRAC] %s" % subject
m._message = message
m._mailAddress = address
result = m._send()
if not result['OK']:
gLogger.warn( 'Could not send mail with the following message:\n%s' % result['Message'] )
return result
def getAlarms( self, condDict = {}, sortList = False, start = 0, limit = 0, modifiedAfter = False ):
condSQL = []
for field in self.__alarmQueryFields:
if field in condDict:
fieldValues = []
rawValue = condDict[ field ]
if field == 'assignee':
expandedValue = []
for user in rawValue:
result = self.getAssigneeGroupsForUser( user )
if not result[ 'OK' ]:
return result
for ag in result[ 'Value' ]:
if ag not in expandedValue:
expandedValue.append( ag )
rawValue = expandedValue
for value in rawValue:
result = self._escapeString( value )
if not result[ 'OK' ]:
return result
fieldValues.append( result[ 'Value' ] )
condSQL.append( "%s in ( %s )" % ( field, ",".join( fieldValues ) ) )
selSQL = "SELECT %s FROM `ntf_Alarms`" % ",".join( self.__alarmQueryFields )
if modifiedAfter:
condSQL.append( "ModTime >= %s" % modifiedAfter.strftime( "%Y-%m-%d %H:%M:%S" ) )
if condSQL:
selSQL = "%s WHERE %s" % ( selSQL, " AND ".join( condSQL ) )
if sortList:
selSQL += " ORDER BY %s" % ", ".join( [ "%s %s" % ( sort[0], sort[1] ) for sort in sortList ] )
if limit:
selSQL += " LIMIT %d,%d" % ( start, limit )
result = self._query( selSQL )
if not result['OK']:
return result
resultDict = {}
resultDict['ParameterNames'] = self.__alarmQueryFields
resultDict['Records'] = [ list( v ) for v in result['Value'] ]
return S_OK( resultDict )
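  # Illustrative query (editorial example, not in the original file):
  #   result = notificationDB.getAlarms( { 'status' : [ 'Open' ] },
  #                                      sortList = [ ( 'ModTime', 'DESC' ) ],
  #                                      limit = 10 )
  #   if result[ 'OK' ]: rows = result[ 'Value' ][ 'Records' ]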
def getAlarmInfo( self, alarmId ):
result = self.getAlarms( { 'alarmId' : alarmId } )
if not result[ 'OK' ]:
return result
alarmInfo = {}
data = result[ 'Value' ]
if len( data[ 'Records' ] ) == 0:
return S_OK( {} )
for i in range( len( data[ 'ParameterNames' ] ) ):
alarmInfo[ data[ 'ParameterNames' ][i] ] = data[ 'Records' ][0][i]
return S_OK( alarmInfo )
def getAlarmLog( self, alarmId ):
try:
alarmId = int( alarmId )
except:
return S_ERROR( "Alarm id must be a non decimal number" )
sqlSel = "SELECT %s FROM `ntf_AlarmLog` WHERE AlarmId=%d ORDER BY Timestamp ASC" % ( ",".join( self.__alarmLogFields ),
alarmId )
result = self._query( sqlSel )
if not result[ 'OK' ]:
return result
decodedRows = []
for row in result[ 'Value' ]:
      decodedRows.append( list( row ) )
      if not row[3]:
        continue
dec = DEncode.decode( row[ 3 ] )
decodedRows[-1][3] = dec[0]
resultDict = {}
resultDict['ParameterNames'] = self.__alarmLogFields
resultDict['Records'] = decodedRows
return S_OK( resultDict )
###
# Followers management
###
def modifyFollowerForAlarm( self, alarmId, user, notificationsDict, overwrite = True ):
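    # Editorial note: notificationsDict is expected to map each entry of
    # __validAlarmNotifications to 0/1, e.g. { 'Web' : 1, 'Mail' : 1, 'SMS' : 0 },
    # mirroring the dictionary built in newAlarm() above.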
rawUser = user
if rawUser not in CS.getAllUsers():
return S_OK()
result = self._escapeString( user )
if not result[ 'OK' ]:
return result
user = result[ 'Value' ]
subscriber = False
for k in notificationsDict:
if notificationsDict[ k ]:
subscriber = True
break
selSQL = "SELECT Notification, Mail, SMS FROM `ntf_AlarmFollowers` WHERE AlarmId=%d AND User=%s" % ( alarmId, user )
result = self._query( selSQL )
if not result[ 'OK' ]:
return result
if not result[ 'Value' ]:
if not subscriber:
return S_OK()
sqlValues = [ "%d" % alarmId, user ]
for k in self.__validAlarmNotifications:
if notificationsDict[ k ]:
sqlValues.append( "1" )
else:
sqlValues.append( "0" )
inSQL = "INSERT INTO `ntf_AlarmFollowers` ( AlarmId, User, Notification, Mail, SMS ) VALUES (%s)" % ",".join( sqlValues )
return self._update( inSQL )
sqlCond = "AlarmId=%d AND User=%s" % ( alarmId, user )
#Need to delete
if not subscriber:
return self._update( "DELETE FROM `ntf_AlarmFollowers` WHERE %s" % sqlCond )
if not overwrite:
return S_OK()
#Need to update
modSQL = []
for k in self.__validAlarmNotifications:
if notificationsDict[ k ]:
modSQL.append( "%s=1" % k )
else:
modSQL.append( "%s=0" % k )
return self._update( "UPDATE `ntf_AlarmFollowers` SET %s WHERE %s" % ( modSQL, sqlCond ) )
def getSubscribersForAlarm( self, alarmId ):
selSQL = "SELECT User, Mail, Notification, SMS FROM `ntf_AlarmFollowers` WHERE AlarmId=%d" % alarmId
result = self._query( selSQL )
if not result[ 'OK' ]:
return result
fw = result[ 'Value' ]
followWays = { 'mail' : [], 'notification' : [], 'sms' : [] }
followers = []
for user, mail, Notification, SMS in fw:
if user in followers:
continue
followers.append( user )
if mail:
followWays[ 'mail' ].append( user )
if Notification:
followWays[ 'notification' ].append( user )
if SMS:
followWays[ 'sms' ].append( user )
return S_OK( followWays )
###
# Assignee groups management
###
def getUserAsignees( self, assignee ):
#Check if it is a user
if assignee in CS.getAllUsers():
return S_OK( [ assignee ] )
result = self._escapeString( assignee )
if not result[ 'OK' ]:
return result
escAG = result[ 'Value' ]
sqlSel = "SELECT User FROM `ntf_AssigneeGroups` WHERE AssigneeGroup = %s" % escAG
result = self._query( sqlSel )
if not result[ 'OK' ]:
return result
users = [ row[0] for row in result[ 'Value' ] ]
if not users:
return S_OK( [] )
return S_OK( users )
def setAssigneeGroup( self, groupName, usersList ):
validUsers = CS.getAllUsers()
result = self._escapeString( groupName )
if not result[ 'OK' ]:
return result
escGroup = result[ 'Value' ]
sqlSel = "SELECT User FROM `ntf_AssigneeGroups` WHERE AssigneeGroup = %s" % escGroup
result = self._query( sqlSel )
if not result[ 'OK' ]:
return result
currentUsers = [ row[0] for row in result[ 'Value' ] ]
usersToDelete = []
usersToAdd = []
finalUsersInGroup = len( currentUsers )
for user in currentUsers:
if user not in usersList:
result = self._escapeString( user )
if not result[ 'OK' ]:
return result
usersToDelete.append( result[ 'Value' ] )
finalUsersInGroup -= 1
for user in usersList:
if user not in validUsers:
continue
if user not in currentUsers:
result = self._escapeString( user )
if not result[ 'OK' ]:
return result
usersToAdd.append( "( %s, %s )" % ( escGroup, result[ 'Value' ] ) )
finalUsersInGroup += 1
if not finalUsersInGroup:
return S_ERROR( "Group must have at least one user!" )
#Delete old users
if usersToDelete:
sqlDel = "DELETE FROM `ntf_AssigneeGroups` WHERE User in ( %s )" % ",".join( usersToDelete )
result = self._update( sqlDel )
if not result[ 'OK' ]:
return result
#Add new users
if usersToAdd:
sqlInsert = "INSERT INTO `ntf_AssigneeGroups` ( AssigneeGroup, User ) VALUES %s" % ",".join( usersToAdd )
result = self._update( sqlInsert )
if not result[ 'OK' ]:
return result
return S_OK()
def deleteAssigneeGroup( self, groupName ):
result = self._escapeString( groupName )
if not result[ 'OK' ]:
return result
escGroup = result[ 'Value' ]
sqlSel = "SELECT AlarmId FROM `ntf_Alarms` WHERE Assignee=%s" % escGroup
result = self._query( sqlSel )
if not result[ 'OK' ]:
return result
if result[ 'Value' ]:
alarmIds = [ row[0] for row in result[ 'Value' ] ]
return S_ERROR( "There are %s alarms assigned to this group" % len( alarmIds ) )
sqlDel = "DELETE FROM `ntf_AssigneeGroups` WHERE AssigneeGroup=%s" % escGroup
return self._update( sqlDel )
def getAssigneeGroups( self ):
result = self._query( "SELECT AssigneeGroup, User from `ntf_AssigneeGroups` ORDER BY User" )
if not result[ 'OK' ]:
return result
agDict = {}
for row in result[ 'Value' ]:
ag = row[0]
user = row[1]
if ag not in agDict:
agDict[ ag ] = []
agDict[ ag ].append( user )
return S_OK( agDict )
def getAssigneeGroupsForUser( self, user ):
if user not in CS.getAllUsers():
return S_ERROR( "%s is an unknown user" % user )
result = self._escapeString( user )
if not result[ 'OK' ]:
return result
user = result[ 'Value' ]
result = self._query( "SELECT AssigneeGroup from `ntf_AssigneeGroups` WHERE User=%s" % user )
if not result[ 'OK' ]:
return result
return S_OK( [ row[0] for row in result[ 'Value' ] ] )
###
# Notifications
###
def addNotificationForUser( self, user, message, lifetime = 0, deferToMail = 1 ):
if user not in CS.getAllUsers():
return S_ERROR( "%s is an unknown user" % user )
self.log.info( "Adding a notification for user %s (msg is %s chars)" % ( user, len( message ) ) )
result = self._escapeString( user )
if not result[ 'OK' ]:
return result
user = result[ 'Value' ]
result = self._escapeString( message )
if not result[ 'OK' ]:
return result
message = result[ 'Value' ]
sqlFields = [ 'User', 'Message', 'Timestamp' ]
sqlValues = [ user, message, 'UTC_TIMESTAMP()' ]
if not deferToMail:
sqlFields.append( "DeferToMail" )
sqlValues.append( "0" )
if lifetime:
sqlFields.append( "Expiration" )
sqlValues.append( "TIMESTAMPADD( SECOND, %d, UTC_TIMESTAMP() )" % int( lifetime ) )
sqlInsert = "INSERT INTO `ntf_Notifications` (%s) VALUES (%s) " % ( ",".join( sqlFields ),
",".join( sqlValues ) )
result = self._update( sqlInsert )
if not result[ 'OK' ]:
return result
return S_OK( result[ 'lastRowId' ] )
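  # Illustrative call (editorial example, not in the original file):
  #   result = notificationDB.addNotificationForUser( 'jdoe', 'Job 1234 finished',
  #                                                   lifetime = 86400, deferToMail = 1 )
  #   if result[ 'OK' ]: notificationId = result[ 'Value' ]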
def removeNotificationsForUser( self, user, msgIds = False ):
if user not in CS.getAllUsers():
return S_ERROR( "%s is an unknown user" % user )
result = self._escapeString( user )
if not result[ 'OK' ]:
return result
user = result[ 'Value' ]
delSQL = "DELETE FROM `ntf_Notifications` WHERE User=%s" % user
escapedIDs = []
if msgIds:
for id in msgIds:
result = self._escapeString( str( id ) )
if not result[ 'OK' ]:
return result
escapedIDs.append( result[ 'Value' ] )
delSQL = "%s AND Id in ( %s ) " % ( delSQL, ",".join( escapedIDs ) )
return self._update( delSQL )
def markNotificationsSeen( self, user, seen = True, msgIds = False ):
if user not in CS.getAllUsers():
return S_ERROR( "%s is an unknown user" % user )
result = self._escapeString( user )
if not result[ 'OK' ]:
return result
user = result[ 'Value' ]
if seen:
seen = 1
else:
seen = 0
updateSQL = "UPDATE `ntf_Notifications` SET Seen=%d WHERE User=%s" % ( seen, user )
escapedIDs = []
if msgIds:
for id in msgIds:
result = self._escapeString( str( id ) )
if not result[ 'OK' ]:
return result
escapedIDs.append( result[ 'Value' ] )
updateSQL = "%s AND Id in ( %s ) " % ( updateSQL, ",".join( escapedIDs ) )
return self._update( updateSQL )
def getNotifications( self, condDict = {}, sortList = False, start = 0, limit = 0 ):
condSQL = []
for field in self.__notificationQueryFields:
if field in condDict:
fieldValues = []
for value in condDict[ field ]:
result = self._escapeString( value )
if not result[ 'OK' ]:
return result
fieldValues.append( result[ 'Value' ] )
condSQL.append( "%s in ( %s )" % ( field, ",".join( fieldValues ) ) )
selSQL = "SELECT %s FROM `ntf_Notifications`" % ",".join( self.__notificationQueryFields )
if condSQL:
selSQL = "%s WHERE %s" % ( selSQL, " AND ".join( condSQL ) )
if sortList:
selSQL += " ORDER BY %s" % ", ".join( [ "%s %s" % ( sort[0], sort[1] ) for sort in sortList ] )
else:
selSQL += " ORDER BY Id DESC"
if limit:
selSQL += " LIMIT %d,%d" % ( start, limit )
result = self._query( selSQL )
if not result['OK']:
return result
resultDict = {}
resultDict['ParameterNames'] = self.__notificationQueryFields
resultDict['Records'] = [ list( v ) for v in result['Value'] ]
return S_OK( resultDict )
def purgeExpiredNotifications( self ):
self.log.info( "Purging expired notifications" )
delConds = [ '(Seen=1 OR DeferToMail=0)', '(TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), Expiration ) < 0 )' ]
delSQL = "DELETE FROM `ntf_Notifications` WHERE %s" % " AND ".join( delConds )
result = self._update( delSQL )
if not result[ 'OK' ]:
return result
self.log.info( "Purged %s notifications" % result[ 'Value' ] )
deferCond = [ 'Seen=0', 'DeferToMail=1', 'TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), Expiration ) < 0' ]
selSQL = "SELECT Id, User, Message FROM `ntf_Notifications` WHERE %s" % " AND ".join( deferCond )
result = self._query( selSQL )
if not result[ 'OK' ]:
return result
messages = result[ 'Value' ]
if not messages:
return S_OK()
ids = []
for msg in messages:
      self.__sendMailToUser( msg[1], 'Notification deferred to mail', msg[2] )
ids.append( str( msg[0] ) )
self.log.info( "Defered %s notifications" % len( ids ) )
return self._update( "DELTE FROM `ntf_Notifications` WHERE Id in (%s)" % ",".join( ids ) )
|
sposs/DIRAC
|
FrameworkSystem/DB/NotificationDB.py
|
Python
|
gpl-3.0
| 34,490
|
[
"DIRAC"
] |
1f6d6a5c74f510f87e9400bda3f99139b6602bf4c587a50f7bea0bffa488ecae
|
r"""
Biot problem - deformable porous medium with the no-penetration boundary
condition on a boundary region enforced using Lagrange multipliers.
The non-penetration condition is enforced weakly using the Lagrange
multiplier :math:`\lambda`. There is also a rigid body movement
constraint imposed on the :math:`\Gamma_{outlet}` region using the
linear combination boundary conditions.
Find :math:`\ul{u}`, :math:`p` and :math:`\lambda` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
- \int_{\Omega} p\ \alpha_{ij} e_{ij}(\ul{v})
+ \int_{\Gamma_{walls}} \lambda \ul{n} \cdot \ul{v}
= 0
\;, \quad \forall \ul{v} \;,
\int_{\Omega} q\ \alpha_{ij} e_{ij}(\ul{u})
+ \int_{\Omega} K_{ij} \nabla_i q \nabla_j p
= 0
\;, \quad \forall q \;,
\int_{\Gamma_{walls}} \hat\lambda \ul{n} \cdot \ul{u}
= 0
\;, \quad \forall \hat\lambda \;,
\ul{u} \cdot \ul{n} = 0 \mbox{ on } \Gamma_{walls} \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
"""
from biot_npbc import cinc_simple, define_regions, get_pars
def define():
from sfepy import data_dir
filename = data_dir + '/meshes/3d/cylinder.mesh'
output_dir = 'output'
return define_input(filename, output_dir)
def post_process(out, pb, state, extend=False):
from sfepy.base.base import Struct
dvel = pb.evaluate('ev_diffusion_velocity.2.Omega( m.K, p )',
mode='el_avg')
out['dvel'] = Struct(name='output_data', var_name='p',
mode='cell', data=dvel, dofs=None)
stress = pb.evaluate('ev_cauchy_stress.2.Omega( m.D, u )',
mode='el_avg')
out['cauchy_stress'] = Struct(name='output_data', var_name='u',
mode='cell', data=stress, dofs=None)
return out
def define_input(filename, output_dir):
filename_mesh = filename
options = {
'output_dir' : output_dir,
'output_format' : 'vtk',
'post_process_hook' : 'post_process',
## 'file_per_var' : True,
'ls' : 'ls',
'nls' : 'newton',
}
functions = {
'cinc_simple0' : (lambda coors, domain:
cinc_simple(coors, 0),),
'cinc_simple1' : (lambda coors, domain:
cinc_simple(coors, 1),),
'cinc_simple2' : (lambda coors, domain:
cinc_simple(coors, 2),),
'get_pars' : (lambda ts, coors, mode=None, **kwargs:
get_pars(ts, coors, mode,
output_dir=output_dir, **kwargs),),
}
regions, dim = define_regions(filename_mesh)
fields = {
'displacement': ('real', 'vector', 'Omega', 1),
'pressure': ('real', 'scalar', 'Omega', 1),
'multiplier': ('real', 'scalar', 'Walls', 1),
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
'p' : ('unknown field', 'pressure', 1),
'q' : ('test field', 'pressure', 'p'),
'ul' : ('unknown field', 'multiplier', 2),
'vl' : ('test field', 'multiplier', 'ul'),
}
ebcs = {
'inlet' : ('Inlet', {'p.0' : 1.0, 'u.all' : 0.0}),
'outlet' : ('Outlet', {'p.0' : -1.0}),
}
lcbcs = {
'rigid' : ('Outlet', {'u.all' : None}, None, 'rigid'),
}
materials = {
'm' : 'get_pars',
}
equations = {
'eq_1' :
"""dw_lin_elastic.2.Omega( m.D, v, u )
- dw_biot.2.Omega( m.alpha, v, p )
+ dw_non_penetration.2.Walls( v, ul )
= 0""",
'eq_2' :
"""dw_biot.2.Omega( m.alpha, u, q )
+ dw_diffusion.2.Omega( m.K, q, p )
= 0""",
'eq_3' :
"""dw_non_penetration.2.Walls( u, vl )
= 0""",
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {}),
}
return locals()
|
RexFuzzle/sfepy
|
examples/multi_physics/biot_npbc_lagrange.py
|
Python
|
bsd-3-clause
| 4,049
|
[
"VTK"
] |
4d9510be66d9debe18282c9ddf8996855b7e0b7ec04fc8daf5e00f61a4b32953
|
#!/usr/bin/env python
'''
Purpose:
non-coding sequence retrieval surrounding a gene or sequence of interest
from large genomic scaffolds.
This script uses BLAST, so the exact location of the gene is not required.
Because there may be multiple matching HSPs (high scoring segment pairs)
produced by BLAST for each gene of interest, this script outputs all HSPs at or
above a minimum percent similarity (provided by the user).
Prerequisites:
ncd_run.py requires ncdmodules.py in the same directory
this script also requires Biopython and ncbi-blast+
Example:
python ncd_run.py protocadherinBs.fasta human_scaffolds.fasta 90
Output:
Three fasta files per genomic region of interest: *_upstream.fasta,
*_downstream.fasta, and *_fullseq.fasta
*=These files are named after the genomic scaffold queried, so a brief name in
the fasta file is recommended, e.g. >CHR1, >CHR2, >scaffold34234, etc.
The sequences in fasta format are named after the corresponding matching gene,
the percent similarity to that gene, and the location of the gene (based on the
scaffold queried)
example:
>protocadherinB-1|99%|1600000
ATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATG
Usage:
python ncd_run.py Genes.fasta genome.fasta perID
where
genes.fasta = list of genes in 5->3 orientation
genome.fasta = genome of interest containing gene of interest
perID = minimum percent similarity to the gene
NOTE: percent similarity is written without "%"
percent similarity represents the similarity of the HSP produced by BLAST
'''
import sys
import os
import imp
import subprocess
import shlex
import fnmatch
import tempfile
import Bio
from Bio.Seq import Seq
from Bio import SeqIO
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.Blast import NCBIXML
#Alter this variable to change the bps retrieved upstream & downstream
SeqLen = int(10000)
#find requisite libraries
def findlibs():
try:
imp.find_module('Bio')
except ImportError:
print 'install Bio libraries (Biopython)'
sys.exit('Exiting script...')
#making temporary blast db
def makeblastdb(FastaFile):
DBname = FastaFile.replace('.fasta','.db')
try:
Command = ('makeblastdb -in '
+ str(FastaFile)
+ ' -input_type fasta -dbtype nucl -parse_seqids -out '
+ str(DBname))
Args = shlex.split(Command)
subprocess.Popen(Args, stdout=subprocess.PIPE)
except OSError as e:
print "Error({0}): {1}".format(e.errno, e.strerror)
print "Insure ncbi-blast+ is installed and available to environment"
#delete temp DB
def delete_db(DBname):
File_Del = str(DBname) + '.*'
for file in os.listdir('.'):
if fnmatch.fnmatch(file, str(File_Del)):
print file
os.remove(str(file))
#Checking temporary blastdb
def check_blastdb(FastaFile):
DBname = FastaFile.replace('.fasta','.db')
while True:
if os.path.isfile(str(DBname)+'.nhr'):
print ('blast DB named... '
+ str(DBname)
+ ' already exists. Overwrite?')
Reply = raw_input('(Y/N)... ')
if Reply == 'Y':
makeblastdb(FastaFile)
return DBname
elif Reply == 'N':
sys.exit('Exiting script...')
else:
print 'Incorrect input. Use Y/N'
else:
makeblastdb(FastaFile)
return DBname
#Parse blast hits and Records for sequences
def parse_seq(Blast_Record, Hsp, Record, SeqLen):
    # library: from Bio import Seq for reverse complementation
QueryStart = int(Hsp.query_start)
QueryStop = int(Hsp.query_start) + int(Hsp.align_length)
#Get Coordinates
if int(Hsp.query_start) - SeqLen > 0:
MaxStart = int(Hsp.query_start) - SeqLen
else:
MaxStart = 1
if int(Blast_Record.query_length) - int(QueryStop + SeqLen) > 0:
MaxStop = int(QueryStop + SeqLen)
else:
MaxStop = int(Blast_Record.query_length)
#Extract (with gene/or seq of interest)
#upstream and downstream regardless of orientation
Seq = Record.seq
FullSeq = Seq[MaxStart:MaxStop]
UpSeq = Seq[MaxStart:QueryStop]
DownSeq = Seq[QueryStart:MaxStop]
#Output 5'->3' oriented sequences,
#NOTE:We are assuming gene/seq of interest is in 5'->3' orientation
Cond1 = int(Hsp.sbjct_start) >= int(Hsp.align_length)
Cond2 = int(Hsp.sbjct_start) >= 1
Cond3 = int(Hsp.sbjct_start) < int(Hsp.align_length)
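    # Editorial note (interpretation, not from the original script): Cond1 treats
    # a subject start at or beyond the alignment length as a reverse-strand hit,
    # so the extracted regions are reverse-complemented below; Cond2/Cond3 cover
    # the remaining (forward/partial) case and return the sequences as-is.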
if Cond1: #TRUE
#revcomp sequences
UpOut = DownSeq.reverse_complement()
DownOut = UpSeq.reverse_complement()
FullOut = FullSeq.reverse_complement()
return UpOut, DownOut, FullOut
elif Cond2 and Cond3: # FALSE
print("Partial match, may need to check strand orientation: "
+ str(Record.id))
#nothing happens, no revcomp
return UpSeq, DownSeq, FullSeq
#remove characters
def remove(filename, deletechars='\/:*?"<>|'):
for c in deletechars:
filename = filename.replace(c,'')
    return filename
#Print out results by blast Record
def print_out(SeqList, Headers, FlPt1, FlPt2):
FlPt1 = remove(FlPt1)
OutFile = str(FlPt1) + str(FlPt2)
OutHandle = open(OutFile, 'ab+')
for x in range(0, len(SeqList)):
OutHandle.write('>%s\n'%(Headers[x]))
OutHandle.write('%s\n'%(SeqList[x]))
OutHandle.close()
#BLAST genomic seq against temp gene database
def blast(FastaFile, BlastDB, perID, SeqLen):
'''
libraries:
from Bio import SeqIO
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.Blast import NCBIXML
'''
Fasta_Handle = open(FastaFile, "r")
for Record in SeqIO.parse(Fasta_Handle, "fasta"):
#generate temporary fasta file input, and BLASTxml output
TempFasta = tempfile.NamedTemporaryFile()
TempFasta.write(">%s\n%s\n" % (Record.id, Record.seq))
TempBlastXML = tempfile.NamedTemporaryFile()
#BLAST Record
Blast_Command = NcbiblastnCommandline(
query=TempFasta.name,
db=BlastDB, evalue=1e-10,
out=TempBlastXML.name, outfmt=5)
std_output, err_output = Blast_Command()
TempFasta.close()
Result_Handle = open(TempBlastXML.name)
Blast_Records = NCBIXML.parse(Result_Handle)
#lists
UpList = []
DownList = []
FullSeqList = []
Headers = []
#loop over Records, check perID
for Blast_Record in Blast_Records:
for Alignment in Blast_Record.alignments:
for Hsp in Alignment.hsps:
Hsp_perID = ((float(Hsp.positives)
/float(Hsp.align_length))
* 100)
if Hsp_perID >= int(perID):
#call seq function
UpStream, DownStream, FullSeq = parse_seq(
Blast_Record, Hsp, Record, SeqLen)
#create list of seqs and Headers
UpList.append(UpStream)
DownList.append(DownStream)
FullSeqList.append(FullSeq)
#create header
Sbjct_Name = Alignment.title
Sbjct_Edit = Sbjct_Name.replace(
' No definition line', '')
Header_String = (str(Sbjct_Edit)
+ '|' + str(round(Hsp_perID,1))
+ '%' + '|' + str(Hsp.query_start))
Headers.append(Header_String)
#print out and close
print_out(UpList, Headers, Record.id, '_upstream.fasta')
print_out(DownList, Headers, Record.id, '_downstream.fasta')
print_out(FullSeqList, Headers, Record.id, '_fullseq.fasta')
Result_Handle.close()
#parameter input
try:
GeneFile = sys.argv[1]
GenomicFile = sys.argv[2]
PerID = sys.argv[3]
except IndexError as e:
print 'Error: '+ str(e)
print "Incorrect parameter usage, see instructions"
sys.exit('Exiting script...')
#Check for biopython libs
findlibs()
#Check if DB already exists. Also calls makeblastdb
DBname = check_blastdb(GeneFile)
print "Making temp DB file..."
print DBname
#performs BLAST, calls parse_seq and print_out
print 'printing out files... '
blast(GenomicFile, DBname, PerID, SeqLen)
#delete temp DB
print 'deleting temp DB files... '
delete_db(DBname)
|
juswilliams/bioscripts
|
non-coding-retrieval/ncd_run.py
|
Python
|
gpl-3.0
| 8,656
|
[
"BLAST",
"Biopython"
] |
4fbf7add048a65f9728733ffc50eb72338a50942f4b78c6e9f4e8fe638ade4bd
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`maindisplay` module provides the functionality to display screens and play multimedia within OpenLP.
Some of the code for this form is based on the examples at:
* `http://www.steveheffernan.com/html5-video-player/demo-video-player.html`_
* `http://html5demos.com/two-videos`_
"""
import cgi
import logging
import os
import sys
from PyQt4 import QtCore, QtGui, QtWebKit, QtOpenGL
from PyQt4.phonon import Phonon
from openlp.core.lib import ServiceItem, Settings, ImageSource, Registry, build_html, expand_tags, \
image_to_byte, translate
from openlp.core.lib.theme import BackgroundType
from openlp.core.lib import ScreenList
from openlp.core.ui import HideMode, AlertLocation
log = logging.getLogger(__name__)
class Display(QtGui.QGraphicsView):
"""
This is a general display screen class. Here the general display settings
will done. It will be used as specialized classes by Main Display and
Preview display.
"""
def __init__(self, parent, live, controller):
"""
Constructor
"""
if live:
super(Display, self).__init__()
# Overwrite the parent() method.
self.parent = lambda: parent
else:
super(Display, self).__init__(parent)
self.is_live = live
self.controller = controller
self.screen = {}
# FIXME: On Mac OS X (tested on 10.7) the display screen is corrupt with
        # OpenGL. Only a blank white screen is shown on the 2nd monitor all the
# time. We need to investigate more how to use OpenGL properly on Mac OS
# X.
if sys.platform != 'darwin':
self.setViewport(QtOpenGL.QGLWidget())
def setup(self):
"""
Set up and build the screen base
"""
log.debug('Start Display base setup (live = %s)' % self.is_live)
self.setGeometry(self.screen['size'])
log.debug('Setup webView')
self.web_view = QtWebKit.QWebView(self)
self.web_view.setGeometry(0, 0, self.screen['size'].width(), self.screen['size'].height())
self.web_view.settings().setAttribute(QtWebKit.QWebSettings.PluginsEnabled, True)
palette = self.web_view.palette()
palette.setBrush(QtGui.QPalette.Base, QtCore.Qt.transparent)
self.web_view.page().setPalette(palette)
self.web_view.setAttribute(QtCore.Qt.WA_OpaquePaintEvent, False)
self.page = self.web_view.page()
self.frame = self.page.mainFrame()
if self.is_live and log.getEffectiveLevel() == logging.DEBUG:
self.web_view.settings().setAttribute(QtWebKit.QWebSettings.DeveloperExtrasEnabled, True)
self.web_view.loadFinished.connect(self.is_web_loaded)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.frame.setScrollBarPolicy(QtCore.Qt.Vertical, QtCore.Qt.ScrollBarAlwaysOff)
self.frame.setScrollBarPolicy(QtCore.Qt.Horizontal, QtCore.Qt.ScrollBarAlwaysOff)
def resizeEvent(self, event):
"""
React to resizing of this display
"""
self.web_view.setGeometry(0, 0, self.width(), self.height())
def is_web_loaded(self):
"""
Called by webView event to show display is fully loaded
"""
log.debug('is web loaded')
self.web_loaded = True
class MainDisplay(Display):
"""
This is the display screen as a specialized class from the Display class
"""
def __init__(self, parent, live, controller):
"""
Constructor
"""
super(MainDisplay, self).__init__(parent, live, controller)
self.screens = ScreenList()
self.rebuild_css = False
self.hide_mode = None
self.override = {}
self.retranslateUi()
self.media_object = None
if live:
self.audio_player = AudioPlayer(self)
else:
self.audio_player = None
self.first_time = True
self.web_loaded = True
self.setStyleSheet('border: 0px; margin: 0px; padding: 0px;')
window_flags = QtCore.Qt.FramelessWindowHint | QtCore.Qt.Tool | QtCore.Qt.WindowStaysOnTopHint
if Settings().value('advanced/x11 bypass wm'):
window_flags |= QtCore.Qt.X11BypassWindowManagerHint
# TODO: The following combination of window_flags works correctly
# on Mac OS X. For next OpenLP version we should test it on other
# platforms. For OpenLP 2.0 keep it only for OS X to not cause any
# regressions on other platforms.
if sys.platform == 'darwin':
window_flags = QtCore.Qt.FramelessWindowHint | QtCore.Qt.Window
# For primary screen ensure it stays above the OS X dock
# and menu bar
if self.screens.current['primary']:
self.setWindowState(QtCore.Qt.WindowFullScreen)
self.setWindowFlags(window_flags)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.set_transparency(False)
if self.is_live:
Registry().register_function('live_display_hide', self.hide_display)
Registry().register_function('live_display_show', self.show_display)
Registry().register_function('update_display_css', self.css_changed)
def set_transparency(self, enabled):
"""
Set the transparency of the window
"""
if enabled:
self.setAutoFillBackground(False)
self.setStyleSheet("QGraphicsView {background: transparent; border: 0px;}")
else:
self.setAttribute(QtCore.Qt.WA_NoSystemBackground, False)
self.setStyleSheet("QGraphicsView {}")
self.setAttribute(QtCore.Qt.WA_TranslucentBackground, enabled)
self.repaint()
def css_changed(self):
"""
We need to rebuild the CSS on the live display.
"""
for plugin in self.plugin_manager.plugins:
plugin.refresh_css(self.frame)
def retranslateUi(self):
"""
Setup the interface translation strings.
"""
self.setWindowTitle(translate('OpenLP.MainDisplay', 'OpenLP Display'))
def setup(self):
"""
Set up and build the output screen
"""
log.debug('Start MainDisplay setup (live = %s)' % self.is_live)
self.screen = self.screens.current
self.setVisible(False)
Display.setup(self)
if self.is_live:
# Build the initial frame.
background_color = QtGui.QColor()
background_color.setNamedColor(Settings().value('advanced/default color'))
if not background_color.isValid():
background_color = QtCore.Qt.white
image_file = Settings().value('advanced/default image')
splash_image = QtGui.QImage(image_file)
self.initial_fame = QtGui.QImage(
self.screen['size'].width(),
self.screen['size'].height(),
QtGui.QImage.Format_ARGB32_Premultiplied)
painter_image = QtGui.QPainter()
painter_image.begin(self.initial_fame)
painter_image.fillRect(self.initial_fame.rect(), background_color)
painter_image.drawImage(
(self.screen['size'].width() - splash_image.width()) // 2,
(self.screen['size'].height() - splash_image.height()) // 2,
splash_image)
service_item = ServiceItem()
service_item.bg_image_bytes = image_to_byte(self.initial_fame)
self.web_view.setHtml(build_html(service_item, self.screen, self.is_live, None,
plugins=self.plugin_manager.plugins))
self.__hideMouse()
log.debug('Finished MainDisplay setup')
def text(self, slide, animate=True):
"""
Add the slide text from slideController
``slide``
The slide text to be displayed
``animate``
Perform transitions if applicable when setting the text
"""
log.debug('text to display')
# Wait for the webview to update before displaying text.
while not self.web_loaded:
self.application.process_events()
self.setGeometry(self.screen['size'])
if animate:
self.frame.evaluateJavaScript('show_text("%s")' % slide.replace('\\', '\\\\').replace('\"', '\\\"'))
else:
# This exists for https://bugs.launchpad.net/openlp/+bug/1016843
# For unknown reasons if evaluateJavaScript is called
# from the themewizard, then it causes a crash on
# Windows if there are many items in the service to re-render.
# Setting the div elements direct seems to solve the issue
self.frame.findFirstElement("#lyricsmain").setInnerXml(slide)
self.frame.findFirstElement("#lyricsoutline").setInnerXml(slide)
self.frame.findFirstElement("#lyricsshadow").setInnerXml(slide)
def alert(self, text, location):
"""
Display an alert.
``text``
The text to be displayed.
"""
log.debug('alert to display')
# First we convert <>& marks to html variants, then apply
        # formatting tags, finally we double all backslashes for JavaScript.
text_prepared = expand_tags(cgi.escape(text)).replace('\\', '\\\\').replace('\"', '\\\"')
if self.height() != self.screen['size'].height() or not self.isVisible():
shrink = True
js = 'show_alert("%s", "%s")' % (text_prepared, 'top')
else:
shrink = False
js = 'show_alert("%s", "")' % text_prepared
height = self.frame.evaluateJavaScript(js)
if shrink:
if text:
alert_height = int(height)
self.resize(self.width(), alert_height)
self.setVisible(True)
if location == AlertLocation.Middle:
self.move(self.screen['size'].left(), (self.screen['size'].height() - alert_height) // 2)
elif location == AlertLocation.Bottom:
self.move(self.screen['size'].left(), self.screen['size'].height() - alert_height)
else:
self.setVisible(False)
self.setGeometry(self.screen['size'])
def direct_image(self, path, background):
"""
API for replacement backgrounds so Images are added directly to cache.
"""
self.image_manager.add_image(path, ImageSource.ImagePlugin, background)
if not hasattr(self, 'service_item'):
return False
self.override['image'] = path
self.override['theme'] = self.service_item.themedata.background_filename
self.image(path)
# Update the preview frame.
if self.is_live:
self.live_controller.update_preview()
return True
def image(self, path):
"""
Add an image as the background. The image has already been added to the
cache.
``path``
The path to the image to be displayed. **Note**, the path is only
passed to identify the image. If the image has changed it has to be
re-added to the image manager.
"""
log.debug('image to display')
image = self.image_manager.get_image_bytes(path, ImageSource.ImagePlugin)
self.controller.media_controller.media_reset(self.controller)
self.display_image(image)
def display_image(self, image):
"""
Display an image, as is.
"""
self.setGeometry(self.screen['size'])
if image:
js = 'show_image("data:image/png;base64,%s");' % image
else:
js = 'show_image("");'
self.frame.evaluateJavaScript(js)
def reset_image(self):
"""
Reset the background image to the service item image. Used after the
image plugin has changed the background.
"""
log.debug('reset_image')
if hasattr(self, 'service_item'):
self.display_image(self.service_item.bg_image_bytes)
else:
self.display_image(None)
# Update the preview frame.
if self.is_live:
self.live_controller.update_preview()
# clear the cache
self.override = {}
def preview(self):
"""
Generates a preview of the image displayed.
"""
log.debug('preview for %s', self.is_live)
was_visible = self.isVisible()
self.application.process_events()
# We must have a service item to preview.
if self.is_live and hasattr(self, 'service_item'):
            # Wait for the fade to finish before getting the preview.
# Important otherwise preview will have incorrect text if at all!
if self.service_item.themedata and self.service_item.themedata.display_slide_transition:
while not self.frame.evaluateJavaScript('show_text_completed()'):
self.application.process_events()
# Wait for the webview to update before getting the preview.
# Important otherwise first preview will miss the background !
while not self.web_loaded:
self.application.process_events()
# if was hidden keep it hidden
if self.is_live:
if self.hide_mode:
self.hide_display(self.hide_mode)
# Only continue if the visibility wasn't changed during method call.
elif was_visible == self.isVisible():
# Single screen active
if self.screens.display_count == 1:
# Only make visible if setting enabled.
if Settings().value('core/display on monitor'):
self.setVisible(True)
else:
self.setVisible(True)
return QtGui.QPixmap.grabWidget(self)
def build_html(self, service_item, image_path=''):
"""
Store the service_item and build the new HTML from it. Add the
HTML to the display
"""
log.debug('build_html')
self.web_loaded = False
self.initial_fame = None
self.service_item = service_item
background = None
# We have an image override so keep the image till the theme changes.
if self.override:
            # We have a video override so allow it to be stopped.
if 'video' in self.override:
Registry().execute('video_background_replaced')
self.override = {}
# We have a different theme.
elif self.override['theme'] != service_item.themedata.background_filename:
Registry().execute('live_theme_changed')
self.override = {}
else:
# replace the background
background = self.image_manager.get_image_bytes(self.override['image'], ImageSource.ImagePlugin)
self.set_transparency(self.service_item.themedata.background_type ==
BackgroundType.to_string(BackgroundType.Transparent))
if self.service_item.themedata.background_filename:
self.service_item.bg_image_bytes = self.image_manager.get_image_bytes(
self.service_item.themedata.background_filename, ImageSource.Theme
)
if image_path:
image_bytes = self.image_manager.get_image_bytes(image_path, ImageSource.ImagePlugin)
else:
image_bytes = None
html = build_html(self.service_item, self.screen, self.is_live, background, image_bytes,
plugins=self.plugin_manager.plugins)
log.debug('buildHtml - pre setHtml')
self.web_view.setHtml(html)
log.debug('buildHtml - post setHtml')
if service_item.foot_text:
self.footer(service_item.foot_text)
# if was hidden keep it hidden
if self.hide_mode and self.is_live and not service_item.is_media():
if Settings().value('core/auto unblank'):
Registry().execute('slidecontroller_live_unblank')
else:
self.hide_display(self.hide_mode)
self.__hideMouse()
def footer(self, text):
"""
Display the Footer
"""
log.debug('footer')
js = 'show_footer(\'' + text.replace('\\', '\\\\').replace('\'', '\\\'') + '\')'
self.frame.evaluateJavaScript(js)
def hide_display(self, mode=HideMode.Screen):
"""
Hide the display by making all layers transparent
Store the images so they can be replaced when required
"""
log.debug('hide_display mode = %d', mode)
if self.screens.display_count == 1:
# Only make visible if setting enabled.
if not Settings().value('core/display on monitor'):
return
if mode == HideMode.Screen:
self.frame.evaluateJavaScript('show_blank("desktop");')
self.setVisible(False)
elif mode == HideMode.Blank or self.initial_fame:
self.frame.evaluateJavaScript('show_blank("black");')
else:
self.frame.evaluateJavaScript('show_blank("theme");')
if mode != HideMode.Screen:
if self.isHidden():
self.setVisible(True)
self.web_view.setVisible(True)
self.hide_mode = mode
def show_display(self):
"""
Show the stored layers so the screen reappears as it was originally.
Make the stored images None to release memory.
"""
log.debug('show_display')
if self.screens.display_count == 1:
# Only make visible if setting enabled.
if not Settings().value('core/display on monitor'):
return
self.frame.evaluateJavaScript('show_blank("show");')
if self.isHidden():
self.setVisible(True)
self.hide_mode = None
# Trigger actions when display is active again.
if self.is_live:
Registry().execute('live_display_active')
def __hideMouse(self):
"""
Hide mouse cursor when moved over display.
"""
if Settings().value('advanced/hide mouse'):
self.setCursor(QtCore.Qt.BlankCursor)
self.frame.evaluateJavaScript('document.body.style.cursor = "none"')
else:
self.setCursor(QtCore.Qt.ArrowCursor)
self.frame.evaluateJavaScript('document.body.style.cursor = "auto"')
def _get_plugin_manager(self):
"""
Adds the Renderer to the class dynamically
"""
if not hasattr(self, '_plugin_manager'):
self._plugin_manager = Registry().get('plugin_manager')
return self._plugin_manager
plugin_manager = property(_get_plugin_manager)
def _get_image_manager(self):
"""
Adds the image manager to the class dynamically
"""
if not hasattr(self, '_image_manager'):
self._image_manager = Registry().get('image_manager')
return self._image_manager
image_manager = property(_get_image_manager)
def _get_application(self):
"""
Adds the openlp to the class dynamically.
Windows needs to access the application in a dynamic manner.
"""
if os.name == 'nt':
return Registry().get('application')
else:
if not hasattr(self, '_application'):
self._application = Registry().get('application')
return self._application
application = property(_get_application)
def _get_live_controller(self):
"""
Adds the live controller to the class dynamically
"""
if not hasattr(self, '_live_controller'):
self._live_controller = Registry().get('live_controller')
return self._live_controller
live_controller = property(_get_live_controller)
class AudioPlayer(QtCore.QObject):
"""
    This class plays audio only, allowing components to work with a
    soundtrack independently of the user interface.
"""
log.info('AudioPlayer Loaded')
def __init__(self, parent):
"""
The constructor for the display form.
``parent``
The parent widget.
"""
log.debug('AudioPlayer Initialisation started')
super(AudioPlayer, self).__init__(parent)
self.currentIndex = -1
self.playlist = []
self.repeat = False
self.media_object = Phonon.MediaObject()
self.media_object.setTickInterval(100)
self.audio_object = Phonon.AudioOutput(Phonon.VideoCategory)
Phonon.createPath(self.media_object, self.audio_object)
self.media_object.aboutToFinish.connect(self.on_about_to_finish)
self.media_object.finished.connect(self.on_finished)
def __del__(self):
"""
Shutting down so clean up connections
"""
self.stop()
for path in self.media_object.outputPaths():
path.disconnect()
def on_about_to_finish(self):
"""
Just before the audio player finishes the current track, queue the next
item in the playlist, if there is one.
"""
self.currentIndex += 1
if len(self.playlist) > self.currentIndex:
self.media_object.enqueue(self.playlist[self.currentIndex])
def on_finished(self):
"""
When the audio track finishes.
"""
if self.repeat:
log.debug('Repeat is enabled... here we go again!')
self.media_object.clearQueue()
self.media_object.clear()
self.currentIndex = -1
self.play()
def connectVolumeSlider(self, slider):
"""
Connect the volume slider to the output channel.
"""
slider.setAudioOutput(self.audio_object)
def reset(self):
"""
Reset the audio player, clearing the playlist and the queue.
"""
self.currentIndex = -1
self.playlist = []
self.stop()
self.media_object.clear()
def play(self):
"""
We want to play the file so start it
"""
log.debug('AudioPlayer.play() called')
if self.currentIndex == -1:
self.on_about_to_finish()
self.media_object.play()
def pause(self):
"""
Pause the Audio
"""
log.debug('AudioPlayer.pause() called')
self.media_object.pause()
def stop(self):
"""
Stop the Audio and clean up
"""
log.debug('AudioPlayer.stop() called')
self.media_object.stop()
def add_to_playlist(self, filenames):
"""
Add another file to the playlist.
``filenames``
A list with files to be added to the playlist.
"""
if not isinstance(filenames, list):
filenames = [filenames]
self.playlist.extend(list(map(Phonon.MediaSource, filenames)))
def next(self):
"""
Skip forward to the next track in the list
"""
if not self.repeat and self.currentIndex + 1 >= len(self.playlist):
return
isPlaying = self.media_object.state() == Phonon.PlayingState
self.currentIndex += 1
if self.repeat and self.currentIndex == len(self.playlist):
self.currentIndex = 0
self.media_object.clearQueue()
self.media_object.clear()
self.media_object.enqueue(self.playlist[self.currentIndex])
if isPlaying:
self.media_object.play()
def go_to(self, index):
"""
Go to a particular track in the list
"""
isPlaying = self.media_object.state() == Phonon.PlayingState
self.media_object.clearQueue()
self.media_object.clear()
self.currentIndex = index
self.media_object.enqueue(self.playlist[self.currentIndex])
if isPlaying:
self.media_object.play()
def connectSlot(self, signal, slot):
"""
Connect a slot to a signal on the media object. Used by slidecontroller to connect to audio object.
"""
QtCore.QObject.connect(self.media_object, signal, slot)
|
marmyshev/bug_1117098
|
openlp/core/ui/maindisplay.py
|
Python
|
gpl-2.0
| 26,442
|
[
"Brian"
] |
4439b4fa67ecde092b03e439696c1b9e5378605e5a696426a95a4d067ec33807
|
import pytest
import os
import glob
import tempfile
import filecmp
import numpy.testing.utils as nptu
from abipy.abio.input_tags import *
from abipy.abio.factories import dte_from_gsinput
from fireworks.core.rocket_launcher import rapidfire
from abiflows.fireworks.workflows.abinit_workflows import DteFWWorkflow
from abiflows.fireworks.utils.fw_utils import get_fw_by_task_index, load_abitask
from abiflows.core.testing import AbiflowsIntegrationTest
#ABINIT_VERSION = "8.6.1"
# pytestmark = [pytest.mark.skipif(not has_abinit(ABINIT_VERSION), reason="Abinit version {} is not in PATH".format(ABINIT_VERSION)),
# pytest.mark.skipif(not has_fireworks(), reason="fireworks package is missing"),
# pytest.mark.skipif(not has_mongodb(), reason="no connection to mongodb")]
pytestmark = pytest.mark.usefixtures("cleandb")
class ItestDte(AbiflowsIntegrationTest):
def itest_dte_with_phonons(self, lp, fworker, tmpdir, input_scf_phonon_gan_low, use_autoparal, db_data):
"""
Simple test of DteFWWorkflow with autoparal True and False.
Skips dte permutations.
"""
# dte calculations only work with selected values of ixc
input_scf_phonon_gan_low['ixc'] = 7
dte_inputs = dte_from_gsinput(input_scf_phonon_gan_low, use_phonons=True, skip_dte_permutations=True,
ph_tol={"tolvrs": 1.0e-7}, ddk_tol = {"tolwfr": 1.0e-16},
dde_tol = {"tolvrs": 1.0e-7})
wf = DteFWWorkflow(input_scf_phonon_gan_low, ddk_inp = dte_inputs.filter_by_tags(DDK),
dde_inp = dte_inputs.filter_by_tags(DDE), dte_inp = dte_inputs.filter_by_tags(DTE),
ph_inp = dte_inputs.filter_by_tags(PH_Q_PERT), autoparal=use_autoparal,
initialization_info={"kppa": 100})
wf.add_anaddb_dte_fw(input_scf_phonon_gan_low.structure, dieflag=1, nlflag=1)
wf.add_mongoengine_db_insertion(db_data)
wf.add_final_cleanup(["WFK"])
scf_fw_id = wf.scf_fw.fw_id
old_new = wf.add_to_db(lpad=lp)
scf_fw_id = old_new[scf_fw_id]
rapidfire(lp, fworker, m_dir=str(tmpdir))
wf = lp.get_wf_by_fw_id(scf_fw_id)
assert wf.state == "COMPLETED"
# check the effect of the final cleanup
scf_task = load_abitask(get_fw_by_task_index(wf, "scf", index=1))
assert len(glob.glob(os.path.join(scf_task.outdir.path, "*_WFK"))) == 0
assert len(glob.glob(os.path.join(scf_task.outdir.path, "*_DEN"))) == 1
assert len(glob.glob(os.path.join(scf_task.tmpdir.path, "*"))) == 0
assert len(glob.glob(os.path.join(scf_task.indir.path, "*"))) == 0
# check the save in the DB
from abiflows.database.mongoengine.abinit_results import DteResult
with db_data.switch_collection(DteResult) as DteResult:
results = DteResult.objects()
assert len(results) == 1
r = results[0]
assert r.abinit_input.structure.to_mgobj() == input_scf_phonon_gan_low.structure
assert r.abinit_output.structure.to_mgobj() == input_scf_phonon_gan_low.structure
assert r.abinit_input.ecut == input_scf_phonon_gan_low['ecut']
assert r.abinit_input.kppa == 100
nptu.assert_array_equal(r.abinit_input.gs_input.to_mgobj()['ngkpt'], input_scf_phonon_gan_low['ngkpt'])
ana_task = load_abitask(get_fw_by_task_index(wf, "anaddb", index=None))
with tempfile.NamedTemporaryFile(mode="wb") as db_file:
db_file.write(r.abinit_output.anaddb_nc.read())
db_file.seek(0)
assert filecmp.cmp(ana_task.anaddb_nc_path, db_file.name)
mrgddb_task = load_abitask(get_fw_by_task_index(wf, "mrgddb", index=None))
with tempfile.NamedTemporaryFile(mode="wb") as db_file:
db_file.write(r.abinit_output.ddb.read())
db_file.seek(0)
assert filecmp.cmp(mrgddb_task.merged_ddb_path, db_file.name)
if self.check_numerical_values:
with scf_task.open_gsr() as gsr:
assert gsr.energy == pytest.approx(-680.402255069, rel=0.005)
ana_task = load_abitask(get_fw_by_task_index(wf, "anaddb", index=None))
with ana_task.open_anaddbnc() as ananc:
assert float(ananc.dchide[0,0,2]) == pytest.approx(-1.69328765210, rel=0.15)
def itest_dte_skip_permutations(self, lp, fworker, tmpdir, input_scf_phonon_gan_low):
"""
Simple test of DteFWWorkflow without phonons.
"""
# dte calculations only work with selected values of ixc
input_scf_phonon_gan_low['ixc'] = 7
dte_inputs = dte_from_gsinput(input_scf_phonon_gan_low, use_phonons=False, skip_dte_permutations=False,
ph_tol={"tolvrs": 1.0e-7}, ddk_tol = {"tolwfr": 1.0e-16},
dde_tol = {"tolvrs": 1.0e-7})
wf = DteFWWorkflow(input_scf_phonon_gan_low, ddk_inp = dte_inputs.filter_by_tags(DDK),
dde_inp = dte_inputs.filter_by_tags(DDE), dte_inp = dte_inputs.filter_by_tags(DTE),
ph_inp = dte_inputs.filter_by_tags(PH_Q_PERT), autoparal=False)
wf.add_anaddb_dte_fw(input_scf_phonon_gan_low.structure, dieflag=2, nlflag=3, ramansr=0, alphon=0, prtmbm=0)
scf_fw_id = wf.scf_fw.fw_id
old_new = wf.add_to_db(lpad=lp)
scf_fw_id = old_new[scf_fw_id]
rapidfire(lp, fworker, m_dir=str(tmpdir))
wf = lp.get_wf_by_fw_id(scf_fw_id)
assert wf.state == "COMPLETED"
if self.check_numerical_values:
scf_task = load_abitask(get_fw_by_task_index(wf, "scf", index=1))
with scf_task.open_gsr() as gsr:
assert gsr.energy == pytest.approx(-680.402255069, rel=0.005)
ana_task = load_abitask(get_fw_by_task_index(wf, "anaddb", index=None))
with ana_task.open_anaddbnc() as ananc:
assert float(ananc.dchide[0,0,2]) == pytest.approx(-1.69328765210, rel=0.15)
|
davidwaroquiers/abiflows
|
abiflows/fireworks/integration_tests/itest_dte.py
|
Python
|
gpl-2.0
| 6,183
|
[
"ABINIT"
] |
8cca117bdc0545cf9418c91294147ef3363833ad104967a4d50d742bfb4bb8d6
|
"""
GWAS association graphing Galaxy Wrapper
Mark Einon <mark.einon@gmail.com>
"""
import optparse
import os
import subprocess
import sys
def __main__():
cmd = 'plot_assoc.R'
# Local args are options not passed on, but processed in this file
local_args = ['output', 'output_id', 'new_file_path', 'run_dir']
# opt args are options passed directly on
opt_args = ['assoc']
print 'Parsing input options...'
parser = optparse.OptionParser()
for local_arg in local_args:
parser.add_option("--%s" % local_arg)
for arg in opt_args:
parser.add_option("--%s" % arg)
(options, args) = parser.parse_args()
if len(args) > 0:
parser.error('Wrong number of arguments')
# build command to be executed
cmd = options.run_dir + "/" + cmd
for key in vars(options):
if key in opt_args:
if vars(options)[key] != None:
cmd += " %s" % vars(options)[key]
print 'Executing... %s\n' % cmd
log = open(options.output, 'w') if options.output else sys.stdout
try:
# need to redirect stderr because Plink writes some logging info there
subprocess.check_call(cmd, stdout=log, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError:
# catch this error, as we still may want to keep the output file (e.g. missnp)
print "Failed\n"
finally:
subprocess.call(['mkdir', options.new_file_path], stderr=subprocess.STDOUT)
cwd = os.getcwd()
files = sorted([f for f in os.listdir(cwd) if (os.path.isfile(f) and f.endswith('pdf'))])
for f in files:
new_file = "%s/%s" % (options.new_file_path, f)
subprocess.check_call(['mv', f, new_file])
print "Successfully moved %d files - %s \n" % (len(files), files)
if log != sys.stdout:
log.close()
if __name__ == "__main__":
__main__()
|
einon/galaxy-tools
|
plink2/plot_assoc.py
|
Python
|
gpl-3.0
| 1,926
|
[
"Galaxy"
] |
9c35a95fe53596c0499b28925cf00b458161a48a2be1b8e31c13512f5dee0c89
|
import functools
import json
import logging
import random
import re
import string # pylint: disable=W0402
import fnmatch
from textwrap import dedent
from external_auth.models import ExternalAuthMap
from external_auth.djangostore import DjangoOpenIDStore
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME, authenticate, login
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
from student.models import UserProfile, TestCenterUser, TestCenterRegistration
from django.http import HttpResponse, HttpResponseRedirect, HttpRequest
from django.utils.http import urlquote
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from mitxmako.shortcuts import render_to_response, render_to_string
try:
from django.views.decorators.csrf import csrf_exempt
except ImportError:
from django.contrib.csrf.middleware import csrf_exempt
from django_future.csrf import ensure_csrf_cookie
from util.cache import cache_if_anonymous
import django_openid_auth.views as openid_views
from django_openid_auth import auth as openid_auth
from openid.consumer.consumer import SUCCESS
from openid.server.server import Server, ProtocolError, UntrustedReturnURL
from openid.server.trustroot import TrustRoot
from openid.extensions import ax, sreg
import student.views as student_views
# Required for Pearson
from courseware.views import get_module_for_descriptor, jump_to
from courseware.model_data import ModelDataCache
from xmodule.modulestore.django import modulestore
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore import Location
from xmodule.modulestore.exceptions import ItemNotFoundError
log = logging.getLogger("mitx.external_auth")
# -----------------------------------------------------------------------------
# OpenID Common
# -----------------------------------------------------------------------------
@csrf_exempt
def default_render_failure(request,
message,
status=403,
template_name='extauth_failure.html',
exception=None):
"""Render an Openid error page to the user"""
log.debug("In openid_failure " + message)
data = render_to_string(template_name,
dict(message=message, exception=exception))
return HttpResponse(data, status=status)
# -----------------------------------------------------------------------------
# OpenID Authentication
# -----------------------------------------------------------------------------
def generate_password(length=12, chars=string.letters + string.digits):
"""Generate internal password for externally authenticated user"""
choice = random.SystemRandom().choice
return ''.join([choice(chars) for i in range(length)])
@csrf_exempt
def openid_login_complete(request,
redirect_field_name=REDIRECT_FIELD_NAME,
render_failure=None):
"""Complete the openid login process"""
render_failure = (render_failure or default_render_failure)
openid_response = openid_views.parse_openid_response(request)
if not openid_response:
return render_failure(request,
'This is an OpenID relying party endpoint.')
if openid_response.status == SUCCESS:
external_id = openid_response.identity_url
oid_backend = openid_auth.OpenIDBackend()
details = oid_backend._extract_user_details(openid_response)
log.debug('openid success, details=%s', details)
url = getattr(settings, 'OPENID_SSO_SERVER_URL', None)
external_domain = "openid:%s" % url
fullname = '%s %s' % (details.get('first_name', ''),
details.get('last_name', ''))
return external_login_or_signup(request,
external_id,
external_domain,
details,
details.get('email', ''),
fullname)
return render_failure(request, 'Openid failure')
def external_login_or_signup(request,
external_id,
external_domain,
credentials,
email,
fullname,
retfun=None):
"""Generic external auth login or signup"""
# see if we have a map from this external_id to an edX username
try:
eamap = ExternalAuthMap.objects.get(external_id=external_id,
external_domain=external_domain)
log.debug('Found eamap=%s', eamap)
except ExternalAuthMap.DoesNotExist:
# go render form for creating edX user
eamap = ExternalAuthMap(external_id=external_id,
external_domain=external_domain,
external_credentials=json.dumps(credentials))
eamap.external_email = email
eamap.external_name = fullname
eamap.internal_password = generate_password()
log.debug('Created eamap=%s', eamap)
eamap.save()
log.info(u"External_Auth login_or_signup for %s : %s : %s : %s", external_domain, external_id, email, fullname)
internal_user = eamap.user
if internal_user is None:
if settings.MITX_FEATURES.get('AUTH_USE_SHIB'):
# if we are using shib, try to link accounts using email
try:
link_user = User.objects.get(email=eamap.external_email)
if not ExternalAuthMap.objects.filter(user=link_user).exists():
# if there's no pre-existing linked eamap, we link the user
eamap.user = link_user
eamap.save()
internal_user = link_user
log.info('SHIB: Linking existing account for %s', eamap.external_email)
# now pass through to log in
else:
# otherwise, there must have been an error, b/c we've already linked a user with these external
# creds
failure_msg = _(dedent("""
You have already created an account using an external login like WebAuth or Shibboleth.
Please contact %s for support """
% getattr(settings, 'TECH_SUPPORT_EMAIL', 'techsupport@class.stanford.edu')))
return default_render_failure(request, failure_msg)
except User.DoesNotExist:
log.info('SHIB: No user for %s yet, doing signup', eamap.external_email)
return signup(request, eamap)
else:
log.info('No user for %s yet. doing signup', eamap.external_email)
return signup(request, eamap)
# We trust shib's authentication, so no need to authenticate using the password again
if settings.MITX_FEATURES.get('AUTH_USE_SHIB'):
uname = internal_user.username
user = internal_user
# Assuming this 'AUTHENTICATION_BACKENDS' is set in settings, which I think is safe
if settings.AUTHENTICATION_BACKENDS:
auth_backend = settings.AUTHENTICATION_BACKENDS[0]
else:
auth_backend = 'django.contrib.auth.backends.ModelBackend'
user.backend = auth_backend
log.info('SHIB: Logging in linked user %s', user.email)
else:
uname = internal_user.username
user = authenticate(username=uname, password=eamap.internal_password)
if user is None:
log.warning("External Auth Login failed for %s / %s",
uname, eamap.internal_password)
return signup(request, eamap)
if not user.is_active:
log.warning("User %s is not active", uname)
# TODO: improve error page
msg = 'Account not yet activated: please look for link in your email'
return default_render_failure(request, msg)
login(request, user)
request.session.set_expiry(0)
# Now to try enrollment
# Need to special case Shibboleth here because it logs in via a GET.
# testing request.method for extra paranoia
if settings.MITX_FEATURES.get('AUTH_USE_SHIB') and 'shib:' in external_domain and request.method == 'GET':
enroll_request = make_shib_enrollment_request(request)
student_views.try_change_enrollment(enroll_request)
else:
student_views.try_change_enrollment(request)
log.info("Login success - %s (%s)", user.username, user.email)
if retfun is None:
return redirect('/')
return retfun()
@ensure_csrf_cookie
@cache_if_anonymous
def signup(request, eamap=None):
"""
Present form to complete for signup via external authentication.
Even though the user has external credentials, he/she still needs
to create an account on the edX system, and fill in the user
registration form.
    eamap is an ExternalAuthMap object, specifying the external user
for which to complete the signup.
"""
if eamap is None:
pass
# save this for use by student.views.create_account
request.session['ExternalAuthMap'] = eamap
# default conjoin name, no spaces
username = eamap.external_name.replace(' ', '')
context = {'has_extauth_info': True,
'show_signup_immediately': True,
'extauth_id': eamap.external_id,
'extauth_email': eamap.external_email,
'extauth_username': username,
'extauth_name': eamap.external_name,
'ask_for_tos': True,
}
    # Some openEdX instances cannot require terms of service for shib users,
    # for example per Stanford's Office of General Counsel
if settings.MITX_FEATURES.get('AUTH_USE_SHIB') and settings.MITX_FEATURES.get('SHIB_DISABLE_TOS') and \
('shib' in eamap.external_domain):
context['ask_for_tos'] = False
# detect if full name is blank and ask for it from user
context['ask_for_fullname'] = eamap.external_name.strip() == ''
# validate provided mail and if it's not valid ask the user
try:
validate_email(eamap.external_email)
context['ask_for_email'] = False
except ValidationError:
context['ask_for_email'] = True
log.info('EXTAUTH: Doing signup for %s', eamap.external_id)
return student_views.register_user(request, extra_context=context)
# -----------------------------------------------------------------------------
# MIT SSL
# -----------------------------------------------------------------------------
def ssl_dn_extract_info(dn):
"""
Extract username, email address (may be anyuser@anydomain.com) and
full name from the SSL DN string. Return (user,email,fullname) if
successful, and None otherwise.
"""
ss = re.search('/emailAddress=(.*)@([^/]+)', dn)
if ss:
user = ss.group(1)
email = "%s@%s" % (user, ss.group(2))
else:
return None
ss = re.search('/CN=([^/]+)/', dn)
if ss:
fullname = ss.group(1)
else:
return None
return (user, email, fullname)
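# Illustrative sketch (the DN below is hypothetical, not from the original file):
# for a DN such as '/C=US/ST=MA/O=MIT/CN=Jane Doe/emailAddress=jdoe@mit.edu' the
# function would return ('jdoe', 'jdoe@mit.edu', 'Jane Doe'); a DN missing either
# the emailAddress or the CN component yields None.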
def ssl_get_cert_from_request(request):
"""
    Extract the client SSL certificate DN from the request, if present.
    Returns the DN string, or '' when no certificate information is available.
"""
certkey = "SSL_CLIENT_S_DN" # specify the request.META field to use
cert = request.META.get(certkey, '')
if not cert:
cert = request.META.get('HTTP_' + certkey, '')
if not cert:
try:
# try the direct apache2 SSL key
cert = request._req.subprocess_env.get(certkey, '')
except Exception:
return ''
return cert
(user, email, fullname) = ssl_dn_extract_info(cert)
return (user, email, fullname)
def ssl_login_shortcut(fn):
"""
Python function decorator for login procedures, to allow direct login
based on existing ExternalAuth record and MIT ssl certificate.
"""
def wrapped(*args, **kwargs):
if not settings.MITX_FEATURES['AUTH_USE_MIT_CERTIFICATES']:
return fn(*args, **kwargs)
request = args[0]
cert = ssl_get_cert_from_request(request)
if not cert: # no certificate information - show normal login window
return fn(*args, **kwargs)
(user, email, fullname) = ssl_dn_extract_info(cert)
return external_login_or_signup(request,
external_id=email,
external_domain="ssl:MIT",
credentials=cert,
email=email,
fullname=fullname)
return wrapped
@csrf_exempt
def ssl_login(request):
"""
This is called by student.views.index when
MITX_FEATURES['AUTH_USE_MIT_CERTIFICATES'] = True
Used for MIT user authentication. This presumes the web server
(nginx) has been configured to require specific client
certificates.
If the incoming protocol is HTTPS (SSL) then authenticate via
client certificate. The certificate provides user email and
fullname; this populates the ExternalAuthMap. The user is
nevertheless still asked to complete the edX signup.
Else continues on with student.views.index, and no authentication.
"""
cert = ssl_get_cert_from_request(request)
if not cert:
# no certificate information - go onward to main index
return student_views.index(request)
(user, email, fullname) = ssl_dn_extract_info(cert)
retfun = functools.partial(student_views.index, request)
return external_login_or_signup(request,
external_id=email,
external_domain="ssl:MIT",
credentials=cert,
email=email,
fullname=fullname,
retfun=retfun)
# -----------------------------------------------------------------------------
# Shibboleth (Stanford and others. Uses *Apache* environment variables)
# -----------------------------------------------------------------------------
def shib_login(request):
"""
Uses Apache's REMOTE_USER environment variable as the external id.
This in turn typically uses EduPersonPrincipalName
http://www.incommonfederation.org/attributesummary.html#eduPersonPrincipal
but the configuration is in the shibboleth software.
"""
shib_error_msg = _(dedent(
"""
Your university identity server did not return your ID information to us.
Please try logging in again. (You may need to restart your browser.)
"""))
if not request.META.get('REMOTE_USER'):
log.error("SHIB: no REMOTE_USER found in request.META")
return default_render_failure(request, shib_error_msg)
elif not request.META.get('Shib-Identity-Provider'):
log.error("SHIB: no Shib-Identity-Provider in request.META")
return default_render_failure(request, shib_error_msg)
else:
#if we get here, the user has authenticated properly
shib = {attr: request.META.get(attr, '')
for attr in ['REMOTE_USER', 'givenName', 'sn', 'mail', 'Shib-Identity-Provider']}
#Clean up first name, last name, and email address
#TODO: Make this less hardcoded re: format, but split will work
#even if ";" is not present since we are accessing 1st element
shib['sn'] = shib['sn'].split(";")[0].strip().capitalize().decode('utf-8')
shib['givenName'] = shib['givenName'].split(";")[0].strip().capitalize().decode('utf-8')
log.info("SHIB creds returned: %r", shib)
return external_login_or_signup(request,
external_id=shib['REMOTE_USER'],
external_domain="shib:" + shib['Shib-Identity-Provider'],
credentials=shib,
email=shib['mail'],
fullname=u'%s %s' % (shib['givenName'], shib['sn']),
)
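# Illustrative sketch of the Apache-provided attributes this view reads from
# request.META (values are hypothetical, not taken from the original file):
#   REMOTE_USER            = 'jdoe@stanford.edu'
#   Shib-Identity-Provider = 'https://idp.stanford.edu/shibboleth'
#   givenName              = 'Jane'
#   sn                     = 'Doe'
#   mail                   = 'jdoe@stanford.edu'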
def make_shib_enrollment_request(request):
"""
Need this hack function because shibboleth logins don't happen over POST
but change_enrollment expects its request to be a POST, with
enrollment_action and course_id POST parameters.
"""
enroll_request = HttpRequest()
enroll_request.user = request.user
enroll_request.session = request.session
enroll_request.method = "POST"
# copy() also makes GET and POST mutable
# See https://docs.djangoproject.com/en/dev/ref/request-response/#django.http.QueryDict.update
enroll_request.GET = request.GET.copy()
enroll_request.POST = request.POST.copy()
# also have to copy these GET parameters over to POST
if "enrollment_action" not in enroll_request.POST and "enrollment_action" in enroll_request.GET:
enroll_request.POST.setdefault('enrollment_action', enroll_request.GET.get('enrollment_action'))
if "course_id" not in enroll_request.POST and "course_id" in enroll_request.GET:
enroll_request.POST.setdefault('course_id', enroll_request.GET.get('course_id'))
return enroll_request
def course_specific_login(request, course_id):
"""
Dispatcher function for selecting the specific login method
required by the course
"""
query_string = request.META.get("QUERY_STRING", '')
try:
course = course_from_id(course_id)
except ItemNotFoundError:
#couldn't find the course, will just return vanilla signin page
return redirect_with_querystring('signin_user', query_string)
#now the dispatching conditionals. Only shib for now
if settings.MITX_FEATURES.get('AUTH_USE_SHIB') and 'shib:' in course.enrollment_domain:
return redirect_with_querystring('shib-login', query_string)
#Default fallthrough to normal signin page
return redirect_with_querystring('signin_user', query_string)
def course_specific_register(request, course_id):
"""
Dispatcher function for selecting the specific registration method
required by the course
"""
query_string = request.META.get("QUERY_STRING", '')
try:
course = course_from_id(course_id)
except ItemNotFoundError:
#couldn't find the course, will just return vanilla registration page
return redirect_with_querystring('register_user', query_string)
#now the dispatching conditionals. Only shib for now
if settings.MITX_FEATURES.get('AUTH_USE_SHIB') and 'shib:' in course.enrollment_domain:
#shib-login takes care of both registration and login flows
return redirect_with_querystring('shib-login', query_string)
#Default fallthrough to normal registration page
return redirect_with_querystring('register_user', query_string)
def redirect_with_querystring(view_name, query_string):
"""
Helper function to add query string to redirect views
"""
if query_string:
return redirect("%s?%s" % (reverse(view_name), query_string))
return redirect(view_name)
# -----------------------------------------------------------------------------
# OpenID Provider
# -----------------------------------------------------------------------------
def get_xrds_url(resource, request):
"""
Return the XRDS url for a resource
"""
host = request.get_host()
location = host + '/openid/provider/' + resource + '/'
if request.is_secure():
return 'https://' + location
else:
return 'http://' + location
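# Illustrative sketch (hypothetical host): for a request to host
# 'courses.example.edu' over HTTPS, get_xrds_url('login', request) returns
# 'https://courses.example.edu/openid/provider/login/'.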
def add_openid_simple_registration(request, response, data):
sreg_data = {}
sreg_request = sreg.SRegRequest.fromOpenIDRequest(request)
sreg_fields = sreg_request.allRequestedFields()
# if consumer requested simple registration fields, add them
if sreg_fields:
for field in sreg_fields:
if field == 'email' and 'email' in data:
sreg_data['email'] = data['email']
elif field == 'fullname' and 'fullname' in data:
sreg_data['fullname'] = data['fullname']
elif field == 'nickname' and 'nickname' in data:
sreg_data['nickname'] = data['nickname']
# construct sreg response
sreg_response = sreg.SRegResponse.extractResponse(sreg_request,
sreg_data)
sreg_response.toMessage(response.fields)
def add_openid_attribute_exchange(request, response, data):
try:
ax_request = ax.FetchRequest.fromOpenIDRequest(request)
except ax.AXError:
# not using OpenID attribute exchange extension
pass
else:
ax_response = ax.FetchResponse()
# if consumer requested attribute exchange fields, add them
if ax_request and ax_request.requested_attributes:
for type_uri in ax_request.requested_attributes.iterkeys():
email_schema = 'http://axschema.org/contact/email'
name_schema = 'http://axschema.org/namePerson'
if type_uri == email_schema and 'email' in data:
ax_response.addValue(email_schema, data['email'])
elif type_uri == name_schema and 'fullname' in data:
ax_response.addValue(name_schema, data['fullname'])
# construct ax response
ax_response.toMessage(response.fields)
def provider_respond(server, request, response, data):
"""
Respond to an OpenID request
"""
# get and add extensions
add_openid_simple_registration(request, response, data)
add_openid_attribute_exchange(request, response, data)
# create http response from OpenID response
webresponse = server.encodeResponse(response)
http_response = HttpResponse(webresponse.body)
http_response.status_code = webresponse.code
# add OpenID headers to response
for k, v in webresponse.headers.iteritems():
http_response[k] = v
return http_response
def validate_trust_root(openid_request):
"""
Only allow OpenID requests from valid trust roots
"""
trusted_roots = getattr(settings, 'OPENID_PROVIDER_TRUSTED_ROOT', None)
if not trusted_roots:
# not using trusted roots
return True
# don't allow empty trust roots
if (not hasattr(openid_request, 'trust_root') or
not openid_request.trust_root):
log.error('no trust_root')
return False
# ensure trust root parses cleanly (one wildcard, of form *.foo.com, etc.)
trust_root = TrustRoot.parse(openid_request.trust_root)
if not trust_root:
log.error('invalid trust_root')
return False
# don't allow empty return tos
if (not hasattr(openid_request, 'return_to') or
not openid_request.return_to):
log.error('empty return_to')
return False
# ensure return to is within trust root
if not trust_root.validateURL(openid_request.return_to):
log.error('invalid return_to')
return False
# check that the root matches the ones we trust
if not any(r for r in trusted_roots if fnmatch.fnmatch(trust_root, r)):
log.error('non-trusted root')
return False
return True
@csrf_exempt
def provider_login(request):
"""
OpenID login endpoint
"""
# make and validate endpoint
endpoint = get_xrds_url('login', request)
if not endpoint:
return default_render_failure(request, "Invalid OpenID request")
# initialize store and server
store = DjangoOpenIDStore()
server = Server(store, endpoint)
# first check to see if the request is an OpenID request.
# If so, the client will have specified an 'openid.mode' as part
# of the request.
querydict = dict(request.REQUEST.items())
error = False
if 'openid.mode' in request.GET or 'openid.mode' in request.POST:
# decode request
try:
openid_request = server.decodeRequest(querydict)
except (UntrustedReturnURL, ProtocolError):
openid_request = None
if not openid_request:
return default_render_failure(request, "Invalid OpenID request")
# don't allow invalid and non-trusted trust roots
if not validate_trust_root(openid_request):
return default_render_failure(request, "Invalid OpenID trust root")
# checkid_immediate not supported, require user interaction
if openid_request.mode == 'checkid_immediate':
return provider_respond(server, openid_request,
openid_request.answer(False), {})
# checkid_setup, so display login page
# (by falling through to the provider_login at the
# bottom of this method).
elif openid_request.mode == 'checkid_setup':
if openid_request.idSelect():
# remember request and original path
request.session['openid_setup'] = {
'request': openid_request,
'url': request.get_full_path()
}
# user failed login on previous attempt
if 'openid_error' in request.session:
error = True
del request.session['openid_error']
# OpenID response
else:
return provider_respond(server, openid_request,
server.handleRequest(openid_request), {})
# handle login redirection: these are also sent to this view function,
# but are distinguished by lacking the openid mode. We also know that
# they are posts, because they come from the popup
elif request.method == 'POST' and 'openid_setup' in request.session:
# get OpenID request from session
openid_setup = request.session['openid_setup']
openid_request = openid_setup['request']
openid_request_url = openid_setup['url']
del request.session['openid_setup']
# don't allow invalid trust roots
if not validate_trust_root(openid_request):
return default_render_failure(request, "Invalid OpenID trust root")
# check if user with given email exists
# Failure is redirected to this method (by using the original URL),
# which will bring up the login dialog.
email = request.POST.get('email', None)
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
request.session['openid_error'] = True
msg = "OpenID login failed - Unknown user email: %s"
log.warning(msg, email)
return HttpResponseRedirect(openid_request_url)
# attempt to authenticate user (but not actually log them in...)
# Failure is again redirected to the login dialog.
username = user.username
password = request.POST.get('password', None)
user = authenticate(username=username, password=password)
if user is None:
request.session['openid_error'] = True
msg = "OpenID login failed - password for %s is invalid"
log.warning(msg, email)
return HttpResponseRedirect(openid_request_url)
# authentication succeeded, so fetch user information
# that was requested
if user is not None and user.is_active:
# remove error from session since login succeeded
if 'openid_error' in request.session:
del request.session['openid_error']
log.info("OpenID login success - %s (%s)",
user.username, user.email)
# redirect user to return_to location
url = endpoint + urlquote(user.username)
response = openid_request.answer(True, None, url)
# TODO: for CS50 we are forcibly returning the username
# instead of fullname. In the OpenID simple registration
# extension, we don't have to return any fields we don't
# want to, even if they were marked as required by the
# Consumer. The behavior of what to do when there are
# missing fields is up to the Consumer. The proper change
# should only return the username, however this will likely
# break the CS50 client. Temporarily we will be returning
# username filling in for fullname in addition to username
# as sreg nickname.
# Note too that this is hardcoded, and not really responding to
# the extensions that were registered in the first place.
results = {
'nickname': user.username,
'email': user.email,
'fullname': user.username
}
# the request succeeded:
return provider_respond(server, openid_request, response, results)
# the account is not active, so redirect back to the login page:
request.session['openid_error'] = True
msg = "Login failed - Account not active for user %s"
log.warning(msg, username)
return HttpResponseRedirect(openid_request_url)
# determine consumer domain if applicable
return_to = ''
if 'openid.return_to' in request.REQUEST:
return_to = request.REQUEST['openid.return_to']
matches = re.match(r'\w+:\/\/([\w\.-]+)', return_to)
return_to = matches.group(1)
# display login page
response = render_to_response('provider_login.html', {
'error': error,
'return_to': return_to
})
# add custom XRDS header necessary for discovery process
response['X-XRDS-Location'] = get_xrds_url('xrds', request)
return response
def provider_identity(request):
"""
XRDS for identity discovery
"""
response = render_to_response('identity.xml',
{'url': get_xrds_url('login', request)},
mimetype='text/xml')
# custom XRDS header necessary for discovery process
response['X-XRDS-Location'] = get_xrds_url('identity', request)
return response
def provider_xrds(request):
"""
XRDS for endpoint discovery
"""
response = render_to_response('xrds.xml',
{'url': get_xrds_url('login', request)},
mimetype='text/xml')
# custom XRDS header necessary for discovery process
response['X-XRDS-Location'] = get_xrds_url('xrds', request)
return response
#-------------------
# Pearson
#-------------------
def course_from_id(course_id):
"""Return the CourseDescriptor corresponding to this course_id"""
course_loc = CourseDescriptor.id_to_location(course_id)
return modulestore().get_instance(course_id, course_loc)
@csrf_exempt
def test_center_login(request):
''' Log in students taking exams via Pearson
Takes a POST request that contains the following keys:
- code - a security code provided by Pearson
- clientCandidateID
- registrationID
- exitURL - the url that we redirect to once we're done
- vueExamSeriesCode - a code that indicates the exam that we're using
'''
# errors are returned by navigating to the error_url, adding a query parameter named "code"
# which contains the error code describing the exceptional condition.
def makeErrorURL(error_url, error_code):
log.error("generating error URL with error code {}".format(error_code))
return "{}?code={}".format(error_url, error_code)
# get provided error URL, which will be used as a known prefix for returning error messages to the
# Pearson shell.
error_url = request.POST.get("errorURL")
# TODO: check that the parameters have not been tampered with, by comparing the code provided by Pearson
# with the code we calculate for the same parameters.
if 'code' not in request.POST:
return HttpResponseRedirect(makeErrorURL(error_url, "missingSecurityCode"))
code = request.POST.get("code")
# calculate SHA for query string
# TODO: figure out how to get the original query string, so we can hash it and compare.
if 'clientCandidateID' not in request.POST:
return HttpResponseRedirect(makeErrorURL(error_url, "missingClientCandidateID"))
client_candidate_id = request.POST.get("clientCandidateID")
# TODO: check remaining parameters, and maybe at least log if they're not matching
# expected values....
# registration_id = request.POST.get("registrationID")
# exit_url = request.POST.get("exitURL")
# find testcenter_user that matches the provided ID:
try:
testcenteruser = TestCenterUser.objects.get(client_candidate_id=client_candidate_id)
except TestCenterUser.DoesNotExist:
log.error("not able to find demographics for cand ID {}".format(client_candidate_id))
return HttpResponseRedirect(makeErrorURL(error_url, "invalidClientCandidateID"))
# find testcenter_registration that matches the provided exam code:
# Note that we could rely in future on either the registrationId or the exam code,
# or possibly both. But for now we know what to do with an ExamSeriesCode,
# while we currently have no record of RegistrationID values at all.
if 'vueExamSeriesCode' not in request.POST:
# we are not allowed to make up a new error code, according to Pearson,
# so instead of "missingExamSeriesCode", we use a valid one that is
# inaccurate but at least distinct. (Sigh.)
log.error("missing exam series code for cand ID {}".format(client_candidate_id))
return HttpResponseRedirect(makeErrorURL(error_url, "missingPartnerID"))
exam_series_code = request.POST.get('vueExamSeriesCode')
registrations = TestCenterRegistration.objects.filter(testcenter_user=testcenteruser, exam_series_code=exam_series_code)
if not registrations:
log.error("not able to find exam registration for exam {} and cand ID {}".format(exam_series_code, client_candidate_id))
return HttpResponseRedirect(makeErrorURL(error_url, "noTestsAssigned"))
# TODO: figure out what to do if there are more than one registrations....
# for now, just take the first...
registration = registrations[0]
course_id = registration.course_id
course = course_from_id(course_id) # assume it will be found....
if not course:
log.error("not able to find course from ID {} for cand ID {}".format(course_id, client_candidate_id))
return HttpResponseRedirect(makeErrorURL(error_url, "incorrectCandidateTests"))
exam = course.get_test_center_exam(exam_series_code)
if not exam:
log.error("not able to find exam {} for course ID {} and cand ID {}".format(exam_series_code, course_id, client_candidate_id))
return HttpResponseRedirect(makeErrorURL(error_url, "incorrectCandidateTests"))
location = exam.exam_url
log.info("proceeding with test of cand {} on exam {} for course {}: URL = {}".format(client_candidate_id, exam_series_code, course_id, location))
# check if the test has already been taken
timelimit_descriptor = modulestore().get_instance(course_id, Location(location))
if not timelimit_descriptor:
log.error("cand {} on exam {} for course {}: descriptor not found for location {}".format(client_candidate_id, exam_series_code, course_id, location))
return HttpResponseRedirect(makeErrorURL(error_url, "missingClientProgram"))
timelimit_module_cache = ModelDataCache.cache_for_descriptor_descendents(course_id, testcenteruser.user,
timelimit_descriptor, depth=None)
timelimit_module = get_module_for_descriptor(request.user, request, timelimit_descriptor,
timelimit_module_cache, course_id, position=None)
if not timelimit_module.category == 'timelimit':
log.error("cand {} on exam {} for course {}: non-timelimit module at location {}".format(client_candidate_id, exam_series_code, course_id, location))
return HttpResponseRedirect(makeErrorURL(error_url, "missingClientProgram"))
if timelimit_module and timelimit_module.has_ended:
log.warning("cand {} on exam {} for course {}: test already over at {}".format(client_candidate_id, exam_series_code, course_id, timelimit_module.ending_at))
return HttpResponseRedirect(makeErrorURL(error_url, "allTestsTaken"))
# check if we need to provide an accommodation:
time_accommodation_mapping = {'ET12ET': 'ADDHALFTIME',
'ET30MN': 'ADD30MIN',
'ETDBTM': 'ADDDOUBLE', }
time_accommodation_code = None
for code in registration.get_accommodation_codes():
if code in time_accommodation_mapping:
time_accommodation_code = time_accommodation_mapping[code]
if time_accommodation_code:
timelimit_module.accommodation_code = time_accommodation_code
log.info("cand {} on exam {} for course {}: receiving accommodation {}".format(client_candidate_id, exam_series_code, course_id, time_accommodation_code))
# UGLY HACK!!!
# Login assumes that authentication has occurred, and that there is a
# backend annotation on the user object, indicating which backend
# against which the user was authenticated. We're authenticating here
# against the registration entry, and assuming that the request given
# this information is correct, we allow the user to be logged in
# without a password. This could all be formalized in a backend object
# that does the above checking.
# TODO: (brian) create a backend class to do this.
# testcenteruser.user.backend = "%s.%s" % (backend.__module__, backend.__class__.__name__)
testcenteruser.user.backend = "%s.%s" % ("TestcenterAuthenticationModule", "TestcenterAuthenticationClass")
login(request, testcenteruser.user)
# And start the test:
return jump_to(request, course_id, location)
|
rationalAgent/edx-platform-custom
|
common/djangoapps/external_auth/views.py
|
Python
|
agpl-3.0
| 38,612
|
[
"Brian"
] |
52172fc29d6030e00ade1150e8c4a2733a23ff81dba91274411c12e89af0b17e
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import glob
import os
class Hybpiper(Package):
"""HybPiper was designed for targeted sequence capture, in which DNA
sequencing libraries are enriched for gene regions of interest,
especially for phylogenetics. HybPiper is a suite of Python scripts
that wrap and connect bioinformatics tools in order to extract target
sequences from high-throughput DNA sequencing reads"""
homepage = "https://github.com/mossmatters/HybPiper"
url = "https://github.com/mossmatters/HybPiper/archive/v1.2.0.tar.gz"
version('1.2.0', '0ad78e9ca5e3f23ae0eb6236b07e1780')
depends_on('python@2.7:', type=('build', 'run'))
depends_on('py-biopython', type=('build', 'run'))
depends_on('exonerate')
depends_on('blast-plus')
depends_on('spades')
depends_on('parallel')
depends_on('bwa')
depends_on('samtools')
    def setup_environment(self, spack_env, run_env):
run_env.set('HYBPIPER_HOME', prefix)
def install(self, spec, prefix):
mkdirp(prefix.bin)
files = glob.iglob("*.py")
for file in files:
if os.path.isfile(file):
install(file, prefix.bin)
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/hybpiper/package.py
|
Python
|
lgpl-2.1
| 2,426
|
[
"BLAST",
"BWA",
"Biopython"
] |
a45d3cd62a8f5a11ed4195a1e6efe8083c15af319a8ae3bdfc89aeafcab9d7ca
|
""" GraphData encapsulates input data for the DIRAC Graphs plots
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
from __future__ import print_function
__RCSID__ = "$Id$"
import time
import datetime
import numpy
from matplotlib.dates import date2num
from DIRAC.Core.Utilities.Graphs.GraphUtilities import to_timestamp, pretty_float
DEBUG = 0
def get_key_type( keys ):
""" A utility function to guess the type of the plot keys
"""
min_time_stamp = 1000000000
max_time_stamp = 1900000000
time_type = True
num_type = True
string_type = True
key_type = 'unknown'
for key in keys:
if time_type:
try:
time_data = to_timestamp( key )
if time_data < min_time_stamp or time_data > max_time_stamp:
time_type = False
except ValueError:
time_type = False
if num_type:
try:
num_data = float( key )
except:
num_type = False
if not isinstance(key, basestring):
string_type = False
# Take the most restrictive type
if string_type:
key_type = "string"
if num_type :
key_type = "numeric"
if time_type:
key_type = "time"
return key_type
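# Illustrative sketch (hypothetical keys, not from the original module):
#   get_key_type( ['2015-01-01 10:00:00', '2015-01-02 10:00:00'] ) -> 'time'
#   get_key_type( ['1.5', '2', '7'] )                              -> 'numeric'
#   get_key_type( ['Site_A', 'Site_B'] )                           -> 'string'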
class GraphData:
def __init__( self, data = {} ):
self.truncated = 0
self.all_keys = []
self.labels = []
self.label_values = []
self.subplots = {}
self.plotdata = None
self.data = dict( data )
self.key_type = 'string'
self.initialize()
def isEmpty( self ):
""" Check if there is no data inserted
"""
return not self.plotdata and not self.subplots
def setData( self, data ):
""" Add data to the GraphData object
"""
self.data = dict( data )
self.initialize()
def initialize( self, key_type = None ):
keys = self.data.keys()
if not keys:
print("GraphData Error: empty data")
start = time.time()
if isinstance( self.data[keys[0]], dict ):
for key in self.data:
self.subplots[key] = PlotData( self.data[key], key_type = key_type )
else:
self.plotdata = PlotData( self.data, key_type = key_type )
if DEBUG:
print("Time: plot data", time.time() - start, len(self.subplots))
if self.plotdata:
self.all_keys = self.plotdata.getKeys()
else:
tmpset = set()
for sub in self.subplots.values():
for key in sub.getKeys():
tmpset.add( key )
self.all_keys = list( tmpset )
if key_type:
self.key_type = key_type
else:
self.key_type = get_key_type( self.all_keys )
self.sortKeys()
self.makeNumKeys()
self.sortLabels()
def expandKeys( self ):
if not self.plotdata:
for sub in self.subplots:
self.subplots[sub].expandKeys( self.all_keys )
def isSimplePlot( self ):
return not self.plotdata is None
def sortLabels( self, sort_type = 'max_value', reverse_order=False ):
""" Sort labels with a specified method:
alpha - alphabetic order
max_value - by max value of the subplot
sum - by the sum of values of the subplot
last_value - by the last value in the subplot
avg_nozeros - by an average that excludes all zero values
"""
if self.plotdata:
if self.key_type == "string":
if sort_type in ['max_value', 'sum']:
self.labels = self.plotdata.sortKeys( 'weight' )
else:
self.labels = self.plotdata.sortKeys()
if reverse_order:
self.labels.reverse()
self.label_values = [ self.plotdata.parsed_data[l] for l in self.labels]
else:
if sort_type == 'max_value':
pairs = zip( self.subplots.keys(), self.subplots.values() )
reverse = not reverse_order
pairs.sort( key = lambda x: x[1].max_value, reverse = reverse )
self.labels = [ x[0] for x in pairs ]
self.label_values = [ x[1].max_value for x in pairs ]
elif sort_type == 'last_value':
pairs = zip( self.subplots.keys(), self.subplots.values() )
reverse = not reverse_order
pairs.sort( key = lambda x: x[1].last_value, reverse = reverse )
self.labels = [ x[0] for x in pairs ]
self.label_values = [ x[1].last_value for x in pairs ]
elif sort_type == 'sum':
pairs = []
for key in self.subplots:
pairs.append( ( key, self.subplots[key].sum_value ) )
reverse = not reverse_order
pairs.sort( key = lambda x: x[1], reverse = reverse )
self.labels = [ x[0] for x in pairs ]
self.label_values = [ x[1] for x in pairs ]
elif sort_type == 'alpha':
self.labels = self.subplots.keys()
self.labels.sort()
if reverse_order:
self.labels.reverse()
self.label_values = [ self.subplots[x].sum_value for x in self.labels ]
elif sort_type == 'avg_nozeros':
pairs = zip( self.subplots.keys(), self.subplots.values() )
reverse = not reverse_order
pairs.sort( key = lambda x: x[1].avg_nozeros, reverse = reverse )
self.labels = [ x[0] for x in pairs ]
self.label_values = [ x[1].avg_nozeros for x in pairs ]
else:
self.labels = self.subplots.keys()
if reverse_order:
self.labels.reverse()
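  # Illustrative sketch (hypothetical subplot names): with three subplots whose
  # maxima are {'CERN': 120, 'GRIDKA': 80, 'PIC': 45}, sortLabels('max_value')
  # orders self.labels as ['CERN', 'GRIDKA', 'PIC'] and stores the maxima in
  # self.label_values; passing reverse_order=True yields the opposite ordering.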
def sortKeys( self ):
""" Sort the graph keys in a natural order
"""
if self.plotdata:
self.plotdata.sortKeys()
self.all_keys = self.plotdata.getKeys()
else:
self.all_keys.sort()
self.min_key = min( self.all_keys )
self.max_key = max( self.all_keys )
def makeNumKeys( self ):
""" Make numerical representation of the graph keys suitable for plotting
"""
self.all_num_keys = []
if self.key_type == "string":
self.all_string_map = {}
next = 0
for key in self.all_keys:
self.all_string_map[key] = next
self.all_num_keys.append( next )
next += 1
elif self.key_type == "time":
self.all_num_keys = [ date2num( datetime.datetime.fromtimestamp( to_timestamp( key ) ) ) for key in self.all_keys ]
elif self.key_type == "numeric":
self.all_num_keys = [ float( key ) for key in self.all_keys ]
self.min_num_key = min( self.all_num_keys )
self.max_num_key = max( self.all_num_keys )
def makeCumulativeGraph( self ):
""" Prepare data for the cumulative graph
"""
self.expandKeys()
if self.plotdata:
self.plotdata.makeCumulativePlot()
if self.truncated:
self.otherPlot.makeCumulativePlot()
if self.subplots:
for label in self.subplots:
self.subplots[label].makeCumulativePlot()
self.sortLabels( sort_type = 'last_value' )
def getLabels( self ):
""" Get the graph labels together with the numeric values used for the label
sorting
"""
labels = []
if self.plotdata:
if self.key_type != 'string':
labels = [( 'NoLabels', 0. )]
else:
labels = zip( self.labels, self.label_values )
elif self.truncated:
tlabels = self.labels[:self.truncated]
tvalues = self.label_values[:self.truncated]
labels = zip( tlabels, tvalues )
labels.append( ( 'Others', sum( self.label_values[self.truncated:] ) ) )
else:
labels = zip( self.labels, self.label_values )
return labels
def getStringMap( self ):
""" Get string to number mapping for numeric type keys
"""
return self.all_string_map
def getNumberOfKeys( self ):
return len( self.all_keys )
def getNumberOfLabels( self ):
if self.truncated:
return self.truncated + 1
else:
return len( self.labels )
def getPlotNumData( self, label = None, zipFlag = True ):
""" Get the plot data in a numeric form
"""
if self.plotdata:
if zipFlag:
return zip( self.plotdata.getNumKeys(), self.plotdata.getValues(), self.plotdata.getPlotErrors() )
else:
return self.plotdata.getValues()
elif label is not None:
if label == "Others":
return self.otherPlot.getPlotDataForNumKeys( self.all_num_keys )
else:
return self.subplots[label].getPlotDataForNumKeys( self.all_num_keys )
else:
# Get the sum of all the subplots
self.expandKeys()
arrays = []
for label in self.subplots:
arrays.append( numpy.array( [ x[1] for x in self.subplots[label].getPlotDataForNumKeys( self.all_num_keys, True )] ) )
sum_array = sum( arrays )
if zipFlag:
return zip( self.all_num_keys, list( sum_array ) )
else:
return sum_array
def truncateLabels( self, limit = 10 ):
""" Truncate the number of labels to the limit, leave the most important
ones, accumulate the rest in the 'Others' label
"""
if self.plotdata:
return
nLabels = len( self.labels )
if nLabels <= limit:
return
self.truncated = limit
new_labels = self.labels[:limit]
new_labels.append( 'Others' )
other_data = {}
for key in self.all_keys:
other_data[key] = 0.
for label in self.labels:
if label not in new_labels:
for key in self.all_keys:
if key in self.subplots[label].parsed_data:
other_data[key] += self.subplots[label].parsed_data[key]
self.otherPlot = PlotData( other_data )
def getStats( self ):
""" Get statistics of the graph data
"""
numData = self.getPlotNumData( zipFlag = False )
if not numData:
return 0, 0, 0, 0
numData = numpy.array( numData )
min_value = numData.min()
max_value = numData.max()
average = float( numData.sum() ) / len( numData )
current = numData[-1]
return min_value, max_value, average, current
def getStatString( self, unit = None ):
""" Get a string summarizing the graph data statistics
"""
min_value, max_value, average, current = self.getStats()
tmpList = []
unitString = ''
if unit:
unitString = str( unit )
if max_value:
try:
s = "Max: " + pretty_float( max_value ) + " " + unitString
tmpList.append( s.strip() )
except Exception as e:
pass
if min_value:
try:
s = "Min: " + pretty_float( min_value ) + " " + unitString
tmpList.append( s.strip() )
except Exception as e:
pass
if average:
try:
s = "Average: " + pretty_float( average ) + " " + unitString
tmpList.append( s.strip() )
except Exception as e:
pass
if current:
try:
s = "Current: " + pretty_float( current ) + " " + unitString
tmpList.append( s.strip() )
except Exception as e:
pass
resultString = ', '.join( tmpList )
return resultString
class PlotData:
""" PlotData class is a container for a one dimensional plot data
"""
def __init__( self, data, single = True, key_type = None ):
self.key_type = "unknown"
keys = data.keys()
if not keys:
print("PlotData Error: empty data")
return
# Original data
self.data = dict( data )
# Working copy of the parsed data
self.parsed_data = {}
self.parsed_errors = {}
# Keys and values as synchronized lists
self.keys = []
self.num_keys = []
self.values = []
self.errors = []
self.sorted_keys = []
# Do initial data parsing
self.parseData( key_type )
if single:
self.initialize()
def initialize( self ):
if self.key_type == "string":
self.keys = self.sortKeys( 'weight' )
else:
self.keys = self.sortKeys()
self.values = [ self.parsed_data.get(k, 0.0) for k in self.keys ]
self.errors = [ self.parsed_errors.get(k, 0.0) for k in self.keys ]
values_to_sum = [ self.parsed_data.get(k, 0.0) for k in self.keys if k != '' ]
self.real_values = []
for k in self.keys:
if self.parsed_data[k] is not None:
self.real_values.append( self.parsed_data[k] )
self.values_sum = float( sum( self.real_values ) )
# Prepare numerical representation of keys for plotting
self.num_keys = []
if self.key_type == "string":
self.string_map = {}
next = 0
for key in self.keys:
self.string_map[key] = next
self.num_keys.append( next )
next += 1
elif self.key_type == "time":
self.num_keys = [ date2num( datetime.datetime.fromtimestamp( to_timestamp( key ) ) ) for key in self.keys ]
elif self.key_type == "numeric":
self.num_keys = [ float( key ) for key in self.keys ]
self.min_value = float( min( self.real_values ) )
self.max_value = float( max( self.real_values ) )
self.min_key = self.keys[0]
self.max_key = self.keys[-1]
self.sum_value = float( sum( self.real_values ) )
self.last_value = float( self.real_values[-1] )
count = len( filter(lambda a: a != 0, self.real_values) )
if count != 0:
self.avg_nozeros = self.sum_value / float( count )
else:
self.avg_nozeros = 0
def expandKeys( self, all_keys ):
""" Fill zero values into the missing keys
"""
for k in all_keys:
if k not in self.parsed_data:
self.parsed_data[k] = 0.
self.sorted_keys = []
self.keys = self.parsed_data.keys()
self.initialize()
def sortKeys( self, sort_type = 'alpha' ):
""" Sort keys according to the specified method :
alpha - sort in alphabetic order
weight - sort in the order of values
"""
if self.sorted_keys:
return self.sorted_keys
if sort_type == 'weight':
pairs = zip( self.parsed_data.keys(), self.parsed_data.values() )
pairs.sort( key = lambda x: x[1], reverse = True )
self.sorted_keys = [ x[0] for x in pairs ]
elif sort_type == 'alpha':
self.sorted_keys = self.keys
self.sorted_keys.sort()
else:
print("Unknown sorting type:", sort_type)
return self.sorted_keys
def __data_size( self, item ):
"""
Determine a numerical size for the data; this is used to
sort the keys of the graph.
If the item is a tuple, take the absolute value of the first entry.
Otherwise, attempt to take the absolute value of that item. If that
fails, just return -1.
"""
if isinstance(item, tuple):
return abs( item[0] )
try:
return abs( item )
    except TypeError:
      return -1
def parseKey( self, key ):
"""
    Parse the name of the pivot key: time keys are converted to timestamps, all other keys are returned unchanged.
"""
if self.key_type == "time":
return to_timestamp( key )
else:
return key
def parseDatum( self, data ):
"""
    Parse a single data value, which may carry an error either as a 'value::error' string or as a (value, error) tuple. Returns (value, error) as floats, with None for anything that cannot be converted.
"""
if isinstance(data, basestring) and "::" in data:
datum,error = data.split("::")
elif isinstance(data, tuple):
datum,error = data
else:
error = 0.
datum = data
try:
resultD = float( datum )
except:
resultD = None
try:
resultE = float( error )
except:
resultE = None
return ( resultD, resultE )
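  # Illustrative sketch (hypothetical inputs): parseDatum('1.5::0.2') gives
  # (1.5, 0.2), parseDatum((3.0, 0.1)) gives (3.0, 0.1), and a plain value such
  # as parseDatum(7) gives (7.0, 0.0); unparseable parts come back as None.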
def parseData( self, key_type = None ):
"""
Parse all the data values passed to the graph. For this super class,
basically does nothing except loop through all the data. A sub-class
should override the parseDatum and parse_pivot functions rather than
this one.
"""
if key_type:
self.key_type = key_type
else:
self.key_type = get_key_type( self.data.keys() )
new_parsed_data = {}
new_passed_errors = {}
for key, data in self.data.items():
new_key = self.parseKey( key )
data,error = self.parseDatum( data )
#if data != None:
new_parsed_data[ new_key ] = data
new_passed_errors[ new_key ] = error
self.parsed_data = new_parsed_data
self.parsed_errors = new_passed_errors
self.keys = self.parsed_data.keys()
def makeCumulativePlot( self ):
if not self.sorted_keys:
self.sortKeys()
cum_values = []
if self.values[0] is None:
cum_values.append( 0. )
else:
cum_values.append( self.values[0] )
for i in range( 1, len( self.values ) ):
if self.values[i] is None:
cum_values.append( cum_values[i - 1] )
else:
cum_values.append( cum_values[i - 1] + self.values[i] )
self.values = cum_values
self.last_value = float( self.values[-1] )
def getPlotData( self ):
return self.parsed_data
def getPlotErrors( self ):
return self.parsed_errors
def getPlotNumData( self ):
return zip( self.num_keys, self.values, self.errors )
def getPlotDataForKeys( self, keys ):
result_pairs = []
for key in keys:
if key in self.parsed_data:
        result_pairs.append( ( key, self.parsed_data[key], self.parsed_errors[key] ) )
      else:
        result_pairs.append( ( key, None, 0. ) )
return result_pairs
def getPlotDataForNumKeys( self, num_keys, zeroes = False ):
result_pairs = []
for num_key in num_keys:
try:
ind = self.num_keys.index( num_key )
if self.values[ind] is None and zeroes:
result_pairs.append( ( self.num_keys[ind], 0., 0. ) )
else:
result_pairs.append( ( self.num_keys[ind], self.values[ind], self.errors[ind] ) )
except ValueError:
if zeroes:
result_pairs.append( ( num_key, 0., 0. ) )
else:
result_pairs.append( ( num_key, None, 0. ) )
return result_pairs
def getKeys( self ):
return self.keys
def getNumKeys( self ):
return self.num_keys
def getValues( self ):
return self.values
def getMaxValue( self ):
return max( self.values )
def getMinValue( self ):
return min( self.values )
|
petricm/DIRAC
|
Core/Utilities/Graphs/GraphData.py
|
Python
|
gpl-3.0
| 17,727
|
[
"DIRAC"
] |
b450ebeab2a4f5ec00819f96af2080437378f036bad21724b990af869987ae17
|
"""Utilities used in the Kadenze Academy Course on Deep Learning w/ Tensorflow.
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
Parag K. Mital
Copyright Parag K. Mital, June 2016.
"""
import matplotlib.pyplot as plt
import tensorflow as tf
import urllib
import numpy as np
import zipfile
import os
def imcrop_tosquare(img):
"""Make any image a square image.
Parameters
----------
img : np.ndarray
Input image to crop, assumed at least 2d.
Returns
-------
crop : np.ndarray
Cropped image.
"""
size = np.min(img.shape[:2])
extra = img.shape[:2] - size
crop = img
for i in np.flatnonzero(extra):
crop = np.take(crop, extra[i] // 2 + np.r_[:size], axis=i)
return crop
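# Illustrative sketch (hypothetical shapes): a 480 x 640 x 3 image is cropped
# symmetrically along its longer axis, so imcrop_tosquare(img).shape would be
# (480, 480, 3).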
def slice_montage(montage, img_h, img_w, n_imgs):
"""Slice a montage image into n_img h x w images.
Performs the opposite of the montage function. Takes a montage image and
slices it back into a N x H x W x C image.
Parameters
----------
montage : np.ndarray
Montage image to slice.
img_h : int
Height of sliced image
img_w : int
Width of sliced image
n_imgs : int
Number of images to slice
Returns
-------
sliced : np.ndarray
Sliced images as 4d array.
"""
sliced_ds = []
for i in range(int(np.sqrt(n_imgs))):
for j in range(int(np.sqrt(n_imgs))):
sliced_ds.append(montage[
1 + i + i * img_h:1 + i + (i + 1) * img_h,
1 + j + j * img_w:1 + j + (j + 1) * img_w])
return np.array(sliced_ds)
def montage(images, saveto='montage.png'):
"""Draw all images as a montage separated by 1 pixel borders.
Also saves the file to the destination specified by `saveto`.
Parameters
----------
images : numpy.ndarray
Input array to create montage of. Array should be:
batch x height x width x channels.
saveto : str
Location to save the resulting montage image.
Returns
-------
m : numpy.ndarray
Montage image.
"""
if isinstance(images, list):
images = np.array(images)
img_h = images.shape[1]
img_w = images.shape[2]
n_plots = int(np.ceil(np.sqrt(images.shape[0])))
if len(images.shape) == 4 and images.shape[3] == 3:
m = np.ones(
(images.shape[1] * n_plots + n_plots + 1,
images.shape[2] * n_plots + n_plots + 1, 3)) * 0.5
else:
m = np.ones(
(images.shape[1] * n_plots + n_plots + 1,
images.shape[2] * n_plots + n_plots + 1)) * 0.5
for i in range(n_plots):
for j in range(n_plots):
this_filter = i * n_plots + j
if this_filter < images.shape[0]:
this_img = images[this_filter]
m[1 + i + i * img_h:1 + i + (i + 1) * img_h,
1 + j + j * img_w:1 + j + (j + 1) * img_w] = this_img
plt.imsave(arr=m, fname=saveto)
return m
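# Illustrative usage sketch (added; not part of the original course utilities).
# The output filename below is an arbitrary placeholder.
def _montage_demo(saveto='montage_demo.png'):
    """Build a montage of 16 random 32x32 RGB images and return its shape."""
    imgs = np.random.rand(16, 32, 32, 3)
    m = montage(imgs, saveto=saveto)
    # 16 images tile into a 4x4 grid with 1-pixel borders: (133, 133, 3).
    return m.shape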
def get_celeb_files():
"""Download the first 100 images of the celeb dataset.
Files will be placed in a directory 'img_align_celeba' if one
doesn't exist.
Returns
-------
files : list of strings
Locations to the first 100 images of the celeb net dataset.
"""
# Create a directory
if not os.path.exists('img_align_celeba'):
os.mkdir('img_align_celeba')
# Now perform the following 100 times:
for img_i in range(1, 101):
# create a string using the current loop counter
f = '000%03d.jpg' % img_i
# and get the url with that string appended the end
url = 'https://s3.amazonaws.com/cadl/celeb-align/' + f
# We'll print this out to the console so we can see how far we've gone
print(url, end='\r')
# And now download the url to a location inside our new directory
urllib.request.urlretrieve(url, os.path.join('img_align_celeba', f))
files = [os.path.join('img_align_celeba', file_i)
for file_i in os.listdir('img_align_celeba')
if '.jpg' in file_i]
return files
def get_celeb_imgs():
"""Load the first 100 images of the celeb dataset.
Returns
-------
imgs : list of np.ndarray
List of the first 100 images from the celeb dataset
"""
return [plt.imread(f_i) for f_i in get_celeb_files()]
def gauss(mean, stddev, ksize):
"""Use Tensorflow to compute a Gaussian Kernel.
Parameters
----------
mean : float
Mean of the Gaussian (e.g. 0.0).
stddev : float
Standard Deviation of the Gaussian (e.g. 1.0).
ksize : int
Size of kernel (e.g. 16).
Returns
-------
kernel : np.ndarray
Computed Gaussian Kernel using Tensorflow.
"""
g = tf.Graph()
with tf.Session(graph=g):
x = tf.linspace(-3.0, 3.0, ksize)
z = (tf.exp(tf.negative(tf.pow(x - mean, 2.0) /
(2.0 * tf.pow(stddev, 2.0)))) *
(1.0 / (stddev * tf.sqrt(2.0 * 3.1415))))
return z.eval()
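# For reference, a NumPy-only sketch of the same kernel (added for clarity;
# the TensorFlow version above assumes a TF1-style Session environment).
def gauss_np(mean, stddev, ksize):
    """Compute the same 1-D Gaussian kernel without TensorFlow."""
    x = np.linspace(-3.0, 3.0, ksize)
    return (np.exp(-np.power(x - mean, 2.0) / (2.0 * np.power(stddev, 2.0))) *
            (1.0 / (stddev * np.sqrt(2.0 * np.pi))))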
def gauss2d(mean, stddev, ksize):
"""Use Tensorflow to compute a 2D Gaussian Kernel.
Parameters
----------
mean : float
Mean of the Gaussian (e.g. 0.0).
stddev : float
Standard Deviation of the Gaussian (e.g. 1.0).
ksize : int
Size of kernel (e.g. 16).
Returns
-------
kernel : np.ndarray
Computed 2D Gaussian Kernel using Tensorflow.
"""
z = gauss(mean, stddev, ksize)
g = tf.Graph()
with tf.Session(graph=g):
z_2d = tf.matmul(tf.reshape(z, [ksize, 1]), tf.reshape(z, [1, ksize]))
return z_2d.eval()
def convolve(img, kernel):
"""Use Tensorflow to convolve a 4D image with a 4D kernel.
Parameters
----------
img : np.ndarray
4-dimensional image shaped N x H x W x C
kernel : np.ndarray
4-dimensional image shape K_H, K_W, C_I, C_O corresponding to the
kernel's height and width, the number of input channels, and the
number of output channels. Note that C_I should = C.
Returns
-------
result : np.ndarray
Convolved result.
"""
g = tf.Graph()
with tf.Session(graph=g):
convolved = tf.nn.conv2d(img, kernel, strides=[1, 1, 1, 1], padding='SAME')
res = convolved.eval()
return res
def gabor(ksize=32):
"""Use Tensorflow to compute a 2D Gabor Kernel.
Parameters
----------
ksize : int, optional
Size of kernel.
Returns
-------
gabor : np.ndarray
Gabor kernel with ksize x ksize dimensions.
"""
g = tf.Graph()
with tf.Session(graph=g):
z_2d = gauss2d(0.0, 1.0, ksize)
ones = tf.ones((1, ksize))
ys = tf.sin(tf.linspace(-3.0, 3.0, ksize))
ys = tf.reshape(ys, [ksize, 1])
wave = tf.matmul(ys, ones)
gabor = tf.multiply(wave, z_2d)
return gabor.eval()
def build_submission(filename, file_list, optional_file_list=()):
"""Helper utility to check homework assignment submissions and package them.
Parameters
----------
filename : str
Output zip file name
file_list : tuple
Tuple of files to include
"""
# check each file exists
for part_i, file_i in enumerate(file_list):
if not os.path.exists(file_i):
print('\nYou are missing the file {}. '.format(file_i) +
'It does not look like you have completed Part {}.'.format(
part_i + 1))
def zipdir(path, zf):
for root, dirs, files in os.walk(path):
for file in files:
# make sure the files are part of the necessary file list
if file.endswith(file_list) or file.endswith(optional_file_list):
zf.write(os.path.join(root, file))
# create a zip file with the necessary files
zipf = zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED)
zipdir('.', zipf)
zipf.close()
print('Your assignment zip file has been created!')
print('Now submit the file:\n{}\nto Kadenze for grading!'.format(
os.path.abspath(filename)))
def linear(x, n_output, name=None, activation=None, reuse=None):
"""Fully connected layer.
Parameters
----------
x : tf.Tensor
Input tensor to connect
n_output : int
Number of output neurons
name : None, optional
Scope to apply
Returns
-------
op : tf.Tensor
Output of fully connected layer.
"""
if len(x.get_shape()) != 2:
x = flatten(x, reuse=reuse)
n_input = x.get_shape().as_list()[1]
with tf.variable_scope(name or "fc", reuse=reuse):
W = tf.get_variable(
name='W',
shape=[n_input, n_output],
dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
b = tf.get_variable(
name='b',
shape=[n_output],
dtype=tf.float32,
initializer=tf.constant_initializer(0.0))
h = tf.nn.bias_add(
name='h',
value=tf.matmul(x, W),
bias=b)
if activation:
h = activation(h)
return h, W
def flatten(x, name=None, reuse=None):
"""Flatten Tensor to 2-dimensions.
Parameters
----------
x : tf.Tensor
Input tensor to flatten.
name : None, optional
Variable scope for flatten operations
Returns
-------
flattened : tf.Tensor
Flattened tensor.
"""
with tf.variable_scope('flatten'):
dims = x.get_shape().as_list()
if len(dims) == 4:
flattened = tf.reshape(
x,
shape=[-1, dims[1] * dims[2] * dims[3]])
elif len(dims) == 2 or len(dims) == 1:
flattened = x
else:
raise ValueError('Expected n dimensions of 1, 2 or 4. Found:',
len(dims))
return flattened
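# Illustrative NumPy analogue of the flatten() shape rule above (added for
# clarity; not part of the original utilities): 4-d tensors collapse to
# (batch, height * width * channels), while 1-d and 2-d inputs pass through.
def _flatten_shape(shape):
    if len(shape) == 4:
        return (shape[0], shape[1] * shape[2] * shape[3])
    elif len(shape) in (1, 2):
        return tuple(shape)
    raise ValueError('Expected 1, 2 or 4 dimensions, found: %d' % len(shape))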
|
niazangels/CADL
|
session-2/libs/utils.py
|
Python
|
apache-2.0
| 10,004
|
[
"Gaussian"
] |
a987086c0767e9ff9a392cc398b2cbe014721b11cbfc68e8996b8e0674ba26b3
|
../../../../../../../share/pyshared/orca/scripts/apps/gnome-panel/script.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/gnome-panel/script.py
|
Python
|
gpl-3.0
| 75
|
[
"ORCA"
] |
11946c725b662b8b26b0d7d49624b5c079a8bc6e7b4442ce6568caad83303e66
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.2.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Changelog:
#
# * 2.1a (2019.09.20)
# - Improving porosity support
# - ENH: 180 deg search
# * 2.0d (2019.04.17-2019.05.06)
# - Adding dask support
# - Many code refactorings for semiautomatic runs
# - Allow manual borders selections
# * 2.0.b1 (2019.04.03)
# - bug fixing
# - try remove bad frames
# * 1.6.2 (2019.02.11)
# - fixing object detection
# * 1.6.1 (2018.11.19)
# - extend borders range (mean to percentile)
# * 1.6 (2018.11.08)
# - change algorithm of object detection with gaussian fitting
# - add y-clipping to remove sample holder
# - change algorithm of axis searching
# - change hdf5 compression to lzf
# - changing 3d visualisation
# - replace log_process with tqdm
# * 1.5 (2018.09.11)
# - saving full tomography volume
# - deleting temporary files as soon as possible
# - change threshold in object detection (1/6 -> 1/5)
# * 1.4 (2018.08.23)
# - Fix: correct resized volume serialization (smoothing instead of cherry picking)
# - New: 3D visualisation
# - Fix: sinogram shifting after rotation axis fix
# - Update: Searching rotation axis
# * 1.3 (2018.07.03)
# - Update graphics
# - Update axis search algorithms
# * 1.2 (2018.06.04)
# - Change threshold
# * 1.1 (2018.03.14)
# - Add NLM filtering
# * 1.0 (2017.02.01)
# - First automation version.
# %%
#manual mode
# %matplotlib notebook
#automatic mode
# # %matplotlib inline
# %%
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
import os
import h5py
import pylab as plt
import numpy as np
import dask.array as da
import numexpr as ne
import cv2
import time
import astra
import tomopy
import requests, json
import configparser
from skimage.restoration import denoise_nl_means, estimate_sigma
from skimage.measure import compare_psnr
from skimage.transform import resize
import scipy.optimize
import scipy.signal
from tqdm import tqdm_notebook
from ipywidgets import interact, widgets
from glob import glob
import tomotools2 as tomotools
# %%
# # settings for docker
config = configparser.ConfigParser()
config.read('tomo.ini')
experiment_id = config['SAMPLE']['_id']
data_dir = '/fast/'
storage_dir = '/storage/'
STORAGE_SERVER = "http://rbtmstorage_server_1:5006/"
# %%
STORAGE_SERVER = 'http://10.0.7.153:5006/'
storage_dir = '/diskmnt/a/makov/robotom/'
data_dir = '/diskmnt/fast/makov/robotom/'
# experiment_id = '650c1997-370e-4937-b674-bd7429d29423'
# %%
tomo_info = tomotools.get_tomoobject_info(experiment_id, STORAGE_SERVER)
tomo_info
# %%
def safe_median(data):
m_data = cv2.medianBlur(data,3)
mask = np.abs(m_data-data) > 0.1*np.abs(data)
res = data.copy()
res[mask] = m_data[mask]
return res
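# %%
# Illustrative check of safe_median (added for clarity, not part of the
# original pipeline): a single hot pixel deviating strongly from its
# neighbourhood is replaced by the local median; the flat background is kept.
demo_frame = np.ones((5, 5), dtype=np.float32)
demo_frame[2, 2] = 10.0  # simulated outlier
print(safe_median(demo_frame)[2, 2])  # expected: 1.0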
# %%
def load_tomo_data(data_file, tmp_dir):
empty_images, _ = tomotools.get_frame_group(data_file, 'empty', tmp_dir)
dark_images, _ = tomotools.get_frame_group(data_file, 'dark', tmp_dir)
empty_image = np.median(empty_images,axis=0)
dark_image = np.median(dark_images,axis=0)
empty_beam = empty_image - dark_image
# Load the data frames
#TODO: add support for the case of multiple frames per angle
data_images, data_angles = tomotools.get_frame_group(data_file, 'data', tmp_dir)
data_images_clear = da.from_array(data_images, chunks=(1, 1024,1024))-dark_image
return empty_beam, data_images_clear, data_angles
# %% [markdown]
# # Loading experimental data
# %%
data_file = tomotools.get_experiment_hdf5(experiment_id, data_dir, STORAGE_SERVER)
tmp_dir = os.path.join(data_dir, experiment_id)
tomotools.mkdir_p(tmp_dir)
empty_beam, data_images, data_angles = load_tomo_data(data_file, tmp_dir)
# %%
def show_exp_data(empty_beam, data_images):
max_intensity = np.percentile(empty_beam[:],90)
plt.figure(figsize=(8,12))
plt.subplot(211)
plt.imshow(empty_beam.T, vmin=0, vmax=max_intensity, cmap=plt.cm.gray, interpolation='bilinear')
cbar = plt.colorbar()
cbar.set_label('Intensity, a.u.', rotation=90)
plt.title('Direct beam')
plt.xlabel('Detector channel number')
plt.ylabel('Detector channel number')
plt.subplot(212)
plt.imshow(data_images[0].T, vmin=0, vmax=max_intensity, cmap=plt.cm.gray, interpolation='bilinear')
cbar = plt.colorbar()
cbar.set_label('Intensity, a.u.', rotation=90)
plt.title('Object image')
plt.xlabel('Detector channel number')
plt.ylabel('Detector channel number')
plt.show()
# %%
show_exp_data(empty_beam, data_images)
# %%
#TODO: Profile this function
def find_good_frames(data_images, data_angles):
intensity = data_images.mean(axis=-1).mean(axis=-1)
intensity_mask = (intensity<1.2*intensity.mean()) * (intensity>0.8*intensity.mean()) # drop bad points
good_frames = np.arange(len(intensity))[intensity_mask]
intensity_t = intensity[good_frames]
data_angles_t = data_angles[good_frames]
plt.figure(figsize=(8,5))
plt.plot(data_angles[np.argsort(data_angles)],
intensity[np.argsort(data_angles)],
label='Before filtering')
plt.hlines(np.median(intensity),0, np.max(data_angles),'r', label='Reference value')
plt.plot(data_angles_t[np.argsort(data_angles_t)],
intensity_t[np.argsort(data_angles_t)],
'g', label='After filtering')
plt.xlabel('Angle')
plt.ylabel('Frame mean intensity')
plt.grid()
plt.legend(loc=0)
plt.show()
return good_frames
good_frames = find_good_frames(data_images, data_angles)
# %% [markdown]
# # Remove bad frames
# %%
data_images_good, _ = tomotools.load_create_mm(os.path.join(tmp_dir,'data_images_good.tmp'),
shape=(len(good_frames),
data_images.shape[1],
data_images.shape[2]),
dtype='float32')
#TODO: Profile this code. In case if no bad frames, just skip it
for i in tqdm_notebook(range(len(good_frames))):
data_images_good[i] = data_images[good_frames[i]]
data_angles = data_angles[good_frames]
# %% [markdown]
# # Searching object borders
# %%
data_mean = np.mean(data_images_good,axis=0)
data_mean = cv2.medianBlur(data_mean,3)
data_mean[data_mean<=1] = 1
# %%
def gauss(x, *p):
A, mu, sigma, C = p
return C+A*np.exp(-(x-mu)**2/(2.*sigma**2))
def get_gauss_fit(thr):
k = np.percentile(empty_beam/data_mean, thr,axis=-1)
p0 = [1., len(k)/2., len(k)/4., 0.]
coeff, var_matrix = scipy.optimize.curve_fit(gauss, range(len(k)), k, p0=p0)
A, mu, sigma, C = coeff
sigma = np.abs(sigma)
return A, mu, sigma, C, k
def get_x_limits():
A, mu, sigma, C, k = get_gauss_fit(80)
# res = [get_gauss_fit(k) for k in range(0,100,10)]
x_min = np.max([200, mu-2*sigma-400]).astype('int32')
x_max = np.min([len(k)-200, mu+2*sigma+400]).astype('int32')
plt.figure(figsize=(5,5))
plt.plot(k)
plt.plot(gauss(range(len(k)), A, mu, sigma, C))
plt.vlines([x_min, x_max], k.min(),k.max())
plt.grid()
return x_min, x_max
x_min, x_max = get_x_limits()
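# %%
# Synthetic sanity check of the Gaussian model above (added for illustration):
# a noiseless profile generated by gauss() should be recovered by curve_fit
# with parameters close to the ones used to build it.
_x_demo = np.arange(200, dtype='float64')
_y_demo = gauss(_x_demo, 2.0, 100.0, 15.0, 0.5)
_coeff_demo, _ = scipy.optimize.curve_fit(gauss, _x_demo, _y_demo, p0=[1., 100., 20., 0.])
print(_coeff_demo)  # expected to be close to [2.0, 100.0, 15.0, 0.5]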
# %%
def get_y_limits():
k = np.percentile((empty_beam/data_mean)[x_min:x_max,:],90,axis=0)
k = scipy.signal.medfilt(k,5)
thr_max = np.percentile(k,5)
thr_min = np.percentile(k,5)
y_max = np.max(np.argwhere(k>thr_max))+ 100
y_min = np.min(np.argwhere(k>thr_min))- 100
y_min = np.max([0, y_min])
y_max = np.min([len(k), y_max])
plt.figure(figsize=(5,5))
plt.plot(k)
plt.hlines([thr_min, thr_max], 0, len(k))
plt.vlines([y_min, y_max], min(k), max(k))
plt.grid()
return y_min,y_max
y_min,y_max = get_y_limits()
# %%
print(x_min, x_max, y_min, y_max)
print(x_max-x_min, y_max-y_min)
# %% [markdown]
# # SET THE BORDERS MANUALLY HERE
# %%
plt.gray()
plt.figure(figsize=(8,8))
ax = plt.imshow(data_mean.T, vmin=0, interpolation='bilinear')
plt.axis('tight')
plt.hlines([y_min,y_max],x_min, x_max,'r')
plt.vlines([x_min,x_max],y_min, y_max,'g')
plt.xlabel('X')
plt.ylabel('Y')
plt.show()
# %%
xmin, xmax = np.sort(ax.axes.get_xlim())
ymin, ymax = np.sort(ax.axes.get_ylim())
xmin = np.max([0, int(np.floor(xmin))])
xmax = int(np.ceil(xmax))
ymin = np.max([0, int(np.floor(ymin))])
ymax = int(np.ceil(ymax))
xmin = x_min if xmin == 0 else xmin
xmax = x_max if xmax == data_mean.shape[0] else xmax
ymin = y_min if ymin==0 else ymin
ymax = y_max if ymax == data_mean.shape[1] else ymax
# INSERT BORDERS MANUALLY HERE
# xmin =
# xmax =
# ymin =
# ymax =
print(xmin, xmax, ymin, ymax)
print(xmax-xmin, ymax-ymin)
#TODO: add save cut parameters to config file
# %%
def cut_data_images(data_images, empty_beam, data_angles, xmin, xmax, ymin, ymax):
data_images_masked, _ = tomotools.load_create_mm(os.path.join(tmp_dir,'data_images_masked.tmp'),
shape = (data_angles.shape[0],
xmax-xmin,
ymax-ymin), dtype='float32',
force_create=True)
empty_masked, _ = tomotools.load_create_mm(os.path.join(tmp_dir,'empty_images_masked.tmp'),
shape = (xmax-xmin,
ymax-ymin), dtype='float32',
force_create=True)
empty_masked[:] = empty_beam[xmin:xmax,ymin:ymax]
plt.figure(figsize=(7,7))
plt.imshow(data_images_good[0, xmin:xmax, ymin:ymax].T,
vmin=0, interpolation='bilinear',
cmap=plt.cm.gray)
cbar = plt.colorbar()
cbar.set_label('Transmission, a.u.', rotation=90)
plt.title('Normalized image')
plt.show()
for di in tqdm_notebook(range(data_images_masked.shape[0])):
data_images_masked[di] = data_images_good[di, xmin:xmax, ymin:ymax]
return data_images_masked, empty_masked
data_images_masked, empty_masked = cut_data_images(
data_images, empty_beam, data_angles, xmin, xmax, ymin, ymax)
# %%
plt.figure(figsize=(8,8))
plt.imshow(safe_median(empty_masked))
plt.colorbar()
# %%
def group_data(data_images,data_angles, mmap_file_dir):
uniq_angles,_ = tomotools.load_create_mm(
os.path.join(mmap_file_dir,'uniq_angles.tmp'),
shape=(len(list(set(data_angles))),),
dtype='float32',force_create=True)
uniq_angles[:] = list(set(data_angles))
uniq_data_images,_ = tomotools.load_create_mm(
os.path.join(mmap_file_dir,'uniq_data_images.tmp'),
shape=(len(uniq_angles), data_images.shape[1], data_images.shape[2]),
dtype='float32',force_create=True)
for ua_id, ua in tqdm_notebook(list(enumerate(uniq_angles))):
indexes = np.argwhere(data_angles==uniq_angles[ua_id])
if len(indexes)>1:
tmp_images = data_images[indexes]
tmp_images = np.squeeze(tmp_images)
mean_image = np.mean(tmp_images, axis=0)
uniq_data_images[ua_id] = mean_image
else:
uniq_data_images[ua_id]=data_images[indexes]
return uniq_data_images, uniq_angles
uniq_data_images, uniq_angles = group_data(data_images_masked, data_angles, tmp_dir)
# %%
#normalize data frames and calculate sinograms
empty_masked = safe_median(empty_masked)
for di in tqdm_notebook(range(uniq_data_images.shape[0])):
t = uniq_data_images[di]
t = t/empty_masked
t[t<1e-8] = 1e-8
t[t>1] = 1
uniq_data_images[di] = safe_median(t)
# del empty_masked
# %%
sinogram, _ = tomotools.load_create_mm(os.path.join(tmp_dir, 'sinogram.tmp'), shape=uniq_data_images.shape,
dtype='float32')
ne.evaluate('-log(uniq_data_images)', out=sinogram);
# %%
plt.gray()
plt.figure(figsize=(7,5))
s = sinogram[np.argsort(uniq_angles),:,int(sinogram.shape[-1]//2)]
plt.imshow(s, interpolation='bilinear')
plt.axis('tight')
cbar = plt.colorbar()
cbar.set_label('Transmission, a.u.', rotation=90)
plt.title('Sinogram without correction')
# %%
# # build frames for video
# images_dir = os.path.join(tmp_dir,'images')
# tomotools.mkdir_p(images_dir)
# im_max=np.max(sinogram)
# im_min=np.min(sinogram)
# print(im_min, im_max)
# for ia, a in tomotools.log_progress(list(enumerate(np.argsort(uniq_angles)))):
# # print('{:34}'.format(ia))
# plt.imsave(os.path.join(images_dir,'prj_{:03}.png'.format(ia)),
# np.rot90(sinogram[a],3), vmin=im_min, vmax=im_max,
# cmap=plt.cm.gray_r)
# # !cd {images_dir} && avconv -r 10 -i "prj_%03d.png" -b:v 1000k prj.avi
# # !cd {images_dir} && rm prj.mp4
# %%
import scipy.ndimage
def my_rc(sino0, level):
def get_my_b(level):
t= np.mean(sino0, axis=0)
gt = scipy.ndimage.filters.gaussian_filter1d(t,level/2.)
return gt-t
def get_my_a(level):
my_b = get_my_b(level)
return np.mean(my_b)/my_b.shape[0]
my_a = get_my_a(level)
my_b = get_my_b(level)
res = sino0.copy()
if not level==0:
res+= sino0*my_a+my_b
return res
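# %%
# Tiny synthetic demonstration of the ring-correction idea above (added for
# illustration): a constant stripe in one detector column of a flat sinogram
# is attenuated by my_rc, because the smoothed column mean is fed back as a
# correction; the peak-to-peak spread of the column means should shrink.
demo_sino = np.ones((30, 50), dtype='float32')
demo_sino[:, 25] += 0.5  # constant "ring" artefact in a single column
demo_corrected = my_rc(demo_sino, 10)
print(np.ptp(demo_sino.mean(axis=0)), np.ptp(demo_corrected.mean(axis=0)))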
# %%
rc_level=10
# %%
tmp_sinogram = sinogram[np.argsort(uniq_angles),:,int(sinogram.shape[-1]//2)]
plt.figure(figsize=(8,8))
plt.imshow(my_rc(tmp_sinogram, rc_level), cmap=plt.cm.viridis, interpolation='nearest')
plt.axis('tight')
plt.colorbar(orientation='horizontal')
#TODO: remove rings
# %%
for s in tqdm_notebook(range(sinogram.shape[1])):
sinogram[:,s,:] = my_rc(sinogram[:,s,:], rc_level)
# %%
np.isnan(sinogram).sum()
# %%
tmp_sinogram = sinogram[np.argsort(uniq_angles),:,int(sinogram.shape[-1]//2)]
plt.figure(figsize=(8,8))
plt.imshow(tmp_sinogram, cmap=plt.cm.viridis, interpolation='nearest')
plt.axis('tight')
plt.colorbar(orientation='horizontal')
# %%
from skimage.measure import compare_ssim, compare_nrmse
from scipy.ndimage.filters import gaussian_filter
from scipy.optimize import minimize
from scipy.linalg import norm
import cv2
def cv_rotate(x, angle):
"""
Rotate square array using OpenCV2 around center of the array
:param x: 2d numpy array
:param angle: angle in degrees
:return: rotated array
"""
x_center = tuple(
np.array((x.shape[1], x.shape[0]), dtype='float32') / 2.0 - 0.5)
rot_mat = cv2.getRotationMatrix2D(x_center, angle, 1.0)
xro = cv2.warpAffine(
x, rot_mat, (x.shape[1], x.shape[0]), flags=cv2.INTER_LINEAR)
return xro
def smooth(x):
return x - gaussian_filter(x,50)+gaussian_filter(x,10)
def find_axis_posiotion(image_0, image_180):
def corr(x):
alfa= x[0]
shift_x = int(x[1])
if shift_x >= 0:
t_180 = image_180[:,shift_x:]
t_0 = image_0[:,shift_x:]
else:
t_180 = image_180[:,:shift_x]
t_0 = image_0[:,:shift_x]
tt_180 = np.fliplr(cv_rotate(t_180,alfa))
tt_180 = cv2.medianBlur(tt_180, 3) #*t_mask
tt_0 = cv_rotate(t_0,alfa)
tt_0 = cv2.medianBlur(tt_0, 3) #*t_mask
res = compare_nrmse(tt_0, tt_180)
return res
s180 = image_180.sum(axis=0)
r180 = np.flipud(np.arange(len(s180)))
p180 = (s180*r180).sum()/s180.sum()
s0 = image_0.sum(axis=0)
r0 = np.arange(len(s0))
p0 = (s0*r0).sum()/s0.sum()
x0 = [1.,0.5*(p0-p180)]
left = x0[1]-200
right = x0[1]+200
qq = [corr([0,q]) for q in np.arange(left,right)]
min_pos = left+np.argmin(qq)
if min_pos==left or min_pos==right:
position_found = False
else:
position_found = True
plt.figure()
plt.plot(np.arange(left,right),qq)
plt.grid()
plt.show()
while not position_found:
if min_pos == left:
right=left
left=right-200
elif min_pos == right:
left=right
right = left+200
qq = [corr([0,q]) for q in np.arange(left,right)]
min_pos = left+np.argmin(qq)
if min_pos==left or min_pos==right:
position_found = False
else:
position_found = True
plt.figure()
plt.plot(np.arange(left,right),qq)
plt.grid()
plt.show()
shift_0 = min_pos
x0 = [1., shift_0]
res= minimize(corr, x0, method='Powell')
return res
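# %%
# Quick numerical check of cv_rotate (added for illustration): four successive
# 90-degree rotations of a square array should come back to the original, so
# the maximal absolute difference is expected to be ~0.
demo_img = np.random.rand(64, 64).astype('float32')
demo_rot = demo_img.copy()
for _ in range(4):
    demo_rot = cv_rotate(demo_rot, 90)
print(np.abs(demo_img - demo_rot).max())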
# %%
# searching for opposite frames (0 and 180 deg)
def get_angles_at_180_deg(uniq_angles):
array_0 = np.asarray(uniq_angles)%360
cross_array = np.zeros((len(array_0),len(array_0)))
for i in range(1, len(array_0)):
cross_array[i] = np.roll(array_0, i)
pos = np.argmin(np.abs(cross_array+180-array_0)%360)
print(pos)
position_180 = pos %len(array_0)
position_0 = (pos-position_180)//len(array_0)
print(position_0, position_180)
return position_0, position_180
position_0, position_180 = get_angles_at_180_deg(uniq_angles)
posiotion_180_sorted = np.argwhere(np.isclose(position_180, np.argsort(uniq_angles)))[0][0]
print(posiotion_180_sorted)
posiotions_to_check = np.argsort(uniq_angles)[
posiotion_180_sorted-3:np.min([posiotion_180_sorted+5, len(uniq_angles)-1])] #TODO: check ranges
print(uniq_angles[posiotions_to_check])
# %%
data_0_orig = np.rot90(sinogram[position_0]).copy()
data_0 = cv2.medianBlur(data_0_orig,3)
data_0 = smooth(data_0)
# %%
plt.figure(figsize=(8,8))
plt.imshow(smooth(data_0_orig))
plt.colorbar()
# %%
opt_func_values = []
for position_180 in posiotions_to_check:
print(uniq_angles[position_180])
data_0_orig = np.rot90(sinogram[position_0]).copy()
data_180_orig = np.rot90(sinogram[position_180]).copy()
data_0 = cv2.medianBlur(data_0_orig,3)
data_180 = cv2.medianBlur(data_180_orig,3)
data_0 = smooth(data_0)
data_180 = smooth(data_180)
res = find_axis_posiotion(data_0, data_180)
opt_func_values.append(res['fun'])
print(res)
# alfa, shift_x, shift_y = res.x[0]/10, int(res.x[1]), int(res.x[2])//10
alfa, shift_x, shift_y = res.x[0], int(np.floor(res.x[1])), 0
if shift_x >= 0:
t_180 = data_180_orig[:,shift_x:]
t_0 = data_0_orig[:,shift_x:]
else:
t_180 = data_180_orig[:,:shift_x]
t_0 = data_0_orig[:,:shift_x]
if shift_y > 0:
t_180 = t_180[shift_y:,:]
t_0 = t_0[:-shift_y,:]
elif shift_y < 0:
t_180 = t_180[:shift_y,:]
t_0 = t_0[-shift_y:,:]
tt_180 = np.fliplr(cv_rotate(t_180,alfa))
tt_0 = cv_rotate(t_0,alfa)
plt.figure(figsize=(7,7))
plt.imshow(tt_180-tt_0, cmap=plt.cm.viridis)
plt.title('a={}, sx={} sy={}'.format(alfa,shift_x, shift_y))
plt.colorbar()
plt.show()
# %%
plt.figure()
plt.plot(uniq_angles[posiotions_to_check],opt_func_values)
plt.grid()
new_position_180 = posiotions_to_check[np.argmin(opt_func_values)]
print(new_position_180)
# %%
uniq_angles_orig = uniq_angles.copy()
uniq_angles *= 180./uniq_angles[new_position_180]
position_0, position_180 = get_angles_at_180_deg(uniq_angles)
# %%
print(uniq_angles[position_180])
data_0_orig = np.rot90(sinogram[position_0]).copy()
data_180_orig = np.rot90(sinogram[position_180]).copy()
data_0 = cv2.medianBlur(data_0_orig,3)
data_180 = cv2.medianBlur(data_180_orig,3)
data_0 = smooth(data_0)
data_180 = smooth(data_180)
res = find_axis_posiotion(data_0, data_180)
# opt_func_values.append(res['fun'])
print(res)
# TODO: FIX shift_y
alfa, shift_x, shift_y = res.x[0], int(np.floor(res.x[1])), 0
if shift_x >= 0:
t_180 = data_180_orig[:,shift_x:]
t_0 = data_0_orig[:,shift_x:]
else:
t_180 = data_180_orig[:,:shift_x]
t_0 = data_0_orig[:,:shift_x]
if shift_y > 0:
t_180 = t_180[shift_y:,:]
t_0 = t_0[:-shift_y,:]
elif shift_y < 0:
t_180 = t_180[:shift_y,:]
t_0 = t_0[-shift_y:,:]
tt_180 = np.fliplr(cv_rotate(t_180,alfa))
tt_0 = cv_rotate(t_0,alfa)
plt.figure(figsize=(8,8))
plt.imshow(tt_180-tt_0, cmap=plt.cm.viridis)
plt.title('a={}, sx={} sy={}'.format(alfa,shift_x, shift_y))
plt.colorbar()
plt.show()
# %%
plt.gray()
plt.figure(figsize=(8,8))
im_max = np.max([np.max(data_0_orig), np.max(data_180_orig)])
plt.subplot(221)
plt.imshow(data_0_orig, vmin=0, vmax=im_max, cmap=plt.cm.gray_r)
plt.axis('tight')
plt.title('a')
plt.xlabel('Detector channels')
plt.ylabel('Detector channels')
cbar = plt.colorbar()
cbar.set_label('Absorption, a.u.', rotation=90)
plt.subplot(222)
plt.imshow(data_180_orig, vmin=0, vmax=im_max, cmap=plt.cm.gray_r)
plt.axis('tight')
plt.title('b')
plt.xlabel('Detector channels')
plt.ylabel('Detector channels')
cbar = plt.colorbar()
cbar.set_label('Absorption, a.u.', rotation=90)
plt.subplot(223)
plt.imshow(data_0_orig - np.fliplr(data_180_orig), vmin=-im_max/2, vmax=im_max/2, cmap=plt.cm.gray_r)
plt.axis('tight')
plt.title('c')
plt.xlabel('Detector channels')
plt.ylabel('Detector channels')
cbar = plt.colorbar()
cbar.set_label('Absorption, a.u.', rotation=90)
plt.subplot(224)
plt.imshow(1.0*(tt_180-tt_0), vmin=-im_max/2, vmax=im_max/2, cmap=plt.cm.gray_r)
plt.axis('tight')
plt.title('d')
plt.xlabel('Detector channels')
plt.ylabel('Detector channels')
cbar = plt.colorbar()
cbar.set_label('Absorption, a.u.', rotation=90)
# %%
t = np.percentile(sinogram, 90, axis=1)
# t1 = t[np.argsort(uniq_angles)]
# %%
plt.figure(figsize=(5,5))
plt.imshow(t[np.argsort(uniq_angles),:])
plt.colorbar()
plt.show()
#TODO: Improve y_shift searching
y_shift_array = np.sum(t>0.05, axis=1)
y_shift_array -=y_shift_array[0]
plt.figure(figsize=(6,6))
plt.plot(y_shift_array[np.argsort(uniq_angles)],'o')
plt.grid()
plt.show()
# %%
# flow = cv2.calcOpticalFlowPyrLK(data_0, data_180)
# %%
sinogram_fixed, _ = tomotools.load_create_mm(os.path.join(tmp_dir,'sinogram_fixed.tmp'),
shape=(sinogram.shape[0], sinogram.shape[1]+abs(shift_x),sinogram.shape[2]),
dtype='float32', force_create=True)
# fix the axis tilt
for i in tqdm_notebook(range(sinogram.shape[0])):
t = sinogram[i].copy()
t_angle = uniq_angles[i]
# if not shift_y ==0 :
# delta_angle = t_angle - uniq_angles[position_0]+90
# tmp_shift_y = int(np.sin(delta_angle/180.*np.pi)*shift_y)
# t = np.roll(t, -tmp_shift_y, -1)
# t[:,0:np.abs(shift_y)]=0
# t[:,-np.abs(shift_y):]=0
t = cv_rotate(t, alfa)
#TODO: Fixit
shift_y = y_shift_array[i]
# t = np.roll(t, shift_y, axis=1)
# if shift_y > 0:
# t[:-shift_y] = t[shift_y:]
# t[-shift_y:] = 0
# elif shift_y < 0:
# t[-shift_y:] = t[:shift_y]
# t[:-shift_y] = 0
if shift_x > 0:
sinogram_fixed[i, :-shift_x] = t
else:
sinogram_fixed[i, -shift_x:] = t
# %%
pixel_size = 9e-3
def astra_tomo2d_parallel(sinogram, angles):
# astra.astra.set_gpu_index([0,1])
angles = angles.astype('float64')
detector_size = sinogram.shape[1]
rec_size = detector_size
vol_geom = astra.create_vol_geom(rec_size, rec_size)
proj_geom = astra.create_proj_geom('parallel', 1.0, detector_size, angles)
sinogram_id = astra.data2d.create('-sino', proj_geom, data=sinogram)
# Create a data object for the reconstruction
rec_id = astra.data2d.create('-vol', vol_geom)
# proj_id = astra.create_projector('strip', proj_geom, vol_geom) # for CPU reconstruction only
# Set up the parameters for a reconstruction algorithm using the GPU
cfg = astra.astra_dict('FBP_CUDA')
cfg['ReconstructionDataId'] = rec_id
cfg['ProjectionDataId'] = sinogram_id
# cfg['ProjectorId'] = proj_id # for CPU reconstruction only
cfg['option'] = {}
alg_id = astra.algorithm.create(cfg)
astra.algorithm.run(alg_id, 1)
cfg = astra.astra_dict('CGLS_CUDA')
cfg['ReconstructionDataId'] = rec_id
cfg['ProjectionDataId'] = sinogram_id
# cfg['ProjectorId'] = proj_id # for CPU reconstruction only
cfg['option'] = {}
# cfg['option']['MinConstraint'] = -0.01
alg_id = astra.algorithm.create(cfg)
# Run 150 iterations of the algorithm
astra.algorithm.run(alg_id, 5) #30
# Get the result
rec = astra.data2d.get(rec_id)/pixel_size #fixit
# Clean up. Note that GPU memory is tied up in the algorithm object,
# and main RAM in the data objects.
astra.algorithm.delete(alg_id)
astra.data2d.delete(rec_id)
astra.data2d.delete(sinogram_id)
astra.clear()
return rec
def astra_tomo3d_parallel(sinogram, angles, rec_vol, slice_start, slice_stop):
# astra.astra.set_gpu_index([0,1])
angles = angles.astype('float64')
detector_size = sinogram.shape[1]
# slices_number = sinogram.shape[0]
slices_number = slice_stop - slice_start
rec_size = detector_size
vol_geom = astra.create_vol_geom(rec_size, rec_size, slices_number)
proj_geom = astra.create_proj_geom('parallel3d', 1.0, 1.0, slices_number, detector_size, angles)
sinogram_id = astra.data3d.create('-sino', proj_geom, np.rollaxis(sinogram,-1)[slice_start:slice_stop])
# Create a data object for the reconstruction
# rec_id = astra.data3d.link('-vol', vol_geom, rec_vol[slice_start:slice_stop])
rec_id = astra.data3d.create('-vol', vol_geom)
# Set up the parameters for a reconstruction algorithm using the GPU
cfg = astra.astra_dict('CGLS3D_CUDA')
cfg['ReconstructionDataId'] = rec_id
cfg['ProjectionDataId'] = sinogram_id
# cfg['ProjectorId'] = proj_id # for CPU reconstruction only
cfg['option'] = {}
# cfg['option']['GPUindex'] = 1
cfg['option']['MinConstraint'] = -0.01
# Available algorithms:
# SIRT_CUDA, SART_CUDA, EM_CUDA, FBP_CUDA (see the FBP sample)
# Create the algorithm object from the configuration structure
alg_id = astra.algorithm.create(cfg)
# astra.data3d.info()
# Run 150 iterations of the algorithm
astra.algorithm.run(alg_id, 1)
# Get the result
rec = astra.data3d.get(rec_id)/pixel_size #fixit
# Clean up. Note that GPU memory is tied up in the algorithm object,
# and main RAM in the data objects.
astra.algorithm.delete(alg_id)
astra.data3d.delete(rec_id)
astra.data3d.delete(sinogram_id)
astra.clear()
return rec
# %%
s1_angles = uniq_angles
s1 = np.require(sinogram_fixed[:,:,int(sinogram_fixed.shape[-1]//3)],
dtype=np.float32, requirements=['C'])
# %%
#preview
def test_rec(s1, uniq_angle):
plt.figure(figsize=(7,7))
plt.imshow(s1[np.argsort(uniq_angle)], interpolation='bilinear', cmap=plt.cm.gray_r)
plt.colorbar()
plt.show()
bh_corr = 1.0
t_angles = (uniq_angles-uniq_angles.min())<=180 # remove angles >180
rec_slice = astra_tomo2d_parallel(s1[t_angles], uniq_angles[t_angles]*np.pi/180)
plt.figure(figsize=(10,8))
plt.imshow(safe_median(rec_slice),
vmin=0, vmax= np.percentile(rec_slice,95)*1.2, cmap=plt.cm.viridis)
plt.axis('equal')
plt.colorbar()
plt.show()
# %%
test_rec(s1, s1_angles)
# %%
plt.figure(figsize=(7,7))
plt.imshow(s1[np.argsort(uniq_angles)], interpolation='bilinear', cmap=plt.cm.gray_r)
plt.axis('tight')
cbar = plt.colorbar()
cbar.set_label('Transmission, a.u.', rotation=90)
plt.title('Sinogram without correction')
plt.xlabel('Detector channel number')
plt.ylabel('Rotation angle index')
# %%
#TODO: check mu physical value
sinogram_fixed_median = np.median(sinogram_fixed.sum(axis=-1).sum(axis=-1))
corr_factor = sinogram_fixed.sum(axis=-1).sum(axis=-1)/sinogram_fixed_median
# %%
#TODO: fix bad data
for i in range(len(sinogram_fixed)):
sinogram_fixed[i] = sinogram_fixed[i]/corr_factor[i]
# %%
s2 = np.require(sinogram_fixed[:,:,int(sinogram_fixed.shape[-1]//2)],
dtype=np.float32, requirements=['C'])
# %%
s2 = (s1.T/s1.sum(axis=-1)*s1.sum(axis=-1).mean()).T
test_rec(s1, uniq_angles)
test_rec(s2, uniq_angles)
# %%
del data_0_orig, data_180_orig, data_images_good, data_images_masked
del sinogram, sinogram_fixed, uniq_angles, uniq_angles_orig, uniq_data_images
# %%
files_to_remove = glob(os.path.join(tmp_dir,'*.tmp'))
files_to_remove = [f for f in files_to_remove if f.split('/')[-1] not in [
'uniq_angles.tmp', 'sinogram_fixed.tmp']]
for fr in files_to_remove:
try:
os.remove(os.path.join(tmp_dir,fr))
except:
pass
try:
os.remove(os.path.join(tmp_dir,fr+'.size'))
except:
pass
# %%
uniq_angles, _ = tomotools.load_create_mm(os.path.join(tmp_dir,'uniq_angles.tmp'),
shape= None,
dtype='float32')
s1, _ = tomotools.load_create_mm(os.path.join(tmp_dir,'sinogram_fixed.tmp'),
shape= None,
dtype='float32')
rec_vol, _ = tomotools.load_create_mm(os.path.join(tmp_dir,'rec.tmp'),
dtype=np.float32,
shape = (s1.shape[-1],s1.shape[1],s1.shape[1]))
# %%
# # %%timeit
#preview
bh_corr = 1.0
sss = s1[...,int(s1.shape[-1]//2)]
t_angles = (uniq_angles-uniq_angles.min())<=180 # remove angles >180
s4 = sss.copy()
# s4[s4<0] = 0
rec_slice = astra_tomo2d_parallel(s4[t_angles], uniq_angles[t_angles]*np.pi/180)
plt.figure(figsize=(10,8))
plt.imshow(safe_median(rec_slice),
vmin=0, vmax= np.percentile(rec_slice,95)*1.2, cmap=plt.cm.viridis)
plt.axis('equal')
plt.colorbar()
plt.show()
# plt.figure(figsize=(7,5))
# plt.plot(rec_slice[rec_slice.shape[0]//2])
# plt.grid()
# plt.show()
# plt.figure(figsize=(7,5))
# plt.plot(uniq_angles[t_angles]*np.pi/180,
# np.power(s4[t_angles],bh_corr).sum(axis=1)/np.sum(np.power(s4[t_angles],bh_corr))*np.sum(s4[t_angles]),
# '*')
# plt.grid()
# plt.show()
# plt.figure(figsize=(7,5))
# plt.hist(rec_slice.ravel(), bins=100)
# plt.grid()
# plt.show()
# plt.figure(figsize=(8,8))
# plt.imshow(rec_slice/np.sum(np.power(s4[t_angles],bh_corr))*np.sum(s4[t_angles]),
# vmin=0, vmax= np.percentile(rec_slice,95)*1.2, cmap=plt.cm.viridis)
# plt.axis('tight')
# plt.colorbar()
# plt.show()
# %%
#multi 2d case
t = time.time()
print(s1.shape)
angles =np.array(uniq_angles)*np.pi/180
for i in tqdm_notebook(range(0, s1.shape[-1])):
sino = s1[:,:,i].copy()
sino[sino<0] = 0
sino = np.power(sino, bh_corr) #BH!
t_angles = (uniq_angles-uniq_angles.min())<=180 # remove angles >180
rec_vol[i] = astra_tomo2d_parallel(sino[t_angles], angles[t_angles])
print(time.time()-t)
# %%
rec_vol_filtered = rec_vol
# %%
for i in range(10):
plt.figure(figsize=(8,8))
plt.imshow(rec_vol_filtered[i*rec_vol_filtered.shape[0]//10], cmap=plt.cm.viridis, vmin=0)
plt.axis('equal')
plt.title(i*rec_vol_filtered.shape[0]//10)
plt.colorbar()
plt.show()
# %%
for i in range(10):
plt.figure(figsize=(8,8))
plt.imshow(rec_vol_filtered[:,i*rec_vol_filtered.shape[1]//10,:], cmap=plt.cm.viridis, vmin=0)
plt.axis('equal')
plt.title(i*rec_vol_filtered.shape[1]//10)
plt.colorbar()
plt.show()
# %%
for i in range(10):
plt.figure(figsize=(8,8))
plt.imshow(rec_vol_filtered[:,:, i*rec_vol_filtered.shape[2]//10], cmap=plt.cm.viridis, vmin=0)
plt.axis('equal')
plt.title(i*rec_vol_filtered.shape[2]//10)
plt.colorbar()
plt.show()
# %%
noisy = rec_vol_filtered[int(rec_vol_filtered.shape[0]*0.5)].astype('float64')
noisy = resize(noisy, (noisy.shape[0]//1, noisy.shape[1]//1))
# noisy = rec_vol_filtered[int(rec_vol_filtered.shape[0]*0.75)][::1,::1]
sigma_est = np.mean(estimate_sigma(noisy, multichannel=False))
print("estimated noise standard deviation = {}".format(sigma_est))
patch_kw = dict(patch_size=7, # 7x7 patches
patch_distance=15, # 31x31 search area
multichannel=False)
# 1 algorithm
denoise = denoise_nl_means(noisy, h=1.5 * sigma_est, fast_mode=True,
**patch_kw)
# 2 algorithm
denoise_fast = denoise_nl_means(noisy, h=0.8 * sigma_est, fast_mode=True,
**patch_kw)
plt.figure(figsize=(6, 12))
plt.subplot(311)
plt.imshow(noisy, interpolation='bilinear')
plt.axis('off')
plt.colorbar()
plt.title('noisy')
plt.subplot(312)
plt.imshow(denoise, interpolation='bilinear')
plt.axis('off')
plt.colorbar()
plt.title('non-local means\n(1)')
plt.subplot(313)
plt.imshow(denoise_fast, interpolation='bilinear')
plt.axis('off')
plt.colorbar()
plt.title('non-local means\n(2)')
plt.show()
plt.figure(figsize=(8, 8))
plt.subplot(321)
plt.imshow(noisy, interpolation='bilinear')
plt.axis('off')
plt.colorbar()
plt.title('noisy')
plt.subplot(322)
plt.hist(noisy.ravel(), bins=100);
plt.grid()
plt.subplot(323)
plt.imshow(denoise, interpolation='bilinear')
plt.axis('off')
plt.colorbar()
plt.title('non-local means\n(1)')
plt.subplot(324)
plt.hist(denoise.ravel(), bins=100);
plt.grid()
plt.subplot(325)
plt.imshow(denoise_fast, interpolation='bilinear')
plt.axis('off')
plt.colorbar()
plt.title('non-local means\n(2)')
plt.subplot(326)
plt.hist(denoise_fast.ravel(), bins=100);
plt.grid()
plt.show()
# %%
def reshape_volume(volume, reshape):
res = np.zeros([s//reshape for s in volume.shape], dtype='float32')
xs,ys,zs = [s*reshape for s in res.shape]
for x,y,z in np.ndindex(reshape, reshape, reshape):
res += volume[x:xs:reshape, y:ys:reshape, z:zs:reshape]
return res/reshape**3
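# %%
# Illustrative check of reshape_volume (added; not part of the original
# pipeline): block-averaging a constant volume keeps its value and divides
# every dimension by the reshape factor.
demo_vol = np.full((6, 6, 6), 2.0, dtype='float32')
demo_small = reshape_volume(demo_vol, 3)
print(demo_small.shape, float(demo_small.mean()))  # expected: (2, 2, 2) 2.0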
# %%
def save_amira(in_array, out_path, reshape=3):
data_path = out_path
with open(os.path.join(data_path, 'amira.raw'), 'wb') as amira_file:
reshaped_vol = reshape_volume(in_array, reshape)
reshaped_vol.tofile(amira_file)
file_shape = reshaped_vol.shape
with open(os.path.join(data_path, 'tomo.hx'), 'w') as af:
af.write('# Amira Script\n')
af.write('remove -all\n')
af.write(r'[ load -raw ${SCRIPTDIR}/amira.raw little xfastest float 1 '+
str(file_shape[1])+' '+str(file_shape[2])+' '+str(file_shape[0])+
' 0 '+str(file_shape[1]-1)+' 0 '+str(file_shape[2]-1)+' 0 '+str(file_shape[0]-1)+
' ] setLabel tomo.raw\n')
# %%
save_amira(rec_vol_filtered, tmp_dir, 3)
# %%
with h5py.File(os.path.join(tmp_dir, 'tomo_rec.h5'), 'w') as h5f:
h5f.create_dataset('Reconstruction', data=rec_vol_filtered, chunks=True,
compression='lzf')
# %%
# import ipyvolume as ipv
# %%
# ipv.figure()
# ipv.volshow(reshape_volume(rec_vol_filtered,10),
# max_shape=1024,
# extent=[[0, rec_vol_filtered.shape[2]*9e-3],
# [0, rec_vol_filtered.shape[1]*9e-3],
# [0, rec_vol_filtered.shape[0]*9e-3]]
# )
# ipv.xlim(0, rec_vol_filtered.shape[2]*9e-3)
# ipv.xlabel('mm')
# ipv.ylim(0, rec_vol_filtered.shape[1]*9e-3)
# ipv.ylabel('mm')
# ipv.zlim(0, rec_vol_filtered.shape[0]*9e-3)
# ipv.zlabel('mm')
# ipv.squarelim()
# # ipv.show()
# ipv.save(os.path.join(tmp_dir,'tomo.html'))
# %%
files_to_remove = glob(os.path.join(tmp_dir,'*.tmp'))
files_to_remove
# %%
for fr in files_to_remove:
try:
os.remove(os.path.join(tmp_dir,fr))
except:
pass
try:
os.remove(os.path.join(tmp_dir,fr+'.size'))
except:
pass
# %%
tomotools.mkdir_p(os.path.join(storage_dir, experiment_id))
# %%
# # !cp 'tomo.ini' {os.path.join(storage_dir, experiment_id)}
# %%
# !cp -r {tmp_dir} {storage_dir}
# %%
# !rm -rf {tmp_dir}
# %%
# !mv {os.path.join(data_dir, experiment_id+'.h5')} {storage_dir}
# %%
# !ls -lha {storage_dir+'/'+experiment_id}
# %%
|
buzmakov/tomography_scripts
|
misc/reconstructor-v-2.1a.py
|
Python
|
mit
| 36,458
|
[
"Gaussian"
] |
f285d49713b7b28e232d546f690b4e0232d6f8e6bf0efc6f07ef99d27d360329
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import shutil
from unittest import TestCase
import numpy as np
import pytest
import tensorflow as tf
from zoo import init_nncontext
from zoo.orca.data import XShards
import zoo.orca.data.pandas
from zoo.orca.learn.tf2 import Estimator
from zoo.ray import RayContext
import ray
NUM_TRAIN_SAMPLES = 1000
NUM_TEST_SAMPLES = 400
import os
resource_path = os.path.join(
os.path.realpath(os.path.dirname(__file__)), "../../../../resources")
def linear_dataset(a=2, size=1000):
x = np.random.rand(size)
y = x / 2
x = x.reshape((-1, 1))
y = y.reshape((-1, 1))
return x, y
def create_train_datasets(config, batch_size):
import tensorflow as tf
x_train, y_train = linear_dataset(size=NUM_TRAIN_SAMPLES)
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(NUM_TRAIN_SAMPLES).batch(
batch_size)
return train_dataset
def create_test_dataset(config, batch_size):
import tensorflow as tf
x_test, y_test = linear_dataset(size=NUM_TEST_SAMPLES)
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_dataset = test_dataset.batch(batch_size)
return test_dataset
def simple_model(config):
import tensorflow as tf
model = tf.keras.models.Sequential([tf.keras.layers.Dense(10, input_shape=(1,)),
tf.keras.layers.Dense(1)])
return model
def compile_args(config):
import tensorflow as tf
if "lr" in config:
lr = config["lr"]
else:
lr = 1e-3
args = {
"optimizer": tf.keras.optimizers.SGD(lr),
"loss": "mean_squared_error",
"metrics": ["mean_squared_error"]
}
return args
def model_creator(config):
model = simple_model(config)
model.compile(**compile_args(config))
return model
def identity_model_creator(config):
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=(1)),
tf.keras.layers.Lambda(lambda x: tf.identity(x))
])
model.compile()
return model
def create_auto_shard_datasets(config, batch_size):
import tensorflow as tf
data_path = os.path.join(resource_path, "orca/learn/test_auto_shard/*.csv")
dataset = tf.data.Dataset.list_files(data_path)
dataset = dataset.interleave(lambda x: tf.data.TextLineDataset(x))
dataset = dataset.map(lambda x: tf.strings.to_number(x))
dataset = dataset.map(lambda x: (x, x))
dataset = dataset.batch(batch_size)
return dataset
def create_auto_shard_model(config):
import tensorflow as tf
model = tf.keras.models.Sequential([
tf.keras.layers.Lambda(lambda x: tf.identity(x))
])
return model
def create_auto_shard_compile_args(config):
import tensorflow as tf
def loss_func(y1, y2):
return tf.abs(y1[0] - y1[1]) + tf.abs(y2[0] - y2[1])
args = {
"optimizer": tf.keras.optimizers.SGD(lr=0.0),
"loss": loss_func,
}
return args
def auto_shard_model_creator(config):
model = create_auto_shard_model(config)
model.compile(**create_auto_shard_compile_args(config))
return model
class LRChecker(tf.keras.callbacks.Callback):
def __init__(self, *args):
super(LRChecker, self).__init__(*args)
self.warmup_lr = [0.16, 0.22, 0.28, 0.34, 0.4]
def on_epoch_end(self, epoch, logs=None):
current_lr = tf.keras.backend.get_value(self.model.optimizer.lr)
print("epoch {} current lr is {}".format(epoch, current_lr))
if epoch < 5:
assert abs(current_lr - self.warmup_lr[epoch]) < 1e-5
elif 5 <= epoch < 10:
assert abs(current_lr - 0.4) < 1e-5
elif 10 <= epoch < 15:
assert abs(current_lr - 0.04) < 1e-5
elif 15 <= epoch < 20:
assert abs(current_lr - 0.004) < 1e-5
else:
assert abs(current_lr - 0.0004) < 1e-5
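# A plain-Python sketch (added for illustration; not used by the tests) of the
# learning-rate schedule that LRChecker asserts for the Horovod callbacks in
# test_horovod_learning_rate_schedule below: a 5-epoch warmup towards 0.4
# followed by 10x step decays every 5 epochs.
def expected_lr(epoch, warmup_lr=(0.16, 0.22, 0.28, 0.34, 0.4)):
    if epoch < 5:
        return warmup_lr[epoch]
    if epoch < 10:
        return 0.4
    if epoch < 15:
        return 0.04
    if epoch < 20:
        return 0.004
    return 0.0004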
class TestTFRayEstimator(TestCase):
def impl_test_fit_and_evaluate(self, backend):
import tensorflow as tf
ray_ctx = RayContext.get()
batch_size = 32
global_batch_size = batch_size * ray_ctx.num_ray_nodes
if backend == "horovod":
trainer = Estimator.from_keras(
model_creator=simple_model,
compile_args_creator=compile_args,
verbose=True,
config=None,
backend=backend)
else:
trainer = Estimator.from_keras(model_creator=model_creator,
verbose=True,
config=None,
backend=backend,
workers_per_node=2)
# model baseline performance
start_stats = trainer.evaluate(create_test_dataset, batch_size=global_batch_size,
num_steps=NUM_TEST_SAMPLES // global_batch_size)
print(start_stats)
def scheduler(epoch):
if epoch < 2:
return 0.001
else:
return 0.001 * tf.math.exp(0.1 * (2 - epoch))
scheduler = tf.keras.callbacks.LearningRateScheduler(scheduler, verbose=1)
# train for 2 epochs
trainer.fit(create_train_datasets, epochs=2, batch_size=global_batch_size,
steps_per_epoch=10, callbacks=[scheduler])
trainer.fit(create_train_datasets, epochs=2, batch_size=global_batch_size,
steps_per_epoch=10, callbacks=[scheduler])
# model performance after training (should improve)
end_stats = trainer.evaluate(create_test_dataset, batch_size=global_batch_size,
num_steps=NUM_TEST_SAMPLES // global_batch_size)
print(end_stats)
# sanity check that training worked
dloss = end_stats["validation_loss"] - start_stats["validation_loss"]
dmse = (end_stats["validation_mean_squared_error"] -
start_stats["validation_mean_squared_error"])
print(f"dLoss: {dloss}, dMSE: {dmse}")
assert dloss < 0 and dmse < 0, "training sanity check failed. loss increased!"
def test_fit_and_evaluate_tf(self):
self.impl_test_fit_and_evaluate(backend="tf2")
def test_fit_and_evaluate_horovod(self):
self.impl_test_fit_and_evaluate(backend="horovod")
def test_auto_shard_tf(self):
# file 1 contains all 0s, file 2 contains all 1s
# If shard by files, then each model will
# see the same records in the same batch.
# If shard by records, then each batch
# will have different records.
# The loss func is constructed such that
# the former case will return 0, and the latter
# case will return non-zero.
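# Illustrative sketch of that construction (added comment, not executed):
# with a "shard by files" batch both records are equal, e.g. y = [0., 0.],
# so loss_func gives |0 - 0| + |0 - 0| = 0; with mixed records such as
# [0., 1.] it gives a non-zero value, which the assertion below relies on.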
ray_ctx = RayContext.get()
trainer = Estimator.from_keras(
model_creator=auto_shard_model_creator,
verbose=True,
backend="tf2", workers_per_node=2)
stats = trainer.fit(create_auto_shard_datasets, epochs=1, batch_size=4, steps_per_epoch=2)
assert stats["train_loss"] == 0.0
def test_auto_shard_horovod(self):
# file 1 contains all 0s, file 2 contains all 1s
# If shard by files, then each model will
# see the same records in the same batch.
# If shard by records, then each batch
# will have different records.
# The loss func is constructed such that
# the former case will return 0, and the latter
# case will return non-zero.
ray_ctx = RayContext.get()
trainer = Estimator.from_keras(
model_creator=create_auto_shard_model,
compile_args_creator=create_auto_shard_compile_args,
verbose=True,
backend="horovod", workers_per_node=2)
stats = trainer.fit(create_auto_shard_datasets, epochs=1, batch_size=4, steps_per_epoch=2)
assert stats["train_loss"] == 0.0
# this needs horovod >= 0.19.2
def test_horovod_learning_rate_schedule(self):
import horovod
major, minor, patch = horovod.__version__.split(".")
larger_major = int(major) > 0
larger_minor = int(major) == 0 and int(minor) > 19
larger_patch = int(major) == 0 and int(minor) == 19 and int(patch) >= 2
if larger_major or larger_minor or larger_patch:
ray_ctx = RayContext.get()
batch_size = 32
workers_per_node = 4
global_batch_size = batch_size * workers_per_node
config = {
"lr": 0.8
}
trainer = Estimator.from_keras(
model_creator=simple_model,
compile_args_creator=compile_args,
verbose=True,
config=config,
backend="horovod", workers_per_node=workers_per_node)
import horovod.tensorflow.keras as hvd
callbacks = [
hvd.callbacks.LearningRateWarmupCallback(warmup_epochs=5, initial_lr=0.4,
verbose=True),
hvd.callbacks.LearningRateScheduleCallback(start_epoch=5, end_epoch=10,
multiplier=1., initial_lr=0.4),
hvd.callbacks.LearningRateScheduleCallback(start_epoch=10, end_epoch=15,
multiplier=1e-1, initial_lr=0.4),
hvd.callbacks.LearningRateScheduleCallback(start_epoch=15, end_epoch=20,
multiplier=1e-2, initial_lr=0.4),
hvd.callbacks.LearningRateScheduleCallback(start_epoch=20, multiplier=1e-3,
initial_lr=0.4),
LRChecker()
]
for i in range(30):
trainer.fit(create_train_datasets, epochs=1, batch_size=global_batch_size,
callbacks=callbacks)
else:
# skip tests in horovod lower version
pass
def test_sparkxshards(self):
train_data_shard = XShards.partition({"x": np.random.randn(100, 1),
"y": np.random.randint(0, 1, size=(100))})
config = {
"lr": 0.8
}
trainer = Estimator.from_keras(
model_creator=model_creator,
verbose=True,
config=config,
workers_per_node=2)
trainer.fit(train_data_shard, epochs=1, batch_size=4, steps_per_epoch=25)
trainer.evaluate(train_data_shard, batch_size=4, num_steps=25)
def test_dataframe(self):
sc = init_nncontext()
rdd = sc.range(0, 10)
from pyspark.sql import SparkSession
spark = SparkSession(sc)
from pyspark.ml.linalg import DenseVector
df = rdd.map(lambda x: (DenseVector(np.random.randn(1,).astype(np.float)),
int(np.random.randint(0, 1, size=())))).toDF(["feature", "label"])
config = {
"lr": 0.8
}
trainer = Estimator.from_keras(
model_creator=model_creator,
verbose=True,
config=config,
workers_per_node=2)
trainer.fit(df, epochs=1, batch_size=4, steps_per_epoch=25,
feature_cols=["feature"],
label_cols=["label"])
trainer.evaluate(df, batch_size=4, num_steps=25, feature_cols=["feature"],
label_cols=["label"])
trainer.predict(df, feature_cols=["feature"]).collect()
def test_dataframe_with_empty_partition(self):
from zoo.orca import OrcaContext
sc = OrcaContext.get_spark_context()
rdd = sc.range(0, 10)
rdd_with_empty = rdd.repartition(4).\
mapPartitionsWithIndex(lambda idx, part: [] if idx == 0 else part)
from pyspark.sql import SparkSession
spark = SparkSession(sc)
from pyspark.ml.linalg import DenseVector
df = rdd_with_empty.map(lambda x: (DenseVector(np.random.randn(1,).astype(np.float)),
int(np.random.randint(0, 1, size=()))))\
.toDF(["feature", "label"])
config = {
"lr": 0.8
}
trainer = Estimator.from_keras(
model_creator=model_creator,
verbose=True,
config=config,
workers_per_node=2)
trainer.fit(df, epochs=1, batch_size=4, steps_per_epoch=25,
feature_cols=["feature"],
label_cols=["label"])
trainer.evaluate(df, batch_size=4, num_steps=25, feature_cols=["feature"],
label_cols=["label"])
trainer.predict(df, feature_cols=["feature"]).collect()
def test_pandas_dataframe(self):
def model_creator(config):
import tensorflow as tf
input1 = tf.keras.layers.Input(shape=(1,))
input2 = tf.keras.layers.Input(shape=(1,))
concatenation = tf.concat([input1, input2], axis=-1)
outputs = tf.keras.layers.Dense(units=1, activation='softmax')(concatenation)
model = tf.keras.Model(inputs=[input1, input2], outputs=outputs)
model.compile(**compile_args(config))
return model
file_path = os.path.join(resource_path, "orca/learn/ncf2.csv")
train_data_shard = zoo.orca.data.pandas.read_csv(file_path)
config = {
"lr": 0.8
}
trainer = Estimator.from_keras(
model_creator=model_creator,
verbose=True,
config=config,
workers_per_node=1)
trainer.fit(train_data_shard, epochs=1, batch_size=4, steps_per_epoch=25,
feature_cols=["user", "item"],
label_cols=["label"])
trainer.evaluate(train_data_shard, batch_size=4, num_steps=25,
feature_cols=["user", "item"], label_cols=["label"])
trainer.predict(train_data_shard, feature_cols=["user", "item"]).collect()
def test_dataframe_shard_size(self):
from zoo.orca import OrcaContext
OrcaContext._shard_size = 3
sc = init_nncontext()
rdd = sc.range(0, 10)
from pyspark.sql import SparkSession
spark = SparkSession(sc)
from pyspark.ml.linalg import DenseVector
df = rdd.map(lambda x: (DenseVector(np.random.randn(1,).astype(np.float)),
int(np.random.randint(0, 1, size=())))).toDF(["feature", "label"])
config = {
"lr": 0.8
}
trainer = Estimator.from_keras(
model_creator=model_creator,
verbose=True,
config=config,
workers_per_node=2)
trainer.fit(df, epochs=1, batch_size=4, steps_per_epoch=25,
feature_cols=["feature"],
label_cols=["label"])
trainer.evaluate(df, batch_size=4, num_steps=25, feature_cols=["feature"],
label_cols=["label"])
trainer.predict(df, feature_cols=["feature"]).collect()
def test_partition_num_less_than_workers(self):
sc = init_nncontext()
rdd = sc.range(200, numSlices=1)
assert rdd.getNumPartitions() == 1
from pyspark.sql import SparkSession
spark = SparkSession(sc)
from pyspark.ml.linalg import DenseVector
df = rdd.map(lambda x: (DenseVector(np.random.randn(1,).astype(np.float)),
int(np.random.randint(0, 1, size=())))).toDF(["feature", "label"])
config = {
"lr": 0.8
}
trainer = Estimator.from_keras(
model_creator=model_creator,
verbose=True,
config=config,
workers_per_node=2)
assert df.rdd.getNumPartitions() < trainer.num_workers
trainer.fit(df, epochs=1, batch_size=4, steps_per_epoch=25,
validation_data=df, validation_steps=1,
feature_cols=["feature"],
label_cols=["label"])
trainer.evaluate(df, batch_size=4, num_steps=25, feature_cols=["feature"],
label_cols=["label"])
trainer.predict(df, feature_cols=["feature"]).collect()
def test_num_part_data_diff_val_data(self):
sc = init_nncontext()
rdd = sc.range(200, numSlices=10)
val_rdd = sc.range(60, numSlices=8)
from pyspark.sql import SparkSession
spark = SparkSession(sc)
from pyspark.ml.linalg import DenseVector
df = rdd.map(lambda x: (DenseVector(np.random.randn(1,).astype(np.float)),
int(np.random.randint(0, 1, size=())))).toDF(["feature", "label"])
val_df = val_rdd.map(lambda x: (DenseVector(np.random.randn(1,).astype(np.float)),
int(np.random.randint(0, 1, size=()))))\
.toDF(["feature", "label"])
config = {
"lr": 0.8
}
trainer = Estimator.from_keras(
model_creator=model_creator,
verbose=True,
config=config,
workers_per_node=2)
assert df.rdd.getNumPartitions() > trainer.num_workers
assert df.rdd.getNumPartitions() != val_df.rdd.getNumPartitions()
trainer.fit(df, epochs=1, batch_size=4, steps_per_epoch=25,
validation_data=val_df, validation_steps=1,
feature_cols=["feature"],
label_cols=["label"])
def test_dataframe_predict(self):
sc = init_nncontext()
rdd = sc.parallelize(range(20))
df = rdd.map(lambda x: ([float(x)] * 5,
[int(np.random.randint(0, 2, size=()))])
).toDF(["feature", "label"])
estimator = Estimator.from_keras(
model_creator=identity_model_creator,
verbose=True,
config={},
workers_per_node=2)
result = estimator.predict(df, batch_size=4,
feature_cols=["feature"])
expr = "sum(cast(feature <> to_array(prediction) as int)) as error"
assert result.selectExpr(expr).first()["error"] == 0
def test_sparkxshards_with_inbalanced_data(self):
train_data_shard = XShards.partition({"x": np.random.randn(100, 1),
"y": np.random.randint(0, 1, size=(100))})
def random_pad(data):
import numpy as np
import random
times = random.randint(1, 10)
data["x"] = np.concatenate([data["x"]] * times)
data["y"] = np.concatenate([data["y"]] * times)
return data
train_data_shard = train_data_shard.transform_shard(random_pad)
config = {
"lr": 0.8
}
trainer = Estimator.from_keras(
model_creator=model_creator,
verbose=True,
config=config,
workers_per_node=2)
trainer.fit(train_data_shard, epochs=1, batch_size=4, steps_per_epoch=25)
trainer.evaluate(train_data_shard, batch_size=4, num_steps=25)
def test_predict_xshards(self):
train_data_shard = XShards.partition({"x": np.random.randn(100, 1),
"y": np.random.randint(0, 1, size=(100,))})
expected = train_data_shard.collect()
expected = [shard["x"] for shard in expected]
for x in expected:
print(x.shape)
expected = np.concatenate(expected)
config = {
}
trainer = Estimator.from_keras(
model_creator=identity_model_creator,
verbose=True,
config=config,
workers_per_node=2)
result_shards = trainer.predict(train_data_shard, batch_size=10).collect()
result = [shard["prediction"] for shard in result_shards]
expected_result = [shard["x"] for shard in result_shards]
result = np.concatenate(result)
assert np.allclose(expected, result)
def test_save_and_load(self):
def model_creator(config):
import tensorflow as tf
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(64, kernel_size=(3, 3), strides=(1, 1), activation='relu',
padding='valid'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'),
tf.keras.layers.Conv2D(64, kernel_size=(3, 3), strides=(1, 1), activation='relu',
padding='valid'),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10, activation='softmax')]
)
model.compile(optimizer=tf.keras.optimizers.RMSprop(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
def train_data_creator(config, batch_size):
dataset = tf.data.Dataset.from_tensor_slices((np.random.randn(100, 28, 28, 3),
np.random.randint(0, 10, (100, 1))))
dataset = dataset.repeat()
dataset = dataset.shuffle(1000)
dataset = dataset.batch(batch_size)
return dataset
batch_size = 320
try:
est = Estimator.from_keras(model_creator=model_creator, workers_per_node=2)
history = est.fit(train_data_creator,
epochs=1,
batch_size=batch_size,
steps_per_epoch=5)
print("start saving")
est.save("/tmp/cifar10_keras.ckpt")
est.load("/tmp/cifar10_keras.ckpt")
print("save success")
finally:
os.remove("/tmp/cifar10_keras.ckpt")
def test_string_input(self):
def model_creator(config):
import tensorflow as tf
vectorize_layer = tf.keras.layers.experimental.preprocessing.TextVectorization(
max_tokens=10, output_mode='int', output_sequence_length=4)
model = tf.keras.models.Sequential()
model.add(tf.keras.Input(shape=(1,), dtype=tf.string))
model.add(vectorize_layer)
return model
from zoo.orca import OrcaContext
from pyspark.sql.types import StructType, StructField, StringType
spark = OrcaContext.get_spark_session()
schema = StructType([StructField("input", StringType(), True)])
input_data = [["foo qux bar"], ["qux baz"]]
input_df = spark.createDataFrame(input_data, schema)
estimator = Estimator.from_keras(model_creator=model_creator)
output_df = estimator.predict(input_df, batch_size=1, feature_cols=["input"])
output = output_df.collect()
print(output)
def test_array_string_input(self):
def model_creator(config):
import tensorflow as tf
model = tf.keras.models.Sequential([
tf.keras.Input(shape=(None,), dtype=tf.string),
tf.keras.layers.experimental.preprocessing.StringLookup(
vocabulary=config["vocabulary"]
)
])
return model
import itertools
from zoo.orca import OrcaContext
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, ArrayType
spark = OrcaContext.get_spark_session()
schema = StructType([
StructField("id", IntegerType(), True),
StructField("input", ArrayType(StringType(), True), True)
])
input_data = [(0, ["foo", "qux", "bar"]), (1, ["qux", "baz", "baz"])]
input_df = spark.createDataFrame(input_data, schema)
string_data = [row["input"] for row in input_df.select("input").distinct().collect()]
vocabulary = list(set(itertools.chain(*string_data)))
config = {"vocabulary": vocabulary}
estimator = Estimator.from_keras(model_creator=model_creator, config=config)
output_df = estimator.predict(input_df, batch_size=1, feature_cols=["input"])
output = output_df.collect()
print(output)
if __name__ == "__main__":
pytest.main([__file__])
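# ---------------------------------------------------------------------------
# Editor's note: hedged end-to-end sketch distilled from the tests above; it
# is not part of the original file. `model_creator` is any function returning
# a compiled tf.keras model, and `df` a Spark DataFrame with "feature" and
# "label" columns, exactly as used in the tests.
#
#   est = Estimator.from_keras(model_creator=model_creator,
#                              config={"lr": 0.8}, workers_per_node=2)
#   est.fit(df, epochs=1, batch_size=4, steps_per_epoch=25,
#           feature_cols=["feature"], label_cols=["label"])
#   pred_df = est.predict(df, batch_size=4, feature_cols=["feature"])
# ---------------------------------------------------------------------------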
|
intel-analytics/analytics-zoo
|
pyzoo/test/zoo/orca/learn/ray/tf/test_tf_ray_estimator.py
|
Python
|
apache-2.0
| 25,253
|
[
"ORCA"
] |
42d5b50791b46dec717c23fe1ce1c4fd1e91e96738ccbe238e8e6d62c4a879c2
|
#! /usr/bin/env python
###############################################################################
# Copyright 2016 Adam Jackson
###############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
from __future__ import print_function
import ase.io
from argparse import ArgumentParser
from kgrid import calc_kpt_tuple
def calc_grid(cutoff_length,
mode='default',
filename='geometry.in',
filetype=False,
realspace=False,
pretty_print=False):
if filetype:
atoms = ase.io.read(filename, format=filetype)
else:
atoms = ase.io.read(filename)
k_samples = calc_kpt_tuple(
atoms, mode=mode, cutoff_length=cutoff_length, realspace=realspace)
# Print vectors
if pretty_print:
print('{0:3.0f} {1:3.0f} {2:3.0f}'.format(*k_samples))
else:
return k_samples
def get_parser():
parser = ArgumentParser()
parser.add_argument(
action="store",
nargs="?",
type=str,
dest="file",
default="geometry.in",
help="Path to input file [default: ./geometry.in]")
threshold = parser.add_mutually_exclusive_group()
threshold.add_argument(
"-c",
"--cutoff-length",
action="store",
type=float,
dest="cutoff_length",
default=10.0,
help="Set length cutoff in Angstroms [default: 10]")
threshold.add_argument(
"-a",
"--vasp-auto",
action="store",
type=float,
dest="vasp_auto",
help="Real-space cutoff like Auto in VASP KPOINTS file")
threshold.add_argument(
"-s",
"--vasp-kspacing",
action="store",
type=float,
dest="kspacing",
help="Reciprocal-space distance like KSPACING in VASP")
threshold.add_argument(
"--castep",
"--castep_spacing",
"--castep_mp_spacing",
action="store",
type=float,
dest="castep_mp_spacing",
help=("Reciprocal-space distance like KPOINTS_MP_SPACING in CASTEP; "
"this differs from Vasp-like KSPACING by factor of 1/(2 pi)."))
parser.add_argument(
"-t",
"--type",
action="store",
type=str,
default=False,
help="Input file type. If not provided, ASE will guess.")
parser.add_argument(
"-r",
"--realspace",
action="store_true",
help="Use real-space vector lengths instead of "
"computing reciprocal cell; not recommended!")
return parser
def main(params=None):
parser = get_parser()
args = parser.parse_args(params)
if args.vasp_auto:
mode = 'vasp_auto'
cutoff = args.vasp_auto
elif args.kspacing:
mode = 'kspacing'
cutoff = args.kspacing
elif args.castep_mp_spacing:
mode = 'castep_mp_spacing'
cutoff = args.castep_mp_spacing
else:
mode = 'default'
cutoff = args.cutoff_length
calc_grid(
cutoff,
mode=mode,
filename=args.file,
filetype=args.type,
realspace=args.realspace,
pretty_print=True)
if __name__ == '__main__':
main()
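# ---------------------------------------------------------------------------
# Editor's note: hedged usage sketch, not part of the original script. The
# same logic is reachable programmatically; the file name is hypothetical.
#
#   from kgrid.cli import calc_grid, main
#   k_samples = calc_grid(10.0, mode='default', filename='geometry.in')
#   main(params=['-c', '12', 'geometry.in'])   # equivalent CLI-style call
# ---------------------------------------------------------------------------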
|
WMD-group/kgrid
|
kgrid/cli.py
|
Python
|
gpl-3.0
| 3,963
|
[
"ASE",
"CASTEP",
"VASP"
] |
614bdf3421c3eb041dacc74e8e9f5a6562706e0621d095d85a7d855ac4fa252a
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from __future__ import print_function
import h5py
import numpy as np
import os
from utils import write_datasets
import matplotlib
import matplotlib.pyplot as plt
import scipy.signal
def generate_rnn(rng, N, g, tau, dt, max_firing_rate):
"""Create a (vanilla) RNN with a bunch of hyper parameters for generating
chaotic data.
Args:
rng: numpy random number generator
N: number of hidden units
g: scaling of recurrent weight matrix in g W, with W ~ N(0,1/N)
tau: time scale of individual unit dynamics
dt: time step for equation updates
    max_firing_rate: how to rescale the -1,1 firing rates
Returns:
the dictionary of these parameters, plus some others.
"""
rnn = {}
rnn['N'] = N
rnn['W'] = rng.randn(N,N)/np.sqrt(N)
rnn['Bin'] = rng.randn(N)/np.sqrt(1.0)
rnn['Bin2'] = rng.randn(N)/np.sqrt(1.0)
rnn['b'] = np.zeros(N)
rnn['g'] = g
rnn['tau'] = tau
rnn['dt'] = dt
rnn['max_firing_rate'] = max_firing_rate
mfr = rnn['max_firing_rate'] # spikes / sec
nbins_per_sec = 1.0/rnn['dt'] # bins / sec
# Used for plotting in LFADS
rnn['conversion_factor'] = mfr / nbins_per_sec # spikes / bin
return rnn
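# Editor's note: hedged construction sketch (not in the original file), just
# to make the returned dictionary concrete:
#   rng = np.random.RandomState(0)
#   rnn = generate_rnn(rng, N=50, g=1.5, tau=0.025, dt=0.01, max_firing_rate=30)
#   # rnn['conversion_factor'] == 30 / (1.0 / 0.01) == 0.3 spikes / bin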
def generate_data(rnn, T, E, x0s=None, P_sxn=None, input_magnitude=0.0,
input_times=None):
""" Generates data from an randomly initialized RNN.
Args:
rnn: the rnn
    T: Time in seconds to run (divided by rnn['dt'] to get steps, rounded down).
E: total number of examples
S: number of samples (subsampling N)
Returns:
A list of length E of NxT tensors of the network being run.
"""
N = rnn['N']
def run_rnn(rnn, x0, ntime_steps, input_time=None):
rs = np.zeros([N,ntime_steps])
x_tm1 = x0
r_tm1 = np.tanh(x0)
tau = rnn['tau']
dt = rnn['dt']
alpha = (1.0-dt/tau)
W = dt/tau*rnn['W']*rnn['g']
Bin = dt/tau*rnn['Bin']
Bin2 = dt/tau*rnn['Bin2']
b = dt/tau*rnn['b']
us = np.zeros([1, ntime_steps])
for t in range(ntime_steps):
x_t = alpha*x_tm1 + np.dot(W,r_tm1) + b
if input_time is not None and t == input_time:
us[0,t] = input_magnitude
x_t += Bin * us[0,t] # DCS is this what was used?
r_t = np.tanh(x_t)
x_tm1 = x_t
r_tm1 = r_t
rs[:,t] = r_t
return rs, us
if P_sxn is None:
P_sxn = np.eye(N)
ntime_steps = int(T / rnn['dt'])
data_e = []
inputs_e = []
for e in range(E):
input_time = input_times[e] if input_times is not None else None
r_nxt, u_uxt = run_rnn(rnn, x0s[:,e], ntime_steps, input_time)
r_sxt = np.dot(P_sxn, r_nxt)
inputs_e.append(u_uxt)
data_e.append(r_sxt)
S = P_sxn.shape[0]
data_e = normalize_rates(data_e, E, S)
return data_e, x0s, inputs_e
def normalize_rates(data_e, E, S):
# Normalization, made more complex because of the P matrices.
# Normalize by min and max in each channel. This normalization will
# cause offset differences between identical rnn runs, but different
# t hits.
for e in range(E):
r_sxt = data_e[e]
for i in range(S):
rmin = np.min(r_sxt[i,:])
rmax = np.max(r_sxt[i,:])
assert rmax - rmin != 0, 'Something wrong'
r_sxt[i,:] = (r_sxt[i,:] - rmin)/(rmax-rmin)
data_e[e] = r_sxt
return data_e
def spikify_data(data_e, rng, dt=1.0, max_firing_rate=100):
""" Apply spikes to a continuous dataset whose values are between 0.0 and 1.0
Args:
data_e: nexamples length list of NxT trials
dt: how often the data are sampled
max_firing_rate: the firing rate that is associated with a value of 1.0
Returns:
    spikified_e: a list of length E of the data represented as spikes,
    sampled from the underlying Poisson process.
"""
E = len(data_e)
spikes_e = []
for e in range(E):
data = data_e[e]
N,T = data.shape
data_s = np.zeros([N,T]).astype(np.int)
for n in range(N):
f = data[n,:]
s = rng.poisson(f*max_firing_rate*dt, size=T)
data_s[n,:] = s
spikes_e.append(data_s)
return spikes_e
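# Editor's note: hedged usage sketch (not in the original file). Given rates
# already scaled to [0, 1]:
#   rng = np.random.RandomState(0)
#   rates = [np.random.rand(5, 100)]   # one trial: 5 channels x 100 bins
#   spikes = spikify_data(rates, rng, dt=0.01, max_firing_rate=50)
#   # spikes[0] has shape (5, 100) and holds integer Poisson counts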
def gaussify_data(data_e, rng, dt=1.0, max_firing_rate=100):
""" Apply gaussian noise to a continuous dataset whose values are between
0.0 and 1.0
Args:
data_e: nexamples length list of NxT trials
dt: how often the data are sampled
max_firing_rate: the firing rate that is associated with a value of 1.0
Returns:
    gauss_e: a list of length E of the data with noise.
"""
E = len(data_e)
mfr = max_firing_rate
gauss_e = []
for e in range(E):
data = data_e[e]
N,T = data.shape
noisy_data = data * mfr + np.random.randn(N,T) * (5.0*mfr) * np.sqrt(dt)
gauss_e.append(noisy_data)
return gauss_e
def get_train_n_valid_inds(num_trials, train_fraction, nreplications):
"""Split the numbers between 0 and num_trials-1 into two portions for
training and validation, based on the train fraction.
Args:
num_trials: the number of trials
train_fraction: (e.g. .80)
nreplications: the number of spiking trials per initial condition
Returns:
a 2-tuple of two lists: the training indices and validation indices
"""
train_inds = []
valid_inds = []
for i in range(num_trials):
# This line divides up the trials so that within one initial condition,
# the randomness of spikifying the condition is shared among both
# training and validation data splits.
if (i % nreplications)+1 > train_fraction * nreplications:
valid_inds.append(i)
else:
train_inds.append(i)
return train_inds, valid_inds
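# Editor's note: small worked example (hedged, not in the original file).
# With 10 trials, train_fraction=0.8 and nreplications=5, the trials whose
# within-condition index exceeds 0.8 * 5 = 4 go to validation:
#   get_train_n_valid_inds(10, 0.8, 5)
#   # -> ([0, 1, 2, 3, 5, 6, 7, 8], [4, 9])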
def split_list_by_inds(data, inds1, inds2):
"""Take the data, a list, and split it up based on the indices in inds1 and
inds2.
Args:
data: the list of data to split
inds1, the first list of indices
inds2, the second list of indices
Returns: a 2-tuple of two lists.
"""
if data is None or len(data) == 0:
return [], []
else:
dout1 = [data[i] for i in inds1]
dout2 = [data[i] for i in inds2]
return dout1, dout2
def nparray_and_transpose(data_a_b_c):
"""Convert the list of items in data to a numpy array, and transpose it
Args:
data: data_asbsc: a nested, nested list of length a, with sublist length
b, with sublist length c.
Returns:
a numpy 3-tensor with dimensions a x c x b
"""
data_axbxc = np.array([datum_b_c for datum_b_c in data_a_b_c])
data_axcxb = np.transpose(data_axbxc, axes=[0,2,1])
return data_axcxb
def add_alignment_projections(datasets, npcs, ntime=None, nsamples=None):
"""Create a matrix that aligns the datasets a bit, under
the assumption that each dataset is observing the same underlying dynamical
system.
Args:
datasets: The dictionary of dataset structures.
npcs: The number of pcs for each, basically like lfads factors.
nsamples (optional): Number of samples to take for each dataset.
ntime (optional): Number of time steps to take in each sample.
Returns:
The dataset structures, with the field alignment_matrix_cxf added.
This is # channels x npcs dimension
"""
nchannels_all = 0
channel_idxs = {}
conditions_all = {}
nconditions_all = 0
for name, dataset in datasets.items():
cidxs = np.where(dataset['P_sxn'])[1] # non-zero entries in columns
channel_idxs[name] = [cidxs[0], cidxs[-1]+1]
nchannels_all += cidxs[-1]+1 - cidxs[0]
conditions_all[name] = np.unique(dataset['condition_labels_train'])
all_conditions_list = \
np.unique(np.ndarray.flatten(np.array(conditions_all.values())))
nconditions_all = all_conditions_list.shape[0]
if ntime is None:
ntime = dataset['train_data'].shape[1]
if nsamples is None:
nsamples = dataset['train_data'].shape[0]
# In the data workup in the paper, Chethan did intra condition
# averaging, so let's do that here.
avg_data_all = {}
for name, conditions in conditions_all.items():
dataset = datasets[name]
avg_data_all[name] = {}
for cname in conditions:
td_idxs = np.argwhere(np.array(dataset['condition_labels_train'])==cname)
data = np.squeeze(dataset['train_data'][td_idxs,:,:], axis=1)
avg_data = np.mean(data, axis=0)
avg_data_all[name][cname] = avg_data
# Visualize this in the morning.
all_data_nxtc = np.zeros([nchannels_all, ntime * nconditions_all])
for name, dataset in datasets.items():
cidx_s = channel_idxs[name][0]
cidx_f = channel_idxs[name][1]
for cname in conditions_all[name]:
cidxs = np.argwhere(all_conditions_list == cname)
if cidxs.shape[0] > 0:
cidx = cidxs[0][0]
all_tidxs = np.arange(0, ntime+1) + cidx*ntime
all_data_nxtc[cidx_s:cidx_f, all_tidxs[0]:all_tidxs[-1]] = \
avg_data_all[name][cname].T
# A bit of filtering. We don't care about spectral properties, or
# filtering artifacts, simply correlate time steps a bit.
filt_len = 6
bc_filt = np.ones([filt_len])/float(filt_len)
for c in range(nchannels_all):
all_data_nxtc[c,:] = scipy.signal.filtfilt(bc_filt, [1.0], all_data_nxtc[c,:])
# Compute the PCs.
all_data_mean_nx1 = np.mean(all_data_nxtc, axis=1, keepdims=True)
all_data_zm_nxtc = all_data_nxtc - all_data_mean_nx1
corr_mat_nxn = np.dot(all_data_zm_nxtc, all_data_zm_nxtc.T)
evals_n, evecs_nxn = np.linalg.eigh(corr_mat_nxn)
sidxs = np.flipud(np.argsort(evals_n)) # sort such that 0th is highest
evals_n = evals_n[sidxs]
evecs_nxn = evecs_nxn[:,sidxs]
# Project all the channels data onto the low-D PCA basis, where
# low-d is the npcs parameter.
all_data_pca_pxtc = np.dot(evecs_nxn[:, 0:npcs].T, all_data_zm_nxtc)
# Now for each dataset, we regress the channel data onto the top
# pcs, and this will be our alignment matrix for that dataset.
# |B - A*W|^2
for name, dataset in datasets.items():
cidx_s = channel_idxs[name][0]
cidx_f = channel_idxs[name][1]
all_data_zm_chxtc = all_data_zm_nxtc[cidx_s:cidx_f,:] # ch for channel
W_chxp, _, _, _ = \
np.linalg.lstsq(all_data_zm_chxtc.T, all_data_pca_pxtc.T)
dataset['alignment_matrix_cxf'] = W_chxp
alignment_bias_cx1 = all_data_mean_nx1[cidx_s:cidx_f]
dataset['alignment_bias_c'] = np.squeeze(alignment_bias_cx1, axis=1)
do_debug_plot = False
if do_debug_plot:
pc_vecs = evecs_nxn[:,0:npcs]
ntoplot = 400
plt.figure()
plt.plot(np.log10(evals_n), '-x')
plt.figure()
plt.subplot(311)
plt.imshow(all_data_pca_pxtc)
plt.colorbar()
plt.subplot(312)
plt.imshow(np.dot(W_chxp.T, all_data_zm_chxtc))
plt.colorbar()
plt.subplot(313)
plt.imshow(np.dot(all_data_zm_chxtc.T, W_chxp).T - all_data_pca_pxtc)
plt.colorbar()
import pdb
pdb.set_trace()
return datasets
|
jiaphuan/models
|
research/lfads/synth_data/synthetic_data_utils.py
|
Python
|
apache-2.0
| 11,355
|
[
"Gaussian"
] |
97b42161864927645b86d40320f4f5aa89993f093a84e9a0228624aef86c51c5
|
#!/usr/bin/env python
########################################################################
# File : dirac-admin-pilot-summary
# Author : Stuart Paterson
########################################################################
from __future__ import print_function
__RCSID__ = "$Id$"
# pylint: disable=wrong-import-position
import DIRAC
from DIRAC.Core.Base import Script
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
result = diracAdmin.getPilotSummary()
if result['OK']:
DIRAC.exit(0)
else:
print(result['Message'])
DIRAC.exit(2)
|
fstagni/DIRAC
|
Interfaces/scripts/dirac-admin-pilot-summary.py
|
Python
|
gpl-3.0
| 663
|
[
"DIRAC"
] |
82e11cbed8e41ec5d9d4f3778ed0e3029b8aa21ad1902dfc3d2345aa7a320eb0
|
from abc import ABCMeta, abstractmethod
import pandas as pd
import numpy as np
from bids.variables import BIDSRunVariableCollection
from bids.utils import convert_JSON
def create_model_spec(collection, model):
kind = model.get('type', 'glm').lower()
SpecCls = {
'glm': GLMMSpec
}[kind]
return SpecCls.from_collection(collection, model)
class ModelSpec(metaclass=ABCMeta):
"""Base class for all ModelSpec classes."""
@abstractmethod
def from_collection(self, collection, model):
"""Initialize from a BIDSVariableCollection instance."""
pass
class GLMMSpec(ModelSpec):
"""Generalized Linear Mixed Model specification.
Parameters
----------
terms : list of Term
A list of Term instances to include in the GLMMSpec instance.
X: pd.DataFrame
A pandas DataFrame containing the fixed effect design matrix
(i.e., the X matrix in the typical mixed effect formulation). Each
column will be internally converted to a separate Term instance.
Z: pd.DataFrame
A pandas DataFrame containing the random effect/grouping matrix
(i.e., the Z matrix in the typical mixed effect formulation). Columns
that share variance components are identified by the groups argument.
groups: NDArray
A binary 2d array with dimension k x v, where k is the number of
columns in Z and v is the number of distinct variance components. A
        value of 1 indicates that the i'th of the k rows is a level in the
        j'th of the v variance components. If Z is passed and groups is None,
        it is assumed
that all columns in Z share the same single variance.
sigma: NDArray
A 2d array giving the covariance matrix for the variance components
defined in the groups argument. Has dimension v x v, where v is the
number of columns in groups. If None (default), no constraint is
imposed and the covariance is directly estimated.
family: str
The name of the family to use for the error distribution. By default,
gaussian.
link: str
The name of the link function to use. Default depends on family. In the
case of a gaussian (default family), an identity link is used.
priors: dict
Optional specification of default priors to use for new terms.
"""
def __init__(self, terms=None, X=None, Z=None, groups=None, sigma=None,
family=None, link=None, priors=None):
self.terms = {}
self.family = family
self.link = link
self.sigma = sigma
if priors is not None:
self.set_priors(priors)
if terms is not None:
for t in terms:
self.add_term(t)
if X is not None:
self.build_fixed_terms(X)
if Z is not None:
self.build_variance_components(Z, groups, sigma)
def set_priors(self, fixed=None, random=None):
pass
def build_fixed_terms(self, X):
"""Build one or more fixed terms from the columns of a pandas DF.
Parameters
----------
X : pd.DataFrame
A pandas DataFrame containing variables to convert to Term
instances. Each column is converted to a different (fixed) Term,
with the name taken from the column name.
"""
for col in X.columns:
data = X.loc[:, col].values
cat = data.dtype.name in ('str', 'category', 'object')
# TODO: get default prior
t = Term(col, data, categorical=cat)
self.add_term(t)
def build_variance_components(self, Z, groups=None, sigma=None, names=None):
"""Build one or more variance components from the columns of a binary
grouping matrix and variance specification.
Arguments:
Z (DataFrame, NDArray): A binary 2D array or pandas DataFrame. Each
column represents a column/predictor, each row represents an
observation.
groups (2DArray): A 2D binary array that maps the columns of Z
onto variance components. Has dimension n_rows(Z) x k,
where k is the number of distinct variance components. If None,
a single group over all columns of Z is assumed.
sigma (2DArray): A k x k 2D covariance matrix specifying the
covariances between variance components.
names (list): Optional list specifying the names of the groups.
"""
if groups is None:
groups = np.ones((Z.shape[1], 1))
n_grps = groups.shape[1]
if names is None:
names = getattr(groups, 'columns',
['VC{}'.format(i) for i in range(n_grps)])
# Work with array instead of DF
if hasattr(groups, 'values'):
groups = groups.values
for i in range(n_grps):
z_grp = Z[:, groups[:, i].astype(bool)]
# TODO: select default prior
vc = VarComp(names[i], z_grp)
self.add_term(vc)
def add_term(self, term):
"""Add a new Term to the instance.
Parameters
----------
term : Term
A Term instance to add to the current instance.
"""
if term.name in self.terms:
raise ValueError("Term with name {} already exists!"
.format(term.name))
self.terms[term.name] = term
@property
def X(self):
"""Return X design matrix (i.e., fixed component of model)."""
if not self.fixed_terms:
return None
names, cols = zip(*[(c.name, c.values) for c in self.fixed_terms])
return pd.DataFrame(np.c_[cols], columns=names)
@property
def Z(self):
"""Return Z design matrix (i.e., random effects/variance components).
"""
if not self.variance_components:
return None
names, cols = [], []
for c in self.variance_components:
cols.append(c.values)
names.extend(['{}.{}'.format(c.name, i)
for i in range(c.values.shape[1])])
return pd.DataFrame(np.concatenate(cols, axis=1), columns=names)
@property
def fixed_terms(self):
"""Return a list of all available fixed effects."""
return [t for t in self.terms.values() if not isinstance(t, VarComp)]
@property
def variance_components(self):
"""Return a list of all available variance components."""
return [t for t in self.terms.values() if isinstance(t, VarComp)]
@classmethod
def from_collection(cls, collection, model):
""" Initialize a GLMMSpec instance from a BIDSVariableCollection and
a BIDS-StatsModels JSON spec.
Parameters
----------
collection : BIDSVariableCollection
A BIDSVariableCollection containing variable information.
model : dict
The "Model" section from a BIDS-StatsModel specification.
Returns
-------
A GLMMSpec instance.
"""
if isinstance(collection, BIDSRunVariableCollection):
if not collection.all_dense():
raise ValueError("Input BIDSRunVariableCollection contains at "
"least one sparse variable. All variables must"
" be dense!")
kwargs = {}
# Fixed terms
model = convert_JSON(model)
names = model.get('x', [])
if names:
names = collection.match_variables(names)
X = collection.to_df(names).loc[:, names]
kwargs['X'] = X
# Variance components
vcs = model.get('variance_components', [])
Z_list = []
if vcs:
for vc in vcs:
# Levels can either be defined by the levels of a single
# categorical ("LevelsFrom") or by a set of binary variables.
if 'levels_from' in vc:
data = collection.variables[vc['levels_from']].values
Z_list.append(pd.get_dummies(data).values)
else:
names = collection.match_variables(vc['levels'])
df = collection.to_df(names).loc[:, names]
Z_list.append(df.values)
Z = np.concatenate(Z_list, axis=1)
groups = np.zeros((Z.shape[1], len(Z_list)))
c = 0
for i, vc in enumerate(Z_list):
n = vc.shape[1]
groups[c:(c+n), i] = 1
c += n
groups = pd.DataFrame(groups, columns=[vc['name'] for vc in vcs])
kwargs['Z'] = Z
kwargs['groups'] = groups
error = model.get('error')
if error:
kwargs['family'] = error.get('family')
kwargs['link'] = error.get('link')
return GLMMSpec(**kwargs)
class Term(object):
"""Represents a model term.
Parameters
----------
name : str
The name of the term.
values : iterable
A 1d array or other iterable containing the predictor values.
categorical : bool
Indicates whether or not the Term represents a categorical variable.
prior : dict
Optional specification of the prior distribution for the Term.
metadata : dict
Arbitrary metadata to store internally.
"""
def __init__(self, name, values, categorical=False, prior=None,
metadata=None):
self.name = name
self.values = values
self.categorical = categorical
self.prior = prior
self.metadata = metadata or {}
class VarComp(Term):
"""Represents a variance component/random effect.
Parameters
----------
name : str
The name of the variance component.
values : iterable
A 2d binary array identifying the observations that belong to the
levels of the variance component. Has dimension n x k, where n is the
number of observed rows in the dataset and k is the number of levels
in the factor.
prior : dict
Optional specification of the prior distribution for the VarComp.
metadata : dict
Arbitrary metadata to store internally.
"""
def __init__(self, name, values, prior=None, metadata=None):
super(VarComp, self).__init__(name, values, categorical=True,
prior=prior, metadata=metadata)
self.index_vec = self.dummies_to_vec(values)
@staticmethod
def dummies_to_vec(dummies):
"""Convert dummy-coded columns to a single integer index.
Parameters
----------
dummies : NDArray
2d binary array to recode as a single vector.
Notes
-----
Used for the sake of computational efficiency (i.e., to avoid lots of
large matrix multiplications in the backends), invert the dummy-coding
process and represent full-rank dummies as a vector of indices into the
coefficients.
"""
vec = np.zeros(len(dummies), dtype=int)
for i in range(dummies.shape[1]):
vec[(dummies[:, i] == 1)] = i + 1
return vec
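# Editor's note: small illustration of dummies_to_vec (hedged, not in the
# original source). For a 4 x 2 dummy matrix whose columns encode two levels:
#   VarComp.dummies_to_vec(np.array([[1, 0], [0, 1], [0, 1], [0, 0]]))
#   # -> array([1, 2, 2, 0]); rows belonging to no level map to 0, otherwise
#   # to the 1-based column index.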
class Prior(object):
'''Abstract specification of a term prior.
Parameters
----------
name : str
Name of prior distribution (e.g., Normal, Bernoulli, etc.)
kwargs: dict
Optional keywords specifying the parameters of the named distribution.
Notes
-----
At present there's no controlled vocabulary of supported prior names and
arguments, but users implementing new Bayesian estimators are encouraged to
use the names used in PyMC3 (e.g., 'Normal', parameterized with mu and
sd arguments).
'''
def __init__(self, name, **kwargs):
self.name = name
self.kwargs = kwargs
|
INCF/pybids
|
bids/analysis/model_spec.py
|
Python
|
mit
| 11,942
|
[
"Gaussian"
] |
172dc780e12dc266f73acff6bf5fd8a7d70d10f418daf1f551b048715c3363ec
|
""" Basic unit tests for the Job API
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from os.path import dirname, join
import pytest
from six import StringIO
from DIRAC.Interfaces.API.Job import Job
from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
def test_basicJob():
job = Job()
job.setOwner('ownerName')
job.setOwnerGroup('ownerGroup')
job.setName('jobName')
job.setJobGroup('jobGroup')
job.setExecutable('someExe')
job.setType('jobType')
job.setDestination('ANY')
xml = job._toXML()
with open(join(dirname(__file__), "testWF.xml")) as fd:
expected = fd.read()
assert xml == expected
with open(join(dirname(__file__), "testWFSIO.jdl")) as fd:
expected = fd.read()
jdlSIO = job._toJDL(jobDescriptionObject=StringIO(job._toXML()))
assert jdlSIO == expected
def test_SimpleParametricJob():
job = Job()
job.setExecutable('myExec')
job.setLogLevel('DEBUG')
parList = [1, 2, 3]
job.setParameterSequence('JOB_ID', parList, addToWorkflow=True)
inputDataList = [
[
'/lhcb/data/data1',
'/lhcb/data/data2'
],
[
'/lhcb/data/data3',
'/lhcb/data/data4'
],
[
'/lhcb/data/data5',
'/lhcb/data/data6'
]
]
job.setParameterSequence('InputData', inputDataList, addToWorkflow=True)
jdl = job._toJDL()
with open(join(dirname(__file__), "testWF.jdl")) as fd:
expected = fd.read()
assert jdl == expected
clad = ClassAd('[' + jdl + ']')
arguments = clad.getAttributeString('Arguments')
job_id = clad.getAttributeString('JOB_ID')
inputData = clad.getAttributeString('InputData')
assert job_id == '%(JOB_ID)s'
assert inputData == '%(InputData)s'
assert 'jobDescription.xml' in arguments
assert '-o LogLevel=DEBUG' in arguments
  assert '-p JOB_ID=%(JOB_ID)s' in arguments
  assert '-p InputData=%(InputData)s' in arguments
@pytest.mark.parametrize("proc, minProc, maxProc, expectedProc, expectedMinProc, expectedMaxProc", [
(4, None, None, 4, None, 4),
(4, 2, None, 4, None, 4),
(4, 2, 8, 4, None, 4),
(4, 8, 6, 8, None, 8), # non-sense
(None, 2, 8, None, 2, 8),
(None, 1, None, None, 1, None),
(None, None, 8, None, 1, 8),
(None, 8, 8, 8, None, 8),
(None, 12, 8, 8, None, 8), # non-sense
])
def test_MPJob(proc, minProc, maxProc, expectedProc, expectedMinProc, expectedMaxProc):
job = Job()
job.setExecutable('myExec')
job.setLogLevel('DEBUG')
job.setNumberOfProcessors(proc, minProc, maxProc)
jdl = job._toJDL()
clad = ClassAd('[' + jdl + ']')
processors = clad.getAttributeInt('NumberOfProcessors')
minProcessors = clad.getAttributeInt('MinNumberOfProcessors')
maxProcessors = clad.getAttributeInt('MaxNumberOfProcessors')
assert processors == expectedProc
assert minProcessors == expectedMinProc
assert maxProcessors == expectedMaxProc
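# ---------------------------------------------------------------------------
# Editor's note: hedged submission sketch, not part of the original tests and
# not executed here. In a configured DIRAC client environment a Job built as
# above is typically handed to the Dirac API, along the lines of:
#   from DIRAC.Interfaces.API.Dirac import Dirac
#   result = Dirac().submitJob(job)    # returns an S_OK/S_ERROR structure
# ---------------------------------------------------------------------------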
|
yujikato/DIRAC
|
src/DIRAC/Interfaces/API/test/Test_JobAPI.py
|
Python
|
gpl-3.0
| 2,970
|
[
"DIRAC"
] |
b4c3411c1746e1754cfbfd8068a3cce2a133f11f4e11b3d04c852338213c1dd2
|
from unittest import TestCase
from webob import Response
from webob.dec import wsgify
from maitai.prunecookies import PruneCookiesMiddleware
from .utils import FixedTestApp
@wsgify
def bare_app(req):
s = '\n'.join(['%s: %s' % (k, v)
for k, v in req.cookies.items()])
return Response(s)
class TestPruneCookiesMiddleware(TestCase):
def test_whitelist(self):
wrapped_app = PruneCookiesMiddleware(bare_app,
whitelist=('foo', 'bar', 'baz'))
app = FixedTestApp(wrapped_app)
resp = app.get('/')
self.assertEqual(resp.body, b'')
app.cookies = {
'foo': '123',
'bar': '456',
'baz': '789',
'quux': '111',
'larry': '222',
'curly': '333',
'moe': '444',
}
resp = app.get('/', status=307)
resp = resp.follow()
resp.mustcontain('foo: 123')
resp.mustcontain('bar: 456')
resp.mustcontain('baz: 789')
body = resp.body.decode('utf-8')
self.assertNotIn('quux', body)
self.assertNotIn('larry', body)
self.assertNotIn('curly', body)
self.assertNotIn('moe', body)
def test_blacklist(self):
wrapped_app = PruneCookiesMiddleware(bare_app,
blacklist=('quux', 'larry',
'curly', 'moe'))
app = FixedTestApp(wrapped_app)
resp = app.get('/')
self.assertEqual(resp.body, b'')
app.cookies = {
'foo': '123',
'bar': '456',
'baz': '789',
'quux': '111',
'larry': '222',
'curly': '333',
'moe': '444',
}
resp = app.get('/', status=307)
resp = resp.follow()
resp.mustcontain('foo: 123')
resp.mustcontain('bar: 456')
resp.mustcontain('baz: 789')
body = resp.body.decode('utf-8')
self.assertNotIn('quux', body)
self.assertNotIn('larry', body)
self.assertNotIn('curly', body)
self.assertNotIn('moe', body)
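# ---------------------------------------------------------------------------
# Editor's note: hedged usage sketch, not part of the original tests. In an
# application the middleware simply wraps a WSGI callable:
#   app = PruneCookiesMiddleware(my_wsgi_app, whitelist=('sessionid',))
# A request carrying non-whitelisted cookies is answered with a 307 redirect
# that drops them, which is what the tests above exercise via resp.follow().
# ---------------------------------------------------------------------------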
|
storborg/maitai
|
maitai/tests/test_prunecookies.py
|
Python
|
mit
| 2,184
|
[
"MOE"
] |
766dd18d0ae3d66a5eab5aeeb398191839c80829fc2d6597cad71e5a2ed3ea5d
|
"""
Read and write [fasta format](http://blast.ncbi.nlm.nih.gov/blastcgihelp.shtml)
"""
from itertools import groupby
class FastaReader(object):
"""
Read fasta files into tuples (header, seq).
"""
def __init__(self, file):
"""
Args:
file: The fasta file. Can either be a name or a handle.
"""
if not hasattr(file, 'read'):
self.file = open(file, 'r')
else:
self.file = file
def get_entries(self):
"""
Get the next Entry from the fasta file.
Returns:
Generator, which yields (header, sequence) tuples
"""
for isheader, group in groupby(self.file, lambda line: line[0] == ">"):
if isheader:
header = next(group)[1:].strip()
else:
seq = "".join(line.strip() for line in group)
yield header, seq
def close(self):
"""Close file handle"""
self.file.close()
class FastaWriter(object):
"""
Write fasta files from tuples (header, seq)
"""
SPLIT = 80
def __init__(self, file, split = SPLIT):
"""
Args:
file: The output fasta file. Can either be a (writeable) file handle or a path
split: specifies after how many characters a sequence line will be wrapped.
"""
self.split = split
if not hasattr(file, 'write'):
self.file = open(file, 'w')
else:
self.file = file
def write_entry(self, header, sequence):
"""
Write Entry to File
Args:
header: >sequence_header (without >)
sequence: ACTGATT...
"""
sequence = [sequence[i:i+self.split] for i in range(0, len(sequence), self.split)]
self.file.write(">{0}\n".format(header))
for s in sequence:
self.file.write(s + "\n")
def flush(self):
self.file.flush()
def close(self):
"""Close file handle"""
self.file.close()
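# Editor's note: minimal self-contained usage sketch appended for
# illustration only; it is not part of the original module. It round-trips a
# tiny record through the writer and reader above using an in-memory buffer.
if __name__ == "__main__":
    import io
    buf = io.StringIO()
    writer = FastaWriter(buf, split=10)
    writer.write_entry("seq1 example", "ACTGACTGACTGACTG")
    writer.flush()
    buf.seek(0)
    reader = FastaReader(buf)
    for header, seq in reader.get_entries():
        print(header, seq)   # -> seq1 example ACTGACTGACTGACTG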
|
grst/microbio
|
microbio/formats/fasta.py
|
Python
|
mit
| 2,049
|
[
"BLAST"
] |
0210a0a6dd5ce95db8de7bba49634bc9d0fbeb422a07a6cdb2e8d0a33b8dc047
|
__RCSID__ = "$Id$"
import os
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.List import stringListToString, intListToString
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.FileManagerBase import FileManagerBase
class FileManagerFlat(FileManagerBase):
######################################################
#
# The all important _findFiles and _getDirectoryFiles methods
#
def _findFiles(self, lfns, metadata=['FileID'], connection=False):
    """ Find file ID if it exists for the given list of LFNs """
    connection = self._getConnection(connection)
dirDict = self._getFileDirectories(lfns)
failed = {}
directoryIDs = {}
for dirPath in dirDict.keys():
res = self.db.dtree.findDir(dirPath)
if not res['OK'] or not res['Value']:
error = res.get('Message', 'No such file or directory')
for fileName in dirDict[dirPath]:
failed['%s/%s' % (dirPath, fileName)] = error
else:
directoryIDs[dirPath] = res['Value']
successful = {}
for dirPath in directoryIDs.keys():
fileNames = dirDict[dirPath]
res = self._getDirectoryFiles(directoryIDs[dirPath], fileNames, metadata, connection=connection)
if not res['OK'] or not res['Value']:
error = res.get('Message', 'No such file or directory')
for fileName in fileNames:
failed['%s/%s' % (dirPath, fileName)] = error
else:
for fileName, fileDict in res['Value'].items():
successful["%s/%s" % (dirPath, fileName)] = fileDict
return S_OK({"Successful": successful, "Failed": failed})
def _getDirectoryFiles(self, dirID, fileNames, metadata, allStatus=False, connection=False):
connection = self._getConnection(connection)
# metadata can be any of
# ['FileID','Size','UID','GID','Checksum','ChecksumType','Type','CreationDate','ModificationDate','Mode','Status']
req = "SELECT FileName,%s FROM FC_Files WHERE DirID=%d" % (intListToString(metadata), dirID)
if not allStatus:
statusIDs = []
res = self._getStatusInt('AprioriGood', connection=connection)
if res['OK']:
statusIDs.append(res['Value'])
if statusIDs:
req = "%s AND Status IN (%s)" % (req, intListToString(statusIDs))
if fileNames:
req = "%s AND FileName IN (%s)" % (req, stringListToString(fileNames))
res = self.db._query(req, connection)
if not res['OK']:
return res
files = {}
for fTuple in res['Value']:
fileName = fTuple[0]
files[fileName] = dict(zip(metadata, fTuple[1:]))
return S_OK(files)
######################################################
#
# _addFiles related methods
#
def _insertFiles(self, lfns, uid, gid, connection=False):
connection = self._getConnection(connection)
# Add the files
failed = {}
directoryFiles = {}
insertTuples = []
res = self._getStatusInt('AprioriGood', connection=connection)
statusID = 0
if res['OK']:
statusID = res['Value']
for lfn in sorted(lfns.keys()):
fileInfo = lfns[lfn]
size = fileInfo['Size']
guid = fileInfo.get('GUID', '')
checksum = fileInfo['Checksum']
checksumtype = fileInfo.get('ChecksumType', 'Adler32')
dirName = os.path.dirname(lfn)
dirID = fileInfo['DirID']
fileName = os.path.basename(lfn)
if dirName not in directoryFiles:
directoryFiles[dirName] = []
directoryFiles[dirName].append(fileName)
insertTuples.append("(%d,%d,%d,%d,%d,'%s','%s','%s','%s',UTC_TIMESTAMP(),UTC_TIMESTAMP(),%d)" %
(dirID, size, uid, gid, statusID, fileName, guid, checksum, checksumtype, self.db.umask))
fields = "DirID,Size,UID,GID,Status,FileName,GUID,Checksum,ChecksumType,CreationDate,ModificationDate,Mode"
req = "INSERT INTO FC_Files (%s) VALUES %s" % (fields, ','.join(insertTuples))
res = self.db._update(req, connection)
if not res['OK']:
return res
# Get the fileIDs for the inserted files
res = self._findFiles(lfns.keys(), ['FileID'], connection=connection)
if not res['OK']:
for lfn in lfns.keys():
failed[lfn] = 'Failed post insert check'
lfns.pop(lfn)
else:
failed.update(res['Value']['Failed'])
for lfn, fileDict in res['Value']['Successful'].items():
lfns[lfn]['FileID'] = fileDict['FileID']
return S_OK({'Successful': lfns, 'Failed': failed})
def _getFileIDFromGUID(self, guid, connection=False):
connection = self._getConnection(connection)
if not guid:
return S_OK({})
if not isinstance(guid, (list, tuple)):
guid = [guid]
req = "SELECT FileID,GUID FROM FC_Files WHERE GUID IN (%s)" % stringListToString(guid)
res = self.db._query(req, connection)
if not res['OK']:
return res
guidDict = {}
for fileID, guid in res['Value']:
guidDict[guid] = fileID
return S_OK(guidDict)
######################################################
#
# _deleteFiles related methods
#
def _deleteFiles(self, fileIDs, connection=False):
connection = self._getConnection(connection)
replicaPurge = self.__deleteFileReplicas(fileIDs)
filePurge = self.__deleteFiles(fileIDs, connection=connection)
if not replicaPurge['OK']:
return replicaPurge
if not filePurge['OK']:
return filePurge
return S_OK()
def __deleteFileReplicas(self, fileIDs, connection=False):
connection = self._getConnection(connection)
if not fileIDs:
return S_OK()
req = "DELETE FROM FC_Replicas WHERE FileID in (%s)" % (intListToString(fileIDs))
return self.db._update(req, connection)
def __deleteFiles(self, fileIDs, connection=False):
connection = self._getConnection(connection)
if not fileIDs:
return S_OK()
req = "DELETE FROM FC_Files WHERE FileID in (%s)" % (intListToString(fileIDs))
return self.db._update(req, connection)
######################################################
#
# _addReplicas related methods
#
def _insertReplicas(self, lfns, master=False, connection=False):
connection = self._getConnection(connection)
res = self._getStatusInt('AprioriGood', connection=connection)
statusID = 0
if res['OK']:
statusID = res['Value']
replicaType = 'Replica'
if master:
replicaType = 'Master'
insertTuples = {}
deleteTuples = []
successful = {}
failed = {}
directorySESizeDict = {}
for lfn in sorted(lfns.keys()):
fileID = lfns[lfn]['FileID']
pfn = lfns[lfn]['PFN']
seName = lfns[lfn]['SE']
res = self.db.seManager.findSE(seName)
if not res['OK']:
failed[lfn] = res['Message']
continue
seID = res['Value']
if not master:
res = self.__existsReplica(fileID, seID, connection=connection)
if not res['OK']:
failed[lfn] = res['Message']
continue
elif res['Value']:
successful[lfn] = True
continue
dirID = lfns[lfn]['DirID']
if dirID not in directorySESizeDict:
directorySESizeDict[dirID] = {}
if seID not in directorySESizeDict[dirID]:
directorySESizeDict[dirID][seID] = {'Files': 0, 'Size': 0}
directorySESizeDict[dirID][seID]['Size'] += lfns[lfn]['Size']
directorySESizeDict[dirID][seID]['Files'] += 1
insertTuples[lfn] = (
"(%d,%d,%d,'%s',UTC_TIMESTAMP(),UTC_TIMESTAMP(),'%s')" %
(fileID, seID, statusID, replicaType, pfn))
deleteTuples.append((fileID, seID))
if insertTuples:
fields = "FileID,SEID,Status,RepType,CreationDate,ModificationDate,PFN"
req = "INSERT INTO FC_Replicas (%s) VALUES %s" % (fields, ','.join(insertTuples.values()))
res = self.db._update(req, connection)
if not res['OK']:
self.__deleteReplicas(deleteTuples, connection=connection)
for lfn in insertTuples.keys():
failed[lfn] = res['Message']
else:
# Update the directory usage
self._updateDirectoryUsage(directorySESizeDict, '+', connection=connection)
for lfn in insertTuples.keys():
successful[lfn] = True
return S_OK({'Successful': successful, 'Failed': failed})
def __existsReplica(self, fileID, seID, connection=False):
    """ Check if a replica already exists """
    # TODO: This is inefficient. Should perform a bulk operation
    connection = self._getConnection(connection)
if isinstance(seID, basestring):
res = self.db.seManager.findSE(seID)
if not res['OK']:
return res
seID = res['Value']
req = "SELECT FileID FROM FC_Replicas WHERE FileID=%d AND SEID=%d" % (fileID, seID)
result = self.db._query(req, connection)
if not result['OK']:
return result
if not result['Value']:
return S_OK(False)
return S_OK(True)
######################################################
#
# _deleteReplicas related methods
#
def _deleteReplicas(self, lfns, connection=False):
connection = self._getConnection(connection)
successful = {}
res = self._findFiles(lfns.keys(), ['DirID', 'FileID', 'Size'], connection=connection)
failed = res['Value']['Failed']
lfnFileIDDict = res['Value']['Successful']
toRemove = []
directorySESizeDict = {}
for lfn, fileDict in lfnFileIDDict.items():
fileID = fileDict['FileID']
se = lfns[lfn]['SE']
toRemove.append((fileID, se))
# Now prepare the storage usage dict
res = self.db.seManager.findSE(se)
if not res['OK']:
return res
seID = res['Value']
dirID = fileDict['DirID']
if dirID not in directorySESizeDict:
directorySESizeDict[dirID] = {}
if seID not in directorySESizeDict[dirID]:
directorySESizeDict[dirID][seID] = {'Files': 0, 'Size': 0}
directorySESizeDict[dirID][seID]['Size'] += fileDict['Size']
directorySESizeDict[dirID][seID]['Files'] += 1
res = self.__deleteReplicas(toRemove)
if not res['OK']:
for lfn in lfnFileIDDict.keys():
failed[lfn] = res['Message']
else:
# Update the directory usage
self._updateDirectoryUsage(directorySESizeDict, '-', connection=connection)
for lfn in lfnFileIDDict.keys():
successful[lfn] = True
return S_OK({'Successful': successful, 'Failed': failed})
def __deleteReplicas(self, replicaTuples, connection=False):
connection = self._getConnection(connection)
deleteTuples = []
for fileID, seID in replicaTuples:
if isinstance(seID, basestring):
res = self.db.seManager.findSE(seID)
if not res['OK']:
return res
seID = res['Value']
deleteTuples.append("(%d,%d)" % (fileID, seID))
req = "DELETE FROM FC_Replicas WHERE (FileID,SEID) IN (%s)" % intListToString(deleteTuples)
return self.db._update(req, connection)
######################################################
#
# _setReplicaStatus _setReplicaHost _setReplicaParameter methods
# _setFileParameter method
#
def _setReplicaStatus(self, fileID, se, status, connection=False):
connection = self._getConnection(connection)
res = self._getStatusInt(status, connection=connection)
if not res['OK']:
return res
statusID = res['Value']
return self._setReplicaParameter(fileID, se, 'Status', statusID, connection=connection)
def _setReplicaHost(self, fileID, se, newSE, connection=False):
connection = self._getConnection(connection)
res = self.db.seManager.findSE(newSE)
if not res['OK']:
return res
newSE = res['Value']
return self._setReplicaParameter(fileID, se, 'SEID', newSE, connection=connection)
def _setReplicaParameter(self, fileID, seID, paramName, paramValue, connection=False):
connection = self._getConnection(connection)
if isinstance(seID, basestring):
res = self.db.seManager.findSE(seID)
if not res['OK']:
return res
seID = res['Value']
req = "UPDATE FC_Replicas SET %s='%s', ModificationDate=UTC_TIMESTAMP() WHERE FileID=%d AND SEID=%d;" % (
paramName,
paramValue,
fileID,
seID)
return self.db._update(req, connection)
def _setFileParameter(self, fileID, paramName, paramValue, connection=False):
connection = self._getConnection(connection)
if not isinstance(fileID, (list, tuple)):
fileID = [fileID]
req = "UPDATE FC_Files SET %s='%s', ModificationDate=UTC_TIMESTAMP() WHERE FileID IN (%s)" % (
paramName,
paramValue,
intListToString(fileID))
return self.db._update(req, connection)
######################################################
#
# _getFileReplicas related methods
#
def _getFileReplicas(self, fileIDs, fields=['PFN'], connection=False):
connection = self._getConnection(connection)
if not fileIDs:
return S_ERROR("No such file or directory")
req = "SELECT FileID,SEID,Status,%s FROM FC_Replicas WHERE FileID IN (%s);" % (
intListToString(fields), intListToString(fileIDs))
res = self.db._query(req, connection)
if not res['OK']:
return res
replicas = {}
for fTuple in res['Value']:
fileID = fTuple[0]
if fileID not in replicas:
replicas[fileID] = {}
seID = fTuple[1]
res = self.db.seManager.getSEName(seID)
if not res['OK']:
continue
seName = res['Value']
statusID = fTuple[2]
res = self._getIntStatus(statusID, connection=connection)
if not res['OK']:
continue
status = res['Value']
replicas[fileID][seName] = {'Status': status}
replicas[fileID][seName].update(dict(zip(fields, fTuple[3:])))
for fileID in fileIDs:
if fileID not in replicas:
replicas[fileID] = {}
return S_OK(replicas)
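# ---------------------------------------------------------------------------
# Editor's note: hedged illustration of the return convention used throughout
# this class; it is not part of the original file. Bulk methods return
#   S_OK({'Successful': {lfn: ...}, 'Failed': {lfn: error_message}})
# so a caller typically unpacks the result as:
#   res = fileManager._findFiles(lfns)
#   if res['OK']:
#       ok, failed = res['Value']['Successful'], res['Value']['Failed']
# ---------------------------------------------------------------------------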
|
fstagni/DIRAC
|
DataManagementSystem/DB/FileCatalogComponents/FileManagerFlat.py
|
Python
|
gpl-3.0
| 13,782
|
[
"DIRAC"
] |
cbe095cde1e3c2b45f36b20e0a67d54a02cf763dbebd38fc1c6d4a13120e6101
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyscf.pbc import scf
from pyscf.pbc.mp import mp2
from pyscf.pbc.mp import kmp2
def RMP2(mf, frozen=None, mo_coeff=None, mo_occ=None):
mf = scf.addons.convert_to_rhf(mf)
return mp2.RMP2(mf, frozen, mo_coeff, mo_occ)
MP2 = RMP2
def UMP2(mf, frozen=None, mo_coeff=None, mo_occ=None):
mf = scf.addons.convert_to_uhf(mf)
return mp2.UMP2(mf, frozen, mo_coeff, mo_occ)
def GMP2(mf, frozen=None, mo_coeff=None, mo_occ=None):
mf = scf.addons.convert_to_ghf(mf)
return mp2.GMP2(mf, frozen, mo_coeff, mo_occ)
def KRMP2(mf, frozen=None, mo_coeff=None, mo_occ=None):
return kmp2.KRMP2(mf, frozen, mo_coeff, mo_occ)
KMP2 = KRMP2
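# Editor's note: hedged usage sketch, not part of the original module. With a
# converged periodic mean-field object from pyscf.pbc.scf, e.g.:
#   from pyscf.pbc import gto, scf, mp
#   cell = gto.Cell(); ...   # build and cell.build() a periodic system here
#   mf = scf.RHF(cell).run()
#   pt = mp.RMP2(mf).run()   # the MP2 correlation energy then sits in pt.e_corr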
|
gkc1000/pyscf
|
pyscf/pbc/mp/__init__.py
|
Python
|
apache-2.0
| 1,289
|
[
"PySCF"
] |
81ddc5d78ae48c09d9698357ca1c358b3697fdd122a8b8f3b057d72da0b7182f
|
from unittest import skip
from datetime import date
import pytest
from . import GenericCalendarTest
from ..usa import (
UnitedStates,
Alabama, AlabamaBaldwinCounty, AlabamaMobileCounty, AlabamaPerryCounty,
Arkansas, Alaska, Arizona,
# California and others
California, CaliforniaEducation, CaliforniaBerkeley,
CaliforniaSanFrancisco, CaliforniaWestHollywood,
# Florida and others
Florida, FloridaLegal, FloridaCircuitCourts, FloridaMiamiDade,
Colorado, Connecticut, Delaware, DistrictOfColumbia, Georgia, Hawaii,
Indiana, Illinois, Idaho, Iowa, Kansas, Kentucky, Louisiana, Maine,
Maryland, Massachusetts, Minnesota, Michigan, Mississippi, Missouri,
Montana, Nebraska, Nevada, NewHampshire, NewJersey, NewMexico, NewYork,
NorthCarolina, NorthDakota, Ohio, Oklahoma, Oregon, Pennsylvania,
RhodeIsland, SouthCarolina, SouthDakota, Tennessee, TexasBase, Texas,
Utah, Vermont, Virginia, Washington, WestVirginia, Wisconsin, Wyoming,
# Other territories, cities...
AmericanSamoa, ChicagoIllinois, Guam, SuffolkCountyMassachusetts,
)
class UnitedStatesTest(GenericCalendarTest):
cal_class = UnitedStates
def test_martin_luther_king_day(self):
# All States observe this day, but it started in 1985 only.
holidays = self.cal.holidays_set(2013)
mlk_day = self.cal.get_martin_luther_king_date(2013)
self.assertEqual(date(2013, 1, 21), mlk_day)
self.assertIn(mlk_day, holidays)
holidays = self.cal.holidays_set(2014)
mlk_day = self.cal.get_martin_luther_king_date(2014)
self.assertEqual(date(2014, 1, 20), mlk_day)
self.assertIn(mlk_day, holidays)
# Shifted in 2015
holidays = self.cal.holidays_set(2015)
mlk_day = self.cal.get_martin_luther_king_date(2015)
self.assertEqual(date(2015, 1, 19), mlk_day)
self.assertIn(mlk_day, holidays)
# Let's get into the past
holidays = self.cal.holidays_set(1986)
mlk_day = self.cal.get_martin_luther_king_date(1986)
self.assertEqual(date(1986, 1, 20), mlk_day)
self.assertIn(mlk_day, holidays)
holidays = self.cal.holidays_set(1985)
mlk_day = self.cal.get_martin_luther_king_date(1985)
self.assertEqual(date(1985, 1, 21), mlk_day)
self.assertIn(mlk_day, holidays)
# No MLK Day before 1985
# 3rd Monday of January was the 16th
holidays = self.cal.holidays_set(1984)
self.assertNotIn(date(1984, 1, 16), holidays)
with self.assertRaises(ValueError):
self.cal.get_martin_luther_king_date(1984)
def test_mlk_label(self):
_, label = self.cal.get_martin_luther_king_day(2017)
self.assertEqual(label, "Birthday of Martin Luther King, Jr.")
def test_federal_year_2013(self):
holidays = self.cal.holidays_set(2013)
self.assertIn(date(2013, 1, 1), holidays) # New Year
self.assertIn(date(2013, 5, 27), holidays) # Memorial day
self.assertIn(date(2013, 7, 4), holidays) # Nation day
self.assertIn(date(2013, 9, 2), holidays) # Labour day
self.assertIn(date(2013, 11, 11), holidays) # Armistice
        self.assertIn(date(2013, 11, 28), holidays)  # Thanksgiving
self.assertIn(date(2013, 12, 25), holidays) # Christmas
def test_independence_day_nearest_weekday(self):
"""
Independence Day should shift to the nearest weekday.
"""
holidays = self.cal.holidays_set(2010)
observed = set(map(self.cal.get_observed_date, holidays))
self.assertIn(date(2010, 7, 5), observed)
holidays = self.cal.holidays_set(2011)
observed = set(map(self.cal.get_observed_date, holidays))
self.assertIn(date(2011, 7, 4), observed)
holidays = self.cal.holidays_set(2015)
observed = set(map(self.cal.get_observed_date, holidays))
self.assertIn(date(2015, 7, 3), observed)
def test_presidential_year(self):
self.assertTrue(UnitedStates.is_presidential_year(2012))
self.assertFalse(UnitedStates.is_presidential_year(2013))
self.assertFalse(UnitedStates.is_presidential_year(2014))
self.assertFalse(UnitedStates.is_presidential_year(2015))
self.assertTrue(UnitedStates.is_presidential_year(2016))
def test_election_day(self):
# Election day is:
# the Tuesday next after the first Monday in the month of November
self.assertEqual(date(2013, 11, 5), self.cal.get_election_date(2013))
self.assertEqual(date(2014, 11, 4), self.cal.get_election_date(2014))
self.assertEqual(date(2015, 11, 3), self.cal.get_election_date(2015))
self.assertEqual(date(2016, 11, 8), self.cal.get_election_date(2016))
self.assertEqual(date(2017, 11, 7), self.cal.get_election_date(2017))
self.assertEqual(date(2018, 11, 6), self.cal.get_election_date(2018))
self.assertEqual(date(2019, 11, 5), self.cal.get_election_date(2019))
self.assertEqual(date(2020, 11, 3), self.cal.get_election_date(2020))
def test_election_day_label(self):
_, label = self.cal.get_election_day(2017)
self.assertEqual(label, "Election Day")
def test_federal_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 1, 1), holidays) # New Year
self.assertIn(date(2014, 5, 26), holidays) # Memorial day
self.assertIn(date(2014, 7, 4), holidays) # Nation day
self.assertIn(date(2014, 9, 1), holidays) # Labour day
self.assertIn(date(2014, 11, 11), holidays) # Armistice
        self.assertIn(date(2014, 11, 27), holidays)  # Thanksgiving
self.assertIn(date(2014, 12, 25), holidays) # XMas
def test_federal_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 1, 1), holidays) # New Year
self.assertIn(date(2015, 5, 25), holidays) # Memorial day
self.assertIn(date(2015, 7, 4), holidays) # Nation day
self.assertIn(date(2015, 9, 7), holidays) # Labour day
self.assertIn(date(2015, 11, 11), holidays) # Armistice
        self.assertIn(date(2015, 11, 26), holidays)  # Thanksgiving
self.assertIn(date(2015, 12, 25), holidays) # XMas
def test_federal_year_2017(self):
holidays = self.cal.holidays_set(2017)
self.assertNotIn(date(2017, 12, 27), holidays) # XMas
def test_columbus_day(self):
holidays = self.cal.holidays_set(2017)
# Columbus Day is included here
self.assertIn(date(2017, 10, 9), holidays)
def test_columbus_day_label(self):
_, label = self.cal.get_columbus_day(2017)
self.assertEqual(label, "Columbus Day")
def test_presidential_day(self):
# Washington's birthday, or sometimes called otherwise, may not
# be included.
holidays = self.cal.holidays_set(2017)
day, _ = self.cal.get_presidents_day(2017)
# Washington's birthday is included here
self.assertIn(day, holidays)
def test_president_day_label(self):
_, label = self.cal.get_presidents_day(2017)
self.assertEqual(label, "Washington's Birthday")
def test_get_inauguration_date(self):
self.assertEqual(
date(2017, 1, 20), self.cal.get_inauguration_date(2017))
# Not an "inauguration day" year
with self.assertRaises(ValueError):
self.cal.get_inauguration_date(2016)
with self.assertRaises(ValueError):
self.cal.get_inauguration_date(2015)
with self.assertRaises(ValueError):
self.cal.get_inauguration_date(2014)
# Shifted to MON, since the 20th was on SUN
self.assertEqual(
date(2013, 1, 21), self.cal.get_inauguration_date(2013))
# 2009, back to normal
self.assertEqual(
date(2009, 1, 20), self.cal.get_inauguration_date(2009))
def test_inauguration_day(self):
# NOTE: 2013 test is not relevant, it's the same day as MLK day.
# NOTE: 1985 test is not relevant, it's the same day as MLK day.
# By default, it's not a public holiday
self.assertNotIn(
self.cal.get_inauguration_date(2017),
self.cal.holidays_set(2017)
)
self.assertNotIn(
self.cal.get_inauguration_date(2009),
self.cal.holidays_set(2009)
)
self.assertNotIn(
self.cal.get_inauguration_date(1957),
self.cal.holidays_set(1957)
)
def test_election_day_inclusion(self):
# By default, election day is not included
for year in range(2013, 2020):
holidays = self.cal.holidays_set(year)
self.assertNotIn(self.cal.get_election_date(year), holidays)
def test_thanksgiving_friday(self):
day, _ = self.cal.get_thanksgiving_friday(2017)
self.assertEqual(day, date(2017, 11, 24))
day, _ = self.cal.get_thanksgiving_friday(2018)
self.assertEqual(day, date(2018, 11, 23))
day, _ = self.cal.get_thanksgiving_friday(2019)
self.assertEqual(day, date(2019, 11, 29))
def test_thanksgiving_friday_label(self):
_, label = self.cal.get_thanksgiving_friday(2017)
self.assertEqual(label, "Thanksgiving Friday")
def test_national_memorial_label(self):
_, label = self.cal.get_national_memorial_day(2017)
self.assertEqual(label, "Memorial Day")
def test_veterans_label(self):
_, label = self.cal.get_veterans_day(2017)
self.assertEqual(label, "Veterans Day")
def test_mardi_gras(self):
year = 2017
day = self.cal.get_fat_tuesday(year)
holidays = self.cal.holidays_set(year)
self.assertNotIn(day, holidays)
class NoColumbus:
"""
Some States don't include Columbus Day:
* Alaska
* Arkansas
* California
* Delaware
"""
def test_columbus_day(self):
# This overrides UnitedStates.test_columbus_day
holidays = self.cal.holidays_set(2017)
# Columbus Day... Not included
self.assertNotIn(date(2017, 10, 9), holidays)
class NoPresidentialDay:
"""
Washington's birthday is not included in Delaware calendar.
"""
def test_presidential_day(self):
# This function *overwrites* UnitedStates.test_presidential_day
holidays = self.cal.holidays_set(2017)
day, _ = self.cal.get_presidents_day(2017)
# Washington's birthday not included here
self.assertNotIn(day, holidays)
class InaugurationDay:
"""
When Inauguration Day is a public holiday
"""
def test_inauguration_day(self):
# This method overwrites UnitedStatesTest.test_inauguration_day
self.assertIn(
self.cal.get_inauguration_date(2017),
self.cal.holidays_set(2017)
)
# NOTE: 2013 test is not relevant, it's the same as MLK Day
self.assertIn(
self.cal.get_inauguration_date(2009),
self.cal.holidays_set(2009)
)
# NOTE: 1985 is not relevant, it's the same as MLK Day
class ElectionDayEvenYears:
"""
    Some states include Election Day in even years.
"""
def test_election_day_inclusion(self):
# This method overwrites UnitedStates.test_election_day_inclusion()
# Election Day is a public holiday on even years.
holidays = self.cal.holidays_set(2014)
self.assertIn(self.cal.get_election_date(2014), holidays)
# Odd year -- not included
holidays = self.cal.holidays_set(2015)
self.assertNotIn(self.cal.get_election_date(2015), holidays)
# Even year
holidays = self.cal.holidays_set(2016)
self.assertIn(self.cal.get_election_date(2016), holidays)
# Odd year -- not included
holidays = self.cal.holidays_set(2017)
self.assertNotIn(self.cal.get_election_date(2017), holidays)
class ElectionDayPresidentialYears:
"""
    Some states include Election Day in presidential years.
"""
def test_election_day_inclusion(self):
        # This method overwrites UnitedStatesTest.test_election_day_inclusion()
        # Election Day is a public holiday in presidential election years.
# not included
holidays = self.cal.holidays_set(2014)
self.assertNotIn(self.cal.get_election_date(2014), holidays)
# not included
holidays = self.cal.holidays_set(2015)
self.assertNotIn(self.cal.get_election_date(2015), holidays)
# 2016 election
holidays = self.cal.holidays_set(2016)
self.assertIn(self.cal.get_election_date(2016), holidays)
# not included
holidays = self.cal.holidays_set(2017)
self.assertNotIn(self.cal.get_election_date(2017), holidays)
class ElectionDayEveryYear:
"""
    Some states include Election Day every year
"""
def test_election_day_inclusion(self):
# Election day is included *every year*
for year in range(2013, 2020):
holidays = self.cal.holidays_set(year)
self.assertIn(self.cal.get_election_date(year), holidays)
class IncludeMardiGras:
"""
Louisiana and some areas (Alabama Counties) include Mardi Gras
"""
def test_mardi_gras(self):
year = 2017
day = self.cal.get_fat_tuesday(year)
holidays = self.cal.holidays(year)
holidays_dict = dict(holidays)
self.assertIn(day, holidays_dict)
self.assertEqual(holidays_dict[day], "Mardi Gras")
class AlabamaTest(UnitedStatesTest):
cal_class = Alabama
def test_mlk_label(self):
# Overwrite UnitedStatesTest.test_mlk_label
# Martin Luther King day is renamed in Alabama
_, label = self.cal.get_martin_luther_king_day(2017)
self.assertEqual(label, "Robert E. Lee/Martin Luther King Birthday")
def test_president_day_label(self):
# Overwrite UnitedStatesTest.test_president_day_label
# Presidents day is renamed in Alabama
_, label = self.cal.get_presidents_day(2017)
self.assertEqual(label, "George Washington/Thomas Jefferson Birthday")
def test_columbus_day_label(self):
# Overwrite UnitedStatesTest.test_columbus_day_label
# Columbus day is renamed in Alabama
_, label = self.cal.get_columbus_day(2017)
self.assertEqual(
label,
"Columbus Day / Fraternal Day / American Indian Heritage Day")
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 4, 28), holidays) # Confederate Memorial Day
self.assertIn(date(2014, 6, 2), holidays) # Jefferson Davis' birthday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 4, 27), holidays) # Confederate Memorial Day
self.assertIn(date(2015, 6, 1), holidays) # Jefferson Davis' birthday
class AlabamaBaldwinCountyTest(IncludeMardiGras, AlabamaTest):
cal_class = AlabamaBaldwinCounty
class AlabamaMobileCountyTest(IncludeMardiGras, AlabamaTest):
cal_class = AlabamaMobileCounty
class AlabamaPerryCountyTest(AlabamaTest):
cal_class = AlabamaPerryCounty
def test_county_year_2017(self):
holidays = self.cal.holidays_set(2017)
self.assertIn(date(2017, 11, 13), holidays) # Obama Day
class AlaskaTest(NoColumbus, UnitedStatesTest):
cal_class = Alaska
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 3, 31), holidays) # Seward's Day
self.assertIn(date(2014, 10, 18), holidays) # Alaska Day
observed = set(map(self.cal.get_observed_date, holidays))
# Alaska Day is on SAT, shift to FRI
self.assertIn(date(2014, 10, 17), observed)
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 3, 30), holidays) # Seward's Day
self.assertIn(date(2015, 10, 18), holidays) # Alaska Day
observed = set(map(self.cal.get_observed_date, holidays))
# Alaska day is on SUN: shifted to MON
self.assertIn(date(2015, 10, 19), observed)
def test_state_year_2017(self):
holidays = self.cal.holidays_set(2017)
self.assertIn(date(2017, 3, 27), holidays) # Seward's Day
self.assertIn(date(2017, 10, 18), holidays) # Alaska Day
observed = set(map(self.cal.get_observed_date, holidays))
# Alaska day is on WED: no shift
self.assertNotIn(date(2017, 10, 19), observed)
self.assertNotIn(date(2017, 10, 17), observed)
class ArizonaTest(UnitedStatesTest):
cal_class = Arizona
def test_mlk_label(self):
# Overwrite UnitedStatesTest.test_mlk_label
# Martin Luther King day is renamed in Arizona
_, label = self.cal.get_martin_luther_king_day(2017)
self.assertEqual(label, "Dr. Martin Luther King Jr./Civil Rights Day")
def test_president_day_label(self):
# Overwrite UnitedStatesTest.test_president_day_label
# Presidents day is renamed in Arizona
_, label = self.cal.get_presidents_day(2017)
self.assertEqual(label, "Lincoln/Washington Presidents' Day")
class ArkansasTest(NoColumbus, UnitedStatesTest):
cal_class = Arkansas
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 12, 24), holidays) # XMas Eve
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 12, 24), holidays) # XMas Eve
def test_christmas_2016(self):
holidays = self.cal.holidays_set(2016)
self.assertIn(date(2016, 12, 24), holidays) # XMas Eve
observed = set(map(self.cal.get_observed_date, holidays))
self.assertIn(date(2016, 12, 23), observed) # XMas Eve shifted
def test_president_day_label(self):
# Overwrite UnitedStatesTest.test_president_day_label
# Presidents day is renamed in Arkansas
_, label = self.cal.get_presidents_day(2017)
self.assertEqual(
label,
"George Washington's Birthday and Daisy Gatson Bates Day"
)
class CaliforniaTest(NoColumbus, UnitedStatesTest):
cal_class = California
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 3, 31), holidays) # Cesar Chavez Day
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 3, 31), holidays) # Cesar Chavez Day
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
def test_state_year_2018(self):
holidays = self.cal.holidays_set(2018)
self.assertIn(date(2018, 3, 31), holidays) # Cesar Chavez Day
# Happens on SAT, but is not shifted
self.assertNotIn(date(2018, 3, 30), holidays)
self.assertIn(date(2018, 11, 23), holidays) # Thanksgiving Friday
def test_state_year_2019(self):
holidays = self.cal.holidays_set(2019)
self.assertIn(date(2019, 3, 31), holidays) # Cesar Chavez Day
self.assertIn(date(2019, 4, 1), holidays) # Cesar Chavez Day Shift
self.assertIn(date(2019, 11, 29), holidays) # Thanksgiving Friday
def test_chavez_no_duplicates(self):
# See issue #528
holidays = self.cal.holidays(2019)
days = [item[0] for item in holidays]
assert days
for day in days:
assert days.count(day) == 1, f"{day} is duplicated"
class CaliforniaEducationTest(CaliforniaTest):
cal_class = CaliforniaEducation
def test_specific_lincoln_birthday(self):
holidays = self.cal.holidays_set(2018)
self.assertIn(date(2018, 2, 12), holidays) # Lincoln's Birthday
holidays = self.cal.holidays_set(2019)
self.assertIn(date(2019, 2, 12), holidays) # Lincoln's Birthday
# Lincoln's Birthday wasn't included in 2009
holidays = self.cal.holidays_set(2009)
self.assertNotIn(date(2009, 2, 12), holidays)
def test_specific_native_american_day(self):
# Native American Day occurs on the 4th MON of September
holidays = self.cal.holidays_set(2018)
self.assertIn(date(2018, 9, 24), holidays) # Native American Day
holidays = self.cal.holidays_set(2019)
self.assertIn(date(2019, 9, 23), holidays) # Native American Day
# Like California, except that it has:
# * No Chavez Day,
# * Includes Columbus day, but relabels it.
# * Adds Lincoln's Birthday.
class CaliforniaBerkeleyTest(UnitedStatesTest):
cal_class = CaliforniaBerkeley
def test_state_year_2014(self):
# Overwriting CaliforniaTest, there's no Chavez Day for Berkeley
holidays = self.cal.holidays_set(2014)
self.assertNotIn(date(2014, 3, 31), holidays) # NO Cesar Chavez Day
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
def test_state_year_2015(self):
# Overwriting CaliforniaTest, there's no Chavez Day for Berkeley
holidays = self.cal.holidays_set(2015)
self.assertNotIn(date(2015, 3, 31), holidays) # NO Cesar Chavez Day
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
def test_specific_lincoln_birthday(self):
holidays = self.cal.holidays_set(2018)
self.assertIn(date(2018, 2, 12), holidays) # Lincoln's Birthday
holidays = self.cal.holidays_set(2019)
self.assertIn(date(2019, 2, 12), holidays) # Lincoln's Birthday
def test_specific_malcomx_birthday(self):
holidays = self.cal.holidays_set(2018)
self.assertIn(date(2018, 5, 19), holidays) # Malcom X Day
holidays = self.cal.holidays_set(2019)
self.assertIn(date(2019, 5, 19), holidays) # Malcom X Day
def test_columbus_day_label(self):
# Overwrite UnitedStatesTest.test_columbus_day_label
_, label = self.cal.get_columbus_day(2019)
self.assertEqual(label, "Indigenous People's Day")
# Like California, except:
# * No Chavez Day,
# * Added Columbus Day
class CaliforniaSanFranciscoTest(UnitedStatesTest):
cal_class = CaliforniaSanFrancisco
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertNotIn(date(2014, 3, 31), holidays) # NO Cesar Chavez Day
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertNotIn(date(2015, 3, 31), holidays) # NO Cesar Chavez Day
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
# Like California, except:
# * No Chavez Day,
# * No Thanksgiving Friday
# * Added Harvey Milk Day
class CaliforniaWestHollywoodTest(NoColumbus, UnitedStatesTest):
cal_class = CaliforniaWestHollywood
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertNotIn(date(2014, 3, 31), holidays) # NO Cesar Chavez Day
self.assertNotIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertNotIn(date(2015, 3, 31), holidays) # NO Cesar Chavez Day
self.assertNotIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
def test_harvey_milk_day(self):
holidays = self.cal.holidays_set(2018)
self.assertIn(date(2018, 5, 22), holidays) # Harvey Milk Day
holidays = self.cal.holidays_set(2019)
self.assertIn(date(2019, 5, 22), holidays) # Harvey Milk Day
class ColoradoTest(UnitedStatesTest):
cal_class = Colorado
    # Colorado has only the federal holidays.
# NOTE: Cesar Chavez Day is an optional holiday
class ConnecticutTest(UnitedStatesTest):
cal_class = Connecticut
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 2, 12), holidays) # Lincoln's Birthday
self.assertIn(date(2014, 4, 18), holidays) # Good Friday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 2, 12), holidays) # Lincoln's Birthday
self.assertIn(date(2015, 4, 3), holidays) # Good Friday
class DelawareTest(ElectionDayEvenYears, NoPresidentialDay, NoColumbus,
UnitedStatesTest):
cal_class = Delaware
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 4, 18), holidays) # Good Friday
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 4, 3), holidays) # Good Friday
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
class DistrictOfColumbiaTest(InaugurationDay, UnitedStatesTest):
cal_class = DistrictOfColumbia
def test_state_year_2017(self):
# President elected in 2016, Inauguration Day is year+1
holidays = self.cal.holidays_set(2017)
self.assertIn(date(2017, 1, 20), holidays) # Inauguration Day
self.assertIn(date(2017, 4, 16), holidays) # Emancipation Day
def test_state_year_2016(self):
holidays = self.cal.holidays_set(2016)
# No Inauguration Day the other years
self.assertNotIn(date(2016, 1, 20), holidays)
self.assertIn(date(2016, 4, 16), holidays) # Emancipation Day
class FloridaBasicTest:
"""
Core Florida tests.
    Compared to the base calendar, it includes Thanksgiving Friday *and* its
    label is renamed.
"""
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
observed = set(map(self.cal.get_observed_date, holidays))
self.assertIn(date(2015, 7, 3), observed)
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
def test_thanksgiving_friday_label(self):
# Overwrite UnitedStatesTest.test_thanksgiving_friday_label
_, label = self.cal.get_thanksgiving_friday(2017)
self.assertEqual(label, "Friday after Thanksgiving")
class FloridaTest(NoColumbus, NoPresidentialDay, FloridaBasicTest,
UnitedStatesTest):
"""
Florida includes all federal holidays except
Washington's Birthday & Columbus day
"""
cal_class = Florida
class FloridaLegalTest(IncludeMardiGras, ElectionDayEveryYear,
FloridaBasicTest, UnitedStatesTest):
"""
Florida Legal Holidays include:
* All Florida State Holidays,
* Mardi Gras,
* Lincoln's Birthday,
* Susan B. Anthony Day,
* Washington's Birthday,
* Good Friday,
* Pascua Florida Day,
* Confederate Memorial Day,
    * Jefferson Davis' Birthday,
* Flag Day
    * Columbus Day renamed as "Columbus Day and Farmers' Day"
* Election Day
"""
cal_class = FloridaLegal
def test_init_warning(self):
msg = (
"Florida's laws separate the definitions between "
"paid versus legal holidays."
)
with pytest.warns(UserWarning, match=msg):
self.cal_class()
def test_specific_lincoln_birthday(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 2, 12), holidays) # Lincoln's Birthday
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 2, 12), holidays) # Lincoln's Birthday
def test_susan_b_anthony_day(self):
holidays = self.cal.holidays_set(2019)
self.assertIn(date(2019, 2, 15), holidays) # Susan B. Anthony Day
def test_good_friday(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 4, 18), holidays) # Good Friday
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 4, 3), holidays) # Good Friday
def test_pascua_florida_day(self):
holidays = self.cal.holidays_set(2019)
self.assertIn(date(2019, 4, 2), holidays) # Pascua Florida Day
def test_confederate_holidays(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 4, 26), holidays) # Confederate Memorial Day
self.assertIn(date(2014, 6, 3), holidays) # Jefferson Davis' birthday
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 4, 26), holidays) # Confederate Memorial Day
self.assertIn(date(2015, 6, 3), holidays) # Jefferson Davis' birthday
holidays = self.cal.holidays_set(2018)
self.assertIn(date(2018, 4, 26), holidays) # Confederate Memorial Day
self.assertIn(date(2018, 6, 3), holidays) # Jefferson Davis' birthday
def test_flag_day(self):
holidays = self.cal.holidays_set(2018)
self.assertIn(date(2018, 6, 14), holidays) # Flag Day
holidays = self.cal.holidays_set(2019)
self.assertIn(date(2019, 6, 14), holidays) # Flag Day
def test_columbus_day_label(self):
_, label = self.cal.get_columbus_day(2017)
self.assertEqual(label, "Columbus Day and Farmers' Day")
class FloridaCircuitCourtsTest(NoColumbus, FloridaBasicTest, UnitedStatesTest):
cal_class = FloridaCircuitCourts
def test_good_friday(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 4, 18), holidays) # Good Friday
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 4, 3), holidays) # Good Friday
def test_rosh_hashanah_2018(self):
# src: https://www.firstjudicialcircuit.org/about-court/court-holidays
rosh_hashanah = self.cal.get_rosh_hashanah(2018)
self.assertEqual(rosh_hashanah, date(2018, 9, 10))
holidays = self.cal.holidays_set(2018)
self.assertIn(rosh_hashanah, holidays)
def test_rosh_hashanah_2019(self):
# src: https://www.firstjudicialcircuit.org/about-court/court-holidays
rosh_hashanah = self.cal.get_rosh_hashanah(2019)
self.assertEqual(rosh_hashanah, date(2019, 9, 30))
holidays = self.cal.holidays_set(2019)
self.assertIn(rosh_hashanah, holidays)
def test_yom_kippur_2018(self):
# src: https://www.firstjudicialcircuit.org/about-court/court-holidays
yom_kippur = self.cal.get_yom_kippur(2018)
self.assertEqual(yom_kippur, date(2018, 9, 19))
holidays = self.cal.holidays_set(2018)
self.assertIn(yom_kippur, holidays)
def test_yom_kippur_2019(self):
# src: https://www.firstjudicialcircuit.org/about-court/court-holidays
yom_kippur = self.cal.get_yom_kippur(2019)
self.assertEqual(yom_kippur, date(2019, 10, 9))
holidays = self.cal.holidays_set(2019)
self.assertIn(yom_kippur, holidays)
class FloridaMiamiDadeTests(FloridaBasicTest, UnitedStatesTest):
cal_class = FloridaMiamiDade
class GeorgiaTest(NoPresidentialDay, UnitedStatesTest):
cal_class = Georgia
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 4, 28), holidays) # Confederate Memorial
self.assertIn(date(2014, 12, 26), holidays) # Washington bday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 4, 27), holidays) # Confederate Memorial
self.assertIn(date(2015, 12, 24), holidays) # Washington bday
def test_washington_birthday(self):
# Sources:
# * https://georgia.gov/popular-topic/observing-state-holidays
# * https://georgia.gov/popular-topic/state-holidays
day, _ = self.cal.get_washington_birthday_december(2020)
self.assertEqual(day, date(2020, 12, 24))
day, _ = self.cal.get_washington_birthday_december(2019)
self.assertEqual(day, date(2019, 12, 24))
day, _ = self.cal.get_washington_birthday_december(2018)
self.assertEqual(day, date(2018, 12, 24))
day, _ = self.cal.get_washington_birthday_december(2017)
self.assertEqual(day, date(2017, 12, 26))
day, _ = self.cal.get_washington_birthday_december(2016)
self.assertEqual(day, date(2016, 12, 27))
day, _ = self.cal.get_washington_birthday_december(2015)
self.assertEqual(day, date(2015, 12, 24))
day, _ = self.cal.get_washington_birthday_december(2014)
self.assertEqual(day, date(2014, 12, 26))
day, _ = self.cal.get_washington_birthday_december(2013)
self.assertEqual(day, date(2013, 12, 24))
day, _ = self.cal.get_washington_birthday_december(2012)
self.assertEqual(day, date(2012, 12, 24))
# Source:
# https://web.archive.org/web/20110927122533/http://www.georgia.gov/00/channel_modifieddate/0,2096,4802_64437763,00.html # noqa
day, _ = self.cal.get_washington_birthday_december(2011)
self.assertEqual(day, date(2011, 12, 26))
# Source:
# https://web.archive.org/web/20100304032739/http://www.georgia.gov/00/channel_modifieddate/0,2096,4802_64437763,00.html # noqa
day, _ = self.cal.get_washington_birthday_december(2010)
self.assertEqual(day, date(2010, 12, 23))
def test_year_2019(self):
holidays = self.cal.holidays_set(2019)
self.assertIn(date(2019, 1, 1), holidays) # New Year
self.assertIn(date(2019, 1, 21), holidays) # MLK
self.assertIn(date(2019, 4, 22), holidays) # state holiday
self.assertIn(date(2019, 5, 27), holidays) # memorial day
        self.assertIn(date(2019, 7, 4), holidays)  # Independence Day
self.assertIn(date(2019, 9, 2), holidays) # Labor day
self.assertIn(date(2019, 10, 14), holidays) # Columbus
self.assertIn(date(2019, 11, 11), holidays) # Veterans
self.assertIn(date(2019, 11, 28), holidays) # Thanksgiving
self.assertIn(date(2019, 11, 29), holidays) # State Holiday
# Washington's Birthday switched to XMAS eve
self.assertIn(date(2019, 12, 24), holidays)
self.assertIn(date(2019, 12, 25), holidays) # XMAS
def test_year_2020(self):
holidays = self.cal.holidays_set(2020)
self.assertIn(date(2020, 1, 1), holidays) # New Year
self.assertIn(date(2020, 1, 20), holidays) # MLK
# State holiday special case
# Confederate memorial day has been shifted to April 10th.
# Reason is unknown, so we're adding a single exception in the
# `get_confederate_day`
self.assertNotIn(date(2020, 4, 26), holidays)
self.assertIn(date(2020, 4, 10), holidays)
self.assertIn(date(2020, 5, 25), holidays) # memorial day
observed = set(map(self.cal.get_observed_date, holidays))
        # Independence Day (OBS)
self.assertIn(date(2020, 7, 3), observed)
        self.assertIn(date(2020, 7, 4), holidays)  # Independence Day
self.assertIn(date(2020, 9, 7), holidays) # Labor day
self.assertIn(date(2020, 10, 12), holidays) # Columbus
self.assertIn(date(2020, 11, 11), holidays) # Veterans
self.assertIn(date(2020, 11, 26), holidays) # Thanksgiving
self.assertIn(date(2020, 11, 27), holidays) # State Holiday
self.assertIn(date(2020, 12, 24), holidays) # Washington B'day
self.assertIn(date(2020, 12, 25), holidays) # XMAS
def test_thanksgiving_friday_label(self):
# Overwrite UnitedStatesTest.test_thanksgiving_friday_label
        # Until 2016, the 4th FRI of November was labelled
# "Robert E. Lee's Birthday (Observed)"
for year in (2013, 2014, 2015,):
_, label = self.cal.get_robert_lee_birthday(year)
self.assertEqual(label, "Robert E. Lee's Birthday (Observed)")
for year in (2016, 2017, 2018, 2019, 2020):
_, label = self.cal.get_robert_lee_birthday(year)
self.assertEqual(label, "State Holiday")
def test_get_confederate_day_label(self):
# Until 2016, it was labelled "Confederate Memorial Day"
for year in (2013, 2014, 2015,):
_, label = self.cal.get_confederate_day(year)
self.assertEqual(label, "Confederate Memorial Day")
for year in (2016, 2017, 2018, 2019, 2020):
_, label = self.cal.get_confederate_day(year)
self.assertEqual(label, "State Holiday")
class HawaiiTest(ElectionDayEvenYears, NoColumbus, UnitedStatesTest):
cal_class = Hawaii
def test_state_year_2017(self):
holidays = self.cal.holidays_set(2017)
observed = set(map(self.cal.get_observed_date, holidays))
self.assertIn(date(2017, 3, 26),
holidays) # Prince Jonah Kuhio Kalanianaole
self.assertIn(date(2017, 3, 27),
observed) # Prince Jonah Kuhio Kalanianaole (shifted)
self.assertIn(date(2017, 4, 14), holidays) # Good Friday
self.assertIn(date(2017, 6, 11), holidays) # Kamehameha
self.assertIn(date(2017, 6, 12), observed) # Kamehameha (shifted)
self.assertIn(date(2017, 8, 18), holidays) # Statehood day
def test_state_year_2018(self):
holidays = self.cal.holidays_set(2018)
self.assertIn(date(2018, 3, 26),
holidays) # Prince Jonah Kuhio Kalanianaole
self.assertIn(date(2018, 3, 30), holidays) # Good Friday
self.assertIn(date(2018, 6, 11), holidays) # Kamehameha
self.assertIn(date(2018, 8, 17), holidays) # Statehood day
# Prince Jonah Kuhio Kalanianaole is not shifted
self.assertNotIn(date(2018, 3, 27), holidays)
# Kamehameha is not shifted
self.assertNotIn(date(2018, 6, 12), holidays)
class IdahoTest(UnitedStatesTest):
cal_class = Idaho
# NOTE: Idaho only has federal holidays.
def test_mlk_label(self):
# Overwrite UnitedStatesTest.test_mlk_label
# Martin Luther King day is renamed in Idaho
_, label = self.cal.get_martin_luther_king_day(2017)
self.assertEqual(
label, "Martin Luther King Jr. / Idaho Human Rights Day")
class IllinoisTest(ElectionDayEvenYears, UnitedStatesTest):
cal_class = Illinois
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 2, 12), holidays) # Lincoln's Birthday
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 2, 12), holidays) # Lincoln's Birthday
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
class ChicagoIllinoisTest(ElectionDayEvenYears, UnitedStatesTest):
cal_class = ChicagoIllinois
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 2, 12), holidays) # Lincoln's Birthday
# Thanksgiving Friday is NOT a holiday in Chicago.
self.assertNotIn(date(2014, 11, 28), holidays)
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 2, 12), holidays) # Lincoln's Birthday
# Thanksgiving Friday is NOT a holiday in Chicago.
self.assertNotIn(date(2015, 11, 27), holidays)
def test_pulaski_day(self):
# Pulaski day is on the first MON in March.
# Source: https://en.wikipedia.org/wiki/Casimir_Pulaski_Day
holidays = self.cal.holidays_set(2018)
self.assertIn(date(2018, 3, 5), holidays)
holidays = self.cal.holidays_set(2019)
self.assertIn(date(2019, 3, 4), holidays)
holidays = self.cal.holidays_set(2020)
self.assertIn(date(2020, 3, 2), holidays)
holidays = self.cal.holidays_set(2021)
self.assertIn(date(2021, 3, 1), holidays)
class IndianaTest(ElectionDayEvenYears, NoPresidentialDay, UnitedStatesTest):
cal_class = Indiana
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 4, 18), holidays) # Good Friday
        # Thanksgiving Friday -- renamed to Lincoln's Birthday
self.assertIn(date(2014, 11, 28), holidays)
        # FIXME: this holiday rule is confusing, probably false
self.assertIn(date(2014, 12, 26), holidays) # Washington bday
        # Primary Election Day only happens in even years
self.assertIn(date(2014, 5, 6), holidays)
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 4, 3), holidays) # Good Friday
        # Thanksgiving Friday -- renamed to Lincoln's Birthday
self.assertIn(date(2015, 11, 27), holidays)
        # FIXME: this holiday rule is confusing, probably false
self.assertIn(date(2015, 12, 24), holidays) # Washington bday
        # Primary Election Day only happens in even years
self.assertNotIn(date(2015, 5, 5), holidays)
def test_primary_election_day(self):
# Source:
# -> https://www.timeanddate.com/holidays/us/primary-election-indiana
# Year 2010
election_day, _ = self.cal.get_primary_election_day(2010)
self.assertEqual(election_day, date(2010, 5, 4))
# Year 2012
election_day, _ = self.cal.get_primary_election_day(2012)
self.assertEqual(election_day, date(2012, 5, 8))
# Year 2014
election_day, _ = self.cal.get_primary_election_day(2014)
self.assertEqual(election_day, date(2014, 5, 6))
# Year 2016
election_day, _ = self.cal.get_primary_election_day(2016)
self.assertEqual(election_day, date(2016, 5, 3))
def test_election_day_label(self):
# Overwrite UnitedStatesTest.test_election_day_label
# Election Day is "General Election Day" in Indiana
_, label = self.cal.get_election_day(2017)
self.assertEqual(label, "General Election Day")
def test_thanksgiving_friday_label(self):
# Overwrite UnitedStatesTest.test_thanksgiving_friday_label
# Lincoln's Birthday is set on Thanksgiving Friday
_, label = self.cal.get_thanksgiving_friday(2017)
self.assertEqual(label, "Lincoln's Birthday")
@skip("Confusing Rule, it's impossible to decide")
def test_washington_birthday(self):
# Sources:
# http://www.in.gov/spd/files/2018_Holidays.pdf
# http://www.in.gov/spd/files/2017_Holidays.pdf
# http://www.in.gov/spd/files/2016_Holidays.pdf
# Year 2016, shifted to the 26th
washington_bday = self.cal.get_washington_birthday_december(2016)
self.assertEqual(date(2016, 12, 26), washington_bday)
# Year 2017, shifted to the 26th
washington_bday = self.cal.get_washington_birthday_december(2017)
self.assertEqual(date(2017, 12, 26), washington_bday)
# Year 2018, back to XMas Eve
washington_bday = self.cal.get_washington_birthday_december(2018)
self.assertEqual(date(2018, 12, 24), washington_bday)
class IowaTest(NoPresidentialDay, NoColumbus, UnitedStatesTest):
cal_class = Iowa
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
observed = set(map(self.cal.get_observed_date, holidays))
self.assertIn(date(2015, 7, 3), observed)
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
class KansasTest(NoPresidentialDay, NoColumbus, UnitedStatesTest):
cal_class = Kansas
class KentuckyTest(NoPresidentialDay, NoColumbus, UnitedStatesTest):
cal_class = Kentucky
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 4, 18), holidays) # Good Friday
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
self.assertIn(date(2014, 12, 24), holidays) # XMas Eve
self.assertIn(date(2014, 12, 31), holidays) # NY Eve
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 4, 3), holidays) # Good Friday
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
self.assertIn(date(2015, 12, 24), holidays) # XMas Eve
self.assertIn(date(2015, 12, 31), holidays) # NY Eve
class LouisianaTest(IncludeMardiGras, NoColumbus, ElectionDayEvenYears,
UnitedStatesTest):
cal_class = Louisiana
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 4, 18), holidays) # Good Friday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 4, 3), holidays) # Good Friday
class MaineTest(UnitedStatesTest):
cal_class = Maine
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 4, 21), holidays) # Patriot's day
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 4, 20), holidays) # Patriot's day
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
class MarylandTest(ElectionDayPresidentialYears, UnitedStatesTest):
cal_class = Maryland
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
# Thanksgiving Friday == Native American Heritage Day
self.assertIn(date(2014, 11, 28), holidays)
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
# Thanksgiving Friday == Native American Heritage Day
self.assertIn(date(2015, 11, 27), holidays)
def test_thanksgiving_friday_label(self):
# Overwrite UnitedStatesTest.test_thanksgiving_friday_label
# Thanksgiving Friday label changed to "Native American Heritage Day"
_, label = self.cal.get_thanksgiving_friday(2017)
self.assertEqual(label, "Native American Heritage Day")
class MassachusettsTest(UnitedStatesTest):
cal_class = Massachusetts
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 4, 21), holidays) # Patriot's day
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 4, 20), holidays) # Patriot's day
class SuffolkCountyMassachusettsTest(MassachusettsTest):
cal_class = SuffolkCountyMassachusetts
def test_county_year_2018(self):
holidays = self.cal.holidays_set(2018)
self.assertIn(date(2018, 3, 17), holidays) # Evacuation Day
self.assertIn(date(2018, 6, 17), holidays) # Bunker Hill Day
def test_county_year_2019(self):
holidays = self.cal.holidays_set(2019)
self.assertIn(date(2019, 3, 17), holidays) # Evacuation Day
self.assertIn(date(2019, 6, 17), holidays) # Bunker Hill Day
class MichiganTest(NoColumbus, ElectionDayEvenYears, UnitedStatesTest):
cal_class = Michigan
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
self.assertIn(date(2014, 12, 24), holidays) # XMas Eve
self.assertIn(date(2014, 12, 31), holidays) # New Years Eve
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
self.assertIn(date(2015, 12, 24), holidays) # XMas Eve
self.assertIn(date(2015, 12, 31), holidays) # New Years Eve
def test_state_year_2017(self):
holidays = self.cal.holidays_set(2017)
self.assertIn(date(2017, 11, 24), holidays) # Thanksgiving Friday
# XMas Eve
self.assertIn(date(2017, 12, 24), holidays)
        # XMas Eve is on SUN, shifted to Dec 22nd
self.assertIn(date(2017, 12, 22), holidays)
# New Years Eve
self.assertIn(date(2017, 12, 31), holidays)
        # New Year's Eve is on SUN, shifted to Dec 29th
self.assertIn(date(2017, 12, 29), holidays)
class MinnesotaTest(NoColumbus, UnitedStatesTest):
cal_class = Minnesota
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
class MississippiTest(NoColumbus, UnitedStatesTest):
cal_class = Mississippi
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 4, 28), holidays) # Confederate Memorial Day
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 4, 27), holidays) # Confederate Memorial Day
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
def test_mlk_label(self):
# Overwrite UnitedStatesTest.test_mlk_label
# Martin Luther King day is renamed in Mississippi
_, label = self.cal.get_martin_luther_king_day(2017)
self.assertEqual(
label, "Martin Luther King's and Robert E. Lee's Birthdays")
def test_national_memorial_label(self):
# Overwrite UnitedStatesTest.test_national_memorial_label
# National Memorial Day is renamed in Mississippi
_, label = self.cal.get_national_memorial_day(2017)
self.assertEqual(
label, "National Memorial Day / Jefferson Davis Birthday")
def test_veterans_label(self):
# Overwrite UnitedStatesTest.test_veterans_label
# Veterans Day is renamed in Mississippi
_, label = self.cal.get_veterans_day(2017)
self.assertEqual(label, "Armistice Day (Veterans Day)")
class MissouriTest(UnitedStatesTest):
cal_class = Missouri
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 2, 12), holidays) # Lincoln's Birthday
self.assertIn(date(2014, 5, 8), holidays) # Truman Day
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 2, 12), holidays) # Lincoln's Birthday
self.assertIn(date(2015, 5, 8), holidays) # Truman Day
class MontanaTest(ElectionDayEvenYears, UnitedStatesTest):
cal_class = Montana
    # NOTE: Montana includes only the federal holidays + General Election Day
class NebraskaTest(UnitedStatesTest):
cal_class = Nebraska
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 4, 25), holidays) # Arbor Day
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 4, 24), holidays) # Arbor Day
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
class NevadaTest(NoColumbus, UnitedStatesTest):
cal_class = Nevada
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 10, 31), holidays) # Nevada Day
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 10, 30), holidays) # Nevada Day
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
def test_thanksgiving_friday_label(self):
# Overwrite UnitedStatesTest.test_thanksgiving_friday_label
# Thanksgiving Friday Label is Family Day in Nevada
_, label = self.cal.get_thanksgiving_friday(2017)
self.assertEqual(label, "Family Day")
class NewHampshireTest(UnitedStatesTest):
cal_class = NewHampshire
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
def test_mlk_label(self):
# Overwrite UnitedStatesTest.test_mlk_label
# Martin Luther King day is renamed in New Hampshire
_, label = self.cal.get_martin_luther_king_day(2017)
self.assertEqual(label, "Martin Luther King, Jr. Civil Rights Day")
class NewJerseyTest(ElectionDayEveryYear, UnitedStatesTest):
cal_class = NewJersey
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 4, 18), holidays) # Good Friday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 4, 3), holidays) # Good Friday
class NewMexicoTest(NoPresidentialDay, UnitedStatesTest):
cal_class = NewMexico
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
def test_thanksgiving_friday_label(self):
# Overwrite UnitedStatesTest.test_thanksgiving_friday_label
        # New Mexico celebrates Presidents' Day on Thanksgiving Friday
_, label = self.cal.get_thanksgiving_friday(2017)
self.assertEqual(label, "Presidents' Day")
class NewYorkTest(ElectionDayEveryYear, UnitedStatesTest):
cal_class = NewYork
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 2, 12), holidays) # Lincoln's Birthday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 2, 12), holidays) # Lincoln's Birthday
class NorthCarolinaTest(NoPresidentialDay, NoColumbus, UnitedStatesTest):
cal_class = NorthCarolina
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 4, 18), holidays) # Good Friday
self.assertIn(date(2014, 11, 27), holidays) # Thanksgiving Thursday
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
self.assertIn(date(2014, 12, 24), holidays) # XMas Eve
self.assertIn(date(2014, 12, 26), holidays) # Boxing Day
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 4, 3), holidays) # Good Friday
self.assertIn(date(2015, 11, 26), holidays) # Thanksgiving Thursday
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
self.assertIn(date(2015, 12, 24), holidays) # Xmas Eve
self.assertIn(date(2015, 12, 26), holidays) # Boxing Day
def test_federal_year_2017(self):
        # North Carolina's 2017 holidays differ from the plain federal set.
holidays = self.cal.holidays_set(2017)
self.assertIn(date(2017, 4, 14), holidays) # Good Friday
self.assertIn(date(2017, 11, 23), holidays) # Thanksgiving Thursday
self.assertIn(date(2017, 11, 24), holidays) # Thanksgiving Friday
self.assertIn(date(2017, 12, 24), holidays) # Xmas Eve
self.assertIn(date(2017, 12, 25), holidays) # Xmas Day
self.assertIn(date(2017, 12, 26), holidays) # Day after Xmas
self.assertIn(date(2017, 12, 27), holidays) # Xmas Shift
def test_state_year_2016_xmas(self):
# XMAS falls on SUN
holidays = self.cal.holidays_set(2016)
self.assertIn(date(2016, 12, 23), holidays) # Day 1 - FRI
self.assertIn(date(2016, 12, 26), holidays) # Day 2 - MON
self.assertIn(date(2016, 12, 27), holidays) # Day 3 - TUE
# 22nd and 28th are not included
self.assertNotIn(date(2016, 12, 22), holidays) # THU
self.assertNotIn(date(2016, 12, 28), holidays) # WED
def test_state_year_2017_xmas(self):
# XMAS falls on MON
holidays = self.cal.holidays_set(2017)
self.assertIn(date(2017, 12, 25), holidays) # Day 1 - MON
self.assertIn(date(2017, 12, 26), holidays) # Day 2 - TUE
self.assertIn(date(2017, 12, 27), holidays) # Day 3 - WED
        # 22nd, 23rd and 28th are not included
self.assertNotIn(date(2017, 12, 22), holidays) # FRI
self.assertNotIn(date(2017, 12, 23), holidays) # SAT
self.assertNotIn(date(2017, 12, 28), holidays) # THU
def test_state_year_2018_xmas(self):
# XMAS falls on TUE
holidays = self.cal.holidays_set(2018)
self.assertIn(date(2018, 12, 24), holidays) # Day 1 - MON
self.assertIn(date(2018, 12, 25), holidays) # Day 2 - TUE
self.assertIn(date(2018, 12, 26), holidays) # Day 3 - WED
# No shift:
self.assertNotIn(date(2018, 12, 23), holidays)
self.assertNotIn(date(2018, 12, 27), holidays)
def test_state_year_2019_xmas(self):
# XMAS falls on WED
holidays = self.cal.holidays_set(2019)
self.assertIn(date(2019, 12, 24), holidays) # Day 1 - TUE
self.assertIn(date(2019, 12, 25), holidays) # Day 2 - WED
self.assertIn(date(2019, 12, 26), holidays) # Day 3 - THU
# No shift:
self.assertNotIn(date(2019, 12, 23), holidays)
self.assertNotIn(date(2019, 12, 27), holidays)
def test_state_year_2020_xmas(self):
# XMAS falls on FRI
holidays = self.cal.holidays_set(2020)
self.assertIn(date(2020, 12, 24), holidays) # Day 1 - THU
self.assertIn(date(2020, 12, 25), holidays) # Day 2 - FRI
self.assertIn(date(2020, 12, 28), holidays) # Day 3 - MON
# 23rd and 29th are not included
self.assertNotIn(date(2020, 12, 23), holidays)
self.assertNotIn(date(2020, 12, 29), holidays)
def test_state_year_2021_xmas(self):
# XMAS falls on SAT
holidays = self.cal.holidays_set(2021)
self.assertIn(date(2021, 12, 23), holidays) # Day 1 - THU
self.assertIn(date(2021, 12, 24), holidays) # Day 2 - FRI
self.assertIn(date(2021, 12, 27), holidays) # Day 3 - MON
        # 22nd and 28th are not included
self.assertNotIn(date(2021, 12, 22), holidays)
self.assertNotIn(date(2021, 12, 28), holidays)
class NorthDakotaTest(NoColumbus, UnitedStatesTest):
cal_class = NorthDakota
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 4, 18), holidays) # Good Friday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 4, 3), holidays) # Good Friday
class OhioTest(UnitedStatesTest):
cal_class = Ohio
    # Ohio includes only the federal holidays.
    # The Wikipedia page says it also includes Election Day, but no official
    # document confirms this.
class OklahomaTest(NoColumbus, UnitedStatesTest):
cal_class = Oklahoma
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
self.assertIn(date(2014, 12, 26), holidays) # Boxing day
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
self.assertIn(date(2015, 12, 26), holidays) # Boxing day
class OregonTest(NoColumbus, UnitedStatesTest):
cal_class = Oregon
# NOTE: Oregon has only the federal holidays, except Columbus Day
class PennsylvaniaTest(ElectionDayEveryYear, UnitedStatesTest):
cal_class = Pennsylvania
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 4, 18), holidays) # Good Friday
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 4, 3), holidays) # Good Friday
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
class RhodeIslandTest(NoPresidentialDay, ElectionDayEvenYears,
UnitedStatesTest):
cal_class = RhodeIsland
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 8, 11), holidays) # Victory Day
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 8, 10), holidays) # Victory Day
class SouthCarolinaTest(NoColumbus, UnitedStatesTest):
cal_class = SouthCarolina
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
observed = set(map(self.cal.get_observed_date, holidays))
# Confederate Memorial Day
self.assertIn(date(2014, 5, 10), holidays)
# Observed here, it falls on SAT
self.assertIn(date(2014, 5, 9), observed)
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
self.assertIn(date(2014, 12, 24), observed) # XMas Eve
self.assertIn(date(2014, 12, 26), observed) # Boxing day
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 5, 10), holidays) # Confederate Memorial Day
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
self.assertIn(date(2015, 12, 24), holidays) # Xmas Eve
self.assertIn(date(2015, 12, 26), holidays) # Boxing day
@skip("No clear rules for implementing the XMas Eve shift")
def test_state_year_2017(self):
holidays = self.cal.holidays_set(2017)
self.assertIn(date(2017, 5, 10), holidays) # Confederate Memorial Day
self.assertIn(date(2017, 11, 23), holidays) # Thanksgiving Friday
# Xmas Eve falls on SUN
self.assertIn(date(2017, 12, 24), holidays)
# Christmas Eve observed here
self.assertIn(date(2017, 12, 22), holidays)
self.assertIn(date(2017, 12, 26), holidays) # Boxing day
class SouthDakotaTest(UnitedStatesTest):
cal_class = SouthDakota
    # NOTE: South Dakota has all federal holidays, including Columbus Day,
    # but it is renamed "Native Americans Day".
def test_columbus_day_label(self):
# Overwrite UnitedStatesTest.test_columbus_day_label
_, label = self.cal.get_columbus_day(2017)
self.assertEqual(label, "Native Americans Day")
class TennesseeTest(NoColumbus, UnitedStatesTest):
cal_class = Tennessee
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 4, 18), holidays) # Good Friday
self.assertIn(date(2014, 12, 24), holidays) # XMas Eve
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 4, 3), holidays) # Good Friday
self.assertIn(date(2015, 12, 24), holidays) # XMas Eve
class TexasBaseTest(NoColumbus, UnitedStatesTest):
cal_class = TexasBase
# NOTE: "Stock" Texas doesn't include Columbus Day,
# state holidays are handled differently
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
# Check that state holidays are not included here
self.assertNotIn(date(2014, 1, 19), holidays) # Confederate Heroes Day
self.assertNotIn(date(2014, 3, 2), holidays) # Texas Independence Day
self.assertNotIn(date(2014, 4, 21), holidays) # San Jacinto Day
self.assertNotIn(date(2014, 6, 19), holidays) # Emancipation Day
self.assertNotIn(date(2014, 8, 27), holidays) # Lyndon B. Johnson Day
self.assertNotIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
self.assertNotIn(date(2014, 12, 24), holidays) # XMas Eve
self.assertNotIn(date(2014, 12, 26), holidays) # Boxing day
class TexasTest(TexasBaseTest):
cal_class = Texas
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 1, 19), holidays) # Confederate Heroes Day
self.assertIn(date(2014, 3, 2), holidays) # Texas Independence Day
self.assertIn(date(2014, 4, 21), holidays) # San Jacinto Day
self.assertIn(date(2014, 6, 19), holidays) # Emancipation Day
self.assertIn(date(2014, 8, 27), holidays) # Lyndon B. Johnson Day
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
self.assertIn(date(2014, 12, 24), holidays) # XMas Eve
self.assertIn(date(2014, 12, 26), holidays) # Boxing day
class UtahTest(UnitedStatesTest):
cal_class = Utah
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 7, 24), holidays) # Pioneer Day
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 7, 24), holidays) # Pioneer Day
class VermontTest(NoColumbus, UnitedStatesTest):
cal_class = Vermont
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
observed = set(map(self.cal.get_observed_date, holidays))
self.assertIn(date(2014, 3, 4), holidays) # Town Meeting Day
self.assertIn(date(2014, 8, 16), holidays) # Bennington Battle Day
self.assertIn(date(2014, 8, 15), observed) # Shifted to FRI
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
observed = set(map(self.cal.get_observed_date, holidays))
self.assertIn(date(2015, 3, 3), holidays) # Town Meeting Day
self.assertIn(date(2015, 8, 16), holidays) # Bennington Battle Day
self.assertIn(date(2015, 8, 17), observed) # Shifted to MON
class VirginiaTest(UnitedStatesTest):
cal_class = Virginia
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 1, 17), holidays) # Lee-Jackson Day
self.assertIn(date(2014, 11, 26), holidays) # Thanksgiving Wednesday
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
self.assertIn(date(2014, 12, 24), holidays) # XMas Eve
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 1, 16), holidays) # Lee-Jackson Day
self.assertIn(date(2015, 11, 25), holidays) # Thanksgiving Wednesday
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
self.assertIn(date(2015, 12, 24), holidays) # XMas Eve
def test_exclude_thanksgiving_wednesday(self):
# Sub class
class VirginiaExclude(Virginia):
include_thanksgiving_wednesday = False
cal = VirginiaExclude()
holidays = cal.holidays_set(2015)
# Not Thanksgiving Wednesday
self.assertNotIn(date(2015, 11, 25), holidays)
def test_president_day_label(self):
# Overwrite UnitedStatesTest.test_president_day_label
_, label = self.cal.get_presidents_day(2017)
self.assertEqual(label, "George Washington Day")
def test_inauguration_day(self):
# Overwriting this test: in 2017, this day is a public holiday for
# Virginia State: Lee-Jackson Day
# NOTE: 2013 test is not relevant, it's the same day as MLK day.
# NOTE: 1985 test is not relevant, it's the same day as MLK day.
# By default, it's not a public holiday
self.assertNotIn(
self.cal.get_inauguration_date(2009),
self.cal.holidays_set(2009)
)
self.assertNotIn(
self.cal.get_inauguration_date(1957),
self.cal.holidays_set(1957)
)
class WashingtonTest(NoColumbus, UnitedStatesTest):
cal_class = Washington
# NOTE: Washington State includes all federal holidays, except Columbus Day
class WestVirginiaTest(ElectionDayEvenYears, UnitedStatesTest):
cal_class = WestVirginia
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 6, 20), holidays) # West Virginia Day
self.assertIn(date(2014, 11, 28), holidays) # Thanksgiving Friday
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 6, 20), holidays) # West Virginia Day
self.assertIn(date(2015, 11, 27), holidays) # Thanksgiving Friday
def test_state_half_holidays_base(self):
# Using the "stock" calendar
holidays = self.cal.holidays_set(2015)
self.assertNotIn(date(2015, 12, 24), holidays) # XMas Eve
self.assertNotIn(date(2015, 12, 31), holidays) # NYE
def test_state_half_holidays_included(self):
class WestVirginiaInclude(WestVirginia):
west_virginia_include_christmas_eve = True
west_virginia_include_nye = True
cal = WestVirginiaInclude()
holidays = cal.holidays_set(2015)
self.assertIn(date(2015, 12, 24), holidays) # XMas Eve
self.assertIn(date(2015, 12, 31), holidays) # NYE
# Test that these days are not shifted
# In 2016, XMas Eve and NYE are on SAT
holidays = cal.holidays_set(2016)
self.assertIn(date(2016, 12, 24), holidays) # XMas Eve
self.assertIn(date(2016, 12, 31), holidays) # NYE
self.assertNotIn(date(2016, 12, 23), holidays) # NO SHIFT for XMas Eve
self.assertNotIn(date(2016, 12, 30), holidays) # NO SHIFT for NYE
def test_election_day_label(self):
# Overwrite UnitedStatesTest.test_election_day_label
_, label = self.cal.get_election_day(2017)
self.assertEqual(label, "Election Day / Susan B. Anthony Day")
class WisconsinTest(NoPresidentialDay, NoColumbus, UnitedStatesTest):
cal_class = Wisconsin
def test_state_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertIn(date(2014, 12, 24), holidays) # Xmas Eve
self.assertIn(date(2014, 12, 31), holidays) # New Years Eve
def test_state_year_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 12, 24), holidays) # Xmas Eve
self.assertIn(date(2015, 12, 31), holidays) # New Years Eve
class WyomingTest(UnitedStatesTest):
cal_class = Wyoming
    # NOTE: Wyoming has only the federal holidays
def test_mlk_label(self):
# Overwrite UnitedStatesTest.test_mlk_label
_, label = self.cal.get_martin_luther_king_day(2017)
self.assertEqual(
label,
"Martin Luther King, Jr. / Wyoming Equality Day"
)
class AmericanSamoaTest(UnitedStatesTest):
cal_class = AmericanSamoa
def test_family_day(self):
holidays = self.cal.holidays_set(2019)
self.assertIn(date(2019, 12, 26), holidays) # Family Day
def test_flag_day(self):
holidays = self.cal.holidays_set(2019)
self.assertIn(date(2019, 4, 17), holidays) # Flag Day
def test_family_day_label(self):
holidays = self.cal.holidays(2019)
holidays_dict = dict(holidays)
self.assertEqual(holidays_dict[date(2019, 12, 26)], "Family Day")
class GuamTest(UnitedStatesTest):
cal_class = Guam
def test_state_year_2019(self):
holidays = self.cal.holidays_set(2019)
# Guam History and Chamorro Heritage Day
self.assertIn(date(2019, 3, 7), holidays)
self.assertIn(date(2019, 7, 21), holidays) # Liberation Day
self.assertIn(date(2019, 11, 2), holidays) # All Souls Day
self.assertIn(date(2019, 12, 8), holidays) # Lady of Camarin Day
    def test_state_year_2020(self):
holidays = self.cal.holidays_set(2020)
# Guam History and Chamorro Heritage Day
self.assertIn(date(2020, 3, 7), holidays)
self.assertIn(date(2020, 7, 21), holidays) # Liberation Day
self.assertIn(date(2020, 11, 2), holidays) # All Souls Day
self.assertIn(date(2020, 12, 8), holidays) # Lady of Camarin Day
def test_lady_of_camarin_label(self):
holidays = self.cal.holidays(2019)
holidays_dict = dict(holidays)
self.assertEqual(
holidays_dict[date(2019, 12, 8)],
"Lady of Camarin Day"
)
class NormalShiftTestCase(UnitedStatesTest):
# Using a fake calendar here
class NormalShiftUnitedStates(UnitedStates):
"Normal Shift Fake United State calendar"
include_christmas_eve = True
cal_class = NormalShiftUnitedStates
def test_shift_2015(self):
# Test a normal shift on 4th of July.
# 2015: Happens on a Saturday, observed on FRI
holidays = self.cal.holidays(2015)
holiday_dict = dict(holidays)
fourth_july = date(2015, 7, 4)
observed = date(2015, 7, 3)
self.assertIn(fourth_july, holiday_dict)
self.assertEqual(holiday_dict[fourth_july], "Independence Day")
self.assertNotIn(observed, holiday_dict)
def test_shift_2010(self):
# Test a normal shift on 4th of July.
# 2010: Happens on a SUN, observed on MON
holidays = self.cal.holidays(2010)
holiday_dict = dict(holidays)
fourth_july = date(2010, 7, 4)
observed = date(2010, 7, 5)
self.assertIn(fourth_july, holiday_dict)
self.assertEqual(holiday_dict[fourth_july], "Independence Day")
self.assertNotIn(observed, holiday_dict)
def test_new_years_shift(self):
        # If January 1st *of the year after* happens on SAT, add New Year's Eve
holidays = self.cal.holidays(2010)
holiday_dict = dict(holidays)
new_years_eve = date(2010, 12, 31)
self.assertNotIn(new_years_eve, holiday_dict)
# The year after, it's not shifted
holidays = self.cal.holidays_set(2011)
new_years_eve = date(2011, 12, 31)
self.assertNotIn(new_years_eve, holidays)
def test_christmas_extra_shift_2010(self):
        # XMas Eve is included *and* XMas falls on SAT.
# So you have the following holidays:
# * 24th & 25th (XMas Eve and XMas day)
# * 27th (XMas Shift)
        # * 23rd (XMas Eve shifted to THU)
holidays = self.cal.holidays(2010)
holiday_dict = dict(holidays)
dec_24th = date(2010, 12, 24)
dec_25th = date(2010, 12, 25)
for day in (dec_24th, dec_25th):
self.assertIn(day, holiday_dict)
self.assertEqual(holiday_dict[dec_24th], "Christmas Eve")
self.assertEqual(holiday_dict[dec_25th], "Christmas Day")
def test_christmas_extra_shift_2006(self):
        # XMas Eve is included *and* XMas falls on MON.
# So you have the following holidays:
# * 24th & 25th (XMas Eve and XMas day)
# * 26th (XMas Shift)
holidays = self.cal.holidays(2006)
holiday_dict = dict(holidays)
dec_24th = date(2006, 12, 24)
dec_25th = date(2006, 12, 25)
for day in (dec_24th, dec_25th):
self.assertIn(day, holiday_dict)
self.assertEqual(holiday_dict[dec_24th], "Christmas Eve")
self.assertEqual(holiday_dict[dec_25th], "Christmas Day")
class NormalShiftTestCaseExceptions(UnitedStatesTest):
# Using a fake calendar here
class NormalShiftUnitedStatesExceptions(UnitedStates):
"Normal Shift Fake United State calendar"
shift_exceptions = (
(7, 4), # Month/Day == Fourth of July.
)
cal_class = NormalShiftUnitedStatesExceptions
def test_shift_2015(self):
# Test a normal shift on 4th of July.
# 2015: Happens on a Saturday, not shifted
holidays = self.cal.holidays(2015)
holiday_dict = dict(holidays)
fourth_july = date(2015, 7, 4)
observed = date(2015, 7, 3)
self.assertIn(fourth_july, holiday_dict)
self.assertEqual(holiday_dict[fourth_july], "Independence Day")
self.assertNotIn(observed, holiday_dict)
def test_shift_2010(self):
# Test a normal shift on 4th of July.
# 2010: Happens on a SUN, not shifted
holidays = self.cal.holidays(2010)
holiday_dict = dict(holidays)
fourth_july = date(2010, 7, 4)
observed = date(2010, 7, 5)
self.assertIn(fourth_july, holiday_dict)
self.assertEqual(holiday_dict[fourth_july], "Independence Day")
self.assertNotIn(observed, holiday_dict)
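# Illustrative sketch (not part of the original test suite): the pattern used
# throughout this module is to assemble a state test case by mixing the
# override classes defined above (NoColumbus, NoPresidentialDay,
# ElectionDayEvenYears, IncludeMardiGras, ...) into ``UnitedStatesTest`` and
# pointing ``cal_class`` at the calendar under test. ``SomeStateCalendar``
# below is a hypothetical placeholder, not a real calendra class; the
# assertions mirror those already used in the tests above.
#
# class SomeStateTest(NoColumbus, ElectionDayEvenYears, UnitedStatesTest):
#     cal_class = SomeStateCalendar  # hypothetical calendar class
#
#     def test_state_year_2014(self):
#         holidays = self.cal.holidays_set(2014)
#         self.assertIn(date(2014, 11, 28), holidays)  # Thanksgiving Friday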
|
jaraco/calendra
|
calendra/tests/test_usa.py
|
Python
|
mit
| 77,324
|
[
"COLUMBUS"
] |
d7d7cd8b29df335e7efa7e2380e02679e51a881eb55480278efbd8a3461d7c94
|
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import re
import socket
import sys
import time
import xml.etree.ElementTree
from ..compat import (
compat_cookiejar,
compat_http_client,
compat_urllib_error,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
)
from ..utils import (
clean_html,
compiled_regex_type,
ExtractorError,
float_or_none,
int_or_none,
RegexNotFoundError,
sanitize_filename,
unescapeHTML,
)
_NO_DEFAULT = object()
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url Mandatory. The URL of the video file
* ext Will be calculated from url if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height,
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "m3u8" or so.
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
* language_preference Is this in the correct requested
language?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_referer HTTP Referer header value to set.
* http_method HTTP method to use for the download.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* http_post_data Additional data to send with a POST
request.
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "url"
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height"},
deprecated)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
location: Physical location where the video was filmed.
subtitles: The subtitle file contents as a dictionary in the format
{language: subtitles}.
duration: Length of the video in seconds, as an integer.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
comment_count: Number of comments on the video
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The url to the video webpage, if given to youtube-dl it
should allow getting the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "title" and "id" attributes with the same
semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
form a single show, for example multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return m.group('id')
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
if not self._ready:
self._real_initialize()
self._ready = True
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
self.initialize()
return self._real_extract(url)
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return cls.__name__[:-2]
@property
def IE_NAME(self):
return type(self).__name__[:-2]
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
""" Returns the response handle """
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
""" Returns a tuple (page content as string, URL handle) """
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal)
return (content, urlh)
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
if self._downloader.params.get('dump_intermediate_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
self.to_screen('Dumping request to ' + url)
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
basen = '%s_%s' % (video_id, url)
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if os.name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
if ('<title>Access to this site is blocked</title>' in content and
'Websense' in content[:512]):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
'Websense information URL', default=None)
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
return content
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
""" Returns the data of the page as a string """
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal)
if res is False:
return res
else:
content, _ = res
return content
def _download_xml(self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True):
"""Return the xml as an xml.etree.ElementTree.Element"""
xml_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal)
if xml_string is False:
return xml_string
if transform_source:
xml_string = transform_source(xml_string)
return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8'))
def _download_json(self, url_or_request, video_id,
note='Downloading JSON metadata',
errnote='Unable to download JSON metadata',
transform_source=None,
fatal=True):
json_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal)
if (not fatal) and json_string is False:
return None
return self._parse_json(
json_string, video_id, transform_source=transform_source, fatal=fatal)
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None):
"""Returns a url that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
return video_info
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
def _search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single pattern or a
list of patterns, returning the first matching group.
In case of failure, return a default value, emit a warning or raise a
RegexNotFoundError, depending on fatal, specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if os.name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not _NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s; '
'please report this issue on http://yt-dl.org/bug' % _name)
return None
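# Added usage sketch (not part of the original source); the pattern and field name
# are hypothetical:
#     title = self._search_regex(
#         r'<h1 class="title">(?P<t>[^<]+)</h1>', webpage, 'title',
#         default=None, group='t')
# With a default the call never raises; with fatal=True and no default, a failed
# match raises RegexNotFoundError.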
def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_login_info(self):
"""
Get the login info as (username, password)
It will look in the netrc file using the _NETRC_MACHINE value
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
username = None
password = None
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get('username', None) is not None:
username = downloader_params['username']
password = downloader_params['password']
elif downloader_params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(self._NETRC_MACHINE)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))
return (username, password)
def _get_tfa_info(self):
"""
Get the two-factor authentication info
TODO - asking the user will be required for SMS/phone verification;
currently this just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor', None) is not None:
return downloader_params['twofactor']
return None
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^>]+?)"|\'([^>]+?)\')'
property_re = r'(?:name|property)=[\'"]og:%s[\'"]' % re.escape(prop)
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
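# Added illustrative note (not part of the original source): _og_regexes('title')
# matches OpenGraph tags such as
#     <meta property="og:title" content="Some title" />
# with the attributes in either order, capturing the value of content.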
def _og_search_property(self, prop, html, name=None, **kargs):
if name is None:
name = 'OpenGraph %s' % prop
escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail url', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if display_name is None:
display_name = name
return self._html_search_regex(
r'''(?ix)<meta
(?=[^>]+(?:itemprop|name|property)=(["\']?)%s\1)
[^>]+content=(["\'])(?P<content>.*?)\2''' % re.escape(name),
html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
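# Added illustrative note (not part of the original source): the regex above looks
# for the standard RTA adult-content label,
#     <meta name="rating" content="RTA-5042-1996-1400-1577-RTA">
# and maps its presence to an age limit of 18.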
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower(), None)
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
def _sort_formats(self, formats):
if not formats:
raise ExtractorError('No video formats found')
def _formats_key(f):
# TODO remove the following workaround
from ..utils import determine_ext
if not f.get('ext') and 'url' in f:
f['ext'] = determine_ext(f['url'])
preference = f.get('preference')
if preference is None:
proto = f.get('protocol')
if proto is None:
proto = compat_urllib_parse_urlparse(f.get('url', '')).scheme
preference = 0 if proto in ['http', 'https'] else -0.1
if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
preference -= 0.5
if f.get('vcodec') == 'none': # audio only
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
except ValueError:
audio_ext_preference = -1
else:
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
ext_preference = -1
audio_ext_preference = 0
return (
preference,
f.get('language_preference') if f.get('language_preference') is not None else -1,
f.get('quality') if f.get('quality') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
ext_preference,
f.get('tbr') if f.get('tbr') is not None else -1,
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id'),
)
formats.sort(key=_formats_key)
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest')
formats = []
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
for i, media_el in enumerate(media_nodes):
tbr = int_or_none(media_el.attrib.get('bitrate'))
format_id = 'f4m-%d' % (i if tbr is None else tbr)
formats.append({
'format_id': format_id,
'url': manifest_url,
'ext': 'flv',
'tbr': tbr,
'width': int_or_none(media_el.attrib.get('width')),
'height': int_or_none(media_el.attrib.get('height')),
})
self._sort_formats(formats)
return formats
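# Added illustrative note (not part of the original source): each <media> node in
# the f4m manifest, e.g.
#     <media bitrate="800" width="1280" height="720" ... />
# becomes one flv format above, with the bitrate used as tbr and as part of the
# format_id.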
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None):
formats = [{
'format_id': 'm3u8-meta',
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': -1,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}]
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
m3u8_doc = self._download_webpage(
m3u8_url, video_id,
note='Downloading m3u8 information',
errnote='Failed to download m3u8 information')
last_info = None
kv_rex = re.compile(
r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_info = {}
for m in kv_rex.finditer(line):
v = m.group('val')
if v.startswith('"'):
v = v[1:-1]
last_info[m.group('key')] = v
elif line.startswith('#') or not line.strip():
continue
else:
if last_info is None:
formats.append({'url': format_url(line)})
continue
tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
f = {
'format_id': 'm3u8-%d' % (tbr if tbr else len(formats)),
'url': format_url(line.strip()),
'tbr': tbr,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}
codecs = last_info.get('CODECS')
if codecs:
# TODO: it looks like the video codec does not always come first
va_codecs = codecs.split(',')
if va_codecs[0]:
f['vcodec'] = va_codecs[0].partition('.')[0]
if len(va_codecs) > 1 and va_codecs[1]:
f['acodec'] = va_codecs[1].partition('.')[0]
resolution = last_info.get('RESOLUTION')
if resolution:
width_str, height_str = resolution.split('x')
f['width'] = int(width_str)
f['height'] = int(height_str)
formats.append(f)
last_info = {}
self._sort_formats(formats)
return formats
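# Added illustrative note (not part of the original source): a variant entry in an
# HLS master playlist that the loop above turns into one format, e.g.
#     #EXT-X-STREAM-INF:BANDWIDTH=1280000,RESOLUTION=1280x720,CODECS="avc1.4d401f,mp4a.40.2"
#     http://example.com/720p.m3u8
# BANDWIDTH becomes tbr (scaled to kbit/s), RESOLUTION sets width/height, and
# CODECS fills vcodec/acodec.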
# TODO: improve extraction
def _extract_smil_formats(self, smil_url, video_id):
smil = self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file')
base = smil.find('./head/meta').get('base')
formats = []
rtmp_count = 0
for video in smil.findall('./body/switch/video'):
src = video.get('src')
if not src:
continue
bitrate = int_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
width = int_or_none(video.get('width'))
height = int_or_none(video.get('height'))
proto = video.get('proto')
if not proto:
if base:
if base.startswith('rtmp'):
proto = 'rtmp'
elif base.startswith('http'):
proto = 'http'
ext = video.get('ext')
if proto == 'm3u8':
formats.extend(self._extract_m3u8_formats(src, video_id, ext))
elif proto == 'rtmp':
rtmp_count += 1
streamer = video.get('streamer') or base
formats.append({
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'width': width,
'height': height,
})
self._sort_formats(formats)
return formats
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime("%Y-%m-%d %H:%M")
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if 'get_attr' in kwargs:
print(getattr(v, kwargs['get_attr']))
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None):
cookie = compat_cookiejar.Cookie(
0, name, value, None, None, domain, None,
None, '/', True, False, expire_time, '', None, None, None)
self._downloader.cookiejar.set_cookie(cookie)
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search query extractors.
They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
@classmethod
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
@classmethod
def suitable(cls, url):
return re.match(cls._make_valid_url(), url) is not None
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
return self._get_n_results(query, self._MAX_RESULTS)
else:
n = int(prefix)
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
raise NotImplementedError("This method must be implemented by subclasses")
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
|
0x7678/youtube-dl
|
youtube_dl/extractor/common.py
|
Python
|
unlicense
| 38,143
|
[
"VisIt"
] |
eff8e80f931e8167f9caf755995ec499017a4bc34c0e475b550a97e04fc674ec
|
import logging
from kalliope import Utils, BrainLoader
from kalliope.core import NeuronModule
from kalliope.core.NeuronModule import MissingParameterException
logging.basicConfig()
logger = logging.getLogger("kalliope")
class Brain(NeuronModule):
def __init__(self, **kwargs):
super(Brain, self).__init__(**kwargs)
self.synapse_name = kwargs.get('synapse_name', None)
self.enabled = kwargs.get('enabled', None)
if self._is_parameters_ok():
self.say(self._update_brain())
def _is_parameters_ok(self):
"""
Check if received parameters are ok to perform operations in the neuron
:return: true if parameters are ok, raise an exception otherwise
.. raises:: MissingParameterException
"""
if self.synapse_name is None or self.synapse_name == "":
raise MissingParameterException("[Brain neuron] You must specify a 'synapse_name'")
if self.enabled is None or self.enabled == "":
raise MissingParameterException("[Brain neuron] You must specify a 'enabled' boolean")
self.enabled = Utils.str_to_bool(self.enabled)
return True
def _update_brain(self):
new_status = "unknown"
brain = BrainLoader().brain
if self.enabled:
if brain.enable_synapse_by_name(self.synapse_name):
new_status = "enabled"
else:
if brain.disable_synapse_by_name(self.synapse_name):
new_status = "disabled"
message = {
"synapse_name": self.synapse_name,
"status": new_status
}
return message
|
kalliope-project/kalliope
|
kalliope/neurons/brain/brain.py
|
Python
|
gpl-3.0
| 1,654
|
[
"NEURON"
] |
c0100dfe10fd6d64e50429b4eab3e53f5ced644efca8699416e0f3ae3c9dfaa7
|
#!/bin/env python
import inspect
import os
from collections import OrderedDict
"""
# shell
aws s3 ls s3://grizli-v1/Pipeline/j000200m5558/Prep/j000200m5558_visits.npy --request-payer requester
# Python
import boto3
s3 = boto3.resource('s3')
bkt = s3.Bucket('grizli-v1')
field = 'j000200m5558'
s3_file = '{0}_visits.npy'.format(field)
s3_path = 'Pipeline/{0}/Prep'.format(field)
bkt.download_file(s3_path+'/'+s3_file, s3_file,
ExtraArgs={"RequestPayer": "requester"})
"""
class FilterDict(OrderedDict):
meta = OrderedDict()
@property
def nfiles(self):
"""
Count number of exposures
"""
n = 0
for k in self:
n += len(self[k])
return n
@property
def valid_filters(self):
"""
Return a list of filters with N >= 1 files
"""
valid = []
for k in self:
if len(self[k]) > 0:
valid.append(k)
return valid
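# Added usage sketch (not part of the original source); file names are hypothetical:
#     fd = FilterDict()
#     fd['f160w'] = ['ib6o23010_flt.fits', 'ib6o23020_flt.fits']
#     fd['f814w'] = []
#     fd.nfiles         # -> 2
#     fd.valid_filters  # -> ['f160w']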
def get_visit_files():
import boto3
from grizli.aws import db
engine = db.get_db_engine()
fields = db.from_sql("select field_root from charge_fields where log LIKE 'Finished%%'", engine=engine)
s3 = boto3.resource('s3')
bkt = s3.Bucket('grizli-v1')
for i, field in enumerate(fields['field_root']):
s3_file = '{0}_visits.npy'.format(field)
if not os.path.exists(s3_file):
s3_path = f'Pipeline/{field}/Prep'
try:
bkt.download_file(s3_path+'/'+s3_file, s3_file,
ExtraArgs={"RequestPayer": "requester"})
print(i, s3_file)
except:
print(i, f'Download failed: {field}')
else:
print(f'Skip {field}')
def make_visit_fits():
import glob
import numpy as np
from grizli import utils
visit_files = glob.glob('[egu]*visits.npy')
visit_files.sort()
indiv_files = glob.glob('j*visits.npy')
indiv_files.sort()
visit_files += indiv_files
for p in ['grizli-v1-19.12.04_visits.npy', 'grizli-v1-19.12.05_visits.npy', 'grizli-v1-20.10.12_visits.npy', 'grizli-cosmos-v2_visits.npy','grizli-v1-21.05.20_visits.npy']:
if p in visit_files:
visit_files.pop(visit_files.index(p))
all_visits = []
products = []
extra_visits = ['candels-july2019_visits.npy', 'grizli-cosmos-v2_visits.npy']
#extra_visits = ['candels-july2019_visits.npy', 'cosmos-dash-apr20_visits.npy']
extra_visits = ['candels-july2019_visits.npy', 'cosmos-dash-dec06_visits.npy']
for extra in extra_visits:
extra_visits = np.load(extra, allow_pickle=True)[0]
if 'cosmos-dash' in extra:
extra_products = [v['product'] + '-'+v['files'][0][:6] for v in extra_visits]
else:
extra_products = [v['product'] for v in extra_visits]
for i, p in enumerate(extra_products):
if p not in products:
parent = p.split('_')[0]
#print(parent, p)
v = extra_visits[i]
v['parent'] = parent
v['xproduct'] = v['product']
v['parent_file'] = extra # 'candels-july2019_visits.npy'
all_visits.append(v)
products.append(p)
else:
print('Skip: ', p, v['parent'])
# COSMOS footprint
cosmos_fp = None
for i, v in enumerate(extra_visits):
if v['product'].endswith('f814w'):
print(v['product'])
if cosmos_fp is None:
cosmos_fp = v['footprint'].buffer(1.e-6)
else:
cosmos_fp = cosmos_fp.union(v['footprint'])
for i, file in enumerate(visit_files):
visits, groups, info = np.load(file, allow_pickle=True)
print(file, len(visits))
for v in visits:
has_fp = ('footprints' in v)
if not has_fp:
print('No footprint: {0}'.format(v['product']))
if file.startswith('j'):
vprod = v['product'] + '-' + v['files'][0]
else:
vprod = v['product']
if has_fp & (vprod not in products):
v['parent'] = file.split("_visits")[0].split('-')[-1]
v['first'] = v['files'][0]
v['parent_file'] = file
v['xproduct'] = vprod
all_visits.append(v)
products.append(vprod)
for v in all_visits:
v['filter'] = v['product'].split('-')[-1]
v['first'] = v['files'][0]
# File dictionary
all_files = []
file_products = []
for v in all_visits:
all_files.extend(v['files'])
file_products.extend([v['xproduct']]*len(v['files']))
# duplicates?? seem to be in GOODS-S.
# Exclude them in all but the first product that contains them for now
if True:
_un = np.unique(all_files, return_counts=True, return_index=True, return_inverse=True)
un_file, un_index, un_inv, un_count = _un
dup = un_count > 1
dup_files = un_file[dup]
for file in dup_files:
prods = list(np.array(file_products)[np.array(all_files) == file])
for prod in prods[1:]:
i = products.index(prod)
v = all_visits[i]
j = v['files'].index(file)
print(file, v['parent'], prod, i, j)
pj = all_visits[i]['files'].pop(j)
pj = all_visits[i]['footprints'].pop(j)
if 'awspath' in all_visits[i]:
pj = all_visits[i]['awspath'].pop(j)
#print(file, prods[-1])
# WFC3/IR copied to "Exposures" paths in CANDELS fields
for v in all_visits:
if v['parent_file'] in ['grizli-cosmos-v2_visits.npy', 'cosmos-dash-apr20_visits.npy', 'cosmos-dash-dec06_visits.npy']:
continue
if v['parent_file'].startswith('j'):
v['awspath'] = ['grizli-v1/Pipeline/{0}/Prep'.format(v['parent']) for f in v['files']]
else:
#print(v['parent_file'], v['awspath'][0])
if v['filter'].startswith('f0') | v['filter'].startswith('f1'):
# print(v['product'])
v['awspath'] = ['grizli-v1/Exposures/{0}/{1}'.format(f[:4], f.split('_')[0]) for f in v['files']]
# Empty visits, seems to be from duplicates above and mostly in CANDELS
nexp = np.array([len(visit['files']) for visit in all_visits])
for i in np.where(nexp == 0)[0][::-1]:
v_i = all_visits.pop(i)
print(i, v_i['product'])
products.pop(i)
tab = utils.GTable()
for k in ['parent', 'product', 'filter', 'first']:
tab[k] = [visit[k] for visit in all_visits]
coo = np.array([np.array(visit['footprint'].centroid.xy).flatten() for visit in all_visits])
tab['ra'] = coo[:, 0]
tab['dec'] = coo[:, 1]
tab['nexp'] = [len(visit['files']) for visit in all_visits]
tab['bounds'] = [np.array(v['footprint'].bounds) for v in all_visits]
root = 'candels-july2019'
root = 'candels-sep2019'
root = 'grizli-v1-19.12.04'
root = 'grizli-v1-19.12.05'
root = 'grizli-v1-20.10.12'
root = 'grizli-v1-21.05.20'
root = 'grizli-v1-21.12.18'
tab.write(root+'_visits.fits', overwrite=True)
np.save(root+'_visits.npy', [all_visits])
# os.system('echo "# In https://s3.amazonaws.com/grizli-v1/Mosaics/" > candels-july2019.files.txt; ls candels-july2019* |grep -v files.txt >> candels-july2019.files.txt')
#os.system('aws s3 sync --exclude "*" --include "candels-july2019*" --include "grizli-v1-19.12.04*" ./ s3://grizli-v1/Mosaics/ --acl public-read')
os.system('echo "# In https://s3.amazonaws.com/grizli-v1/Mosaics/" > {0}.files.txt; ls {0}* |grep -v files.txt >> {0}.files.txt'.format(root))
os.system('aws s3 sync --exclude "*" --include "{0}*" ./ s3://grizli-v1/Mosaics/ --acl public-read'.format(root))
if False:
from shapely.geometry import Point
from grizli.aws import db
engine = db.get_db_engine()
fields = db.from_sql("select field_root, a_wfc3 from charge_fields where log LIKE 'Finished%%'", engine=engine)
candels = utils.column_values_in_list(tab['parent'], ['j141956p5255', 'j123656p6215', 'j033236m2748', 'j021732m0512', 'j100012p0210'])
cosmos = np.array([v['footprint'].intersection(cosmos_fp).area > 0 for v in all_visits])
extra = candels
extra = ~(candels | cosmos)
# Area
filter_polys = {}
filt = 'f160w'
for filt in np.unique(tab['filter']):
print(filt)
if filt in filter_polys:
print(filt)
continue
poly = None
count = 0
# Dec strips
di = np.arange(-90, 91, 5)
strips = []
for i in range(len(di)-1):
strip = (tab['dec'] > di[i]) & (tab['dec'] <= di[i+1]) & (tab['filter'] == filt)
strip &= extra
if strip.sum() == 0:
continue
indices = np.arange(len(tab))[strip]
poly = None
for j in indices:
v = all_visits[j]
if v['filter'] != filt:
continue
# for fp in v['footprints']:
for fp in [v['footprint']]:
count += 1
#print(i, v['product'], count)
if poly is None:
poly = fp.buffer(1.e-6)
else:
poly = poly.union(fp.buffer(1.e-6))
poly.dec = di[i]+2.5
strips.append(poly)
if len(strips) == 0:
filter_polys[filt] = Point(0, 0).buffer(1.e-6)
continue
full = strips[0].buffer(1.e-6)
for strip in strips[1:]:
full = full.union(strip.buffer(1.e-6))
filter_polys[filt] = full
optical = filter_polys['f606w'].union(filter_polys['f814w'])
optical = optical.union(filter_polys['f850lp'])
optical = optical.union(filter_polys['f775w'])
yband = filter_polys['f098m'].union(filter_polys['f105w'])
visy = optical.union(yband)
jband = filter_polys['f125w']
jband = jband.union(filter_polys['f110w'])
hband = filter_polys['f140w'].union(filter_polys['f160w'])
filter_polys[r'$\mathrm{opt} = i_{775} | i_{814} | z_{850}$'] = optical
filter_polys[r'$\mathrm{opty} = \mathrm{opt} | Y$'] = visy
filter_polys[r'$Y = y_{098 } | y_{105}$'] = yband
filter_polys[r'$J = j_{110} | j_{125}$'] = jband
filter_polys[r'$H = h_{140} | h_{160}$'] = hband
ydrop = visy.intersection(jband)
ydrop = ydrop.intersection(hband)
filter_polys[r'$Y-\mathrm{drop} = (\mathrm{opt} | Y) + J + H$'] = ydrop
yj = yband.union(jband)
jdrop = yj.intersection(hband)
filter_polys[r'$J-\mathrm{drop} = (Y | J) + H$'] = jdrop
for filt in filter_polys:
full = filter_polys[filt]
try:
areas = [f.area*np.cos(np.array(f.centroid.xy).flatten()[1]/180*np.pi) for f in full]
except:
try:
areas = [f.area*np.cos(np.array(f.centroid.xy).flatten()[1]/180*np.pi) for f in [full]]
except:
areas = [0]
full.total_area = np.sum(areas)
print(filt, filter_polys[filt].total_area)
ta = utils.GTable()
ta['filter'] = [f.upper() for f in filter_polys]
ta['area'] = [filter_polys[f].total_area*3600 for f in filter_polys]
ta['area'].format = '.0f'
# Compare areas
h = fields['a_wfc3_ir_f160w'] > 0
for root, aa in zip(fields['field_root'][h], fields['a_wfc3_ir_f160w'][h]):
sel = (tab['filter'] == 'f160w') & (tab['parent'] == root)
if sel.sum() > 0:
indices = np.where(sel)[0]
a = all_visits[indices[0]]['footprint'].buffer(1.e-6)
for i in indices:
a = a.union(all_visits[i]['footprint'])
a_i = a.area*3600*np.cos(tab['dec'][indices[0]]/180*np.pi)
print(root, aa, a_i, a_i/aa)
def group_by_filter():
"""
aws s3 sync --exclude "*" --include "cosmos_visits*" s3://grizli-preprocess/CosmosMosaic/ ./
"""
from grizli import prep, utils
import numpy as np
master = 'cosmos'
master = 'grizli-cosmos-v2'
master = 'grizli-jan2019'
master = 'grizli-v1-19.12.04'
master = 'grizli-v1-19.12.05'
master = 'grizli-v1-20.10.12'
master = 'grizli-v1-21.05.20'
master = 'grizli-v1-21.12.18'
tab = utils.read_catalog('{0}_visits.fits'.format(master))
all_visits = np.load('{0}_visits.npy'.format(master), allow_pickle=True)[0]
# By filter
# Exclude DASH
dash = utils.column_string_operation(tab['product'], 'icxe', 'startswith')
dash |= utils.column_string_operation(tab['product'], '_icxe',
'count', 'or')
# Don't exclude DASH
dash = utils.column_string_operation(tab['product'], 'xxxx', 'startswith')
groups = {}
fpstr = {}
for filt in np.unique(tab['filter']):
mat = (tab['filter'] == filt) & (~dash)
groups[filt] = {'filter': filt, 'files': [], 'awspath': [], 'footprints': []}
fpstr[filt] = 'fk5\n'
for ix in np.where(mat)[0]:
fp = all_visits[ix]['footprint']
if hasattr(fp, '__len__'):
fps = fp
else:
fps = [fp]
for fp in fps:
xy = fp.boundary.xy
pstr = 'polygon('+','.join(['{0:.6f}'.format(i) for i in np.array([xy[0].tolist(), xy[1].tolist()]).T.flatten()])+') # text={{{0}}}\n'.format(all_visits[ix]['product'])
fpstr[filt] += pstr
for k in ['files', 'awspath', 'footprints']:
groups[filt][k].extend(all_visits[ix][k])
fp = open('{0}-pointings-{1}.reg'.format(master, filt), 'w')
fp.write(fpstr[filt])
fp.close()
print('{0:6} {1:>3d} {2:>4d} ({3:>4d})'.format(filt, mat.sum(), len(groups[filt]['files']), len(np.unique(groups[filt]['files']))))
np.save('{0}_filter_groups.npy'.format(master), [groups])
os.system('aws s3 sync --exclude "*" --include "{0}*" ./ s3://grizli-v1/Mosaics/ --acl public-read'.format(master))
# RGB_PARAMS = {'xsize':4, 'rgb_min':-0.01, 'verbose':True, 'output_dpi': None, 'add_labels':False, 'output_format':'png', 'show_ir':False, 'scl':2, 'suffix':'.rgb', 'mask_empty':False}
RGB_PARAMS = {'xsize': 4,
'output_dpi': None,
'rgb_min': -0.01,
'add_labels': False,
'output_format': 'png',
'show_ir': False,
'scl': 2,
'suffix': '.rgb',
'mask_empty': False,
'tick_interval': 1,
'pl': 1, # 1 for f_lambda, 2 for f_nu
}
# xsize=4, output_dpi=None, HOME_PATH=None, show_ir=False, pl=1, pf=1, scl=1, rgb_scl=[1, 1, 1], ds9=None, force_ir=False, filters=all_filters, add_labels=False, output_format='png', rgb_min=-0.01, xyslice=None, pure_sort=False, verbose=True, force_rgb=None, suffix='.rgb', scale_ab=scale_ab)
def segmentation_figure(label, cat, segfile):
"""
Make a figure showing a cutout of the segmentation file
"""
import matplotlib.pyplot as plt
import numpy as np
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
from grizli import utils
plt.ioff()
seg = pyfits.open(segfile)
seg_data = seg[0].data
seg_wcs = pywcs.WCS(seg[0].header)
# Randomize seg to get dispersion between neighboring objects
np.random.seed(hash(label.split('_')[0]) % (10 ** 8))
rnd_ids = np.append([0], np.argsort(np.random.rand(len(cat)))+1)
# Make cutout
th = pyfits.open('{0}.thumb.fits'.format(label), mode='update')
th_wcs = pywcs.WCS(th[0].header)
blot_seg = utils.blot_nearest_exact(seg_data, seg_wcs, th_wcs,
stepsize=-1, scale_by_pixel_area=False)
rnd_seg = rnd_ids[np.cast[int](blot_seg)]*1.
th_ids = np.unique(blot_seg)
sh = th[0].data.shape
yp, xp = np.indices(sh)
thumb_height = 2.
fig = plt.figure(figsize=[thumb_height*sh[1]/sh[0], thumb_height])
ax = fig.add_subplot(111)
rnd_seg[rnd_seg == 0] = np.nan
ax.imshow(rnd_seg, aspect='equal', cmap='terrain_r',
vmin=-0.05*len(cat), vmax=1.05*len(cat))
ax.set_xticklabels([])
ax.set_yticklabels([])
ix = utils.column_values_in_list(cat['number'], th_ids)
xc, yc = th_wcs.all_world2pix(cat['ra'][ix], cat['dec'][ix], 0)
xc = np.clip(xc, 0.09*sh[1], 0.91*sh[1])
yc = np.clip(yc, 0.08*sh[0], 0.92*sh[0])
for th_id, x_i, y_i in zip(cat['number'][ix], xc, yc):
if th_id == 0:
continue
ax.text(x_i, y_i, '{0:.0f}'.format(th_id), ha='center', va='center', fontsize=8, color='w')
ax.text(x_i, y_i, '{0:.0f}'.format(th_id), ha='center', va='center', fontsize=8, color='k', alpha=0.95)
ax.set_xlim(0, sh[1]-1)
ax.set_ylim(0, sh[0]-1)
ax.set_axis_off()
fig.tight_layout(pad=0.01)
fig.savefig('{0}.seg.png'.format(label))
plt.close(fig)
# Append to thumbs file
seg_hdu = pyfits.ImageHDU(data=np.cast[int](blot_seg), name='SEG')
if 'SEG' in th:
th.pop('SEG')
th.append(seg_hdu)
th.writeto('{0}.thumb.fits'.format(label), overwrite=True,
output_verify='fix')
th.close()
def drizzle_images(label='macs0647-jd1', ra=101.9822125, dec=70.24326667, pixscale=0.1, size=10, wcs=None, pixfrac=0.33, kernel='square', theta=0, half_optical_pixscale=True, filters=['f160w', 'f140w', 'f125w', 'f105w', 'f110w', 'f098m', 'f850lp', 'f814w', 'f775w', 'f606w', 'f475w', 'f555w', 'f600lp', 'f390w', 'f350lp'], skip=None, remove=True, rgb_params=RGB_PARAMS, master='grizli-v1-19.12.04', aws_bucket='s3://grizli/CutoutProducts/', scale_ab=21, thumb_height=2.0, sync_fits=True, subtract_median=True, include_saturated=True, include_ir_psf=False, oversample_psf=False, show_filters=['visb', 'visr', 'y', 'j', 'h'], combine_similar_filters=True, single_output=True, aws_prep_dir=None, make_segmentation_figure=False, get_dict=False, dryrun=False, thumbnail_ext='png', **kwargs):
"""
label='cp561356'; ra=150.208875; dec=1.850241667; size=40; filters=['f160w','f814w', 'f140w','f125w','f105w','f606w','f475w']
master: These are sets of large lists of available exposures
'cosmos': deprecated
'grizli-cosmos-v2': All imaging covering the COSMOS field
'candels-july2019': CANDELS fields other than COSMOS
'grizli-v1': First processing of the Grizli CHArGE dataset
'grizli-v1-19.12.04': Updated CHArGE fields
** this is now a copy from 21.05.20 so that the old lambda
function can catch it **
'grizli-v1-21.05.20': ACS fields + new cosmos
"""
import glob
import copy
import os
import numpy as np
import astropy.io.fits as pyfits
from astropy.coordinates import SkyCoord
import astropy.units as u
from drizzlepac.adrizzle import do_driz
import boto3
from grizli import prep, utils
from grizli.pipeline import auto_script
# Function arguments
if get_dict:
frame = inspect.currentframe()
args = inspect.getargvalues(frame).locals
pop_args = ['get_dict', 'frame', 'kwargs']
pop_classes = (np.__class__, do_driz.__class__, SkyCoord.__class__)
for k in kwargs:
args[k] = kwargs[k]
for k in args:
if isinstance(args[k], pop_classes):
pop_args.append(k)
for k in pop_args:
if k in args:
args.pop(k)
return args
# Boto objects
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')
if isinstance(ra, str):
coo = SkyCoord('{0} {1}'.format(ra, dec), unit=(u.hour, u.deg))
ra, dec = coo.ra.value, coo.dec.value
if label is None:
try:
import mastquery.utils
label = mastquery.utils.radec_to_targname(ra=ra, dec=dec, round_arcsec=(1/15, 1), targstr='j{rah}{ram}{ras}{sign}{ded}{dem}{des}')
except:
label = 'grizli-cutout'
#master = 'cosmos'
#master = 'grizli-jan2019'
if master == 'grizli-jan2019':
parent = 's3://grizli/MosaicTools/'
bkt = s3.Bucket('grizli')
elif master == 'cosmos':
parent = 's3://grizli-preprocess/CosmosMosaic/'
bkt = s3.Bucket('grizli-preprocess')
elif master == 'grizli-cosmos-v2':
parent = 's3://grizli-cosmos-v2/Mosaics/'
bkt = s3.Bucket('grizli-cosmos-v2')
elif master == 'candels-july2019':
parent = 's3://grizli-v1/Mosaics/'
bkt = s3.Bucket('grizli-v1')
elif master == 'grizli-v1-19.12.04':
parent = 's3://grizli-v1/Mosaics/'
bkt = s3.Bucket('grizli-v1')
elif master == 'grizli-v1-19.12.05':
parent = 's3://grizli-v1/Mosaics/'
bkt = s3.Bucket('grizli-v1')
elif master == 'grizli-v1-latest':
parent = 's3://grizli-v1/Mosaics/'
bkt = s3.Bucket('grizli-v1')
else:
# Run on local files, e.g., "Prep" directory
parent = None
bkt = None
#remove = False
# Download summary files from S3
for ext in ['_visits.fits', '_visits.npy', '_filter_groups.npy'][-1:]:
newfile = '{0}{1}'.format(master, ext)
if (not os.path.exists(newfile)) & (parent is not None):
s3_path = parent.split('/')[-2]
s3_file = '{0}{1}'.format(master, ext)
print('{0}{1}'.format(parent, s3_file))
bkt.download_file(s3_path+'/'+s3_file, s3_file,
ExtraArgs={"RequestPayer": "requester"})
#os.system('aws s3 cp {0}{1}{2} ./'.format(parent, master, ext))
#tab = utils.read_catalog('{0}_visits.fits'.format(master))
#all_visits = np.load('{0}_visits.npy'.format(master))[0]
if parent is not None:
groups = np.load('{0}_filter_groups.npy'.format(master), allow_pickle=True)[0]
else:
if aws_prep_dir is not None:
spl = aws_prep_dir.replace('s3://', '').split('/')
prep_bucket = spl[0]
prep_root = spl[2]
prep_bkt = s3.Bucket(prep_bucket)
s3_prep_path = 'Pipeline/{0}/Prep/'.format(prep_root)
s3_full_path = '{0}/{1}'.format(prep_bucket, s3_prep_path)
s3_file = '{0}_visits.npy'.format(prep_root)
# Make output path Prep/../Thumbnails/
if aws_bucket is not None:
aws_bucket = ('s3://' +
s3_full_path.replace('/Prep/', '/Thumbnails/'))
print('{0}{1}'.format(s3_prep_path, s3_file))
if not os.path.exists(s3_file):
prep_bkt.download_file(os.path.join(s3_prep_path, s3_file),
s3_file, ExtraArgs={"RequestPayer": "requester"})
groups_files = glob.glob('{0}_filter_groups.npy'.format(prep_root))
visit_query = prep_root+'_'
else:
groups_files = glob.glob('*filter_groups.npy')
visit_query = '*'
# Reformat local visits.npy into a groups file
if (len(groups_files) == 0):
visit_file = glob.glob(visit_query+'visits.npy')[0]
visits, groups, info = np.load(visit_file, allow_pickle=True)
visit_root = visit_file.split('_visits')[0]
visit_filters = np.array([v['product'].split('-')[-1] for v in visits])
groups = {}
for filt in np.unique(visit_filters):
groups[filt] = {}
groups[filt]['filter'] = filt
groups[filt]['files'] = []
groups[filt]['footprints'] = []
groups[filt]['awspath'] = []
ix = np.where(visit_filters == filt)[0]
for i in ix:
groups[filt]['files'].extend(visits[i]['files'])
groups[filt]['footprints'].extend(visits[i]['footprints'])
Nf = len(groups[filt]['files'])
print('{0:>6}: {1:>3} exposures'.format(filt, Nf))
if aws_prep_dir is not None:
groups[filt]['awspath'] = [s3_full_path
for file in range(Nf)]
np.save('{0}_filter_groups.npy'.format(visit_root), [groups])
else:
print('Use groups file: {0}'.format(groups_files[0]))
groups = np.load(groups_files[0], allow_pickle=True)[0]
#filters = ['f160w','f814w', 'f110w', 'f098m', 'f140w','f125w','f105w','f606w', 'f475w']
filt_dict = FilterDict()
filt_dict.meta['label'] = label
filt_dict.meta['ra'] = ra
filt_dict.meta['dec'] = dec
filt_dict.meta['size'] = size
filt_dict.meta['master'] = master
filt_dict.meta['parent'] = parent
if filters is None:
filters = list(groups.keys())
has_filts = []
lower_filters = [f.lower() for f in filters]
for filt in lower_filters:
if filt not in groups:
continue
visits = [copy.deepcopy(groups[filt])]
#visits[0]['reference'] = 'CarlosGG/ak03_j1000p0228/Prep/ak03_j1000p0228-f160w_drz_sci.fits'
visits[0]['product'] = label+'-'+filt
if wcs is None:
hdu = utils.make_wcsheader(ra=ra, dec=dec, size=size, pixscale=pixscale, get_hdu=True, theta=theta)
h = hdu.header
else:
h = utils.to_header(wcs)
if (filt[:2] in ['f0', 'f1', 'g1']) | (not half_optical_pixscale):
#data = hdu.data
pass
else:
for k in ['NAXIS1', 'NAXIS2', 'CRPIX1', 'CRPIX2']:
h[k] *= 2
h['CRPIX1'] -= 0.5
h['CRPIX2'] -= 0.5
for k in ['CD1_1', 'CD1_2', 'CD2_1', 'CD2_2']:
if k in h:
h[k] /= 2
#data = np.zeros((h['NAXIS2'], h['NAXIS1']), dtype=np.int16)
#pyfits.PrimaryHDU(header=h, data=data).writeto('ref.fits', overwrite=True, output_verify='fix')
#visits[0]['reference'] = 'ref.fits'
print('\n\n###\nMake filter: {0}'.format(filt))
if include_ir_psf:
clean_i = False
else:
clean_i = remove
status = utils.drizzle_from_visit(visits[0], h, pixfrac=pixfrac, kernel=kernel, clean=clean_i, include_saturated=include_saturated, skip=skip, dryrun=dryrun)
if dryrun:
filt_dict[filt] = status
continue
elif status is not None:
sci, wht, outh, filt_dict[filt] = status
if subtract_median:
#med = np.median(sci[sci != 0])
try:
un_data = np.unique(sci[(sci != 0) & np.isfinite(sci)])
med = utils.mode_statistic(un_data)
except:
med = 0.
if not np.isfinite(med):
med = 0.
print('\n\nMedian {0} = {1:.3f}\n\n'.format(filt, med))
outh['IMGMED'] = (med, 'Median subtracted from the image')
else:
med = 0.
outh['IMGMED'] = (0., 'Median subtracted from the image')
pyfits.writeto('{0}-{1}_drz_sci.fits'.format(label, filt),
data=sci, header=outh, overwrite=True,
output_verify='fix')
pyfits.writeto('{0}-{1}_drz_wht.fits'.format(label, filt),
data=wht, header=outh, overwrite=True,
output_verify='fix')
has_filts.append(filt)
if include_ir_psf:
from grizli.galfit.psf import DrizzlePSF
hdu = pyfits.open('{0}-{1}_drz_sci.fits'.format(label, filt),
mode='update')
flt_files = [] # visits[0]['files']
for i in range(1, 10000):
key = 'FLT{0:05d}'.format(i)
if key not in hdu[0].header:
break
flt_files.append(hdu[0].header[key])
try:
dp = DrizzlePSF(flt_files=flt_files, driz_hdu=hdu[0])
if oversample_psf:
oN = oversample_psf*2+1
cosd = np.cos(dp.driz_wcs.wcs.crval[1]/180*np.pi)
dde = 1./(oversample_psf*2)*pixscale/3600
dra = dde*cosd
sh = sci.shape
psfd = np.zeros((oN*sh[0], oN*sh[1]),
dtype=np.float32)
for i in range(oN):
for j in range(oN):
ra_i = (dp.driz_wcs.wcs.crval[0] +
dra*(i-oversample_psf))
de_i = (dp.driz_wcs.wcs.crval[1] -
dde*(j-oversample_psf))
psf_i = dp.get_psf(ra=ra_i, dec=de_i,
filter=filt.upper(),
pixfrac=dp.driz_header['PIXFRAC'],
kernel=dp.driz_header['KERNEL'],
wcs_slice=dp.driz_wcs,
get_extended=filt.lower()[:2] in ['f1','f0'],
verbose=False, get_weight=False)
psfd[j::oN,i::oN] += psf_i[1].data
psf = pyfits.ImageHDU(data=psfd)
else:
psf = dp.get_psf(ra=dp.driz_wcs.wcs.crval[0],
dec=dp.driz_wcs.wcs.crval[1],
filter=filt.upper(),
pixfrac=dp.driz_header['PIXFRAC'],
kernel=dp.driz_header['KERNEL'],
wcs_slice=dp.driz_wcs,
get_extended=filt.lower()[:2] in ['f1','f0'],
verbose=False, get_weight=False)[1]
psf.header['OVERSAMP'] = oversample_psf
psf.header['EXTNAME'] = 'PSF'
#psf[1].header['EXTVER'] = filt
hdu.append(psf)
hdu.flush()
except:
pass
if remove:
os.system('rm *_fl*fits')
# Dry run, just return dictionary of the found exposure files
if dryrun:
return filt_dict
# Nothing found
if len(has_filts) == 0:
return []
if combine_similar_filters:
combine_filters(label=label)
if rgb_params:
#auto_script.field_rgb(root=label, HOME_PATH=None, filters=has_filts, **rgb_params)
show_all_thumbnails(label=label, thumb_height=thumb_height, scale_ab=scale_ab, close=True, rgb_params=rgb_params, filters=show_filters, ext=thumbnail_ext)
if (single_output != 0):
# Concatenate into a single FITS file
files = glob.glob('{0}-f*_dr[cz]_sci.fits'.format(label))
files.sort()
if combine_similar_filters:
comb_files = glob.glob('{0}-[a-eg-z]*_dr[cz]_sci.fits'.format(label))
comb_files.sort()
files += comb_files
hdul = None
for file in files:
hdu_i = pyfits.open(file)
hdu_i[0].header['EXTNAME'] = 'SCI'
if 'NCOMBINE' in hdu_i[0].header:
if hdu_i[0].header['NCOMBINE'] <= single_output:
continue
filt_i = file.split('-')[-1].split('_dr')[0]
else:
filt_i = utils.get_hst_filter(hdu_i[0].header)
for h in hdu_i:
h.header['EXTVER'] = filt_i
if hdul is None:
hdul = pyfits.HDUList([h])
else:
hdul.append(h)
print('Add to {0}.thumb.fits: {1}'.format(label, file))
# Weight
hdu_i = pyfits.open(file.replace('_sci', '_wht'))
hdu_i[0].header['EXTNAME'] = 'WHT'
for h in hdu_i:
h.header['EXTVER'] = filt_i
if hdul is None:
hdul = pyfits.HDUList([h])
else:
hdul.append(h)
hdul.writeto('{0}.thumb.fits'.format(label), overwrite=True,
output_verify='fix')
for file in files:
for f in [file, file.replace('_sci', '_wht')]:
if os.path.exists(f):
print('Remove {0}'.format(f))
os.remove(f)
# Segmentation figure
thumb_file = '{0}.thumb.fits'.format(label)
if (make_segmentation_figure) & (os.path.exists(thumb_file)) & (aws_prep_dir is not None):
print('Make segmentation figure')
# Fetch segmentation image and catalog
s3_prep_path = 'Pipeline/{0}/Prep/'.format(prep_root)
s3_full_path = '{0}/{1}'.format(prep_bucket, s3_prep_path)
s3_file = '{0}_visits.npy'.format(prep_root)
has_seg_files = True
seg_files = ['{0}-ir_seg.fits.gz'.format(prep_root),
'{0}_phot.fits'.format(prep_root)]
for s3_file in seg_files:
if not os.path.exists(s3_file):
remote_file = os.path.join(s3_prep_path, s3_file)
try:
print('Fetch {0}'.format(remote_file))
prep_bkt.download_file(remote_file, s3_file,
ExtraArgs={"RequestPayer": "requester"})
except:
has_seg_files = False
print('Make segmentation figure failed: {0}'.format(remote_file))
break
if has_seg_files:
s3_cat = utils.read_catalog(seg_files[1])
segmentation_figure(label, s3_cat, seg_files[0])
if aws_bucket:
#aws_bucket = 's3://grizli-cosmos/CutoutProducts/'
#aws_bucket = 's3://grizli/CutoutProducts/'
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')
bkt = s3.Bucket(aws_bucket.split("/")[2])
aws_path = '/'.join(aws_bucket.split("/")[3:])
if sync_fits:
files = glob.glob('{0}*'.format(label))
else:
files = glob.glob('{0}*png'.format(label))
for file in files:
print('{0} -> {1}'.format(file, aws_bucket))
bkt.upload_file(file, '{0}/{1}'.format(aws_path, file).replace('//', '/'), ExtraArgs={'ACL': 'public-read'})
#os.system('aws s3 sync --exclude "*" --include "{0}*" ./ {1} --acl public-read'.format(label, aws_bucket))
#os.system("""echo "<pre>" > index.html; aws s3 ls AWSBUCKETX --human-readable | sort -k 1 -k 2 | grep -v index | awk '{printf("%s %s",$1, $2); printf(" %6s %s ", $3, $4); print "<a href="$5">"$5"</a>"}'>> index.html; aws s3 cp index.html AWSBUCKETX --acl public-read""".replace('AWSBUCKETX', aws_bucket))
return has_filts
def get_cutout_from_aws(label='macs0647-jd1', ra=101.9822125, dec=70.24326667, master='grizli-jan2019', scale_ab=21, thumb_height=2.0, remove=1, aws_bucket="s3://grizli/DropoutThumbnails/", lambda_func='grizliImagingCutout', force=False, **kwargs):
"""
Get cutout using AWS lambda
"""
import boto3
import json
#func = 'grizliImagingCutout'
#label = '{0}_{1:05d}'.format(self.cat['root'][ix], self.cat['id'][ix])
#url = 'https://s3.amazonaws.com/grizli/DropoutThumbnails/{0}.thumb.png'
session = boto3.Session()
client = session.client('lambda', region_name='us-east-1')
event = {
'label': label,
"ra": ra,
"dec": dec,
"scale_ab": scale_ab,
"thumb_height": thumb_height,
"aws_bucket": aws_bucket,
"remove": remove,
"master": master,
}
for k in kwargs:
event[k] = kwargs[k]
    bucket_split = aws_bucket.rstrip("/").replace("s3://", "", 1).split('/')  # drop the scheme and any trailing slash
bucket_name = bucket_split[0]
bucket_path = '/'.join(bucket_split[1:])
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')
bkt = s3.Bucket(bucket_name)
files = [obj.key for obj in bkt.objects.filter(Prefix='{0}/{1}.thumb.png'.format(bucket_path, label))]
if (len(files) == 0) | force:
print('Call lambda: {0}'.format(label))
print(event)
response = client.invoke(
FunctionName=lambda_func,
InvocationType='Event',
LogType='Tail',
Payload=json.dumps(event))
else:
response = None
print('Thumb exists')
return response
def handler(event, context):
import os
import grizli
print(grizli.__version__)
os.chdir('/tmp/')
os.system('rm *')
os.system('rm -rf matplotlib*')
print(event) # ['s3_object_path'], event['verbose'])
drizzle_images(**event)
os.system('rm *')
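# Hedged example (editor addition, not from the grizli source): the payload that
# handler() above receives is the same dict built in get_cutout_from_aws(); the
# values below are simply that function's defaults, shown only for illustration.
_EXAMPLE_LAMBDA_EVENT = {
    'label': 'macs0647-jd1', 'ra': 101.9822125, 'dec': 70.24326667,
    'scale_ab': 21, 'thumb_height': 2.0, 'remove': 1,
    'master': 'grizli-jan2019', 'aws_bucket': 's3://grizli/DropoutThumbnails/',
}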
def combine_filters(label='j022708p4901_00273', verbose=True):
"""
Group nearby filters
"""
import glob
import numpy as np
import astropy.io.fits as pyfits
from grizli import utils
filter_queries = {}
filter_queries['uv'] = '{0}-f[2-3]*sci.fits'.format(label)
filter_queries['visb'] = '{0}-f[4-5]*sci.fits'.format(label)
filter_queries['visr'] = '{0}-f[6-8]*sci.fits'.format(label)
filter_queries['y'] = '{0}-f[01][90][85]*sci.fits'.format(label)
filter_queries['j'] = '{0}-f1[12][05]*sci.fits'.format(label)
filter_queries['h'] = '{0}-f1[64]0*sci.fits'.format(label)
grouped_filters = {}
for qfilt in filter_queries:
drz_files = glob.glob(filter_queries[qfilt])
drz_files.sort()
grouped_filters[qfilt] = [f.split('_dr')[0].split('-')[-1] for f in drz_files]
if len(drz_files) > 0:
drz_files.sort()
if verbose:
print('# Combine filters, {0}={1}'.format(qfilt,
'+'.join(drz_files)))
for i, file in enumerate(drz_files[::-1]):
drz = pyfits.open(file)
wht = pyfits.open(file.replace('_sci', '_wht'))
sci = drz[0].data*1.
# Subtract background?
if 'IMGMED' in drz[0].header:
sci -= drz[0].header['IMGMED']
drz[0].header['IMGMED'] = 0.
if i == 0:
photflam = drz[0].header['PHOTFLAM']
num = sci*wht[0].data
den = wht[0].data
drz_ref = drz
drz_ref[0].header['CFILT{0}'.format(i+1)] = utils.get_hst_filter(drz[0].header)
drz_ref[0].header['NCOMBINE'] = (len(drz_files), 'Number of combined filters')
else:
scl = drz[0].header['PHOTFLAM']/photflam
num += sci*scl*(wht[0].data/scl**2)
den += wht[0].data/scl**2
drz_ref[0].header['CFILT{0}'.format(i+1)] = utils.get_hst_filter(drz[0].header)
drz_ref[0].header['NDRIZIM'] += drz[0].header['NDRIZIM']
sci = num/den
sci[den == 0] = 0
drz_ref[0].data = sci
pyfits.writeto('{0}-{1}_drz_sci.fits'.format(label, qfilt),
data=sci, header=drz_ref[0].header, overwrite=True,
output_verify='fix')
pyfits.writeto('{0}-{1}_drz_wht.fits'.format(label, qfilt),
data=den, header=drz_ref[0].header, overwrite=True,
output_verify='fix')
return grouped_filters
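# Minimal sketch (editor addition, not from the grizli source): the weighted
# co-add performed in combine_filters above, isolated for two images.  Each
# image is rescaled to the reference PHOTFLAM and its weight divided by the
# square of that scale, i.e. an inverse-variance combination in the reference
# flux units.
def _combine_two_filters(sci1, wht1, photflam1, sci2, wht2, photflam2):
    import numpy as np
    scl = photflam2 / photflam1                  # put image 2 on image 1's flux scale
    num = sci1 * wht1 + (sci2 * scl) * (wht2 / scl**2)
    den = wht1 + wht2 / scl**2
    den_safe = np.where(den > 0, den, 1)
    sci = np.where(den > 0, num / den_safe, 0.)  # zero where there is no weight
    return sci, den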
def show_all_thumbnails(label='j022708p4901_00273', filters=['visb', 'visr', 'y', 'j', 'h'], scale_ab=21, close=True, thumb_height=2., rgb_params=RGB_PARAMS, ext='png', xl=0.04, yl=0.98, fs=7):
"""
Show individual filter and RGB thumbnails
"""
import glob
#from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as pyfits
from astropy.visualization import make_lupton_rgb
from grizli.pipeline import auto_script
from grizli import utils
all_files = glob.glob('{0}-f*sci.fits'.format(label))
all_filters = [f.split('_dr')[0].split('-')[-1] for f in all_files]
ims = {}
for filter in filters:
drz_files = glob.glob('{0}-{1}*_dr*sci.fits'.format(label, filter))
if len(drz_files) > 0:
im = pyfits.open(drz_files[0])
ims[filter] = im
rgb_params['scale_ab'] = scale_ab
slx, sly, rgb_filts, fig = auto_script.field_rgb(root=label, HOME_PATH=None, **rgb_params) # xsize=4, output_dpi=None, HOME_PATH=None, show_ir=False, pl=1, pf=1, scl=1, rgb_scl=[1, 1, 1], ds9=None, force_ir=False, filters=all_filters, add_labels=False, output_format='png', rgb_min=-0.01, xyslice=None, pure_sort=False, verbose=True, force_rgb=None, suffix='.rgb', scale_ab=scale_ab)
if close:
plt.close()
#rgb = np.array(Image.open('{0}.rgb.png'.format(label)))
rgb = plt.imread('{0}.rgb.png'.format(label))
NX = (len(filters)+1)
fig = plt.figure(figsize=[thumb_height*NX, thumb_height])
ax = fig.add_subplot(1, NX, NX)
ax.imshow(rgb, origin='upper', interpolation='nearest')
# ax.text(0.05, 0.95, label, ha='left', va='top', transform=ax.transAxes, fontsize=7, color='w', bbox=dict(facecolor='k', edgecolor='None', alpha=0.8))
# ax.text(0.05, 0.05, ' '.join(rgb_filts), ha='left', va='bottom', transform=ax.transAxes, fontsize=6, color='w', bbox=dict(facecolor='k', edgecolor='None', alpha=0.8))
for i, filter in enumerate(filters):
if filter in ims:
zp_i = utils.calc_header_zeropoint(ims[filter], ext=0)
scl = 10**(-0.4*(zp_i-5-scale_ab))
pixscl = utils.get_wcs_pscale(ims[filter][0].header.copy())
scl *= (0.06/pixscl)**2
img = ims[filter][0].data*scl
image = make_lupton_rgb(img, img, img, stretch=0.1, minimum=-0.01)
ax = fig.add_subplot(1, NX, i+1)
ax.imshow(255-image, origin='lower', interpolation='nearest')
for ax in fig.axes:
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_xticks([])
ax.set_yticks([])
fig.tight_layout(pad=0.1)
# Add labels
#xl, yl = 0.04, 0.98
for i, filter in enumerate(filters):
if filter in ims:
if filter in ['uv', 'visb', 'visr', 'y', 'j', 'h']:
grouped_filters = []
h_i = ims[filter][0].header
for j in range(h_i['NCOMBINE']):
grouped_filters.append(h_i['CFILT{0}'.format(j+1)])
text_label = '+'.join(grouped_filters)
else:
text_label = filter
fig.text((i+xl)/NX, yl, text_label, fontsize=fs,
ha='left', va='top', transform=fig.transFigure,
bbox=dict(facecolor='w', edgecolor='None', alpha=0.9))
fig.text((i+1+xl)/NX, yl, label, ha='left', va='top', transform=fig.transFigure, fontsize=fs, color='w', bbox=dict(facecolor='k', edgecolor='None', alpha=0.8))
fig.text((i+1+0.04)/NX, 1-yl, ' '.join(rgb_filts), ha='left', va='bottom', transform=fig.transFigure, fontsize=fs, color='w', bbox=dict(facecolor='k', edgecolor='None', alpha=0.8))
fig.savefig('{0}.thumb.{1}'.format(label, ext))
if close:
plt.close()
return fig
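# Hedged numeric sketch (editor addition, not from the grizli source): the
# surface-brightness scaling applied to each filter image in show_all_thumbnails
# above.  An image with zeropoint zp is scaled so that a source of AB magnitude
# scale_ab reaches a fixed display level, with a pixel-area term normalizing to
# a 0.06" pixel grid.
def _thumbnail_scale(zp, scale_ab=21., pixscl=0.06):
    scl = 10**(-0.4*(zp - 5 - scale_ab))
    scl *= (0.06/pixscl)**2   # per-pixel flux correction to the 0.06"/pix reference
    return scl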
if __name__ == "__main__":
import sys
if len(sys.argv) < 5:
print('Usage: aws_drizzler.py cp561356 150.208875 1.850241667 40 ')
print(sys.argv)
exit()
# print('xxx')
drizzle_images(label=sys.argv[1], ra=float(sys.argv[2]), dec=float(sys.argv[3]), size=float(sys.argv[4]))
|
gbrammer/grizli
|
grizli/aws/aws_drizzler.py
|
Python
|
mit
| 44,559
|
[
"VisIt"
] |
93e6e2d2700de7aee371282fc58b93d7bd354a3b6b4270847303e30aadf50695
|
"""
==========================================
Statistical functions (:mod:`scipy.stats`)
==========================================
.. module:: scipy.stats
This module contains a large number of probability distributions as
well as a growing library of statistical functions.
Each univariate distribution is an instance of a subclass of `rv_continuous`
(`rv_discrete` for discrete distributions):
.. autosummary::
:toctree: generated/
rv_continuous
rv_discrete
rv_histogram
Continuous distributions
========================
.. autosummary::
:toctree: generated/
alpha -- Alpha
anglit -- Anglit
arcsine -- Arcsine
argus -- Argus
beta -- Beta
betaprime -- Beta Prime
bradford -- Bradford
burr -- Burr (Type III)
burr12 -- Burr (Type XII)
cauchy -- Cauchy
chi -- Chi
chi2 -- Chi-squared
cosine -- Cosine
crystalball -- Crystalball
dgamma -- Double Gamma
dweibull -- Double Weibull
erlang -- Erlang
expon -- Exponential
exponnorm -- Exponentially Modified Normal
exponweib -- Exponentiated Weibull
exponpow -- Exponential Power
   f                 -- F (Snedecor F)
fatiguelife -- Fatigue Life (Birnbaum-Saunders)
fisk -- Fisk
foldcauchy -- Folded Cauchy
foldnorm -- Folded Normal
frechet_r -- Deprecated. Alias for weibull_min
frechet_l -- Deprecated. Alias for weibull_max
genlogistic -- Generalized Logistic
gennorm -- Generalized normal
genpareto -- Generalized Pareto
genexpon -- Generalized Exponential
genextreme -- Generalized Extreme Value
gausshyper -- Gauss Hypergeometric
gamma -- Gamma
gengamma -- Generalized gamma
genhalflogistic -- Generalized Half Logistic
gilbrat -- Gilbrat
gompertz -- Gompertz (Truncated Gumbel)
gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I
gumbel_l -- Left Sided Gumbel, etc.
halfcauchy -- Half Cauchy
halflogistic -- Half Logistic
halfnorm -- Half Normal
halfgennorm -- Generalized Half Normal
hypsecant -- Hyperbolic Secant
invgamma -- Inverse Gamma
invgauss -- Inverse Gaussian
invweibull -- Inverse Weibull
johnsonsb -- Johnson SB
johnsonsu -- Johnson SU
kappa4 -- Kappa 4 parameter
kappa3 -- Kappa 3 parameter
ksone -- Kolmogorov-Smirnov one-sided (no stats)
kstwobign -- Kolmogorov-Smirnov two-sided test for Large N (no stats)
laplace -- Laplace
levy -- Levy
levy_l
levy_stable
logistic -- Logistic
loggamma -- Log-Gamma
loglaplace -- Log-Laplace (Log Double Exponential)
lognorm -- Log-Normal
lomax -- Lomax (Pareto of the second kind)
maxwell -- Maxwell
mielke -- Mielke's Beta-Kappa
moyal -- Moyal
nakagami -- Nakagami
ncx2 -- Non-central chi-squared
ncf -- Non-central F
nct -- Non-central Student's T
norm -- Normal (Gaussian)
norminvgauss -- Normal Inverse Gaussian
pareto -- Pareto
pearson3 -- Pearson type III
powerlaw -- Power-function
powerlognorm -- Power log normal
powernorm -- Power normal
rdist -- R-distribution
reciprocal -- Reciprocal
rayleigh -- Rayleigh
rice -- Rice
recipinvgauss -- Reciprocal Inverse Gaussian
semicircular -- Semicircular
skewnorm -- Skew normal
t -- Student's T
trapz -- Trapezoidal
triang -- Triangular
truncexpon -- Truncated Exponential
truncnorm -- Truncated Normal
tukeylambda -- Tukey-Lambda
uniform -- Uniform
vonmises -- Von-Mises (Circular)
vonmises_line -- Von-Mises (Line)
wald -- Wald
weibull_min -- Minimum Weibull (see Frechet)
weibull_max -- Maximum Weibull (see Frechet)
wrapcauchy -- Wrapped Cauchy
Multivariate distributions
==========================
.. autosummary::
:toctree: generated/
multivariate_normal -- Multivariate normal distribution
matrix_normal -- Matrix normal distribution
dirichlet -- Dirichlet
wishart -- Wishart
invwishart -- Inverse Wishart
multinomial -- Multinomial distribution
special_ortho_group -- SO(N) group
ortho_group -- O(N) group
unitary_group -- U(N) group
random_correlation -- random correlation matrices
Discrete distributions
======================
.. autosummary::
:toctree: generated/
bernoulli -- Bernoulli
binom -- Binomial
boltzmann -- Boltzmann (Truncated Discrete Exponential)
dlaplace -- Discrete Laplacian
geom -- Geometric
hypergeom -- Hypergeometric
logser -- Logarithmic (Log-Series, Series)
nbinom -- Negative Binomial
planck -- Planck (Discrete Exponential)
poisson -- Poisson
randint -- Discrete Uniform
skellam -- Skellam
zipf -- Zipf
Statistical functions
=====================
Several of these functions have a similar version in scipy.stats.mstats
which works for masked arrays.
.. autosummary::
:toctree: generated/
describe -- Descriptive statistics
gmean -- Geometric mean
hmean -- Harmonic mean
kurtosis -- Fisher or Pearson kurtosis
kurtosistest --
mode -- Modal value
moment -- Central moment
normaltest --
skew -- Skewness
skewtest --
kstat --
kstatvar --
tmean -- Truncated arithmetic mean
tvar -- Truncated variance
tmin --
tmax --
tstd --
tsem --
variation -- Coefficient of variation
find_repeats
trim_mean
.. autosummary::
:toctree: generated/
cumfreq
itemfreq
percentileofscore
scoreatpercentile
relfreq
.. autosummary::
:toctree: generated/
binned_statistic -- Compute a binned statistic for a set of data.
binned_statistic_2d -- Compute a 2-D binned statistic for a set of data.
binned_statistic_dd -- Compute a d-D binned statistic for a set of data.
.. autosummary::
:toctree: generated/
obrientransform
bayes_mvs
mvsdist
sem
zmap
zscore
iqr
.. autosummary::
:toctree: generated/
sigmaclip
trimboth
trim1
.. autosummary::
:toctree: generated/
f_oneway
pearsonr
spearmanr
pointbiserialr
kendalltau
weightedtau
linregress
theilslopes
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
kstest
chisquare
power_divergence
ks_2samp
mannwhitneyu
tiecorrect
rankdata
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
jarque_bera
.. autosummary::
:toctree: generated/
ansari
bartlett
levene
shapiro
anderson
anderson_ksamp
binom_test
fligner
median_test
mood
.. autosummary::
:toctree: generated/
boxcox
boxcox_normmax
boxcox_llf
entropy
.. autosummary::
:toctree: generated/
wasserstein_distance
energy_distance
Circular statistical functions
==============================
.. autosummary::
:toctree: generated/
circmean
circvar
circstd
Contingency table functions
===========================
.. autosummary::
:toctree: generated/
chi2_contingency
contingency.expected_freq
contingency.margins
fisher_exact
Plot-tests
==========
.. autosummary::
:toctree: generated/
ppcc_max
ppcc_plot
probplot
boxcox_normplot
Masked statistics functions
===========================
.. toctree::
stats.mstats
Univariate and multivariate kernel density estimation (:mod:`scipy.stats.kde`)
==============================================================================
.. autosummary::
:toctree: generated/
gaussian_kde
For many more stat related functions install the software R and the
interface package rpy.
"""
from __future__ import division, print_function, absolute_import
from .stats import *
from .distributions import *
from .morestats import *
from ._binned_statistic import *
from .kde import gaussian_kde
from . import mstats
from .contingency import chi2_contingency
from ._multivariate import *
__all__ = [s for s in dir() if not s.startswith("_")]  # Keep only names that do not start with an underscore.
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
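# Hedged usage sketch (editor addition, not part of SciPy): how the distribution
# objects and test functions listed in the module docstring are typically
# called.  Wrapped in a function so that importing this module stays free of
# side effects.
def _example_usage():
    from scipy import stats
    sample = stats.norm.rvs(loc=0., scale=1., size=200, random_state=0)
    loc, scale = stats.norm.fit(sample)                 # fit a continuous distribution
    statistic, pvalue = stats.ttest_1samp(sample, 0.)   # one of the listed test functions
    return loc, scale, statistic, pvalue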
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/scipy/stats/__init__.py
|
Python
|
gpl-3.0
| 9,284
|
[
"Gaussian"
] |
652141394d833d480cd345893b5813c22c21ddd4b43a06f57bfa5aa1162fe0dc
|
import unittest
from bok_choy.web_app_test import WebAppTest
from pages import GitHubSearchPage, GitHubSearchResultsPage
class TestGitHub(WebAppTest):
"""
Tests for the GitHub site.
"""
def setUp(self):
"""
Instantiate the page object.
"""
super().setUp()
self.github_search_page = GitHubSearchPage(self.browser)
self.github_results_page = GitHubSearchResultsPage(self.browser)
def test_page_existence(self):
"""
Make sure that the page is accessible.
"""
self.github_search_page.visit()
def test_search(self):
"""
Make sure that you can search for something.
"""
self.github_search_page.visit().search_for_terms('user:openedx repo:edx-platform')
search_results = self.github_results_page.search_results
assert 'openedx/edx-platform' in search_results
assert search_results[0] == 'openedx/edx-platform'
if __name__ == '__main__':
unittest.main()
|
edx/bok-choy
|
docs/code/round_3/test_search.py
|
Python
|
apache-2.0
| 1,017
|
[
"VisIt"
] |
97d3911d0ae969afa3f9369ea384132d6717f740a8a9689e39f439eef9016bb5
|
'''
Created on Apr 19, 2013
@author: bill
'''
import unittest
from pylab import *
from brian import *
import sys
sys.path.append('../../')
from MLI_PKJ_net import *
defaultclock.dt = .25*ms
class MLITest(unittest.TestCase):
def test_model_equivalency(self):
T = 200*msecond
# spike train meant to cause neurons to spike
spikes = rand(int(T/defaultclock.dt))
spikes[spikes>.95] = 1.
spikes[spikes<=.95] = 0.
# Yamazaki implementation
self.YMLI = YamazakiNeuron(-53.,14.6,-68.,0.,-82.,-82.,1.6,1.3,4.,
50.,array([1.]),array([1.0]),
array([.8]),array([4.6]),2.5,0.,defaultclock.dt/ms)
conn_weight_gogr = 1.
# run Yamazaki implementation
MLI_spikes = []
MLI_V = [self.YMLI.u]
for s in spikes:
MLI_spikes.append(self.YMLI.update(s,0,conn_weight_gogr,0.,reset_V=False))
MLI_V.append(self.YMLI.u)
# BRIAN Implementation
MLI = MLIGroup(1)
MLI.V = MLI.El
MLI.gahp = 0. * nsiemens
# run BRIAN Implementation
GR = SpikeGeneratorGroup(1,[(0,t*defaultclock.dt) for t in nonzero(spikes)[0]])
S_GR_MLI = Synapses(GR,MLI,model='w:1',pre='g_ampa+=MLI.g_ampa_*conn_weight_gogr')
S_GR_MLI.connect_one_to_one()
M_V = StateMonitor(MLI,'V',record=0)
run(200*ms)
#M_V.plot()
#plot(M_V.times,array(MLI_V[:-1])*mV,color='g')
#show()
self.assertAlmostEqual(norm(array(MLI_V)[:-1] - M_V[0]/mV), 0., 10)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
blennon/MLI_PKJ_net
|
MLI_PKJ_net/tests/MLI_test.py
|
Python
|
mit
| 1,768
|
[
"Brian"
] |
283a62501a26dd9d79a13c6ae8380e280daf3c874dff2f4fd78a56f4fcc81bf0
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create a rendering window and renderer
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.StereoCapableWindowOn()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
reader = vtk.vtkGenericEnSightReader()
# Make sure all algorithms use the composite data pipeline
cdp = vtk.vtkCompositeDataPipeline()
reader.SetDefaultExecutivePrototype(cdp)
reader.SetCaseFileName("" + str(VTK_DATA_ROOT) + "/Data/EnSight/blow2_bin.case")
reader.SetTimeValue(1)
geom = vtk.vtkGeometryFilter()
geom.SetInputConnection(reader.GetOutputPort())
mapper = vtk.vtkHierarchicalPolyDataMapper()
mapper.SetInputConnection(geom.GetOutputPort())
mapper.SetColorModeToMapScalars()
mapper.SetScalarModeToUsePointFieldData()
mapper.ColorByArrayComponent("displacement",0)
mapper.SetScalarRange(0,2.08)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# assign our actor to the renderer
ren1.AddActor(actor)
# enable user interface interactor
iren.Initialize()
ren1.GetActiveCamera().SetPosition(99.3932,17.6571,-22.6071)
ren1.GetActiveCamera().SetFocalPoint(3.5,12,1.5)
ren1.GetActiveCamera().SetViewAngle(30)
ren1.GetActiveCamera().SetViewUp(0.239617,-0.01054,0.97081)
ren1.ResetCameraClippingRange()
renWin.Render()
# prevent the tk window from showing up then start the event loop
reader.SetDefaultExecutivePrototype(None)
# --- end of script --
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/IO/EnSight/Testing/Python/EnSightBlow2Bin.py
|
Python
|
gpl-3.0
| 1,508
|
[
"VTK"
] |
d0af4e101782806b3a057dc5aaf67256dbb85db379a0b2a6b3e1ae791bd17d80
|
# Threshold functions
import os
import cv2
import math
import numpy as np
from matplotlib import pyplot as plt
from plantcv.plantcv import fatal_error
from plantcv.plantcv import params
from skimage.feature import greycomatrix, greycoprops
from scipy.ndimage import generic_filter
from plantcv.plantcv._debug import _debug
# Binary threshold
def binary(gray_img, threshold, max_value, object_type="light"):
"""Creates a binary image from a grayscale image based on the threshold value.
Inputs:
gray_img = Grayscale image data
threshold = Threshold value (0-255)
max_value = value to apply above threshold (usually 255 = white)
object_type = "light" or "dark" (default: "light")
- If object is lighter than the background then standard thresholding is done
- If object is darker than the background then inverse thresholding is done
Returns:
bin_img = Thresholded, binary image
:param gray_img: numpy.ndarray
:param threshold: int
:param max_value: int
:param object_type: str
:return bin_img: numpy.ndarray
"""
# Set the threshold method
threshold_method = ""
if object_type.upper() == "LIGHT":
threshold_method = cv2.THRESH_BINARY
elif object_type.upper() == "DARK":
threshold_method = cv2.THRESH_BINARY_INV
else:
fatal_error('Object type ' + str(object_type) + ' is not "light" or "dark"!')
params.device += 1
# Threshold the image
bin_img = _call_threshold(gray_img, threshold, max_value, threshold_method, "_binary_threshold_")
return bin_img
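# Hedged usage sketch (editor addition, not part of PlantCV): calling the binary
# threshold above on a small synthetic array; object_type="light" keeps pixels
# brighter than the threshold, while "dark" would invert the comparison.
def _binary_example():
    gray = np.full((10, 10), 30, dtype=np.uint8)
    gray[2:5, 2:5] = 200                # bright square on a dark background
    return binary(gray_img=gray, threshold=128, max_value=255, object_type="light")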
# Gaussian adaptive threshold
def gaussian(gray_img, max_value, object_type="light"):
"""Creates a binary image from a grayscale image based on the Gaussian adaptive threshold method.
Inputs:
gray_img = Grayscale image data
max_value = value to apply above threshold (usually 255 = white)
object_type = "light" or "dark" (default: "light")
- If object is lighter than the background then standard thresholding is done
- If object is darker than the background then inverse thresholding is done
Returns:
bin_img = Thresholded, binary image
:param gray_img: numpy.ndarray
:param max_value: int
:param object_type: str
:return bin_img: numpy.ndarray
"""
# Set the threshold method
threshold_method = ""
if object_type.upper() == "LIGHT":
threshold_method = cv2.THRESH_BINARY
elif object_type.upper() == "DARK":
threshold_method = cv2.THRESH_BINARY_INV
else:
fatal_error('Object type ' + str(object_type) + ' is not "light" or "dark"!')
params.device += 1
bin_img = _call_adaptive_threshold(gray_img, max_value, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, threshold_method,
"_gaussian_threshold_")
return bin_img
# Mean adaptive threshold
def mean(gray_img, max_value, object_type="light"):
"""Creates a binary image from a grayscale image based on the mean adaptive threshold method.
Inputs:
gray_img = Grayscale image data
max_value = value to apply above threshold (usually 255 = white)
object_type = "light" or "dark" (default: "light")
- If object is lighter than the background then standard thresholding is done
- If object is darker than the background then inverse thresholding is done
Returns:
bin_img = Thresholded, binary image
:param gray_img: numpy.ndarray
:param max_value: int
:param object_type: str
:return bin_img: numpy.ndarray
"""
# Set the threshold method
threshold_method = ""
if object_type.upper() == "LIGHT":
threshold_method = cv2.THRESH_BINARY
elif object_type.upper() == "DARK":
threshold_method = cv2.THRESH_BINARY_INV
else:
fatal_error('Object type ' + str(object_type) + ' is not "light" or "dark"!')
params.device += 1
bin_img = _call_adaptive_threshold(gray_img, max_value, cv2.ADAPTIVE_THRESH_MEAN_C, threshold_method,
"_mean_threshold_")
return bin_img
# Otsu autothreshold
def otsu(gray_img, max_value, object_type="light"):
"""Creates a binary image from a grayscale image using Otsu's thresholding.
Inputs:
gray_img = Grayscale image data
max_value = value to apply above threshold (usually 255 = white)
object_type = "light" or "dark" (default: "light")
- If object is lighter than the background then standard thresholding is done
- If object is darker than the background then inverse thresholding is done
Returns:
bin_img = Thresholded, binary image
:param gray_img: numpy.ndarray
:param max_value: int
:param object_type: str
:return bin_img: numpy.ndarray
"""
# Set the threshold method
threshold_method = ""
if object_type.upper() == "LIGHT":
threshold_method = cv2.THRESH_BINARY + cv2.THRESH_OTSU
elif object_type.upper() == "DARK":
threshold_method = cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU
else:
fatal_error('Object type ' + str(object_type) + ' is not "light" or "dark"!')
params.device += 1
# Threshold the image
bin_img = _call_threshold(gray_img, 0, max_value, threshold_method, "_otsu_threshold_")
return bin_img
# Triangle autothreshold
def triangle(gray_img, max_value, object_type="light", xstep=1):
"""Creates a binary image from a grayscale image using Zack et al.'s (1977) thresholding.
Inputs:
gray_img = Grayscale image data
max_value = value to apply above threshold (usually 255 = white)
object_type = "light" or "dark" (default: "light")
- If object is lighter than the background then standard thresholding is done
- If object is darker than the background then inverse thresholding is done
    xstep        = value to move along x-axis to determine the points from which to calculate distance
                   (recommended to start at 1 and change if needed)
Returns:
bin_img = Thresholded, binary image
:param gray_img: numpy.ndarray
:param max_value: int
:param object_type: str
:param xstep: int
:return bin_img: numpy.ndarray
"""
# Calculate automatic threshold value based on triangle algorithm
hist = cv2.calcHist([gray_img], [0], None, [256], [0, 255])
# Make histogram one array
newhist = []
for item in hist:
newhist.extend(item)
# Detect peaks
show = False
if params.debug == "plot":
show = True
ind = _detect_peaks(newhist, mph=None, mpd=1, show=show)
# Find point corresponding to highest peak
# Find intensity value (y) of highest peak
max_peak_int = max(list(newhist[i] for i in ind))
# Find value (x) of highest peak
max_peak = [i for i, x in enumerate(newhist) if x == max(newhist)]
# Combine x,y
max_peak_xy = [max_peak[0], max_peak_int]
# Find final point at end of long tail
end_x = len(newhist) - 1
end_y = newhist[end_x]
end_xy = [end_x, end_y]
# Define the known points
points = [max_peak_xy, end_xy]
x_coords, y_coords = zip(*points)
# Get threshold value
peaks = []
dists = []
for i in range(x_coords[0], x_coords[1], xstep):
distance = (((x_coords[1] - x_coords[0]) * (y_coords[0] - hist[i])) -
((x_coords[0] - i) * (y_coords[1] - y_coords[0]))) / math.sqrt(
(float(x_coords[1]) - float(x_coords[0])) *
(float(x_coords[1]) - float(x_coords[0])) +
((float(y_coords[1]) - float(y_coords[0])) *
(float(y_coords[1]) - float(y_coords[0]))))
peaks.append(i)
dists.append(distance)
autothresh = [peaks[x] for x in [i for i, x in enumerate(list(dists)) if x == max(list(dists))]]
autothreshval = autothresh[0]
# Set the threshold method
threshold_method = ""
if object_type.upper() == "LIGHT":
threshold_method = cv2.THRESH_BINARY + cv2.THRESH_OTSU
elif object_type.upper() == "DARK":
threshold_method = cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU
else:
fatal_error('Object type ' + str(object_type) + ' is not "light" or "dark"!')
params.device += 1
# Threshold the image
bin_img = _call_threshold(gray_img, autothreshval, max_value, threshold_method, "_triangle_threshold_")
# Additional figures created by this method, if debug is on
if params.debug is not None:
if params.debug == 'print':
_, ax = plt.subplots()
ax.plot(hist)
ax.set(title='Threshold value = {t}'.format(t=autothreshval))
ax.axis([0, 256, 0, max(hist)])
ax.grid(True)
fig_name_hist = os.path.join(params.debug_outdir,
str(params.device) + '_triangle_thresh_hist_' + str(autothreshval) + ".png")
# write the figure to current directory
plt.savefig(fig_name_hist, dpi=params.dpi)
# close pyplot plotting window
plt.clf()
elif params.debug == 'plot':
print('Threshold value = {t}'.format(t=autothreshval))
_, ax = plt.subplots()
ax.plot(hist)
ax.axis([0, 256, 0, max(hist)])
ax.grid(True)
plt.show()
return bin_img
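# Hedged sketch (editor addition, not part of PlantCV): the geometric quantity
# maximized by the triangle threshold above.  A line is drawn from the
# histogram's highest peak to the end of its tail, each bin's scaled distance
# from that line is computed, and the bin with the largest distance becomes the
# threshold.
def _triangle_distance(peak_xy, end_xy, i, hist_i):
    (x0, y0), (x1, y1) = peak_xy, end_xy
    num = (x1 - x0) * (y0 - hist_i) - (x0 - i) * (y1 - y0)
    den = math.sqrt((x1 - x0)**2 + (y1 - y0)**2)
    return num / den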
def texture(gray_img, ksize, threshold, offset=3, texture_method='dissimilarity', borders='nearest',
max_value=255):
"""Creates a binary image from a grayscale image using skimage texture calculation for thresholding.
This function is quite slow.
Inputs:
gray_img = Grayscale image data
ksize = Kernel size for texture measure calculation
threshold = Threshold value (0-255)
offset = Distance offsets
texture_method = Feature of a grey level co-occurrence matrix, either
'contrast', 'dissimilarity', 'homogeneity', 'ASM', 'energy',
                     or 'correlation'. For equations of different features see
scikit-image.
borders = How the array borders are handled, either 'reflect',
'constant', 'nearest', 'mirror', or 'wrap'
max_value = Value to apply above threshold (usually 255 = white)
Returns:
bin_img = Thresholded, binary image
:param gray_img: numpy.ndarray
:param ksize: int
:param threshold: int
:param offset: int
:param texture_method: str
:param borders: str
:param max_value: int
:return bin_img: numpy.ndarray
"""
# Function that calculates the texture of a kernel
def calc_texture(inputs):
inputs = np.reshape(a=inputs, newshape=[ksize, ksize])
inputs = inputs.astype(np.uint8)
# Greycomatrix takes image, distance offset, angles (in radians), symmetric, and normed
# http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.greycomatrix
glcm = greycomatrix(inputs, [offset], [0], 256, symmetric=True, normed=True)
diss = greycoprops(glcm, texture_method)[0, 0]
return diss
# Make an array the same size as the original image
output = np.zeros(gray_img.shape, dtype=gray_img.dtype)
# Apply the texture function over the whole image
generic_filter(gray_img, calc_texture, size=ksize, output=output, mode=borders)
# Threshold so higher texture measurements stand out
bin_img = binary(gray_img=output, threshold=threshold, max_value=max_value, object_type='light')
_debug(visual=bin_img, filename=os.path.join(params.debug_outdir, str(params.device) + "_texture_mask.png"))
return bin_img
def custom_range(img, lower_thresh, upper_thresh, channel='gray'):
"""Creates a thresholded image and mask from an RGB image and threshold values.
Inputs:
img = RGB or grayscale image data
lower_thresh = List of lower threshold values (0-255)
upper_thresh = List of upper threshold values (0-255)
channel = Color-space channels of interest (RGB, HSV, LAB, or gray)
Returns:
mask = Mask, binary image
masked_img = Masked image, keeping the part of image of interest
:param img: numpy.ndarray
:param lower_thresh: list
:param upper_thresh: list
:param channel: str
:return mask: numpy.ndarray
:return masked_img: numpy.ndarray
"""
if channel.upper() == 'HSV':
# Check threshold inputs
if not (len(lower_thresh) == 3 and len(upper_thresh) == 3):
fatal_error("If using the HSV colorspace, 3 thresholds are needed for both lower_thresh and " +
"upper_thresh. If thresholding isn't needed for a channel, set lower_thresh=0 and " +
"upper_thresh=255")
# Convert the RGB image to HSV colorspace
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# Separate channels
hue = hsv_img[:, :, 0]
sat = hsv_img[:, :, 1]
value = hsv_img[:, :, 2]
# Make a mask for each channel
h_mask = cv2.inRange(hue, lower_thresh[0], upper_thresh[0])
s_mask = cv2.inRange(sat, lower_thresh[1], upper_thresh[1])
v_mask = cv2.inRange(value, lower_thresh[2], upper_thresh[2])
# Apply the masks to the image
result = cv2.bitwise_and(img, img, mask=h_mask)
result = cv2.bitwise_and(result, result, mask=s_mask)
masked_img = cv2.bitwise_and(result, result, mask=v_mask)
# Combine masks
mask = cv2.bitwise_and(s_mask, h_mask)
mask = cv2.bitwise_and(mask, v_mask)
elif channel.upper() == 'RGB':
# Check threshold inputs
if not (len(lower_thresh) == 3 and len(upper_thresh) == 3):
fatal_error("If using the RGB colorspace, 3 thresholds are needed for both lower_thresh and " +
"upper_thresh. If thresholding isn't needed for a channel, set lower_thresh=0 and " +
"upper_thresh=255")
# Separate channels (pcv.readimage reads RGB images in as BGR)
blue = img[:, :, 0]
green = img[:, :, 1]
red = img[:, :, 2]
# Make a mask for each channel
b_mask = cv2.inRange(blue, lower_thresh[2], upper_thresh[2])
g_mask = cv2.inRange(green, lower_thresh[1], upper_thresh[1])
r_mask = cv2.inRange(red, lower_thresh[0], upper_thresh[0])
# Apply the masks to the image
result = cv2.bitwise_and(img, img, mask=b_mask)
result = cv2.bitwise_and(result, result, mask=g_mask)
masked_img = cv2.bitwise_and(result, result, mask=r_mask)
# Combine masks
mask = cv2.bitwise_and(b_mask, g_mask)
mask = cv2.bitwise_and(mask, r_mask)
elif channel.upper() == 'LAB':
# Check threshold inputs
if not (len(lower_thresh) == 3 and len(upper_thresh) == 3):
fatal_error("If using the LAB colorspace, 3 thresholds are needed for both lower_thresh and " +
"upper_thresh. If thresholding isn't needed for a channel, set lower_thresh=0 and " +
"upper_thresh=255")
# Convert the RGB image to LAB colorspace
lab_img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
# Separate channels (pcv.readimage reads RGB images in as BGR)
lightness = lab_img[:, :, 0]
green_magenta = lab_img[:, :, 1]
blue_yellow = lab_img[:, :, 2]
# Make a mask for each channel
l_mask = cv2.inRange(lightness, lower_thresh[0], upper_thresh[0])
gm_mask = cv2.inRange(green_magenta, lower_thresh[1], upper_thresh[1])
by_mask = cv2.inRange(blue_yellow, lower_thresh[2], upper_thresh[2])
# Apply the masks to the image
result = cv2.bitwise_and(img, img, mask=l_mask)
result = cv2.bitwise_and(result, result, mask=gm_mask)
masked_img = cv2.bitwise_and(result, result, mask=by_mask)
# Combine masks
mask = cv2.bitwise_and(l_mask, gm_mask)
mask = cv2.bitwise_and(mask, by_mask)
elif channel.upper() in ('GRAY', 'GREY'):
# Check threshold input
if not (len(lower_thresh) == 1 and len(upper_thresh) == 1):
fatal_error("If useing a grayscale colorspace, 1 threshold is needed for both the " +
"lower_thresh and upper_thresh.")
if len(np.shape(img)) == 3:
# Convert RGB image to grayscale colorspace
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
gray_img = img
# Make a mask
mask = cv2.inRange(gray_img, lower_thresh[0], upper_thresh[0])
# Apply the masks to the image
masked_img = cv2.bitwise_and(img, img, mask=mask)
else:
        fatal_error(str(channel) + " is not a valid colorspace. Channel must be 'RGB', 'HSV', 'LAB', or 'gray'.")
# Auto-increment the device counter
# Print or plot the binary image if debug is on
_debug(visual=masked_img, filename=os.path.join(params.debug_outdir,
str(params.device) + channel + 'custom_thresh.png'))
_debug(visual=mask, filename=os.path.join(params.debug_outdir,
str(params.device) + channel + 'custom_thresh_mask.png'))
return mask, masked_img
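# Hedged usage sketch (editor addition, not part of PlantCV): selecting roughly
# green pixels with the HSV branch of custom_range above; the threshold lists
# are illustrative values, not recommended defaults.
def _custom_range_example(rgb_img):
    mask, masked = custom_range(img=rgb_img,
                                lower_thresh=[35, 40, 40],    # H, S, V lower bounds
                                upper_thresh=[85, 255, 255],  # H, S, V upper bounds
                                channel='HSV')
    return mask, masked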
# Internal method for calling the OpenCV threshold function to reduce code duplication
def _call_threshold(gray_img, threshold, max_value, threshold_method, method_name):
# Threshold the image
ret, bin_img = cv2.threshold(gray_img, threshold, max_value, threshold_method)
if bin_img.dtype != 'uint16':
bin_img = np.uint8(bin_img)
# Print or plot the binary image if debug is on
_debug(visual=bin_img, filename=os.path.join(params.debug_outdir,
str(params.device) + method_name + str(threshold) + '.png'))
return bin_img
# Internal method for calling the OpenCV adaptiveThreshold function to reduce code duplication
def _call_adaptive_threshold(gray_img, max_value, adaptive_method, threshold_method, method_name):
# Threshold the image
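    # cv2.adaptiveThreshold derives a per-pixel threshold from an 11x11
    # neighborhood (blockSize=11) minus a constant C=2; only max_value and the
    # adaptive/threshold methods are exposed by the wrapper functions above.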
bin_img = cv2.adaptiveThreshold(gray_img, max_value, adaptive_method, threshold_method, 11, 2)
# Print or plot the binary image if debug is on
_debug(visual=bin_img, filename=os.path.join(params.debug_outdir, str(params.device) + method_name + '.png'))
return bin_img
# Internal method for detecting peaks for the triangle autothreshold method
def _detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising', kpsh=False, valley=False, show=False, ax=None):
"""Marcos Duarte, https://github.com/demotu/BMC; version 1.0.4; license MIT
Detect peaks in data based on their amplitude and other features.
Parameters
----------
x : 1D array_like
data.
mph : {None, number}, optional (default = None)
detect peaks that are greater than minimum peak height.
mpd : positive integer, optional (default = 1)
detect peaks that are at least separated by minimum peak distance (in
number of data).
threshold : positive number, optional (default = 0)
detect peaks (valleys) that are greater (smaller) than `threshold`
in relation to their immediate neighbors.
edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
for a flat peak, keep only the rising edge ('rising'), only the
falling edge ('falling'), both edges ('both'), or don't detect a
flat peak (None).
kpsh : bool, optional (default = False)
keep peaks with same height even if they are closer than `mpd`.
valley : bool, optional (default = False)
if True (1), detect valleys (local minima) instead of peaks.
show : bool, optional (default = False)
if True (1), plot data in matplotlib figure.
ax : a matplotlib.axes.Axes instance, optional (default = None).
Returns
-------
ind : 1D array_like
indices of the peaks in `x`.
Notes
-----
The detection of valleys instead of peaks is performed internally by simply
negating the data: `ind_valleys = detect_peaks(-x)`
The function can handle NaN's
See this IPython Notebook [1]_.
References
----------
.. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb
Examples
--------
from detect_peaks import detect_peaks
x = np.random.randn(100)
x[60:81] = np.nan
# detect all peaks and plot data
ind = detect_peaks(x, show=True)
print(ind)
x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5
# set minimum peak height = 0 and minimum peak distance = 20
detect_peaks(x, mph=0, mpd=20, show=True)
x = [0, 1, 0, 2, 0, 3, 0, 2, 0, 1, 0]
# set minimum peak distance = 2
detect_peaks(x, mpd=2, show=True)
x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5
# detection of valleys instead of peaks
detect_peaks(x, mph=0, mpd=20, valley=True, show=True)
x = [0, 1, 1, 0, 1, 1, 0]
# detect both edges
detect_peaks(x, edge='both', show=True)
x = [-2, 1, -2, 2, 1, 1, 3, 0]
# set threshold = 2
detect_peaks(x, threshold = 2, show=True)
"""
x = np.atleast_1d(x).astype('float64')
# It is always the case that x.size=256 since 256 hardcoded in line 186 ->
# cv2.calcHist([gray_img], [0], None, [256], [0, 255])
# if x.size < 3:
# return np.array([], dtype=int)
# # Where this function is used it is hardcoded to use the default valley=False so this will never be used
# if valley:
# x = -x
# find indices of all peaks
dx = x[1:] - x[:-1]
# handle NaN's
# indnan = np.where(np.isnan(x))[0]
# x will never contain NaN since calcHist will never return NaN
# if indnan.size:
# x[indnan] = np.inf
# dx[np.where(np.isnan(dx))[0]] = np.inf
ine, ire, ife = np.array([[], [], []], dtype=int)
if edge.lower() in ['rising', 'both']:
ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
ind = np.unique(np.hstack((ine, ire, ife)))
# x will never contain NaN since calcHist will never return NaN
# if ind.size and indnan.size:
# # NaN's and values close to NaN's cannot be peaks
# ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan - 1, indnan + 1))), invert=True)]
# first and last values of x cannot be peaks
# if ind.size and ind[0] == 0:
# ind = ind[1:]
# if ind.size and ind[-1] == x.size - 1:
# ind = ind[:-1]
# We think the above code will never be reached given some of the hardcoded properties used
# # Where this function is used has hardcoded mph=None so this will never be used
# # remove peaks < minimum peak height
# if ind.size and mph is not None:
# ind = ind[x[ind] >= mph]
# remove peaks - neighbors < threshold
if show:
# x will never contain NaN since calcHist will never return NaN
# if indnan.size:
# x[indnan] = np.nan
# # Where this function is used it is hardcoded to use the default valley=False so this will never be used
# if valley:
# x = -x
_plot(x, mph, mpd, threshold, edge, valley, ax, ind)
return ind
# Internal plotting function for the triangle autothreshold method
def _plot(x, mph, mpd, threshold, edge, valley, ax, ind):
"""Plot results of the detect_peaks function, see its help."""
if ax is None:
_, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(x, 'b', lw=1)
if ind.size:
label = 'valley' if valley else 'peak'
label = label + 's' if ind.size > 1 else label
ax.plot(ind, x[ind], '+', mfc=None, mec='r', mew=2, ms=8,
label='%d %s' % (ind.size, label))
ax.legend(loc='best', framealpha=.5, numpoints=1)
ax.set_xlim(-.02 * x.size, x.size * 1.02 - 1)
ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()
yrange = ymax - ymin if ymax > ymin else 1
ax.set_ylim(ymin - 0.1 * yrange, ymax + 0.1 * yrange)
ax.set_xlabel('Data #', fontsize=14)
ax.set_ylabel('Amplitude', fontsize=14)
mode = 'Valley detection' if valley else 'Peak detection'
ax.set_title("%s (mph=%s, mpd=%d, threshold=%s, edge='%s')"
% (mode, str(mph), mpd, str(threshold), edge))
plt.show()
def saturation(rgb_img, threshold=255, channel="any"):
"""Return a mask filtering out saturated pixels.
Inputs:
rgb_img = RGB image
threshold = value for threshold, above which is considered saturated
channel = how many channels must be saturated for the pixel to be masked out ("any", "all")
Returns:
masked_img = A binary image with the saturated regions blacked out.
:param rgb_img: np.ndarray
:param threshold: int
:param channel: str
:return masked_img: np.ndarray
"""
# Mask red, green, and blue saturation separately
b, g, r = cv2.split(rgb_img)
b_saturated = cv2.inRange(b, threshold, 255)
g_saturated = cv2.inRange(g, threshold, 255)
r_saturated = cv2.inRange(r, threshold, 255)
# Combine channel masks
if channel.lower() == "any":
# Consider a pixel saturated if any channel is saturated
saturated = cv2.bitwise_or(b_saturated, g_saturated)
saturated = cv2.bitwise_or(saturated, r_saturated)
elif channel.lower() == "all":
# Consider a pixel saturated only if all channels are saturated
saturated = cv2.bitwise_and(b_saturated, g_saturated)
saturated = cv2.bitwise_and(saturated, r_saturated)
else:
fatal_error(str(channel) + " is not a valid option. Channel must be either 'any', or 'all'.")
# Invert "saturated" before returning, so saturated = black
bin_img = cv2.bitwise_not(saturated)
    _debug(visual=bin_img, filename=os.path.join(params.debug_outdir, str(params.device) + '_saturation_threshold.png'))
return bin_img
def mask_bad(float_img, bad_type='native'):
""" Create a mask with desired "bad" pixels of the input floaat image marked.
Inputs:
float_img = image represented by an nd-array (data type: float). Most probably, it is the result of some
calculation based on the original image. So the datatype is float, and it is possible to have some
"bad" values, i.e. nan and/or inf
bad_type = definition of "bad" type, can be 'nan', 'inf' or 'native'
Returns:
mask = A mask indicating the locations of "bad" pixels
:param float_img: numpy.ndarray
:param bad_type: str
:return mask: numpy.ndarray
"""
size_img = np.shape(float_img)
if len(size_img) != 2:
fatal_error('Input image is not a single channel image!')
mask = np.zeros(size_img, dtype='uint8')
idx_nan, idy_nan = np.where(np.isnan(float_img) == 1)
idx_inf, idy_inf = np.where(np.isinf(float_img) == 1)
# neither nan nor inf exists in the image, print out a message and the mask would just be all zero
if len(idx_nan) == 0 and len(idx_inf) == 0:
mask = mask
print('Neither nan nor inf appears in the current image.')
# at least one of the "bad" exists
# desired bad to mark is "native"
elif bad_type.lower() == 'native':
mask[idx_nan, idy_nan] = 255
mask[idx_inf, idy_inf] = 255
elif bad_type.lower() == 'nan' and len(idx_nan) >= 1:
mask[idx_nan, idy_nan] = 255
elif bad_type.lower() == 'inf' and len(idx_inf) >= 1:
mask[idx_inf, idy_inf] = 255
# "bad" exists but not the user desired bad type, return the all-zero mask
else:
mask = mask
print('{} does not appear in the current image.'.format(bad_type.lower()))
_debug(visual=mask, filename=os.path.join(params.debug_outdir, str(params.device) + "_bad_mask.png"))
return mask
|
danforthcenter/plantcv
|
plantcv/plantcv/threshold/threshold_methods.py
|
Python
|
mit
| 28,133
|
[
"Gaussian"
] |
40cb99ea09aaa5639e374560e3f487e4e48345aac9298cc0eff36b936398c1b4
|
# $Id$
#
# Copyright (C) 2003-2008 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for the BuildComposite functionality
"""
import unittest, os
import io
from rdkit.six.moves import cPickle as pickle
from rdkit.six import cmp
from rdkit import RDConfig
from rdkit.ML import BuildComposite
from rdkit.ML import ScreenComposite
from rdkit.Dbase.DbConnection import DbConnect
class TestCase(unittest.TestCase):
def setUp(self):
#print '\n%s: '%self.shortDescription(),
self.baseDir = os.path.join(RDConfig.RDCodeDir, 'ML', 'test_data')
self.dbName = RDConfig.RDTestDatabase
self.details = BuildComposite.SetDefaults()
self.details.dbName = self.dbName
self.details.dbUser = RDConfig.defaultDBUser
self.details.dbPassword = RDConfig.defaultDBPassword
def _init(self, refCompos, copyBounds=0):
BuildComposite._verbose = 0
conn = DbConnect(self.details.dbName, self.details.tableName)
cols = [x.upper() for x in conn.GetColumnNames()]
cDescs = [x.upper() for x in refCompos.GetDescriptorNames()]
self.assertEqual(cols, cDescs)
self.details.nModels = 10
self.details.lockRandom = 1
self.details.randomSeed = refCompos._randomSeed
self.details.splitFrac = refCompos._splitFrac
if self.details.splitFrac:
self.details.splitRun = 1
else:
self.details.splitRun = 0
if not copyBounds:
self.details.qBounds = [0] * len(cols)
else:
self.details.qBounds = refCompos.GetQuantBounds()[0]
def compare(self, compos, refCompos):
self.assertEqual(len(compos), len(refCompos))
cs = []
rcs = []
for i in range(len(compos)):
cs.append(compos[i])
rcs.append(refCompos[i])
def sortHelp(x, y):
if x[2] == y[2]:
return cmp(x[1], y[1])
else:
return cmp(x[2], y[2])
    cs.sort(key=lambda x: (x[2], x[1]))
    rcs.sort(key=lambda x: (x[2], x[1]))
for i in range(len(compos)):
tree, count, err = cs[i]
refTree, refCount, refErr = rcs[i]
self.assertEqual(count, refCount)
self.assertAlmostEqual(err, refErr, 4)
def test1(self):
""" basics """
self.details.tableName = 'ferro_quant'
refComposName = 'ferromag_quant_10.pkl'
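    # The reference pickle is read in text mode, any Windows line endings are
    # normalized to '\n', and the bytes are unpickled from an in-memory buffer;
    # the same pattern repeats in the tests below.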
with open(os.path.join(self.baseDir, refComposName), 'r') as pklTF:
buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
pklTF.close()
with io.BytesIO(buf) as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos)
compos = BuildComposite.RunIt(self.details, saveIt=0)
#pickle.dump(compos,open(os.path.join(self.baseDir,refComposName), 'wb'))
#with open(os.path.join(self.baseDir,refComposName), 'rb') as pklF:
# refCompos = pickle.load(pklF)
self.compare(compos, refCompos)
def test2(self):
""" depth limit """
self.details.tableName = 'ferro_quant'
refComposName = 'ferromag_quant_10_3.pkl'
with open(os.path.join(self.baseDir, refComposName), 'r') as pklTF:
buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
pklTF.close()
with io.BytesIO(buf) as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos)
self.details.limitDepth = 3
compos = BuildComposite.RunIt(self.details, saveIt=0)
self.compare(compos, refCompos)
def test3(self):
""" depth limit + less greedy """
self.details.tableName = 'ferro_quant'
refComposName = 'ferromag_quant_10_3_lessgreedy.pkl'
with open(os.path.join(self.baseDir, refComposName), 'r') as pklTF:
buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
pklTF.close()
with io.BytesIO(buf) as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos)
self.details.limitDepth = 3
self.details.lessGreedy = 1
compos = BuildComposite.RunIt(self.details, saveIt=0)
self.compare(compos, refCompos)
def test4(self):
""" more trees """
self.details.tableName = 'ferro_quant'
refComposName = 'ferromag_quant_50_3.pkl'
with open(os.path.join(self.baseDir, refComposName), 'r') as pklTF:
buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
pklTF.close()
with io.BytesIO(buf) as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos)
self.details.limitDepth = 3
self.details.nModels = 50
compos = BuildComposite.RunIt(self.details, saveIt=0)
self.compare(compos, refCompos)
def test5(self):
""" auto bounds """
self.details.tableName = 'ferro_noquant'
refComposName = 'ferromag_auto_10_3.pkl'
with open(os.path.join(self.baseDir, refComposName), 'r') as pklTF:
buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
pklTF.close()
with io.BytesIO(buf) as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos, copyBounds=1)
self.details.limitDepth = 3
self.details.nModels = 10
compos = BuildComposite.RunIt(self.details, saveIt=0)
self.compare(compos, refCompos)
def test6(self):
""" auto bounds with a real valued activity"""
self.details.tableName = 'ferro_noquant_realact'
refComposName = 'ferromag_auto_10_3.pkl'
with open(os.path.join(self.baseDir, refComposName), 'r') as pklTF:
buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
pklTF.close()
with io.BytesIO(buf) as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos, copyBounds=1)
self.details.limitDepth = 3
self.details.nModels = 10
self.details.activityBounds = [0.5]
compos = BuildComposite.RunIt(self.details, saveIt=0)
self.compare(compos, refCompos)
def test7(self):
""" Test composite of naive bayes"""
self.details.tableName = 'ferro_noquant'
refComposName = 'ferromag_NaiveBayes.pkl'
with open(os.path.join(self.baseDir, refComposName), 'r') as pklTFile:
buf = pklTFile.read().replace('\r\n', '\n').encode('utf-8')
pklTFile.close()
with io.BytesIO(buf) as pklFile:
refCompos = pickle.load(pklFile)
self._init(refCompos, copyBounds=1)
self.details.useTrees = 0
self.details.useNaiveBayes = 1
self.details.mEstimateVal = 20.0
self.details.qBounds = [0] + [2] * 6 + [0]
compos = BuildComposite.RunIt(self.details, saveIt=0)
self.compare(compos, refCompos)
if __name__ == '__main__':
unittest.main()
|
jandom/rdkit
|
rdkit/ML/UnitTestBuildComposite.py
|
Python
|
bsd-3-clause
| 6,780
|
[
"RDKit"
] |
e030cd93f26e0176426071f68f0e4bc52185adc49c0f777198be4e0a21b8c9fc
|
# ******************************************************************************
# pysimm.forcefield.charmm module
# ******************************************************************************
#
# ******************************************************************************
# License
# ******************************************************************************
# The MIT License (MIT)
#
# Copyright (c) 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import json
import os
import re
import sys
from itertools import permutations
import numpy
from . import gasteiger
from .. import error_print, verbose_print, debug_print
from ..system import Angle, Dihedral, Improper, ParticleType
from ..system import BondType, AngleType
from .forcefield import Forcefield
from ..utils import ItemContainer
class Charmm(Forcefield):
"""pysimm.forcefield.Charmm
Forcefield object with typing rules for CHARMM model.
By default reads data file in forcefields subdirectory.
Attributes:
ff_name: charmm
pair_style: lj/charmm
ff_class: 1
"""
def __init__(self, db_file=None):
if not db_file and db_file is not False:
db_file = os.path.join(
os.path.dirname(
os.path.realpath(__file__)
),
os.pardir, 'data', 'forcefields', 'charmm.json'
)
Forcefield.__init__(self, db_file)
with open(db_file) as f:
j = json.loads(f.read())
self.nbfix_types = ItemContainer()
for elem in j.get('nbfix_types'):
self.nbfix_types.add(ParticleType(**elem))
self.name = 'charmm'
self.pair_style = 'lj/charmm'
self.bond_style = 'harmonic'
self.angle_style = 'charmm'
self.dihedral_style = 'fourier'
self.improper_style = 'harmonic'
self.ff_class = '1'
def assign_ptypes(self, s):
"""pysimm.forcefield.Charmm.assign_ptypes
Charmm specific particle typing rules.
Requires :class:`~pysimm.system.System` object :class:`~pysimm.system.Particle` objects have bonds defined.
*** use System.add_particle_bonding() to ensure this ***
*** Not entirely inclusive - some atom types not used ***
Args:
s: :class:`~pysimm.system.System`
Returns:
None
"""
s.pair_style = self.pair_style
s.add_particle_bonding()
for b in s.bonds:
if not b.order:
b.order = 1
for p in s.particles:
p.bond_orders = [x.order for x in p.bonds]
if None in p.bond_orders:
error_print('error: bond orders are not set')
p.bond_elements = [x.a.elem if p is x.b else x.b.elem for x in p.bonds]
p.nbonds = len(p.bond_elements)
if p.linker:
p.nbonds += 1
for p in s.particles:
if not p.type_name:
if p.elem == 'C':
                    # A general definition of sp3 carbons
if (all(p.bond_orders) == 1) and (p.nbonds == 4):
n_partcls = [p_ for p_ in p.bonded_to if p_.elem == 'N']
if len(n_partcls) > 0 and (n_partcls[0].nbonds == 4) and (set(n_partcls[0].bond_elements) == {'C'}):
p.type_name = 'CG324'
else:
rng_count = __detect_rings__(p, [5, 6])
if rng_count == 0: # Linear sp3 carbon
hcount = p.bond_elements.count('H')
p.type_name = 'CG3{}1'.format(hcount)
                            elif rng_count > 0:  # tetrahydrofuran (THF) or tetrahydropyran (THP)
                                p.type_name = 'CG3C52'
if ('A' in p.bond_orders) or (4 in p.bond_orders):
p.type_name = 'CG2R61'
if (p.nbonds == 3): # carbonyl C condition
if set(p.bond_elements) == {'O', 'C', 'N'}: # in amide
p.type_name = 'CG2O1'
if p.bond_elements.count('O') == 2: # carbonyl C in esters or acids
tmp_part = [sb_p for sb_p in p.bonded_to if (sb_p.elem == 'O') and sb_p.nbonds == 2]
if len(tmp_part) > 0: # deprotonated
p.type_name = 'CG2O2'
else: # protonated
p.type_name = 'CG2O3'
if set(p.bond_elements) == {'O', 'C', 'H'}: # carbonyl C in aldehyde
p.type_name = 'CG2O4'
if (p.bond_elements.count('O') == 1) and (p.bond_elements.count('C') == 2): # in ketones
p.type_name = 'CG2O5'
elif p.elem == 'O':
if (p.nbonds == 2) and (all(p.bond_orders) == 1): # ethers, esters
if p.bond_elements.count('C') == 2:
is_ester = False
for p_ in p.bonded_to:
if (p_.bond_elements.count('O') == 2) and (p_.nbonds == 3):
is_ester = True
if is_ester:
p.type_name = 'OG302'
else:
p.type_name = 'OG301'
rng_count = __detect_rings__(p, [5, 6])
if rng_count > 0:
p.type_name = 'OG3C{}1'.format(rng_count)
if (p.nbonds == 1) and ('C' in p.bond_elements): # sp2 oxygen
p_ = [t for t in p.bonded_to][0]
if set(p_.bond_elements) == {'O', 'C', 'N'}: # in amide
p.type_name = 'OG2D1'
if p_.bond_elements.count('O') == 2: # in acids
tmp_part = [sb_p for sb_p in p_.bonded_to if (sb_p.elem == 'O') and sb_p.nbonds == 2]
if len(tmp_part) > 0:
p.type_name = 'OG2D1'
else:
p.type_name = 'OG2D2'
if p_.bond_elements.count('C') == 2: # in ketones
p.type_name = 'OG2D3'
if ('S' in p.bond_elements) or ('P' in p.bond_elements): # phosphate or sulfate
p.type_name = 'OG2P1'
if (p.nbonds == 2) and (set(p.bond_elements) == {'C', 'H'}):
p_ = [t for t in p.bonded_to if t.elem != 'H'][0]
if p_.bond_elements.count('O') == 2: # in acids
p.type_name = 'OG2D1'
if p_.bond_elements.count('O') == 1: # hydroxyl oxygen
p.type_name = 'OG311'
if(p.nbonds == 2) and all([t == 'H' for t in p.bond_elements]): # water oxygen
p.type_name = 'OT'
for sb_p in p.bonded_to: # type all hydrogens connected to this atom
if sb_p.elem == 'H':
sb_p.type_name = 'HT'
elif p.elem == 'N':
if (p.nbonds == 1) and ('C' in p.bond_elements): # nitrile (or cyano) group
p.type_name = 'NG1T1'
if (p.nbonds == 3) and (set(p.bond_elements) == {'H', 'N'}): # hydrazine
p.type_name = 'NG3N1'
if (p.nbonds == 3) and ('C' in p.bond_elements): # amide
p.type_name = 'NG2S{}'.format(p.bond_elements.count('H'))
if (p.nbonds == 4):
p.type_name = 'NG3P{}'.format(p.bond_elements.count('H'))
elif p.elem == 'H':
if p.bond_elements[0] == 'N':
p.type_name = 'HGP1'
if p.bond_elements[0] == 'O':
p.type_name = 'HGP1'
if p.bond_elements[0] == 'C':
host = [p_ for p_ in p.bonded_to][0]
nitrogen = [p_ for p_ in host.bonded_to if p_.elem == 'N']
if len(nitrogen) > 0 and (nitrogen[0].nbonds == 4):
p.type_name = 'HGP5'
else:
if ('A' in host.bond_orders) or (4 in host.bond_orders):
p.type_name = 'HGR61'
else:
hcount = [pt for pt in p.bonded_to][0].bond_elements.count('H')
p.type_name = 'HGA{}'.format(hcount)
elif p.elem == 'S':
if p.nbonds == 4:
p.type_name = 'SG3O{}'.format(4 - p.bond_elements.count('O'))
else:
print('cant type particle %s' % p.tag)
return p
all_types = set()
for p in s.particles:
tmp = self.particle_types.get(p.type_name)
if len(tmp) > 0:
all_types.add(tmp[0])
else:
debug_print('Current version of CHARMM-FF database file does not contain \'{}\' particle type'.format(p.type_name))
for pt in all_types:
s.particle_types.add(pt.copy())
for p in s.particles:
pt = s.particle_types.get(p.type_name)
if pt:
p.type = pt[0]
self.assign_extra_ljtypes(s)
def assign_extra_ljtypes(self, s):
"""pysimm.forcefield.Charmm.assign_extra_ljtypes
Addition to the normal force field setup: fills in the non-diagonal interaction pair
coefficients (coefficients for interactions between particles of different types).
Assumes that a :class:`~pysimm.system.ParticleType` is defined for all particles in s
Args:
s: :class:`~pysimm.system.System`
Returns:
None
"""
loc_lj_types = set()
for p in s.particle_types:
for p_ in s.particle_types:
if p != p_:
atm_type = tuple(sorted([p.tag, p_.tag]))
if not(atm_type in [at.atm_types for at in loc_lj_types]):
tmp = self.nbfix_types.get(','.join([p.name, p_.name]))
if len(tmp) > 0:
to_add = tmp[0].copy()
to_add.atm_types = atm_type
loc_lj_types.add(to_add)
if not s.nbfix_types:
s.nbfix_types = ItemContainer()
for ljt in loc_lj_types:
if not s.nbfix_types.get(ljt.name):
s.nbfix_types.add(ljt)
def assign_btypes(self, s):
"""pysimm.forcefield.Charmm.assign_btypes
Charmm specific bond typing rules.
Requires that the :class:`~pysimm.system.Particle` objects of the :class:`~pysimm.system.System` have bonds, type and type.name defined.
*** use after assign_ptypes ***
Args:
s: :class:`~pysimm.system.System`
Returns:
None
"""
all_types = set()
s.bond_style = self.bond_style
for b in s.bonds:
bt = self.bond_types.get('%s,%s' % (b.a.type.name, b.b.type.name))
if bt:
b.type_name = bt[0].name
else:
print('could not type this bond %s,%s'
% (b.a.type.name, b.b.type.name))
return b
all_types.add(self.bond_types.get(b.type_name)[0])
for bt in all_types:
bt = bt.copy()
s.bond_types.add(bt)
for b in s.bonds:
bt = s.bond_types.get(b.type_name)
if bt:
b.type = bt[0]
def assign_atypes(self, s):
"""pysimm.forcefield.Charmm.assign_atypes
Charmm specific angle typing rules.
Requires that the :class:`~pysimm.system.Particle` objects of the :class:`~pysimm.system.System` have bonds, type and type.name defined.
*** use after assign_ptypes ***
Args:
s: :class:`~pysimm.system.System`
Returns:
None
"""
all_types = set()
s.angle_style = self.angle_style
s.add_particle_bonding()
for p in s.particles:
for p1 in p.bonded_to:
for p2 in p.bonded_to:
if p1 is not p2:
unique = True
for a in s.angles:
if ((a.a is p1 and a.b is p and a.c is p2) or
(a.a is p2 and a.b is p and a.c is p1)):
unique = False
if unique:
at = self.angle_types.get('%s,%s,%s'
% (p1.type.name,
p.type.name,
p2.type.name))
if at:
s.angles.add(Angle(type_name=at[0].name,
a=p1, b=p, c=p2))
all_types.add(at[0])
else:
print('cannot type this angle %s,%s,%s'
% (p1.type.name,
p.type.name,
p2.type.name))
for at in all_types:
at = at.copy()
s.angle_types.add(at)
for a in s.angles:
at = s.angle_types.get(a.type_name)
if at:
a.type = at[0]
def assign_dtypes(self, s):
"""pysimm.forcefield.Charmm.assign_dtypes
CHARMM specific dihedral typing rules.
Requires that the :class:`~pysimm.system.Particle` objects of the :class:`~pysimm.system.System` have bonds, type
and type.name defined.
*** use after assign_ptypes ***
Args:
s: :class:`~pysimm.system.System`
Returns:
None
"""
all_types = set()
s.dihedral_style = self.dihedral_style
for b in s.bonds:
for p1 in b.a.bonded_to:
for p2 in b.b.bonded_to:
if p1 is b.b or p2 is b.a:
continue
unique = True
for d in s.dihedrals:
if ((d.a == p1 and d.b == b.a and
d.c == b.b and d.d == p2) or
(d.a == p2 and d.b == b.b and
d.c == b.a and d.d == p1)):
unique = False
if unique:
p1_name = p1.type.name
a_name = b.a.type.name
b_name = b.b.type.name
p2_name = p2.type.name
dt = self.dihedral_types.get('%s,%s,%s,%s'
% (p1_name, a_name,
b_name, p2_name))
if dt:
if len(dt) == 1:
all_types.add(dt[0])
s.dihedrals.add(Dihedral(type_name=dt[0].name,
a=p1, b=b.a,
c=b.b, d=p2))
else:
index = 0
x = 5
for i in range(len(dt)):
if dt[i].name.count('X') < x:
index = i
x = dt[i].name.count('X')
dt = dt[index]
all_types.add(dt)
s.dihedrals.add(Dihedral(type_name=dt.name,
a=p1, b=b.a,
c=b.b, d=p2))
else:
print('cannot type this dihedral %s,%s,%s,%s'
% (p1_name, a_name, b_name, p2_name))
for dt in all_types:
dt = dt.copy()
dt.w = 1.0
s.dihedral_types.add(dt)
verbose_print('Dihedrals assigned successfully. \nIMPORTANT: all dihedral weighting factors '
'(coefficients to compensate for double counting in rings) are currently set to 1.0.\n'
'If those values are different for your system please multiply corresponding force constants '
'by the weights manually.\n')
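# Illustrative note (not in the original source): if a dihedral lies in a ring
# and should carry a weighting factor w other than 1.0, the user would rescale
# the force constants after assignment, e.g. `dt.k = [w * k for k in dt.k]` for
# the corresponding dihedral type; the attribute names mirror those set above.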
for d in s.dihedrals:
dt = s.dihedral_types.get(d.type_name, item_wildcard=None)
if dt:
d.type = dt[0]
def assign_itypes(self, s):
"""pysimm.forcefield.Charmm.assign_itypes
Charmm specific improper typing rules.
There are none.
Args:
s: :class:`~pysimm.system.System`
Returns:
None
"""
all_types = set()
s.improper_style = self.improper_style
for p in s.particles:
if len(p.bonded_to) == 3:
for perm in permutations(p.bonded_to, 3):
p1_name = perm[0].type.eq_improper or perm[0].type.name
p2_name = perm[1].type.eq_improper or perm[1].type.name
p3_name = perm[2].type.eq_improper or perm[2].type.name
it = self.improper_types.get(','.join([p.type.name, p1_name,
p2_name, p3_name]), order=True)
if it:
all_types.add(it[0])
bonded_to = p.bonded_to.get('all')
s.impropers.add(Improper(type_name=it[0].name,
a=p, b=bonded_to[0],
c=bonded_to[1],
d=bonded_to[2]))
break
for it in all_types:
it = it.copy()
s.improper_types.add(it)
for i in s.impropers:
it = s.improper_types.get(i.type_name)
if it:
i.type = it[0]
def assign_charges(self, s, charges='gasteiger'):
"""pysimm.forcefield.Charmm.assign_charges
Charge assignment. Gasteiger is default for now.
Args:
s: :class:`~pysimm.system.System`
charges: gasteiger
Returns:
None
"""
if charges == 'gasteiger':
verbose_print('adding gasteiger charges')
gasteiger.set_charges(s)
else:
# initialize charges of all particles with zeros
for p in s.particles:
if not p.charge:
p.charge = 0
s.set_charge()
def __parse_add_file__(self, file):
"""
Private method to read/convert CHARMM specific FF parameters from the native format (.prm) to add on top of
currently existing library of FF parameters. Will update this ForceField object with data from the file and will
write the output 'charmm_mod.json' DB file
Args:
file: (string) full (absolute or relative) path to a .prm file
Returns:
none
"""
headers = ['ATOMS', 'BONDS', 'ANGLES', 'DIHEDRALS', 'IMPROPERS', 'NONBONDED', 'NBFIX']
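# Clarifying note: CHARMM .prm files tabulate Rmin/2 for each atom type, and the
# LJ sigma relates to Rmin as sigma = Rmin / 2**(1/6), so the factor below
# (2 / 2**(1/6)) converts the tabulated Rmin/2 values into sigma.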
sigma_conv_mult = 2 / (2 ** (1.0 / 6.0))
with open(file, 'r') as pntr:
stream = pntr.read()
tmp_h = re.findall('|'.join(['\n' + h for h in headers]), stream)
tmp_b = re.split('|'.join(['\n' + h for h in headers]), stream)
nbfixes = []
for h,b in zip(tmp_h, tmp_b[1:]):
if h.strip().lower() == 'atoms':
pass
if h.strip().lower() == 'bonds':
pass
if h.strip().lower() == 'angles':
pass
if h.strip().lower() == 'dihedrals':
pass
if h.strip().lower() == 'impropers':
pass
if h.strip().lower() == 'nbfix':
lines = b.split('\n')
for l in lines:
if not l.startswith('!'):
l = l.split('!')[0]
tmp = l.split()
if len(tmp) > 3:
if not('nbfix_types' in self.__dict__.keys()):
nbfixes.append({'name': ','.join((tmp[0], tmp[1])),
'rname': ','.join((tmp[1], tmp[0])),
'epsilon': abs(float(tmp[2])),
'sigma': sigma_conv_mult * float(tmp[3]) })
else:
self.nbfix_types.add(ParticleType(name=','.join((tmp[0], tmp[1])),
rname=','.join((tmp[1], tmp[0])),
epsilon=abs(float(tmp[2])),
sigma=sigma_conv_mult * float(tmp[3]) ))
if h.strip().lower() == 'nonbonded':
lines = b.split('\n')
for l in lines[2:]:
if not l.startswith('!'):
l = l.split('!')[0]
tmp = l.split()
if len(tmp) > 3:
db_record = self.particle_types.get(tmp[0].strip())
try:
if len(db_record) > 0:
verbose_print('The atomtype {} is in the DB and will be overridden'.format(tmp[0]))
setattr(db_record[0], 'epsilon', abs(float(tmp[2])))
setattr(db_record[0], 'sigma', sigma_conv_mult * float(tmp[3]))
if len(tmp) > 5:
setattr(db_record[0], 'epsilon_14', abs(float(tmp[5])))
setattr(db_record[0], 'sigma_14', sigma_conv_mult * float(tmp[6]))
else:
if len(tmp) > 5:
self.particle_types.add(ParticleType(name=tmp[0].strip(),
epsilon=abs(float(tmp[2])),
sigma=sigma_conv_mult * float(tmp[3]),
epsilon_14=abs(float(tmp[5])),
sigma_14=sigma_conv_mult * float(tmp[6])))
else:
self.particle_types.add(ParticleType(name=tmp[0].strip(),
epsilon=abs(float(tmp[2])),
sigma=sigma_conv_mult * float(tmp[3])))
except ValueError:
verbose_print('Data line appears to be corrupted; continuing with the next one...')
continue
outp_obj = {'angle_types': [], 'improper_types': [], 'bond_types': [], 'particle_types': [], 'dihedral_types': [],
'pair_style': '', 'bond_style': '', 'angle_style': '', 'dihedral_style': '', 'improper_style': ''}
for prp in outp_obj.keys():
if type(outp_obj[prp]) == list:
tmp = getattr(self, prp)._dict
for t in tmp.keys():
outp_obj[prp].append(tmp[t].__dict__)
elif type(outp_obj[prp]) == str:
outp_obj[prp] = getattr(self, prp)
if not ('nbfix_types' in self.__dict__.keys()):
outp_obj['nbfix_types'] = nbfixes
else:
outp_obj['nbfix_types'] = []
tmp = getattr(self, 'nbfix_types')._dict
for t in tmp.keys():
outp_obj['nbfix_types'].append(tmp[t].__dict__)
DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'data', 'forcefields', 'charmm')
out_file = os.path.join(DATA_PATH, os.path.pardir, 'charmm_mod.json')
with open(out_file, 'w') as pntr:
pntr.write(json.dumps(outp_obj, indent=2))
return True
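# Illustrative call (not in the original source): extra CHARMM parameters could
# be folded into the packaged database with something along the lines of
#
#     Charmm().__parse_add_file__('toppar/extra_params.prm')
#
# where the .prm path is a placeholder; the merged parameters are written next
# to the packaged force field data as 'charmm_mod.json'.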
def __parse_charmm__():
"""
Private method to read/convert CHARMM specific FF parameters from the form of GROMACS input format (.atp, .itp)
to the PySIMM input format (.json).
Note: Because of the format specification, the file contains neither sigma_14 / epsilon_14 parameters nor
explicit non-diagonal LJ parameters (NBFIXes). Those are read from different file types (see charmm.__parse_add_file__())
Returns:
None
"""
kj2kcal = 4.184
rounding = 8
bnded_lib = 'ffbonded.itp'
atmtype_lib = 'atomtypes.atp'
nonbnded_lib = 'ffnonbonded.itp'
DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'data', 'forcefields', 'charmm')
obj = {'angle_types': [], 'improper_types': [], 'bond_types': [], 'particle_types': [], 'dihedral_types': []}
# Parsing bonded parameters of the FF
try:
with open(os.path.join(DATA_PATH, bnded_lib), 'r') as f:
ff_file = f.readlines()
except OSError:
error_print('Required library file with CHARMM bonded parameters \"{:}\" '
'cannot be opened or read. \nExiting...'.format(bnded_lib))
sys.exit(1)
i = 0
curr_type = ''
dig_names_check = []
while i < len(ff_file):
line = ff_file[i].split()
if ff_file[i][0] == '[':
curr_type = line[1]
if ff_file[i + 1].split()[1] == "'improper'":
curr_type = 'impropertypes'
i += 1
print(curr_type)
i += 1
elif line:
try:
if curr_type == 'bondtypes':
k = round(float(line[4]) / (2 * kj2kcal * 100), rounding)
b = round(float(line[3]) * 10, rounding)
name = ','.join(line[0:2])
rname = ','.join(reversed(line[0:2]))
obj['bond_types'].append({'k': k, 'tag': name, 'r0': b, 'name': name, 'rname': rname})
elif curr_type == 'angletypes':
theta0 = round(float(line[4]), rounding)
ktheta = round(float(line[5]) / (2 * kj2kcal), rounding)
ub0 = round(10 * float(line[6]), rounding)
kub = round(float(line[7]) / (2 * kj2kcal), rounding)
name = ','.join(line[0:3])
rname = ','.join(reversed(line[0:3]))
obj['angle_types'].append(
{'theta0': theta0, 'tag': name, 'k': ktheta, 'r_ub': ub0, 'k_ub': kub, 'name': name,
'rname': rname})
elif curr_type == 'impropertypes':
k = round(float(line[6]) / (2 * kj2kcal), rounding)
x0 = round(float(line[5]), rounding)
name = ','.join(line[0:4])
rname = ','.join(reversed(line[0:4]))
obj['improper_types'].append({'k': k, 'tag': name, 'x0': x0, 'name': name, 'rname': rname})
elif curr_type == 'dihedraltypes':
d = round(float(line[5]), rounding)
k = round(float(line[6]) / kj2kcal, rounding)
n = int(line[7])
name = ','.join(line[0:4])
rname = ','.join(reversed(line[0:4]))
if name not in dig_names_check:
obj['dihedral_types'].append({'tag': name, 'd': [d], 'k': [k], 'n': [n], 'm': 1,
'name': name, 'rname': rname})
dig_names_check.append(name)
else:
to_add = list(filter(lambda fields: fields['name'] == name, obj['dihedral_types']))[0]
to_add['d'].append(d)
to_add['k'].append(k)
to_add['n'].append(n)
to_add['m'] += 1
except ValueError:
print('invalid value at line', i)
except IndexError:
print('missing value at line', i)
i += 1
# Parsing non-bonded parameters of the FF
elems_json = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'data', 'elements_by_mass.json')
with open(elems_json, 'r') as pntr:
elemsDict = json.load(pntr)
try:
with open(os.path.join(DATA_PATH, nonbnded_lib), 'r') as nb_file:
try:
with open(os.path.join(DATA_PATH, atmtype_lib), 'r') as f:
atp_data = f.read()
except OSError:
print('Required library file with CHARMM atom types \"{:}\" '
'cannot be opened or read. \nExiting...'.format(atmtype_lib))
sys.exit(1)
fields = ['name', 'epsilon', 'sigma', 'elem', 'tag', 'mass', 'desc']
for line in nb_file:
if not (line[0] in [';', '#', '[', '\n']):
line = line.strip().split()
if len(line) > 6:
# checking validity of the description field
descr = re.findall('(?<=' + '{:>6} '.format(line[0]) + '[\d| ][\d| ]\d\.\d{5} ; ).*', atp_data)
if len(descr) > 0:
descr = descr[0]
else:
descr = ''
# checking validity of the element name field
elemname = line[1]
if len(line[1]) > 0:
if int(line[1]) > 0:
elemname = elemsDict[line[1]]['symbol']
tmp = [line[0],
round(float(re.match('-?\d{1,}\.\d{1,}', line[6]).group(0)) / kj2kcal, rounding),
round(10 * float(re.match('-?\d{1,}\.\d{1,}', line[5]).group(0)), rounding),
elemname, line[0], float(line[2]), descr]
obj['particle_types'].append(dict(zip(fields, tmp)))
'''
# Parsing **non-diagonal** non-bonded parameters of the FF
nb_file.seek(0)
obj['nondiagonal_lj'] = []
for line in nb_file:
if not (line[0] in [';', '#', '[', '\n']):
line = line.strip().split()
if len(line) == 5:
i_name = line[0].strip()
j_name = line[1].strip()
obj['nondiagonal_lj'].append({'name': ','.join([i_name, j_name]),
'rname': ','.join([j_name, i_name]),
'epsilon': round(float(line[3]) / kj2kcal, rounding),
'sigma': round(10 * float(line[4]), rounding)
})
'''
except OSError:
print('Required library file with CHARMM non-bonded parameters \"{:}\" '
'cannot be opened or read. \nExiting...'.format(nonbnded_lib))
sys.exit(1)
# Adding meta-information about FF styles and creating an output file
chrm_type = Charmm()
attr = ['pair_style', 'bond_style', 'angle_style', 'dihedral_style', 'improper_style']
obj.update(dict(zip(attr, [getattr(chrm_type, t) for t in attr])))
out_file = os.path.join(DATA_PATH, os.path.pardir, 'charmm.json')
with open(out_file, 'w') as pntr:
pntr.write(json.dumps(obj, indent=2))
def __detect_rings__(particle, orders):
"""
Private method for analysing whether a given particle is a part of a ring structure
Args:
particle: :class:`~pysimm.system.Particle` reference
orders: list of integers defining size of the rings which should be checked
Returns:
integer: the largest value from orders for which the particle is found to be part of a ring of that size;
returns 0 if no cyclic structure of the requested sizes is detected
"""
rng_count = 0
neighb_list = []
ordr_count = 2
to_exclude = {particle}
neighb = []
for p in to_exclude:
neighb += [x.a if particle is x.b else x.b for x in p.bonds]
while ordr_count < max(orders):
to_include = []
for p in neighb:
to_include += [x.a if particle is x.b else x.b for x in p.bonds]
neighb_list.append(set(to_include) - to_exclude)
to_exclude = set(neighb)
ordr_count += 1
for o in orders:
if particle in neighb_list[o - 3]:
rng_count = o
return rng_count
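# A minimal usage sketch (not part of the original module), assuming a pysimm
# System `s` with bonds and bond orders already defined; the typing methods
# above are intended to be called in this order because later steps rely on
# the type names set by assign_ptypes:
#
#     from pysimm import system, forcefield
#     s = system.read_lammps('molecule.lmps')   # hypothetical input file/reader
#     ff = forcefield.charmm.Charmm()
#     ff.assign_ptypes(s)
#     ff.assign_btypes(s)
#     ff.assign_atypes(s)
#     ff.assign_dtypes(s)
#     ff.assign_itypes(s)
#     ff.assign_charges(s, charges='gasteiger')
#
# Any route that yields a System with connectivity should work equally well.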
|
polysimtools/pysimm
|
pysimm/forcefield/charmm.py
|
Python
|
mit
| 35,193
|
[
"CHARMM",
"Gromacs"
] |
a97b0396c218e2dda4c5f12e79c74860e1f3edb88208cfaaf75be97b25fdced0
|
#!/usr/bin/env python
# encoding: utf-8
"""
api.py
Created by Brian Whitman on 2010-06-16.
Copyright (c) 2010 The Echo Nest Corporation. All rights reserved.
"""
from __future__ import with_statement
import web
import fp
import re
try:
import json
except ImportError:
import simplejson as json
# Very simple web facing API for FP dist
urls = (
'/query', 'query',
'/query?(.*)', 'query',
'/ingest', 'ingest',
)
class ingest:
def POST(self):
params = web.input(track_id="default", fp_code="", artist=None, release=None, track=None, length=None, codever=None)
if params.track_id == "default":
track_id = fp.new_track_id()
else:
track_id = params.track_id
if params.length is None or params.codever is None:
return web.webapi.BadRequest()
# First see if this is a compressed code
if re.match('[A-Za-z\/\+\_\-]', params.fp_code) is not None:
code_string = fp.decode_code_string(params.fp_code)
if code_string is None:
return json.dumps({"track_id":track_id, "ok":False, "error":"cannot decode code string %s" % params.fp_code})
else:
code_string = params.fp_code
data = {"track_id": track_id,
"fp": code_string,
"length": params.length,
"codever": params.codever }
if params.artist: data["artist"] = params.artist
if params.release: data["release"] = params.release
if params.track: data["track"] = params.track
fp.ingest(data, do_commit=True, local=False)
return json.dumps({"track_id":track_id, "status":"ok"})
class query:
def POST(self):
return self.GET()
def GET(self):
stuff = web.input(fp_code="")
response = fp.best_match_for_query(stuff.fp_code)
return json.dumps({"ok":True, "query":stuff.fp_code, "message":response.message(), "match":response.match(), "score":response.score, \
"qtime":response.qtime, "track_id":response.TRID, "total_time":response.total_time})
application = web.application(urls, globals())#.wsgifunc()
if __name__ == "__main__":
application.run()
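# A minimal client-side sketch (not part of the original module): both handlers
# above read form-encoded POST parameters via web.input(), so a hypothetical
# client built on the `requests` library could look like
#
#     import requests
#     resp = requests.post('http://localhost:8080/ingest',
#                          data={'fp_code': code_string, 'length': '212',
#                                'codever': '4.12', 'artist': 'Some Artist'})
#     print(resp.json()['track_id'])
#
# The host, port and field values are illustrative; only the parameter names
# mirror what the ingest handler expects.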
|
hzlf/openbroadcast
|
website/apps/ep/API/api.py
|
Python
|
gpl-3.0
| 2,258
|
[
"Brian"
] |
1decfac6bede59246af1cc3efc5d728d14e271414fc11c05727c88077b55dcbe
|
from glob import glob
import fitsio
import sys
from astrometry.util.fits import *
from astrometry.util.file import *
from astrometry.util.starutil_numpy import *
from astrometry.libkd.spherematch import *
from collections import Counter
from legacypipe.oneblob import _select_model
from legacypipe.survey import wcs_for_brick
from astrometry.util.multiproc import multiproc
def patch_one(X):
(ifn, Nfns, fn, outfn, fix_dup) = X
if os.path.exists(outfn):
print(ifn, 'of', Nfns, ':', fn, ': output', outfn, 'exists')
return
T8 = fits_table(fn)
phdr = fitsio.read_header(fn)
hdr = T8.get_header()
utypes = np.unique(T8.type)
T8.type = T8.type.astype('S4')
dupstring = np.array('DUP ').astype('S4')
I = np.flatnonzero(T8.type == dupstring)
#print(ifn, 'of', Nfns, ':', fn, ':', len(I), 'DUP', 'ver:', phdr['LEGPIPEV'])
print(ifn, 'of', Nfns, ':', fn, ':', 'ver:', phdr['LEGPIPEV'], 'types:', list(utypes))
if fix_dup and len(I) > 0:
T8.objid[I] = I
T8.brickname[I] = T8.brickname[0]
T8.brickid[I] = T8.brickid[0]
assert(len(np.unique(T8.objid)) == len(T8))
# Add mask bit definitions to headers
phdr.add_record(dict(name='COMMENT', value='WISEMASK bit values:'))
wisebits = [
(0, 'BRIGHT' , 'Bright star core and wings'),
(1, 'SPIKE' , 'PSF-based diffraction spike'),
(2, 'GHOST' , ''),
(3, 'LATENT' , 'First latent'),
(4, 'LATENT2', 'Second latent image'),
(5, 'HALO' , 'AllWISE-like circular halo'),
(6, 'SATUR' , 'Bright star saturation'),
(7, 'SPIKE2' , 'Geometric diffraction spike'),
]
#name_map = {'LATENT2': 'LATEN2'}
for bit,name,comm in wisebits:
phdr.add_record(dict(name='WBITN%i' % bit, value=name, comment=comm + ' (0x%x)' % (1<<bit)))
#for bit,name,comm in wisebits:
# phdr.add_record(dict(name='W%s' % name_map.get(name, name), value=1<<bit, comment=comm))
phdr.add_record(dict(name='COMMENT', value='MASKBITS bit values:'))
maskbits = [
(0 , 'NPRIMARY', 'Not-brick-primary'),
(1 , 'BRIGHT', 'Bright star in blob'),
(2 , 'SATUR_G', 'g saturated + margin'),
(3 , 'SATUR_R', 'r saturated + margin'),
(4 , 'SATUR_Z', 'z saturated + margin'),
(5 , 'ALLMASK_G', 'Any ALLMASK_G bit set'),
(6 , 'ALLMASK_R', 'Any ALLMASK_R bit set'),
(7 , 'ALLMASK_Z', 'Any ALLMASK_Z bit set'),
(8 , 'WISEM1', 'WISE W1 bright star mask'),
(9 , 'WISEM2', 'WISE W2 bright star mask'),
(10, 'BAILOUT', 'Bailed out of processing'),
(11, 'MEDIUM', 'Medium-bright star'),
(12, 'GALAXY', 'LSLGA large galaxy'),
(13, 'CLUSTER', 'Cluster'),
]
# name_map = {
# 'NPRIMARY': 'NPRIMRY',
# 'ALLMASK_G': 'ALLM_G',
# 'ALLMASK_R': 'ALLM_R',
# 'ALLMASK_Z': 'ALLM_Z',
# }
for bit,name,comm in maskbits:
phdr.add_record(dict(name='MBITN%i' % bit, value=name, comment=comm + ' (0x%x)' % (1<<bit)))
#for bit,name,comm in maskbits:
# phdr.add_record(dict(name='M%s' % name_map.get(name, name), value=1<<bit, comment=comm))
phdr.add_record(dict(name='COMMENT', value='ANYMASK/ALLMASK bit values:'))
anybits = [
(0, 'BADPIX', 'Bad columns, hot pixels, etc'),
(1, 'SATUR', 'Saturated'),
(2, 'INTERP', 'Interpolated'),
(4, 'CR', 'Cosmic ray'),
(6, 'BLEED', 'Bleed trail'),
(7, 'TRANS', 'Transient'),
(8, 'EDGE', 'Edge pixel'),
(9, 'EDGE2', 'Edge pixel, jr'),
(11,'OUTLIER', 'Outlier from stack'),
]
#name_map = {}
for bit,name,comm in anybits:
phdr.add_record(dict(name='ABITN%i' % bit, value=name, comment=comm + ' (0x%x)' % (1<<bit)))
#for bit,name,comm in anybits:
# phdr.add_record(dict(name='A%s' % name_map.get(name, name), value=1<<bit, comment=comm))
phdr.add_record(dict(name='COMMENT', value='BRIGHTBLOB bit values:'))
brightbits = [
(0, 'BRIGHT', 'Bright star'),
(1, 'MEDIUM', 'Medium-bright star'),
(2, 'CLUSTER', 'Globular cluster'),
(3, 'GALAXY', 'Large LSLGA galaxy'),
]
#name_map = {}
for bit,name,comm in brightbits:
phdr.add_record(dict(name='BBITN%i' % bit, value=name, comment=comm + ' (0x%x)' % (1<<bit)))
#for bit,name,comm in brightbits:
# phdr.add_record(dict(name='B%s' % name_map.get(name, name), value=1<<bit, comment=comm))
# Ugh, need to copy units
columns = T8.get_columns()
# Add in missing units
extraunits = dict(bx='pix', by='pix',
pmra='mas/yr', pmdec='mas/yr', parallax='mas',
pmra_ivar='1/(mas/yr)^2', pmdec_ivar='1/(mas/yr)^2', parallax_ivar='1/mas^2',
ref_epoch='yr',
gaia_phot_g_mean_mag='mag',
gaia_phot_bp_mean_mag='mag',
gaia_phot_rp_mean_mag='mag',
psfdepth_w1='1/nanomaggy^2',
psfdepth_w2='1/nanomaggy^2',
psfdepth_w3='1/nanomaggy^2',
psfdepth_w4='1/nanomaggy^2')
units = []
for i,col in enumerate(columns):
typekey = 'TTYPE%i' % (i+1)
assert(hdr[typekey].strip() == col)
unitkey = 'TUNIT%i' % (i+1)
if unitkey in hdr:
unit = hdr[unitkey]
else:
unit = extraunits.get(col, '')
units.append(unit)
outdir = os.path.dirname(outfn)
try:
os.makedirs(outdir)
except:
pass
try:
T8.writeto(outfn, header=hdr, primheader=phdr, units=units)
except:
print('Failed to write', outfn)
if os.path.exists(outfn):
os.remove(outfn)
def main():
#fns = glob('/global/project/projectdirs/cosmo/work/legacysurvey/dr8/90prime-mosaic/tractor/*/tractor-*.fits')
# DR8-south
#prefix = '/global/project/projectdirs/cosmo/work/legacysurvey/dr8/south/tractor/'
prefix = '/global/project/projectdirs/cosmo/work/legacysurvey/dr8-garage/south/original-tractor/'
out_prefix = 'patched-dr8-south-2/'
pat = prefix + '*/tractor-*.fits'
#pat = prefix + '013/tractor-0132m265.fits'
fix_dup = True
fns = glob(pat)
fns.sort()
print(len(fns), 'Tractor catalogs')
outfns = [fn.replace(prefix, out_prefix) for fn in fns]
# vers = Counter()
# keepfns = []
# for fn in fns:
# hdr = fitsio.read_header(fn)
# ver = hdr['LEGPIPEV']
# ver = ver.strip()
# vers[ver] += 1
# if ver == 'DR8.2.1':
# keepfns.append(fn)
#
# print('Header versions:', vers.most_common())
#
# fns = keepfns
# print('Keeping', len(fns), 'with bad version')
N = len(fns)
args = [(i,N,fn,outfn,fix_dup) for i,(fn,outfn) in enumerate(zip(fns, outfns))]
mp = multiproc(32)
#mp = multiproc()
mp.map(patch_one, args)
if __name__ == '__main__':
main()
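# A small sketch (not in the original script): a single catalog can be patched
# without the multiprocessing pool by calling patch_one directly, e.g.
#
#     patch_one((0, 1, '/path/to/tractor-0001p000.fits',
#                'patched/tractor-0001p000.fits', True))
#
# The tuple mirrors the (ifn, Nfns, fn, outfn, fix_dup) arguments unpacked at
# the top of patch_one; the file names here are placeholders.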
|
legacysurvey/pipeline
|
py/legacyanalysis/fix-dup-data.py
|
Python
|
gpl-2.0
| 7,052
|
[
"Galaxy"
] |
e49cd23173c0f3ef974d50a0fdb8348e704f2d471c581d9fd9f07c42797f8615
|
# coding: utf-8
# Copyright (c) Materials Virtual Lab
# Distributed under the terms of the BSD License.
from monty.json import MSONable
import numpy as np
import warnings
from pymatgen.core.spectrum import Spectrum
from copy import deepcopy
from veidt.elsie.preprocessing import Preprocessing
from veidt.elsie import similarity_measures
from scipy.interpolate import interp1d
from scipy import signal
class SpectraSimilarity(MSONable):
def __init__(self, sp1, sp2, interp_points=200):
"""
Initialize a SpectraSimilarity object to determine the similarity
between two spectra. Both spectrum objects should follow pymatgen's Spectrum class.
Args:
sp1: Spectrum object 1. Given spectrum to match, usually
collected from exp.
sp2: Spectrum object 2. Candidate spectrum, usually
computational reference spectrum.
interp_points: Number of points used for spectrum interpolation
throughout comparison
"""
self.sp1 = sp1
self.sp2 = sp2
self.shifted_sp1 = None
self.shifted_sp2 = None
self.interp_points = interp_points
self._energy_validation()
def _energy_validation(self):
"""
Validate the overlapping absorption range of both spectra. A warning is raised
in the following two cases:
1. the overlapping energy range is less than 30 meV;
2. there is no overlapping energy, in which case self.valid_comparison is set to False
"""
min_energy_1, max_energy_1 = np.min(self.sp1.x), np.max(self.sp1.x)
min_energy_2, max_energy_2 = np.min(self.sp2.x), np.max(self.sp2.x)
max_min_energy = max(min_energy_1, min_energy_2)
min_max_energy = min(max_energy_1, max_energy_2)
if (min_energy_2 > max_energy_1) or (min_energy_1 > max_energy_2):
warning_msg = "Candidate spectrum has no overlap with given spectrum to match"
warnings.warn(warning_msg)
self.valid_comparison = False
elif (min_max_energy - max_min_energy) < 30:
warning_msg = "Candidate and given spectra's overlap absorption energy is less than 30 meV"
warnings.warn(warning_msg)
self.valid_comparison = True
else:
self.valid_comparison = True
def _spectrum_shift(self, algo='threshold_shift', intensity_threshold=0.06, preset_shift=None):
"""
Shift self.sp2 with respect to self.spec1. Self.spec1 will be
untouched.
Args:
algo: Algorithm used to determine the energy shift between two
spectra. Currently available types are:
"threshold_shift": Use the onset of absorption. Onset energy
are determined by the intensity_threshold.
"cross_correlate": Use the cross correlation function between
two spectra to determine the shift energy.
"user_specify": User specify the shift energy between the two
spectra. The shift energy value should be set
through the preset_shift.
intensity_threshold: The absorption peak intensity threshold used
to determine the absorption onset, default set to 0.06
preset_shift: The energy shift value between the two spectra.
preset_shift > 0 means sp2 needs to shift left w.r.t sp1
"""
if algo == 'user_specify':
if preset_shift is None:
raise ValueError('The energy shift value has not been set')
self.shifted_sp1, self.shifted_sp2, self.shifted_energy = \
preset_value_shift(self.sp1, self.sp2, preset_shift)
if algo == 'threshold_shift':
self.sp1, self.sp2 = spectra_lower_extend(self.sp1, self.sp2)
self.shifted_sp1, self.shifted_sp2, self.shifted_energy, self.abs_onset = \
absorption_onset_shift(self.sp1, self.sp2, intensity_threshold)
elif algo == 'cross_correlate':
self.sp1, self.sp2 = spectra_lower_extend(self.sp1, self.sp2)
self.shifted_sp1, self.shifted_sp2, self.shifted_energy = \
signal_corre_shift(self.sp1, self.sp2)
def get_shifted_similarity(self, similarity_metric, energy_variation=None,
spect_preprocess=None, **kwargs):
"""
Calculate the similarity between two shifted spectra
Args:
similarity_metric (string): The similarity metric used for comparison.
energy_variation (list): Energy variation values used to squeeze or broaden the candidate
spectrum (sp2) beyond the spectrum shift onset point. E.g., [-2, 2, 0.1]
specifies that sp2's spectrum energy (Es) beyond the onset point will scale from Es - 2 to Es + 2
at 0.1 intervals. The maximum similarity and its corresponding scale energy will be returned.
spect_preprocess (list/tuple): Preprocessing steps need to taken for each spectrum
"""
if not self.valid_comparison:
return 0
if (self.shifted_sp1 is None) and (self.shifted_sp2 is None):
self._spectrum_shift(**kwargs)
simi_class = getattr(similarity_measures, similarity_metric)
if energy_variation is not None:
sp2_energy_scale_onset = self.shifted_sp2.x[np.argmax(
self.shifted_sp2.x > self.abs_onset)]
sp2_energy_scale_end = max(self.shifted_sp2.x)
sp2_scale_energy_den = (self.shifted_sp2.x > self.abs_onset).sum()
max_simi = float("-inf")
for scale_energy in np.arange(
energy_variation[0], energy_variation[1], energy_variation[2]):
sp2_scaled_energy = np.linspace(
sp2_energy_scale_onset,
sp2_energy_scale_end + scale_energy,
sp2_scale_energy_den)
shifted_sp2_scaled_energy = np.hstack(
(self.shifted_sp2.x[:np.argmax(
self.shifted_sp2.x > self.abs_onset)],
sp2_scaled_energy))
if shifted_sp2_scaled_energy.shape != self.shifted_sp2.x.shape:
raise ValueError('The scaled energy grid density is '
'different from pre-scaled')
scaled_shifted_sp2 = Spectrum(shifted_sp2_scaled_energy,
self.shifted_sp2.y)
# Interpolate and calculate the similarity between
# scaled_shifted_sp2 and shifted_sp1
overlap_energy = energy_overlap(self.shifted_sp1,
scaled_shifted_sp2)
overlap_energy_grid = np.linspace(
overlap_energy[0], overlap_energy[1], self.interp_points)
shifted_sp1_interp = spectra_energy_interpolate(
self.shifted_sp1, overlap_energy_grid)
scaled_shifted_sp2_interp = spectra_energy_interpolate(
scaled_shifted_sp2, overlap_energy_grid)
pre_shifted_sp1_interp = Preprocessing(shifted_sp1_interp)
pre_shifted_sp1_interp.spectrum_process(['intnorm'])
pre_scaled_shifted_sp2_interp = Preprocessing(scaled_shifted_sp2_interp)
pre_scaled_shifted_sp2_interp.spectrum_process(['intnorm'])
shifted_sp1_interp = pre_shifted_sp1_interp.spectrum
scaled_shifted_sp2_interp = pre_scaled_shifted_sp2_interp.spectrum
similarity_obj = simi_class(shifted_sp1_interp.y,
scaled_shifted_sp2_interp.y)
try:
similarity_value = similarity_obj.similarity_measure()
except Exception:
warnings.warn("Cannot generate valid similarity value for the two spectra")
similarity_value = np.NaN
if similarity_value > max_simi:
max_simi = similarity_value
self.interp_shifted_sp1 = shifted_sp1_interp
self.interp_scaled_shift_sp2 = scaled_shifted_sp2_interp
# max_scale_energy<0 means the sp2 should be squeeze for
# maximum matching
self.max_scale_energy = scale_energy
if spect_preprocess is not None:
pre_shifted_sp1_interp = Preprocessing(
self.interp_shifted_sp1)
pre_scaled_shifted_sp2_interp = Preprocessing(
self.interp_scaled_shift_sp2)
pre_shifted_sp1_interp.spectrum_process(spect_preprocess)
pre_scaled_shifted_sp2_interp.spectrum_process(
spect_preprocess)
shifted_sp1_interp = pre_shifted_sp1_interp.spectrum
scaled_shifted_sp2_interp = pre_scaled_shifted_sp2_interp.spectrum
similarity_obj = simi_class(shifted_sp1_interp.y,
scaled_shifted_sp2_interp.y)
max_simi = similarity_obj.similarity_measure()
return max_simi
elif energy_variation is None:
overlap_energy = energy_overlap(self.shifted_sp1,
self.shifted_sp2)
overlap_energy_grid = np.linspace(
overlap_energy[0], overlap_energy[1], self.interp_points)
shifted_sp1_interp = spectra_energy_interpolate(
self.shifted_sp1, overlap_energy_grid)
shifted_sp2_interp = spectra_energy_interpolate(
self.shifted_sp2, overlap_energy_grid)
if spect_preprocess is not None:
pre_shifted_sp1_interp = Preprocessing(shifted_sp1_interp)
pre_shifted_sp2_interp = Preprocessing(shifted_sp2_interp)
pre_shifted_sp1_interp.spectrum_process(spect_preprocess)
pre_shifted_sp2_interp.spectrum_process(spect_preprocess)
shifted_sp1_interp = pre_shifted_sp1_interp.spectrum
shifted_sp2_interp = pre_shifted_sp2_interp.spectrum
similarity_obj = simi_class(shifted_sp1_interp.y,
shifted_sp2_interp.y)
try:
similarity_value = similarity_obj.similarity_measure()
except Exception:
warnings.warn("Cannot generate valid similarity value "
"for the two spectra")
similarity_value = 0
return similarity_value
def energy_overlap(sp1, sp2):
"""
Calculate the overlap energy range of two spectra, i.e. lower bound is the
maximum of two spectra's minimum energy.
Upper bound is the minimum of two spectra's maximum energy
Args:
sp1: Spectrum object 1
sp2: Spectrum object 2
Returns:
Overlap energy range
"""
overlap_range = [max(sp1.x.min(), sp2.x.min()), min(sp1.x.max(),
sp2.x.max())]
return overlap_range
def spectra_energy_interpolate(sp1, energy_range):
"""
Use Scipy's interp1d and returns spectrum object with absorption value
interpolated with given energy_range
Args:
sp1: Spectrum object 1
energy_range: new energy range used in interpolate
Returns:
Spectrum object with given energy range and interpolated absorption value
"""
interp = interp1d(sp1.x, sp1.y)
interp_spect = interp(energy_range)
sp1.x = np.array(energy_range)
sp1.y = interp_spect
return sp1
def spectra_lower_extend(sp1, sp2):
"""
Extend the energy range of the spectra and ensure both spectra have the same lower
bound in energy. The spectrum with the higher lower energy bound will be extended;
its first absorption value is used for the extension.
Args:
sp1: Spectrum object 1
sp2: Spectrum object 2
Returns:
Two Spectrum objects with same lower energy bound
"""
min_energy = min(sp1.x.min(), sp2.x.min())
if sp1.x.min() > min_energy:
# Calculate spectrum point density used for padding
sp1_den = np.ptp(sp1.x) / sp1.ydim[0]
extend_spec1_energy = np.linspace(min_energy, sp1.x.min(),
retstep=sp1_den)[0][:-1]
sp1_ext_energy = np.hstack((extend_spec1_energy, sp1.x))
sp1_ext_mu = np.lib.pad(sp1.y, (len(extend_spec1_energy), 0), 'constant',
constant_values=(sp1.y[0], 0))
sp1.x = sp1_ext_energy
sp1.y = sp1_ext_mu
elif sp2.x.min() > min_energy:
sp2_den = np.ptp(sp2.x) / sp2.ydim[0]
extend_spec2_energy = np.linspace(min_energy, sp2.x.min(),
retstep=sp2_den)[0][:-1]
sp2_ext_energy = np.hstack((extend_spec2_energy, sp2.x))
sp2_ext_mu = np.lib.pad(sp2.y, (len(extend_spec2_energy), 0),
'constant',
constant_values=(sp2.y[0], 0))
sp2.x = sp2_ext_energy
sp2.y = sp2_ext_mu
return sp1, sp2
def absorption_onset_shift(sp1, sp2, intensity_threshold):
"""
Shift spectrum 2 with respect to spectrum 1 using the difference between
two spectra's onset of absorption.
The onset is determined by ascertaining the lowest incident energy at which
the spectra's absorption intensity reaches the 'intensity_threshold' of the
peak intensity.
Args:
sp1: Spectrum object 1
sp2: Spectrum object 2
intensity_threshold: The absorption peak intensity threshold used to
determine the absorption onset. Must be a float between 0 and 1.
Returns:
shifted_sp1: Spectrum object 1
shifted_sp2: Spectrum object with absorption same as sp2 and
shifted energy range
energy_diff: Energy difference between sp1 and sp2,
energy_diff > 0 mean sp2 needs to shift left
"""
if not 0 <= float(intensity_threshold) <= 1:
raise ValueError("The intensity threshold must be between 0 and 1")
sp1_inten_thres = max(sp1.y) * float(intensity_threshold)
sp2_inten_thres = max(sp2.y) * float(intensity_threshold)
threpoint_1_energy = sp1.x[np.argmax(sp1.y > sp1_inten_thres)]
threpoint_2_energy = sp2.x[np.argmax(sp2.y > sp2_inten_thres)]
energy_diff = threpoint_2_energy - threpoint_1_energy
# sp2 need to shift left
if energy_diff >= 0:
sp2_new_energy = sp2.x - energy_diff
sp2_new_mu = sp2.y
# sp2 need to shift right
elif energy_diff < 0:
sp2_new_energy = sp2.x - energy_diff
sp2_new_mu = sp2.y
shifted_sp1 = Spectrum(sp1.x, sp1.y)
shifted_sp2 = Spectrum(sp2_new_energy, sp2_new_mu)
return shifted_sp1, shifted_sp2, energy_diff, threpoint_1_energy
def signal_corre_shift(sp1, sp2):
"""
Using the cross correlation function between two spectra to determine the shift energy.
Args:
sp1: Spectrum object 1
sp2: Spectrum object 2
Returns:
energy_diff: Energy difference between sp1 and sp2,
energy_diff > 0 means sp2 needs to shift left
"""
overlap_energy = energy_overlap(sp1, sp2)
# Energy grid interpolate point density: 0.01 eV
overlap_energy_grid = np.linspace(overlap_energy[0], overlap_energy[1],
int(float(overlap_energy[1] - overlap_energy[0]) / 0.01))
interp_sp1 = spectra_energy_interpolate(Spectrum(sp1.x, sp1.y), overlap_energy_grid)
interp_sp2 = spectra_energy_interpolate(Spectrum(sp2.x, sp2.y), overlap_energy_grid)
if not np.allclose(interp_sp1.x, interp_sp2.x, 1e-5):
raise ValueError("Two scaled spectra's energy grid densities are different")
sp2_shift_index = np.argmax(signal.correlate(interp_sp2.y, interp_sp1.y))
# sp2 need to shift left
if sp2_shift_index > interp_sp2.x.shape[0]:
left_shift_index = sp2_shift_index - interp_sp2.x.shape[0]
energy_diff = interp_sp2.x[left_shift_index] - interp_sp2.x.min()
# sp2 need to shift right
elif sp2_shift_index < interp_sp2.x.shape[0]:
right_shift_index = interp_sp2.x.shape[0] - sp2_shift_index
energy_diff = -(interp_sp2.x[right_shift_index] - interp_sp2.x.min())
else:
energy_diff = 0
shifted_sp1 = Spectrum(sp1.x, sp1.y)
shifted_sp2 = Spectrum(sp2.x - energy_diff, sp2.y)
return shifted_sp1, shifted_sp2, energy_diff
def preset_value_shift(sp1, sp2, preset_shift):
"""
Using the preset value to shift the two spectra
Args:
sp1: Spectrum object 1
sp2: Spectrum object 2
preset_shift: Preset energy shift value between two spectra,
energy_diff > 0 means sp2 needs to shift left
"""
shifted_sp1 = Spectrum(sp1.x, sp1.y)
shifted_sp2 = Spectrum(sp2.x - preset_shift, sp2.y)
return shifted_sp1, shifted_sp2, preset_shift
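# A minimal usage sketch (not part of the original module): given two pymatgen
# Spectrum objects, the workflow above would typically be driven as
#
#     from pymatgen.core.spectrum import Spectrum
#     sp_exp = Spectrum(energies_exp, intensities_exp)
#     sp_calc = Spectrum(energies_calc, intensities_calc)
#     ss = SpectraSimilarity(sp_exp, sp_calc)
#     score = ss.get_shifted_similarity('PearsonCorrMeasure',
#                                       energy_variation=[-2, 2, 0.1],
#                                       algo='threshold_shift')
#
# 'PearsonCorrMeasure' is a placeholder for whatever class exists in
# veidt.elsie.similarity_measures, and the energy/intensity arrays are assumed
# to be 1D sequences of equal length.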
|
materialsvirtuallab/veidt
|
veidt/elsie/spectra_similarity.py
|
Python
|
bsd-3-clause
| 17,232
|
[
"pymatgen"
] |
793b35c6eddcb249d44a4a395e9abeeb6780a2f705ca6fb2fad7aae0aa6458d9
|
# Copyright 2008-2009 Brian Boyer, Ryan Mark, Angela Nitzke, Joshua Pollock,
# Stuart Tiffen, Kayla Webley and the Medill School of Journalism, Northwestern
# University.
#
# This file is part of django-facebookconnect.
#
# django-facebookconnect is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# django-facebookconnect is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with django-facebookconnect. If not, see <http://www.gnu.org/licenses/>.
import logging
log = logging.getLogger('facebookconnect.views')
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.contrib.auth import authenticate, login, logout, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib.auth.models import User
from django.conf import settings
from facebook.djangofb import require_login as require_fb_login
from facebookconnect.models import FacebookProfile
from facebookconnect.forms import FacebookUserCreationForm
def facebook_login(request, redirect_url=None,
template_name='facebook/login.html',
extra_context=None):
"""
facebook_login
===============================
Handles logging in a facebook user. Usually handles the django side of
what happens when you click the facebook connect button. The user will get
redirected to the 'setup' view if their facebook account is not on file.
If the user is on file, they will get redirected. You can specify the
redirect url in the following order of precedence:
1. whatever url is in the 'next' get parameter passed to the facebook_login url
2. whatever url is passed to the facebook_login view when the url is defined
3. whatever url is defined in the LOGIN_REDIRECT_URL setting directive
Sending a user here without login will display a login template.
If you define a url to use this view, you can pass the following parameters:
* redirect_url: defines where to send the user after they are logged in. This
can get overridden by the url in the 'next' get param passed on
the url.
* template_name: Template to use if a user arrives at this page without submitting
to it. Uses 'facebook/login.html' by default.
* extra_context: A context object whose contents will be passed to the template.
"""
# User is logging in
if request.method == "POST":
log.debug("OK logging in...")
url = reverse('facebook_setup')
if request.POST.get(REDIRECT_FIELD_NAME,False):
url += "?%s=%s" % (REDIRECT_FIELD_NAME, request.POST[REDIRECT_FIELD_NAME])
elif redirect_url:
url += "?%s=%s" % (REDIRECT_FIELD_NAME, redirect_url)
user = authenticate(request=request)
if user is not None:
if user.is_active:
login(request, user)
# Redirect to a success page.
log.debug("Redirecting to %s" % url)
return HttpResponseRedirect(url)
else:
log.debug("This account is disabled.")
raise FacebookAuthError('This account is disabled.')
elif request.facebook.uid:
#we have to set this user up
log.debug("Redirecting to setup")
return HttpResponseRedirect(url)
# User is already logged in
elif request.user.is_authenticated():
if request.REQUEST.get(REDIRECT_FIELD_NAME,False):
redirect_url = request.REQUEST[REDIRECT_FIELD_NAME]
elif redirect_url is None:
redirect_url = getattr(settings, "LOGIN_REDIRECT_URL", "/")
return HttpResponseRedirect(redirect_url)
# User ain't logged in
# here we handle extra_context like it is done in django-registration
if extra_context is None:
extra_context = {}
context = RequestContext(request)
for key, value in extra_context.items():
context[key] = callable(value) and value() or value
template_dict = {}
# we only need to set next if its been passed in the querystring or post vars
if request.REQUEST.get(REDIRECT_FIELD_NAME, False):
template_dict.update({REDIRECT_FIELD_NAME:request.REQUEST[REDIRECT_FIELD_NAME]})
return render_to_response(
template_name,
template_dict,
context_instance=context)
def facebook_logout(request, redirect_url=None):
"""
facebook_logout
============================
Logs the user out of facebook and django.
If you define a url to use this view, you can pass the following
parameters:
* redirect_url: defines where to send the user after they are logged out.
If no url is passed, it defaults to using the
'LOGOUT_REDIRECT_URL' setting.
"""
logout(request)
if getattr(request,'facebook',False):
request.facebook.session_key = None
request.facebook.uid = None
url = getattr(settings,'LOGOUT_REDIRECT_URL',redirect_url) or '/'
return HttpResponseRedirect(url)
def setup(request,redirect_url=None,
registration_form_class=FacebookUserCreationForm,
template_name='facebook/setup.html',
extra_context=None):
"""
setup
===============================
Handles a new facebook user. There are three ways to setup a new facebook user.
1. Link the facebook account with an existing django account.
2. Create a dummy django account to attach to facebook. The user must always use
facebook to login.
3. Ask the user to create a new django account
The built in template presents the user with all three options. Once setup is
complete the user will get redirected. The url is chosen in the following order of
precedence:
1. whatever url is in the 'next' get parameter passed to the setup url
2. whatever url is passed to the setup view when the url is defined
3. whatever url is defined in the LOGIN_REDIRECT_URL setting directive
If you define a url to use this view, you can pass the following parameters:
* redirect_url: Defines where to send the user after they are setup. This
can get overridden by the url in the 'next' get param passed on
the url.
* registration_form_class: Django form class to use for new user way #3 explained
above. The form should create a new user.
* template_name: Template to use. Uses 'facebook/setup.html' by default.
* extra_context: A context object whose contents will be passed to the template.
"""
log.debug('in setup view')
#you need to be logged into facebook.
if not request.facebook.uid:
log.debug('Need to be logged into facebook')
url = reverse(facebook_login)
if request.REQUEST.get(REDIRECT_FIELD_NAME,False):
url += "?%s=%s" % (REDIRECT_FIELD_NAME, request.REQUEST[REDIRECT_FIELD_NAME])
return HttpResponseRedirect(url)
#setup forms
login_form = AuthenticationForm()
registration_form = registration_form_class()
#figure out where to go after setup
if request.REQUEST.get(REDIRECT_FIELD_NAME,False):
redirect_url = request.REQUEST[REDIRECT_FIELD_NAME]
elif redirect_url is None:
redirect_url = getattr(settings, "LOGIN_REDIRECT_URL", "/")
#check that this fb user is not already in the system
try:
FacebookProfile.objects.get(facebook_id=request.facebook.uid)
# already setup, move along please
return HttpResponseRedirect(redirect_url)
except FacebookProfile.DoesNotExist, e:
# not in the db, ok to continue
pass
#user submitted a form - which one?
if request.method == "POST":
log.debug('Submitted form')
#lets setup a facebook only account. The user will have to use
#facebook to login.
if request.POST.get('facebook_only',False):
log.debug('Facebook Only')
profile = FacebookProfile(facebook_id=request.facebook.uid)
user = User(username=request.facebook.uid,
email=profile.email)
user.set_unusable_password()
user.save()
profile.user = user
profile.save()
log.info("Added user and profile for %s!" % request.facebook.uid)
user = authenticate(request=request)
login(request, user)
return HttpResponseRedirect(redirect_url)
# user setup his/her own local account in addition to their facebook
# account. The user will have to login with facebook unless they
# reset their password.
elif request.POST.get('register',False):
log.debug('Register a new account')
profile = FacebookProfile(facebook_id=request.facebook.uid)
if profile.first_name != "(Private)":
fname = profile.first_name
if profile.last_name != "(Private)":
lname = profile.last_name
user = User(first_name=fname, last_name=lname)
registration_form = registration_form_class(
data=request.POST, instance=user)
if registration_form.is_valid():
user = registration_form.save()
profile.user = user
profile.save()
log.info("Added user and profile for %s!" % request.facebook.uid)
login(request, authenticate(request=request))
return HttpResponseRedirect(redirect_url)
else:
request.user = User()
request.user.facebook_profile = FacebookProfile(facebook_id=request.facebook.uid)
#user logs in in with an existing account, and the two are linked.
elif request.POST.get('login',False):
login_form = AuthenticationForm(data=request.POST)
if login_form.is_valid():
user = login_form.get_user()
log.debug("Trying to setup FB: %s, %s" % (user,request.facebook.uid))
if user and user.is_active:
FacebookProfile.objects.get_or_create(user=user, facebook_id=request.facebook.uid)
log.info("Attached facebook profile %s to user %s!" % (request.facebook.uid, user))
login(request, user)
return HttpResponseRedirect(redirect_url)
else:
request.user = User()
request.user.facebook_profile = FacebookProfile(facebook_id=request.facebook.uid)
#user didn't submit a form, but is logged in already. We'll just link up their facebook
#account automatically.
elif request.user.is_authenticated():
log.debug('Already logged in')
try:
request.user.facebook_profile
except FacebookProfile.DoesNotExist:
profile = FacebookProfile(facebook_id=request.facebook.uid)
profile.user = request.user
profile.save()
log.info("Attached facebook profile %s to user %s!" % (profile.facebook_id,profile.user))
return HttpResponseRedirect(redirect_url)
# user just showed up
else:
log.debug('Setting up form...')
request.user.facebook_profile = profile = FacebookProfile(facebook_id=request.facebook.uid)
login_form = AuthenticationForm(request)
log.debug('creating a dummy user')
fname = lname = ''
if profile.first_name != "(Private)":
fname = profile.first_name
if profile.last_name != "(Private)":
lname = profile.last_name
user = User(first_name=fname, last_name=lname)
registration_form = registration_form_class(instance=user)
log.debug('going all the way...')
# add the extra_context to this one
if extra_context is None:
extra_context = {}
context = RequestContext(request)
for key, value in extra_context.items():
context[key] = callable(value) and value() or value
template_dict = {
"login_form":login_form,
"registration_form":registration_form
}
# we only need to set next if its been passed in the querystring or post vars
if request.REQUEST.get(REDIRECT_FIELD_NAME, False):
template_dict.update( {REDIRECT_FIELD_NAME: request.REQUEST[REDIRECT_FIELD_NAME]})
return render_to_response(
template_name,
template_dict,
context_instance=context)
class FacebookAuthError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
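# A minimal wiring sketch (not part of the original module): in a Django 1.x
# era urls.py these views would typically be hooked up along the lines of
#
#     from django.conf.urls.defaults import patterns, url
#     urlpatterns = patterns('facebookconnect.views',
#         url(r'^login/$', 'facebook_login', name='facebook_login'),
#         url(r'^logout/$', 'facebook_logout', name='facebook_logout'),
#         url(r'^setup/$', 'setup', name='facebook_setup'),
#     )
#
# The url names match the reverse('facebook_setup') / reverse(facebook_login)
# lookups used above; the URL prefixes themselves are illustrative.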
|
ryanmark/django-facebookconnect
|
facebookconnect/views.py
|
Python
|
gpl-3.0
| 13,397
|
[
"Brian"
] |
e9dc6636c09a821f7c1d5d8a2bd165f41c1dbc6e7594e8828308f99bbc9442b3
|
import os
import csv
import time
import json
import numpy as np
import logging as log
class Database():
""" Stores tables to respond to :meth:`API._feature_info`
Arguments
----------
path: str
The file path to store and access the database
_runtime: :class:`RUNTIME`
Gets stored as :data:`RUNTIME`
Attributes
-----------
RUNTIME: :class:`RUNTIME`
With keywords needed to load files and use tables
"""
def __init__(self, path, _runtime):
# Get the database keywords
self.RUNTIME = _runtime
def load_config(self, config):
""" Loads all files from ``config`` into database
Arguments
----------
config: dict
all data from :mod:`UtilityLayer.rh_config`
Returns
---------
:class:`Database`
the derived database class instance
"""
# Get file fields
k_files = self.RUNTIME.DB.FILE
# Get keywords for the BFLY_CONFIG
k_list = k_files.CONFIG.GROUP_LIST
k_range = range(len(k_list) - 1)
# Join lists from config groups
def cat_lists(groups, level):
# Get the next group key
lev_key = k_list[level]
# Get a list of lists of all the next groups
g_lists = [g.get(lev_key, []) for g in groups]
# Get list of all groups from groups
return [g for l in g_lists for g in l]
# Join all lists from within config
all_lists = reduce(cat_lists, k_range, [config])
# Load all files for each dataset
map(self.load_all, all_lists)
# Save to disk
self.commit()
return self
def load_all(self, source):
""" Load the tables, synapses, and neuron configs
Arguments
----------
source: dict
The configuration options for a dataset
"""
# Get file fields
k_files = self.RUNTIME.DB.FILE
# Get keywords for the BFLY_CONFIG
k_list = k_files.CONFIG.GROUP_LIST
k_dpath = k_files.CONFIG.DPATH.NAME
k_path = k_files.CONFIG.PATH.NAME
# Get the key to the channels
k_channel = k_list[-1]
# Set custom names of files
for nf in map(k_files.get, k_files.DB_LIST):
nf.VALUE = source.get(nf.NAME, nf.DEFAULT)
# list of channels for the dataset path
c_list = source.get(k_channel, [])
d_path = source.get(k_path, '')
# List used paths
done_paths = ['']
# Add all channel paths to database
for c_dict in c_list:
# Get paths to map to data
c_path = c_dict.get(k_path, '')
c_dpath = c_dict.get(k_dpath, d_path)
if c_dpath:
# Map the path to the data path
self.add_path(c_path, c_dpath)
# if we found a new data path
if c_dpath not in done_paths:
# Add all tables for the dataset path
self.add_tables(c_dpath)
# Load all synapses and neurons
synapses = self.load_synapses(c_dpath)
self.load_neurons(c_dpath, synapses)
# Mark the dataset path as fully loaded
done_paths.append(c_dpath)
def add_path(self, c_path, d_path):
""" store a link from a ``c_path`` to a ``d_path``
Must be overridden by derived class.
Arguments
----------
c_path: str
The path to a given channel with images
d_path: str
The path to the dataset with metadata files
"""
pass
def add_tables(self, path):
""" Store all the tables for a given path
Arguments
----------
path: str
The dataset path to metadata files
"""
# Get keywords for the database
k_list = self.RUNTIME.DB.TABLE.LIST
# Create all tables for the path
for k_table in k_list:
# Make a table from path and table name
self.add_table(k_table, path)
return self
def add_table(self, table, path):
""" Add a table to the database
Must be overridden by derived classes
Arguments
----------
table: str
The category of table for the database
path: str
The dataset path to metadata files
Returns
--------
str or bool
The table name combining ``table`` and ``path`` \
The derived classes should return whether the table was \
added successfully.
"""
k_join = self.RUNTIME.DB.JOIN.NAME
return k_join.format(table, path)
def load_synapses(self, path):
""" Load all the synapse information from files
Arguments
----------
path: str
The dataset path to metadata files
Returns
--------
numpy.ndarray
The Nx5 array of pre, post, z, y, x values \
where N is the number of synapses for the ``path``.
"""
# Get file fields
k_files = self.RUNTIME.DB.FILE
# Get keywords for input file
k_file = k_files.SYNAPSE.VALUE
k_point = k_files.SYNAPSE.POINT.NAME
k_points_in = k_files.SYNAPSE.POINT.LIST
k_nodes_in = k_files.SYNAPSE.NEURON_LIST
# Get the full path to the synapse file
full_path = os.path.join(path, k_file)
try:
# Load the file with the synapses
with open(full_path, 'r') as f:
all_dict = json.load(f)
point_dict = all_dict[k_point]
# Return if not valid file or json
except (IOError, ValueError):
return []
# Transpose the list of all synapses
links = map(all_dict.get, k_nodes_in)
center = map(point_dict.get, k_points_in)
synapses = np.uint32(links + center).T
# Add synapses to database
self.add_synapses(path, synapses)
return synapses
def load_neurons(self, path, synapses):
""" Load all the neuron information from files
Arguments
----------
path: str
The dataset path to metadata files
synapses: numpy.ndarray
The Nx5 array of pre, post, z, y, x values \
where N is the number of synapses for the ``path``.
Returns
--------
numpy.ndarray
The Nx4 array of id, z, y, x values \
where N is the number of neurons for the ``path``.
"""
# return if not synapses
if not len(synapses):
return synapses
####
# Get neurons from loaded synapses
####
# All unique source nodes and their keys
all_src, src_keys = np.unique(synapses.T[0], True)
# All unique target nodes and their keys
all_tgt, tgt_keys = np.unique(synapses.T[1], True)
# Find keys for neurons that are never targets
only_src = list(set(all_src) - set(all_tgt))
src_dict = dict(zip(all_src, src_keys))
src_keys = map(src_dict.get, only_src)
# Get all neuron lists from synapse targets, sources
neuron_tgt = np.delete(synapses[tgt_keys], 0, 1)
neuron_src = np.delete(synapses[src_keys], 1, 1)
# Get full neuron list from source and target
neurons = np.r_[neuron_src, neuron_tgt]
# Get file fields
k_files = self.RUNTIME.DB.FILE
# Get keywords for input file
k_file = k_files.SOMA.VALUE
k_file = os.path.join(path, k_file)
if os.path.exists(k_file):
msg = "Loading neuron centers from {}"
log.info(msg.format(k_file))
# Load the csv
with open(k_file, 'r') as jf:
# Keep a list of synapseless soma
new_neurons = []
# Add each new center point to database
for soma in json.load(jf):
# Make a numpy uint32 coordinate array
k_soma = ['neuron_id', 'z', 'y', 'x']
new_soma = np.uint32(map(soma.get, k_soma))
soma_id = new_soma[0]
# Find the correct ID
neuron_ids = neurons.T[0]
# If the soma has a synapse
if soma_id in neuron_ids:
# Insert into the correct ID
soma_index = np.argwhere(neuron_ids == soma_id)[0][0]
neurons[soma_index] = new_soma
else:
# Add new synapseless neuron
new_neurons.append(new_soma)
# Add new neurons to full neurons list
if len(new_neurons):
neurons = np.r_[neurons, new_neurons]
# Add neurons to database
self.add_neurons(path, neurons)
return neurons
def add_synapses(self, path, synapses):
""" Add all the synapases to the database
Arguments
----------
path: str
The dataset path to metadata files
synapses: numpy.ndarray
The Nx5 array of pre, post, z, y, x values \
where N is the number of synapses for the ``path``.
Returns
--------
list
A list of dicts from each row of ``synapses`` \
with dictionary keys taken from ``SYNAPSE.FULL_LIST`` \
field of :data:`RUNTIME.DB`
"""
# Get database fields
k_tables = self.RUNTIME.DB.TABLE
# Get keywords for the database
k_synapse = k_tables.SYNAPSE.NAME
# List all the synapse database keys
k_keys = k_tables.SYNAPSE.FULL_LIST
# Add entries
return self.add_entries(k_synapse, path, k_keys, synapses)
def add_neurons(self, path, neurons):
""" Add all the neurons to the database
Arguments
----------
path: str
The dataset path to metadata files
neurons: numpy.ndarray
The Nx4 array of id, z, y, x values \
where N is the number of neurons for the ``path``.
Returns
--------
list
A list of dicts from each row of ``neurons`` \
with dictionary keys from the ``NEURON.FULL_LIST`` \
field of :data:`RUNTIME.DB`
"""
# Get database fields
k_tables = self.RUNTIME.DB.TABLE
# Get keywords for the database
k_neuron = k_tables.NEURON.NAME
# List all the neuron database keys
k_keys = k_tables.NEURON.FULL_LIST
# Add entries
return self.add_entries(k_neuron, path, k_keys, neurons)
def add_entries(self, table, path, t_keys, entries):
""" Add an array or list of entries to a table
Must be overridden by derived class.
"""
k_join = self.RUNTIME.DB.JOIN.NAME
return k_join.format(table, path)
def add_entry(self, table, path, entry, update=1):
""" and a single entry to a table for a path
Overides :meth:`Database.add_entry`
Arguments
----------
table: str
The category of table for the database
path: str
The dataset path to metadata files
entry: dict
The mapping of keys to values for the entry
update: int
1 to update old entries matching keys, and \
0 to write new entries ignoring old entries. Default 1.
Returns
--------
dict
The value of the entry
"""
k_join = self.RUNTIME.DB.JOIN.NAME
return k_join.format(table, path)
def get_path(self, path):
""" Map a channel path to a dataset path
Must be overridden by derived class.
Arguments
----------
path: str
The path to the given channel
Returns
--------
str
The dataset path for the given ``path``
"""
pass
def get_table(self, table, path):
""" Get the actual table for a given path
Must be overridden by derived class.
Arguments
----------
table: str
The category of table for the database
path: str
The dataset path to metadata files
Returns
--------
str or object
Full database name of the table for a path. \
The derived classes should actually return the python \
object reference to the real table.
"""
real_path = self.get_path(path)
k_join = self.RUNTIME.DB.JOIN.NAME
return k_join.format(table, real_path)
def synapse_ids(self, table, path, start, stop):
"""
Must be overridden by derived class.
"""
return self.get_table(table, path)
def is_synapse(self, table, path, id_key):
"""
Must be overridden by derived class.
"""
return self.get_table(table, path)
def is_neuron(self, table, path, id_key):
"""
Must be overridden by derived class.
"""
return self.get_table(table, path)
def synapse_keypoint(self, table, path, id_key, scales):
"""
Must be overridden by derived class.
"""
return self.get_table(table, path)
def neuron_keypoint(self, table, path, id_key, scales):
"""
Must be overridden by derived class.
"""
return self.get_table(table, path)
def synapse_parent(self, table, path, id_key):
"""
Must be overridden by derived class.
"""
return self.get_table(table, path)
def neuron_children(self, table, path, id_key, start, stop):
"""
Must be overridden by derived class.
"""
return self.get_table(table, path)
def all_neurons(self, table, path):
"""
Must be overridden by derived class.
"""
return self.get_table(table, path)
def get_by_key(self, table, path, key):
""" Get the entry for the key in the table.
Must be overridden by derived class.
Arguments
----------
table: str
The category of table for the database
path: str
The dataset path to metadata files
key: int
The primary key value for any entry
Returns
--------
object or dict
The object reference from :meth:`get_table`. \
The derived class should give an entry in the table.
"""
return self.get_table(table, path)
def commit(self):
""" Save all database changes to the database file.
Must be overridden by derived class.
"""
pass
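# Hedged sketch (not part of the original file): one way a derived class could
# fill in the abstract hooks above. The class name and in-memory storage are
# hypothetical; it assumes the enclosing base class here is ``Database``.
#
# class DictDatabase(Database):
#     def __init__(self, *args, **kwargs):
#         Database.__init__(self, *args, **kwargs)
#         self._tables = {}
#     def get_path(self, path):
#         return path                      # channel path == dataset path
#     def add_entries(self, table, path, t_keys, entries):
#         real = self.get_table(table, path)
#         rows = [dict(zip(t_keys, row)) for row in entries]
#         self._tables.setdefault(real, []).extend(rows)
#         return rows
#     def commit(self):
#         pass                             # nothing to flush for an in-memory store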
|
Rhoana/butterfly
|
bfly/CoreLayer/DatabaseLayer/Database.py
|
Python
|
mit
| 14,761
|
[
"NEURON"
] |
ce97ead7111a9ac055e90954f3aeae44ff325592d367506366008163819d19b8
|
# Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SearchIO object to model a single database hit."""
from __future__ import print_function
from itertools import chain
from Bio._py3k import filter
from Bio._utils import getattr_str, trim_str
from Bio.SearchIO._utils import allitems, optionalcascade
from ._base import _BaseSearchObject
from .hsp import HSP
class Hit(_BaseSearchObject):
"""Class representing a single database hit of a search result.
Hit objects are the second-level container in the SearchIO module. They
are the objects contained within a QueryResult (see QueryResult). They
themselves are container for HSP objects and will contain at least one
HSP.
To have a quick look at a Hit and its contents, invoke ``print`` on it::
>>> from Bio import SearchIO
>>> qresult = next(SearchIO.parse('Blast/mirna.xml', 'blast-xml'))
>>> hit = qresult[3]
>>> print(hit)
Query: 33211
mir_1
Hit: gi|301171322|ref|NR_035857.1| (86)
Pan troglodytes microRNA mir-520c (MIR520C), microRNA
HSPs: ---- -------- --------- ------ --------------- ---------------------
# E-value Bit score Span Query range Hit range
---- -------- --------- ------ --------------- ---------------------
0 8.9e-20 100.47 60 [1:61] [13:73]
1 3.3e-06 55.39 60 [0:60] [13:73]
You can invoke ``len`` on a Hit object to see how many HSP objects it contains::
>>> len(hit)
2
Hit objects behave very similar to Python lists. You can retrieve the HSP
object inside a Hit using the HSP's integer index. Hit objects can also be
    sliced, which will return a new Hit object containing only the sliced HSPs::
# HSP items inside the Hit can be retrieved using its integer index
>>> hit[0]
HSP(hit_id='gi|301171322|ref|NR_035857.1|', query_id='33211', 1 fragments)
# slicing returns a new Hit
>>> hit
Hit(id='gi|301171322|ref|NR_035857.1|', query_id='33211', 2 hsps)
>>> hit[:1]
Hit(id='gi|301171322|ref|NR_035857.1|', query_id='33211', 1 hsps)
>>> print(hit[1:])
Query: 33211
mir_1
Hit: gi|301171322|ref|NR_035857.1| (86)
Pan troglodytes microRNA mir-520c (MIR520C), microRNA
HSPs: ---- -------- --------- ------ --------------- ---------------------
# E-value Bit score Span Query range Hit range
---- -------- --------- ------ --------------- ---------------------
0 3.3e-06 55.39 60 [0:60] [13:73]
Hit objects provide ``filter`` and ``map`` methods, which are analogous to
Python's built-in ``filter`` and ``map`` except that they return a new Hit
object instead of a list.
Here is an example of using ``filter`` to select for HSPs whose e-value is
less than 1e-10::
>>> evalue_filter = lambda hsp: hsp.evalue < 1e-10
>>> filtered_hit = hit.filter(evalue_filter)
>>> len(hit)
2
>>> len(filtered_hit)
1
>>> print(filtered_hit)
Query: 33211
mir_1
Hit: gi|301171322|ref|NR_035857.1| (86)
Pan troglodytes microRNA mir-520c (MIR520C), microRNA
HSPs: ---- -------- --------- ------ --------------- ---------------------
# E-value Bit score Span Query range Hit range
---- -------- --------- ------ --------------- ---------------------
0 8.9e-20 100.47 60 [1:61] [13:73]
There are also other methods which are counterparts of Python lists' methods
with the same names: ``append``, ``index``, ``pop``, and ``sort``. Consult their
respective documentations for more details and examples of their usage.
"""
# attributes we don't want to transfer when creating a new Hit class
# from this one
_NON_STICKY_ATTRS = ('_items', )
def __init__(self, hsps=(), id=None, query_id=None):
"""Initializes a Hit object.
:param hsps: HSP objects contained in the Hit object
:type hsps: iterable yielding HSP
:param id: hit ID
:type id: string
:param query_id: query ID
:type query_id: string
If multiple HSP objects are used for initialization, they must all
have the same ``query_id``, ``query_description``, ``hit_id``, and
``hit_description`` properties.
"""
# default attribute values
self._id = id
self._id_alt = []
self._query_id = query_id
self._description = None
self._description_alt = []
self._query_description = None
        # TODO - Move this into the for loop below in case
# hsps is a single use iterator?
for attr in ('query_id', 'query_description', 'hit_id',
'hit_description'):
# HACK: setting the if clause to '> 1' allows for empty hit objects.
# This makes it easier to work with file formats with unpredictable
# hit-hsp ordering. The empty hit object itself is nonfunctional,
# however, since all its cascading properties are empty.
if len(set(getattr(hsp, attr) for hsp in hsps)) > 1:
raise ValueError("Hit object can not contain HSPs with "
"more than one %s." % attr)
self._items = []
for hsp in hsps:
# validate each HSP
self._validate_hsp(hsp)
            # and store them as instance attributes
self.append(hsp)
def __repr__(self):
return "Hit(id=%r, query_id=%r, %r hsps)" % (self.id, self.query_id,
len(self))
def __iter__(self):
return iter(self.hsps)
def __len__(self):
return len(self.hsps)
# Python 3:
def __bool__(self):
return bool(self.hsps)
# Python 2:
__nonzero__ = __bool__
def __contains__(self, hsp):
return hsp in self._items
def __str__(self):
lines = []
# set query id line
qid_line = 'Query: %s' % self.query_id
if self.query_description:
qid_line += trim_str('\n %s' %
self.query_description, 80, '...')
lines.append(qid_line)
# set hit id line
hid_line = ' Hit: %s' % self.id
if hasattr(self, 'seq_len'):
hid_line += ' (%i)' % self.seq_len
if self.description:
hid_line += trim_str('\n %s' % self.description,
80, '...')
lines.append(hid_line)
# set hsp line and table
if not self.hsps:
lines.append(' HSPs: ?')
else:
lines.append(' HSPs: %s %s %s %s %s %s' %
('-' * 4, '-' * 8, '-' * 9, '-' * 6, '-' * 15, '-' * 21))
pattern = '%11s %8s %9s %6s %15s %21s'
lines.append(pattern % ('#', 'E-value', 'Bit score', 'Span',
'Query range', 'Hit range'))
lines.append(pattern % ('-' * 4, '-' * 8, '-' * 9, '-' * 6, '-' * 15, '-' * 21))
for idx, hsp in enumerate(self.hsps):
# evalue
evalue = getattr_str(hsp, 'evalue', fmt='%.2g')
# bitscore
bitscore = getattr_str(hsp, 'bitscore', fmt='%.2f')
# alignment length
aln_span = getattr_str(hsp, 'aln_span')
# query region
query_start = getattr_str(hsp, 'query_start')
query_end = getattr_str(hsp, 'query_end')
query_range = '[%s:%s]' % (query_start, query_end)
# max column length is 18
query_range = trim_str(query_range, 15, '~]')
# hit region
hit_start = getattr_str(hsp, 'hit_start')
hit_end = getattr_str(hsp, 'hit_end')
hit_range = '[%s:%s]' % (hit_start, hit_end)
hit_range = trim_str(hit_range, 21, '~]')
# append the hsp row
lines.append(pattern % (str(idx), evalue, bitscore, aln_span,
query_range, hit_range))
return '\n'.join(lines)
def __getitem__(self, idx):
# if key is slice, return a new Hit instance
if isinstance(idx, slice):
obj = self.__class__(self.hsps[idx])
self._transfer_attrs(obj)
return obj
return self._items[idx]
def __setitem__(self, idx, hsps):
# handle case if hsps is a list of hsp
if isinstance(hsps, (list, tuple)):
for hsp in hsps:
self._validate_hsp(hsp)
else:
self._validate_hsp(hsps)
self._items[idx] = hsps
def __delitem__(self, idx):
del self._items[idx]
# hsp properties #
def _validate_hsp(self, hsp):
"""Validates an HSP object.
Valid HSP objects have the same hit_id as the Hit object ID and the
same query_id as the Hit object's query_id.
"""
if not isinstance(hsp, HSP):
raise TypeError("Hit objects can only contain HSP objects.")
# HACK: to make validation during __init__ work
if self._items:
if self.id is not None:
if hsp.hit_id != self.id:
raise ValueError("Expected HSP with hit ID %r, "
"found %r instead." % (self.id, hsp.hit_id))
else:
self.id = hsp.hit_id
if self.description is not None:
if hsp.hit_description != self.description:
raise ValueError("Expected HSP with hit description %r, "
"found %r instead." % (self.description,
hsp.hit_description))
else:
self.description = hsp.hit_description
if self.query_id is not None:
if hsp.query_id != self.query_id:
raise ValueError("Expected HSP with query ID %r, "
"found %r instead." % (self.query_id, hsp.query_id))
else:
self.query_id = hsp.query_id
if self.query_description is not None:
if hsp.query_description != self.query_description:
raise ValueError("Expected HSP with query description %r, "
"found %r instead." % (self.query_description,
hsp.query_description))
else:
self.query_description = hsp.query_description
# properties #
description = optionalcascade('_description', 'hit_description',
"""Hit description""")
query_description = optionalcascade('_query_description',
'query_description',
"""Description of the query that produced the hit""")
id = optionalcascade('_id', 'hit_id', """Hit ID string.""")
query_id = optionalcascade('_query_id', 'query_id',
"""ID string of the query that produced the hit""")
# returns all hsps
hsps = allitems(doc="""HSP objects contained in the Hit""")
@property
def id_all(self):
"""Alternative ID(s) of the Hit"""
return [self.id] + self._id_alt
@property
def description_all(self):
"""Alternative descriptions of the Hit"""
return [self.description] + self._description_alt
@property
def fragments(self):
"""HSPFragment objects contained in the Hit"""
return [frag for frag in chain(*self._items)]
# public methods #
def append(self, hsp):
"""Adds a HSP object to the end of Hit.
Parameters
hsp -- HSP object to append.
Any HSP object appended must have the same ``hit_id`` property as the
Hit object's ``id`` property and the same ``query_id`` property as the
Hit object's ``query_id`` property.
"""
self._validate_hsp(hsp)
self._items.append(hsp)
def filter(self, func=None):
"""Creates a new Hit object whose HSP objects pass the filter
function.
:param func: function for filtering
:type func: callable, accepts HSP, returns bool
``filter`` is analogous to Python's built-in ``filter`` function, except
that instead of returning a list it returns a ``Hit`` object. Here is an
example of using ``filter`` to select for HSPs having bitscores bigger
than 60::
>>> from Bio import SearchIO
>>> qresult = next(SearchIO.parse('Blast/mirna.xml', 'blast-xml'))
>>> hit = qresult[3]
        >>> bitscore_filter = lambda hsp: hsp.bitscore > 60
        >>> filtered_hit = hit.filter(bitscore_filter)
>>> len(hit)
2
>>> len(filtered_hit)
1
>>> print(filtered_hit)
Query: 33211
mir_1
Hit: gi|301171322|ref|NR_035857.1| (86)
Pan troglodytes microRNA mir-520c (MIR520C), microRNA
HSPs: ---- -------- --------- ------ --------------- ---------------------
# E-value Bit score Span Query range Hit range
---- -------- --------- ------ --------------- ---------------------
0 8.9e-20 100.47 60 [1:61] [13:73]
"""
hsps = list(filter(func, self.hsps))
if hsps:
obj = self.__class__(hsps)
self._transfer_attrs(obj)
return obj
def index(self, hsp):
"""Returns the index of a given HSP object, zero-based.
:param hsp: object to look up
:type hsp: HSP
"""
return self._items.index(hsp)
def map(self, func=None):
"""Creates a new Hit object, mapping the given function to its HSPs.
:param func: function for mapping
:type func: callable, accepts HSP, returns HSP
``map`` is analogous to Python's built-in ``map`` function. It is applied to
all HSPs contained in the Hit object and returns a new Hit object.
"""
if func is not None:
hsps = [func(x) for x in self.hsps[:]] # this creates a shallow copy
else:
hsps = self.hsps[:]
if hsps:
obj = self.__class__(hsps)
self._transfer_attrs(obj)
return obj
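    # Hedged usage sketch (not in the original file): applying ``map`` with a
    # hypothetical user-supplied callable that returns a modified HSP.
    #
    #   shifted_hit = hit.map(shift_hsp)   # shift_hsp: HSP -> HSP, user-defined
    #   assert len(shifted_hit) == len(hit)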
def pop(self, index=-1):
"""Removes and returns the HSP object at the specified index.
:param index: index of HSP object to pop
:type index: int
"""
return self._items.pop(index)
def sort(self, key=None, reverse=False, in_place=True):
"""Sorts the HSP objects.
:param key: sorting function
:type key: callable, accepts HSP, returns key for sorting
:param reverse: whether to reverse sorting results or no
:type reverse: bool
:param in_place: whether to do in-place sorting or no
:type in_place: bool
        ``sort`` defaults to sorting in-place, to mimic Python's ``list.sort``
        method. If you set the ``in_place`` argument to False, it will instead
        return a new, sorted Hit object and keep the initial one unsorted.
"""
if in_place:
self._items.sort(key=key, reverse=reverse)
else:
hsps = self.hsps[:]
hsps.sort(key=key, reverse=reverse)
obj = self.__class__(hsps)
self._transfer_attrs(obj)
return obj
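    # Hedged usage sketch (not in the original file): non-destructive sorting by
    # e-value, leaving the original Hit untouched.
    #
    #   by_evalue = hit.sort(key=lambda hsp: hsp.evalue, in_place=False)
    #   assert len(by_evalue) == len(hit)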
# if not used as a module, run the doctest
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
|
zjuchenyuan/BioWeb
|
Lib/Bio/SearchIO/_model/hit.py
|
Python
|
mit
| 16,260
|
[
"BLAST",
"Biopython"
] |
3fc4534d824877ee956b4bf955274c17cfc205766636a54e3c5502eb37c9757c
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .stats import TotalStat
from .visitor import SuiteVisitor
class TotalStatistics(object):
"""Container for total statistics."""
def __init__(self):
#: Instance of :class:`~robot.model.stats.TotalStat` for critical tests.
self.critical = TotalStat('Critical Tests')
#: Instance of :class:`~robot.model.stats.TotalStat` for all the tests.
self.all = TotalStat('All Tests')
def visit(self, visitor):
visitor.visit_total_statistics(self)
def __iter__(self):
return iter([self.critical, self.all])
@property
def message(self):
"""String representation of the statistics.
For example::
2 critical tests, 1 passed, 1 failed
2 tests total, 1 passed, 1 failed
"""
ctotal, cend, cpass, cfail = self._get_counts(self.critical)
atotal, aend, apass, afail = self._get_counts(self.all)
return ('%d critical test%s, %d passed, %d failed\n'
'%d test%s total, %d passed, %d failed'
% (ctotal, cend, cpass, cfail, atotal, aend, apass, afail))
def _get_counts(self, stat):
ending = 's' if stat.total != 1 else ''
return stat.total, ending, stat.passed, stat.failed
class TotalStatisticsBuilder(SuiteVisitor):
def __init__(self, suite=None):
self.stats = TotalStatistics()
if suite:
suite.visit(self)
def add_test(self, test):
self.stats.all.add_test(test)
if test.critical:
self.stats.critical.add_test(test)
def visit_test(self, test):
self.add_test(test)
def visit_keyword(self, kw):
pass
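# Hedged usage sketch (not part of the original module): building the totals for
# an already-executed suite and printing the summary. ``result_suite`` is a
# hypothetical robot result suite object accepted by the builder above.
#
#   stats = TotalStatisticsBuilder(result_suite).stats
#   print(stats.message)
#   # -> "2 critical tests, 1 passed, 1 failed\n2 tests total, 1 passed, 1 failed"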
|
alexandrul-ci/robotframework
|
src/robot/model/totalstatistics.py
|
Python
|
apache-2.0
| 2,319
|
[
"VisIt"
] |
ea0aef9579ab362a0f16a80faa2c01cf2e0f465be1f4470d4fed7adb477e67c0
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import unittest_decorators as utx
import espressomd
import numpy as np
class TabulatedTest(ut.TestCase):
s = espressomd.System(box_l=[1.0, 1.0, 1.0])
s.box_l = 3 * [10]
s.time_step = 0.01
s.cell_system.skin = 0.4
def setUp(self):
self.force = np.zeros((100,))
self.energy = np.zeros((100,))
self.min_ = 1.
self.max_ = 2.
self.dx = (self.max_ - self.min_) / 99.
for i in range(0, 100):
self.force[i] = 5 + i * 2.3 * self.dx
self.energy[i] = 5 - i * 2.3 * self.dx
self.s.part.clear()
self.s.part.add(id=0, type=0, pos=[5., 5., 5.0])
self.s.part.add(id=1, type=0, pos=[5., 5., 5.5])
def check(self):
# Below cutoff
np.testing.assert_allclose(np.copy(self.s.part[:].f), 0.0)
for z in np.linspace(0, self.max_ - self.min_, 200, endpoint=False):
self.s.part[1].pos = [5., 5., 6. + z]
self.s.integrator.run(0)
np.testing.assert_allclose(
np.copy(self.s.part[0].f), [0., 0., -(5. + z * 2.3)])
np.testing.assert_allclose(
np.copy(self.s.part[0].f), -np.copy(self.s.part[1].f))
self.assertAlmostEqual(
self.s.analysis.energy()['total'], 5. - z * 2.3)
@utx.skipIfMissingFeatures("TABULATED")
def test_non_bonded(self):
self.s.non_bonded_inter[0, 0].tabulated.set_params(
min=self.min_, max=self.max_, energy=self.energy, force=self.force)
np.testing.assert_allclose(
self.force, self.s.non_bonded_inter[0, 0].tabulated.get_params()['force'])
np.testing.assert_allclose(
self.energy, self.s.non_bonded_inter[0, 0].tabulated.get_params()['energy'])
self.assertAlmostEqual(
self.min_, self.s.non_bonded_inter[0, 0].tabulated.get_params()['min'])
self.assertAlmostEqual(
self.max_, self.s.non_bonded_inter[0, 0].tabulated.get_params()['max'])
self.check()
self.s.non_bonded_inter[0, 0].tabulated.set_params(
min=-1, max=-1, energy=[], force=[])
def test_bonded(self):
from espressomd.interactions import TabulatedDistance
tb = TabulatedDistance(min=self.min_, max=self.max_,
energy=self.energy, force=self.force)
self.s.bonded_inter.add(tb)
np.testing.assert_allclose(self.force, tb.params['force'])
np.testing.assert_allclose(self.energy, tb.params['energy'])
self.assertAlmostEqual(self.min_, tb.params['min'])
self.assertAlmostEqual(self.max_, tb.params['max'])
self.s.part[0].add_bond((tb, 1))
self.check()
self.s.part[0].delete_bond((tb, 1))
if __name__ == "__main__":
ut.main()
|
KaiSzuttor/espresso
|
testsuite/python/tabulated.py
|
Python
|
gpl-3.0
| 3,531
|
[
"ESPResSo"
] |
b580c6c6fec5413237667f5fcd07ae84491a297f2f00ffc21205fc8f0084df19
|
# -----------------------------------------------------------------------------
#
# Copyright (C) 2021 CERN & University of Surrey for the benefit of the
# BioDynaMo collaboration. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# See the LICENSE file distributed with this work for details.
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# -----------------------------------------------------------------------------
from paraview.simple import *
from paraview import vtk
from paraview import coprocessing
import json
# ------------------------------------------------------------------------------
# Helper functions -------------------------------------------------------------
# ------------------------------------------------------------------------------
is_insitu_pipeline = False
def ProcessSphere(agent_info, agent_data, render_view):
# create a new 'Glyph'
glyph_type = str(agent_info['shape'])
glyph1 = Glyph(Input=agent_data, GlyphType=glyph_type)
glyph1.GlyphTransform = 'Transform2'
glyph1.GlyphType = glyph_type
glyph1.ScaleFactor = 1.0
glyph1.GlyphMode = 'All Points'
#
# # show data in view
glyph1Display = Show(glyph1, render_view)
# # trace defaults for the display properties.
glyph1Display.Representation = 'Surface'
glyph1Display.ColorArrayName = [None, '']
glyph1Display.OSPRayScaleFunction = 'PiecewiseFunction'
glyph1Display.SelectOrientationVectors = 'None'
glyph1Display.ScaleFactor = 1.0
glyph1Display.SelectScaleArray = 'None'
glyph1Display.GlyphType = 'Sphere'
glyph1Display.GlyphTableIndexArray = 'None'
glyph1Display.DataAxesGrid = 'GridAxesRepresentation'
glyph1Display.PolarAxes = 'PolarAxesRepresentation'
glyph1Display.GaussianRadius = -1.0000000000000001e+298
glyph1Display.SetScaleArray = [None, '']
glyph1Display.ScaleTransferFunction = 'PiecewiseFunction'
glyph1Display.OpacityArray = [None, '']
glyph1Display.OpacityTransferFunction = 'PiecewiseFunction'
# following statement causes:
# Warning: In vtkSMPVRepresentationProxy.cxx, line 612
# vtkSMPVRepresentationProxy (0x522db70): Failed to determine the
# LookupTable being used.
# glyph1Display.SetScalarBarVisibility(renderView1, True)
# update the view to ensure updated data information
render_view.Update()
# Properties modified on glyph1
# ignored if set earlier
if paraview.servermanager.vtkSMProxyManager.GetVersionMinor() == 5:
glyph1.Scalars = ['POINTS', agent_info['scaling_attribute']]
else:
glyph1.ScaleArray = ['POINTS', agent_info['scaling_attribute']]
RenameSource('{0}s'.format(agent_info['name']), glyph1)
# update the view to ensure updated data information
render_view.Update()
# ------------------------------------------------------------------------------
def ProcessCylinder(agent_info, agent_data, render_view):
glyph_type = str(agent_info['shape'])
bDMGlyph1 = BDMGlyph(Input=agent_data, GlyphType=glyph_type)
bDMGlyph1.Vectors = ['POINTS', 'None']
bDMGlyph1.XScaling = ['POINTS', 'None']
bDMGlyph1.YScaling = ['POINTS', 'None']
bDMGlyph1.ZScaling = ['POINTS', 'None']
bDMGlyph1.MassLocation = ['POINTS', 'None']
bDMGlyph1.ScaleFactor = 0.1
bDMGlyph1.GlyphTransform = 'Transform2'
# show data in view
bDMGlyph1Display = Show(bDMGlyph1, render_view)
# trace defaults for the display properties.
bDMGlyph1Display.Representation = 'Surface'
bDMGlyph1Display.ColorArrayName = [None, '']
bDMGlyph1Display.OSPRayScaleArray = 'actual_length_'
bDMGlyph1Display.OSPRayScaleFunction = 'PiecewiseFunction'
bDMGlyph1Display.SelectOrientationVectors = 'None'
bDMGlyph1Display.ScaleFactor = 0.010000000149011612
bDMGlyph1Display.SelectScaleArray = 'None'
bDMGlyph1Display.GlyphType = glyph_type
bDMGlyph1Display.GlyphTableIndexArray = 'None'
bDMGlyph1Display.DataAxesGrid = 'GridAxesRepresentation'
bDMGlyph1Display.PolarAxes = 'PolarAxesRepresentation'
bDMGlyph1Display.GaussianRadius = 0.005000000074505806
bDMGlyph1Display.SetScaleArray = ['POINTS', 'actual_length_']
bDMGlyph1Display.ScaleTransferFunction = 'PiecewiseFunction'
bDMGlyph1Display.OpacityArray = ['POINTS', 'actual_length_']
bDMGlyph1Display.OpacityTransferFunction = 'PiecewiseFunction'
# update the view to ensure updated data information
render_view.Update()
# Properties modified on bDMGlyph1
bDMGlyph1.XScaling = ['POINTS', 'diameter_']
# update the view to ensure updated data information
render_view.Update()
# Properties modified on bDMGlyph1
bDMGlyph1.YScaling = ['POINTS', 'diameter_']
# update the view to ensure updated data information
render_view.Update()
# Properties modified on bDMGlyph1
bDMGlyph1.YScaling = ['POINTS', 'actual_length_']
# update the view to ensure updated data information
render_view.Update()
# Properties modified on bDMGlyph1
bDMGlyph1.ZScaling = ['POINTS', 'diameter_']
# update the view to ensure updated data information
render_view.Update()
# Properties modified on bDMGlyph1
bDMGlyph1.MassLocation = ['POINTS', 'mass_location_']
# update the view to ensure updated data information
render_view.Update()
# Properties modified on bDMGlyph1
bDMGlyph1.Vectors = ['POINTS', 'spring_axis_']
# update the view to ensure updated data information
render_view.Update()
# Properties modified on bDMGlyph1
bDMGlyph1.GlyphType = 'Cylinder'
# update the view to ensure updated data information
render_view.Update()
# Properties modified on bDMGlyph1
bDMGlyph1.ScaleMode = 'normal'
# update the view to ensure updated data information
render_view.Update()
# Properties modified on bDMGlyph1
bDMGlyph1.ScaleFactor = 1.0
# update the view to ensure updated data information
render_view.Update()
bDMGlyph1.GlyphMode = 'All Points'
RenameSource('{0}s'.format(agent_info['name']), bDMGlyph1)
render_view.Update()
# ------------------------------------------------------------------------------
def ProcessSimulationObject(agent_info, agent_data, render_view):
# following line was in trace, but seems to be superfluous
# agent_data.PointArrayStatus = ['diameter_', 'volume_']
# rename data source
agent_name = agent_info['name']
agent_data_name = '{0}-data'.format(agent_name)
RenameSource(agent_data_name, agent_data)
shape = str(agent_info['shape'])
if shape == "Sphere":
ProcessSphere(agent_info, agent_data, render_view)
elif shape == "Cylinder":
ProcessCylinder(agent_info, agent_data, render_view)
# reset view to fit data
render_view.ResetCamera()
# ------------------------------------------------------------------------------
def AddDiffusionGradientGlyph(substance_name, substance_data, render_view):
glyph1 = Glyph(Input=substance_data, GlyphType='Arrow')
glyph1.ScaleFactor = 10
glyph1.GlyphTransform = 'Transform2'
# show data in view
glyph1Display = Show(glyph1, render_view)
# trace defaults for the display properties.
glyph1Display.Representation = 'Surface'
glyph1Display.ColorArrayName = [None, '']
glyph1Display.OSPRayScaleArray = 'Diffusion Gradient'
glyph1Display.OSPRayScaleFunction = 'PiecewiseFunction'
glyph1Display.SelectOrientationVectors = 'GlyphVector'
glyph1Display.ScaleFactor = 10
glyph1Display.SelectScaleArray = 'Diffusion Gradient'
glyph1Display.GlyphType = 'Arrow'
glyph1Display.GlyphTableIndexArray = 'Diffusion Gradient'
glyph1Display.DataAxesGrid = 'GridAxesRepresentation'
glyph1Display.PolarAxes = 'PolarAxesRepresentation'
glyph1Display.GaussianRadius = 9.73499984741211
glyph1Display.SetScaleArray = ['POINTS', 'No scale array']
glyph1Display.ScaleTransferFunction = 'PiecewiseFunction'
glyph1Display.OpacityArray = ['POINTS', 'Substance Concentration']
glyph1Display.OpacityTransferFunction = 'PiecewiseFunction'
RenameSource('{0}-gradient'.format(substance_name), glyph1)
# update the view to ensure updated data information
render_view.Update()
# ------------------------------------------------------------------------------
def ProcessExtracellularSubstance(substance_info, substance_data, render_view):
# get display properties
substance_display = Show(substance_data, render_view)
# set scalar coloring
ColorBy(substance_display, ('POINTS', 'Substance Concentration', 'Magnitude'))
# rescale color and/or opacity maps used to include current data range
substance_display.RescaleTransferFunctionToDataRange(True, True)
# change representation type
# NB: Paraview v5.6.0 screenshots from within catalyst don't render volume
# rendering. -> Change default to Wireframe
if is_insitu_pipeline:
substance_display.SetRepresentationType('Wireframe')
else:
substance_display.SetRepresentationType('Volume')
# get color transfer function/color map for 'DiffusionGradient'
diffusionGradientLUT = GetColorTransferFunction('DiffusionGradient')
# rename data source
substance_name = substance_info['name']
RenameSource('{0}-concentration'.format(substance_name), substance_data)
if substance_info['has_gradient'] == "true":
AddDiffusionGradientGlyph(substance_name, substance_data, render_view)
# ------------------------------------------------------------------------------
# CoProcessor definition -------------------------------------------------------
# ------------------------------------------------------------------------------
def CreateCoProcessor():
def _CreatePipeline(coprocessor, datadescription):
class Pipeline :
#disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
renderview = CreateView('RenderView')
renderview.ViewTime = datadescription.GetTime()
user_data = datadescription.GetUserData()
json_string = user_data.GetAbstractArray("metadata").GetVariantValue(0).ToString()
build_info = json.loads(json_string)
insitu_script_arguments = build_info["insitu_script_arguments"].split(" ")
global is_insitu_pipeline
is_insitu_pipeline = True
# agents
for agent_info in build_info['agents']:
data = coprocessor.CreateProducer(datadescription, agent_info['name'])
ProcessSimulationObject(agent_info, data, renderview)
# extracellular substances
for substance_info in build_info['extracellular_substances']:
producer = coprocessor.CreateProducer(datadescription, substance_info['name'])
grid = datadescription.GetInputDescriptionByName(substance_info['name']).GetGrid()
producer.GetClientSideObject().SetOutput(grid)
producer.UpdatePipeline()
ProcessExtracellularSubstance(substance_info, producer, renderview)
# ------------------------------------------------------------------
# end default pipeline - start custom script
# check if a custom script was defined
try:
ExtendDefaultPipeline
except NameError:
pass
else:
ExtendDefaultPipeline(renderview, coprocessor, datadescription,
insitu_script_arguments)
return Pipeline()
class CoProcessor(coprocessing.CoProcessor):
def CreatePipeline(self, datadescription):
self.Pipeline = _CreatePipeline(self, datadescription)
coprocessor = CoProcessor()
return coprocessor
#-------------------------------------------------------------------------------
# Global variables that will hold the pipeline for each timestep.
# Creating the CoProcessor object doesn't actually create the ParaView pipeline.
# It will be automatically set up when coprocessor.UpdateProducers() is called the
# first time.
coprocessor = CreateCoProcessor()
#-------------------------------------------------------------------------------
# Enable Live Visualization with ParaView
coprocessor.EnableLiveVisualization(True, 1)
#------------------------------- Data Selection method -------------------------
def RequestDataDescription(datadescription):
global coprocessor
if datadescription.GetForceOutput() == True:
#We are just going to request all fields and meshes from the simulation code / adaptor.
for i in range(datadescription.GetNumberOfInputDescriptions()):
datadescription.GetInputDescription(i).AllFieldsOn()
datadescription.GetInputDescription(i).GenerateMeshOn()
return
#setup requests for all inputs based on the requirements of the pipeline.
coprocessor.LoadRequestedData(datadescription)
#--------------------------------- Processing method ---------------------------
def DoCoProcessing(datadescription):
"Callback to do co-processing for current timestep"
global coprocessor
# Update the coprocessor by providing it the newly generated simulation data.
# If the pipeline hasn't been set up yet, this will set up the pipeline.
coprocessor.UpdateProducers(datadescription)
coprocessor.WriteImages(datadescription)
#Live Visualization, if enabled.
coprocessor.DoLiveVisualization(datadescription, "localhost", 22222)
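# Hedged sketch (not part of the original script): the shape a user-supplied
# ExtendDefaultPipeline hook could take. The default pipeline above only calls it
# if the name is defined, passing the four arguments shown in _CreatePipeline.
#
# def ExtendDefaultPipeline(renderview, coprocessor, datadescription, script_args):
#     # e.g. adjust the camera or add filters on top of the default sources
#     renderview.ResetCamera()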
|
BioDynaMo/biodynamo
|
src/core/visualization/paraview/default_insitu_pipeline.py
|
Python
|
apache-2.0
| 13,744
|
[
"ParaView",
"VTK"
] |
b0a4ad51861000331d0f4e00a48f7f4a607f6f0fc46c6a951970b5df7c76fbd2
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import unittest
from htmresearch.encoders import EncoderTypes
from htmresearch.frameworks.nlp.model_factory import createModel
from htmresearch.support.text_preprocess import TextPreprocess
class TestTextPreprocess(unittest.TestCase):
tokenIndexingFactor = 1000
documentLevel = {"CioDocumentFingerprint", "CioWordFingerprint"}
def setUp(self):
self.testDocuments = (
("Much of the world's data is streaming, time-series data, where "
"anomalies give significant information in critical situations; "
"examples abound in domains such as finance, IT, security, medical, "
"and energy. Yet detecting anomalies in streaming data is a difficult "
"task, requiring detectors to process data in real-time, not batches, "
"and learn while simultaneously making predictions... The goal for "
"[identifier deleted] is to provide a standard, open source framework "
"with which the research community can compare and evaluate different "
"algorithms for detecting anomalies in streaming data."),
("We propose a formal mathematical model for sparse representation in "
"neocortex based on a neuron model and associated operations... As such "
"the theory provides a unified and practical mathematical framework for "
"understanding the benefits and limits of sparse representation in "
"cortical networks."),
("Therefor the HTM sequence memory doesn't only advance our "
"understanding of how the brain may solve the sequence learning "
"problem, but it's applicable to a wide range of real-world problems "
"such as dicsrete and continuous sequence prediction, anomaly "
"detection, and sequence classification."),
("In this paper we extnd this idea by showing that a neuron with several "
"thousand synapses aranged along active dendrites can learn to "
"accurately and robustly recognize hundreds of unique patterns of "
"cellular activity, even in the presence of large amounts of noise and "
"pattern variation... Thus neurons need thousands of synapses to learn "
"the many temporal patterns in sensory stimuli and motor sequence."),
)
self.filteredProtoIds = ( [0, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 17,
18, 20, 23, 25, 26, 28, 29, 30, 31, 33, 34, 37, 38, 39, 40, 42, 43, 45,
47, 49, 50, 51, 52, 53, 55, 57, 58, 61, 63, 64, 65, 66, 70, 71, 72, 73,
75, 76, 77, 79, 80, 82, 83, 1001, 1003, 1004, 1005, 1007, 1008, 1010,
1011, 1014, 1015, 1017, 1018, 1022, 1023, 1025, 1027, 1028, 1029, 1031,
1033, 1035, 1037, 1038, 1040, 1041, 2000, 2002, 2003, 2004, 2005, 2007,
2008, 2009, 2013, 2015, 2017, 2018, 2019, 2022, 2025, 2026, 2028, 2029,
2032, 2034, 2035, 2036, 2037, 2038, 2040, 2041, 3002, 3004, 3006, 3008,
3011, 3013, 3014, 3015, 3016, 3017, 3018, 3019, 3020, 3021, 3023, 3025,
3026, 3027, 3029, 3030, 3032, 3033, 3034, 3037, 3039, 3040, 3042, 3044,
3045, 3046, 3047, 3048, 3049, 3051, 3053, 3055, 3056, 3057, 3059, 3060,
3062, 3063] )
def _formatResults(self, modelName, distanceArray, idList):
""" Mimics the implementation in imbu.py: Format distances to reflect the
pctOverlapOfInput metric, return a list of results.
"""
formattedDistances = (1.0 - distanceArray) * 100
results = []
for protoId, dist in zip(idList, formattedDistances):
if modelName in self.documentLevel:
results.append({"sampleId": protoId,
"wordId": 0,
"text": self.testDocuments[protoId],
"score": dist.item()})
else:
# get the sampleId from the protoId
wordId = protoId % self.tokenIndexingFactor
sampleId = (protoId - wordId) / self.tokenIndexingFactor
results.append({"sampleId": sampleId,
"wordId": wordId,
"text": self.testDocuments[sampleId],
"score": dist.item()})
return results
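  # Worked example of the decoding above (added for clarity, values illustrative):
  # with tokenIndexingFactor = 1000, protoId 2034 gives wordId = 2034 % 1000 = 34
  # and sampleId = (2034 - 34) / 1000 = 2, i.e. token index 34 of testDocuments[2].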
def testMappingsWithImbuWordModel(self):
# Create a Keywords model
modelName = "Keywords"
kwargs = {"numLabels": 1,
"k": 42,
"classifierMetric": "pctOverlapOfInput",
"filterText": True,
"verbosity": 0}
model = createModel(modelName, **kwargs)
# Train the model for use in Imbu
for seqId, text in enumerate(self.testDocuments):
tokenList, mapping = model.tokenize(text)
lastTokenIndex = len(tokenList) - 1
for i, (token, tokenIndex) in enumerate(zip(tokenList, mapping)):
wordId = seqId * self.tokenIndexingFactor + tokenIndex
model.trainToken(token,
[0],
wordId,
reset=int(i == lastTokenIndex))
# Query the model, expecting two matches from one sample
query = ("The key to artificial intelligence has always been the "
"representation.")
_, sortedIds, sortedDistances = model.inferDocument(
query, returnDetailedResults=True, sortResults=True)
# Test for expected word-token mapping (in prototype IDs)
self.assertItemsEqual(self.filteredProtoIds, sortedIds,
"List of IDs returned from inference does not match the expected list of "
"prototype IDs.")
# Test for exact matching results
self.assertSequenceEqual([0.0, 0.0, 1.0], sortedDistances[:3].tolist(),
"Expected two exact-matching prototypes.")
# Test for multiple matches per sample
results = self._formatResults(modelName, sortedDistances, sortedIds)
self.assertEqual(results[0]["sampleId"], results[1]["sampleId"])
self.assertEqual(results[0]["text"], results[1]["text"])
self.assertNotEqual(results[0]["wordId"], results[1]["wordId"])
# Test the match maps back to the query
matchingWord = results[0]["text"].split(" ")[results[0]["wordId"]]
self.assertIn(matchingWord, query, "Matching word is indexed incorrectly.")
# Query the model again, expecting five matches from two samples
query = ("sequence")
_, sortedIds, sortedDistances = model.inferDocument(
query, returnDetailedResults=True, sortResults=True)
# Test for exact matching results
self.assertSequenceEqual(
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0], sortedDistances[:6].tolist(),
"Expected five exact-matching prototypes.")
# Test the exact matches map back to the query term
results = self._formatResults(modelName, sortedDistances, sortedIds)
for r in results[:5]:
self.assertIn(r["sampleId"], (2,3))
matchingWord = r["text"].split(" ")[r["wordId"]]
self.assertIn(query, matchingWord,
"Matching word is indexed incorrectly.")
def testMappingsWithImbuDocumentModel(self):
# Create the CioDocumentFingerprint model
modelName = "CioDocumentFingerprint"
kwargs = {"numLabels": 1,
"classifierMetric": "pctOverlapOfInput",
"filterText": True,
"verbosity": 0,
"fingerprintType": EncoderTypes.document,
"cacheRoot": None}
model = createModel("CioDocumentFingerprint", **kwargs)
# Train the model for use in Imbu
for seqId, text in enumerate(self.testDocuments):
model.trainDocument(text, [0], seqId)
# Query the model, expecting two matches from one sample
query = ("The key to artificial intelligence has always been the "
"representation.")
_, sortedIds, sortedDistances = model.inferDocument(
query, returnDetailedResults=True, sortResults=True)
self.assertEqual(len(self.testDocuments), len(sortedIds),
"Document-level models should have one prototype ID per document.")
results = self._formatResults(modelName, sortedDistances, sortedIds)
for r in results:
self.assertEqual(0, r["wordId"],
"wordId is insignificant in document-level models, and should be 0.")
def testIndexMapping(self):
originalWords = self.testDocuments[2].split(" ")
tokenList, mapping = TextPreprocess().tokenizeAndFilter(
self.testDocuments[2],
ignoreCommon=50,
removeStrings=["[identifier deleted]"],
correctSpell=True,
expandAbbr=True,
expandContr=True)
self.assertEqual(len(tokenList), len(mapping),
"There should be one mapping entry for each token.")
# Test filtering results
self.assertEqual("therefore", tokenList[0], "Spelling not corrected.")
self.assertEqual("discrete", tokenList[24], "Spelling not corrected.")
self.assertSequenceEqual(["hierarchical", "temporal", "memory"],
tokenList[1:4], "Abbreviation 'HTM' not expanded.")
self.assertNotIn("but", tokenList, "Common word 'but' not removed.")
self.assertNotIn("not", tokenList, "Common word 'not' not removed.")
self.assertIn("does", tokenList, "Contraction not expanded to 'does not'.")
# Test some token-to-word-mappings
mappedWords = [originalWords[i] for i in mapping]
self.assertNotEqual(len(originalWords), len(mappedWords))
for word in mappedWords[1:4]:
self.assertEqual("HTM", word,
"Tokens don't map to 'HTM' as expected.")
if __name__ == "__main__":
unittest.main()
|
marionleborgne/nupic.research
|
tests/nlp/integration/text_mapping_test.py
|
Python
|
agpl-3.0
| 10,290
|
[
"NEURON"
] |
41daed32ecfd05e3fcad160fd57a1ecc3edc1c065468c2a32d5b09a1d5e38239
|
# coding: utf-8
"""Tools for the submission of Tasks."""
from __future__ import unicode_literals, division, print_function
import os
import time
import collections
import yaml
from six.moves import cStringIO
from datetime import timedelta
from monty.io import get_open_fds
from monty.string import boxed, is_string
from monty.os.path import which
from monty.collections import AttrDict
try:
import apscheduler
has_sched_v3 = apscheduler.version >= "3.0.0"
except ImportError:
    has_sched_v3 = False
import logging
logger = logging.getLogger(__name__)
__all__ = [
"ScriptEditor",
"PyLauncher",
"PyFlowScheduler",
]
def straceback():
"""Returns a string with the traceback."""
import traceback
return traceback.format_exc()
class ScriptEditor(object):
"""
Simple editor that simplifies the writing of shell scripts
"""
_shell = '/bin/bash'
def __init__(self):
self._lines = []
@property
def shell(self):
return self._shell
def _add(self, text, pre=""):
if is_string(text):
self._lines.append(pre + text)
else:
self._lines.extend([pre + t for t in text])
def reset(self):
"""Reset the editor."""
try:
del self._lines
except AttributeError:
pass
def shebang(self):
"""Adds the shebang line."""
self._lines.append('#!' + self.shell)
def declare_var(self, key, val):
"""Declare a env variable. If val is None the variable is unset."""
if val is not None:
line = "export " + key + '=' + str(val)
else:
line = "unset " + key
self._add(line)
def declare_vars(self, d):
"""Declare the variables defined in the dictionary d."""
for k, v in d.items():
self.declare_var(k, v)
def export_envar(self, key, val):
"""Export an environment variable."""
line = "export " + key + "=" + str(val)
self._add(line)
def export_envars(self, env):
"""Export the environment variables contained in the dict env."""
for k, v in env.items():
self.export_envar(k, v)
def add_emptyline(self):
"""Add an empty line."""
self._add("", pre="")
def add_comment(self, comment):
"""Add a comment"""
self._add(comment, pre="# ")
def load_modules(self, modules):
"""Load the list of specified modules."""
for module in modules:
self.load_module(module)
def load_module(self, module):
self._add('module load ' + module)
def add_line(self, line):
self._add(line)
def add_lines(self, lines):
self._add(lines)
def get_script_str(self, reset=True):
"""Returns a string with the script and reset the editor if reset is True"""
s = "\n".join(l for l in self._lines)
if reset:
self.reset()
return s
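# Hedged usage sketch (not part of the original module): assembling a small shell
# script with the editor above; all values are illustrative.
#
#   se = ScriptEditor()
#   se.shebang()
#   se.declare_var("MPI_PROCS", 4)
#   se.load_modules(["abinit"])
#   se.add_line("mpirun -n $MPI_PROCS abinit < run.files > run.log 2> run.err")
#   script = se.get_script_str()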
class OmpEnv(AttrDict):
"""
Dictionary with the OpenMP environment variables
see https://computing.llnl.gov/tutorials/openMP/#EnvironmentVariables
"""
_KEYS = [
"OMP_SCHEDULE",
"OMP_NUM_THREADS",
"OMP_DYNAMIC",
"OMP_PROC_BIND",
"OMP_NESTED",
"OMP_STACKSIZE",
"OMP_WAIT_POLICY",
"OMP_MAX_ACTIVE_LEVELS",
"OMP_THREAD_LIMIT",
"OMP_STACKSIZE",
"OMP_PROC_BIND",
]
def __init__(self, *args, **kwargs):
"""
Constructor method inherited from dictionary:
>>> assert OmpEnv(OMP_NUM_THREADS=1).OMP_NUM_THREADS == 1
To create an instance from an INI file, use:
OmpEnv.from_file(filename)
"""
        super(OmpEnv, self).__init__(*args, **kwargs)
err_msg = ""
for key, value in self.items():
self[key] = str(value)
if key not in self._KEYS:
err_msg += "unknown option %s\n" % key
if err_msg:
raise ValueError(err_msg)
@classmethod
def from_file(cls, filename, allow_empty=False):
"""Reads the OpenMP variables from a INI file."""
if filename.endswith(".ini"):
from ConfigParser import SafeConfigParser, NoOptionError
parser = SafeConfigParser()
parser.read(filename)
obj = OmpEnv()
# Consistency check. Note that we only check if the option name is correct,
# we do not check whether the value is correct or not.
if "openmp" not in parser.sections():
if not allow_empty:
raise ValueError("%s does not contain any [openmp] section" % filename)
return obj
err_msg = ""
for key in parser.options("openmp"):
if key.upper() not in cls._KEYS:
err_msg += "unknown option %s, maybe a typo" % key
if err_msg:
raise ValueError(err_msg)
for key in cls._KEYS:
try:
obj[key] = str(parser.get("openmp", key))
except NoOptionError:
try:
obj[key] = str(parser.get("openmp", key.lower()))
except NoOptionError:
pass
if not allow_empty and not obj:
raise ValueError("Refusing to return with an empty dict")
return obj
else:
            raise NotImplementedError("Don't know how to read data from %s" % filename)
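    # Hedged example (not part of the original module): an INI file accepted by
    # from_file; option names must match entries in _KEYS, values are illustrative.
    #
    #   [openmp]
    #   OMP_NUM_THREADS = 2
    #   OMP_STACKSIZE = 32M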
class PyLauncherError(Exception):
"""Error class for PyLauncher."""
class PyLauncher(object):
"""
This object handle the submission of the tasks contained in a `AbinitFlow`
"""
Error = PyLauncherError
def __init__(self, flow, **kwargs):
"""
Initialize the object
Args:
flow:
`AbinitFlow` object
kwargs:
max_njobs_inqueue:
The launcher will stop submitting jobs when the
number of jobs in the queue is >= Max number of jobs
"""
self.flow = flow
self.max_njobs_inqueue = kwargs.get("max_njobs_inqueue", 200)
def single_shot(self):
"""
        Run the first `Task` that is ready for execution.
Returns:
Number of jobs launched.
"""
num_launched = 0
# Get the tasks that can be executed in each workflow.
tasks = []
for work in self.flow:
try:
task = work.fetch_task_to_run()
if task is not None:
tasks.append(task)
else:
# No task found, this usually happens when we have dependencies.
# Beware of possible deadlocks here!
logger.debug("No task to run! Possible deadlock")
except StopIteration:
logger.info("All tasks completed.")
# Submit the tasks and update the database.
if tasks:
tasks[0].start()
num_launched += 1
self.flow.pickle_dump()
return num_launched
def rapidfire(self, max_nlaunch=-1, max_loops=1, sleep_time=5):
"""
Keeps submitting `Tasks` until we are out of jobs or no job is ready to run.
Args:
max_nlaunch:
Maximum number of launches. default: no limit.
max_loops:
Maximum number of loops
sleep_time:
secs to sleep between rapidfire loop iterations
Returns:
The number of tasks launched.
"""
num_launched, do_exit, launched = 0, False, []
for count in range(max_loops):
if do_exit:
break
if count > 0:
time.sleep(sleep_time)
tasks = self.fetch_tasks_to_run()
# I don't know why but we receive duplicated tasks.
if any(task in launched for task in tasks):
logger.critical("numtasks %d already in launched list:\n%s" % (len(task), launched))
# Preventive test.
tasks = [t for t in tasks if t not in launched]
if not tasks:
continue
#njobs_inqueue = tasks[0].manager.qadapter.get_njobs_in_queue()
#if njobs_inqueue is None:
# print('Cannot get njobs_inqueue, going back to sleep...')
# continue
#if len(tasks) > 0:
# n_jobs_in_queue = tasks[0].manager.qadapter.get_njobs_in_queue()
# if n_jobs_in_queue is None:
# n_jobs_in_queue = 0
# n_to_run = self.max_jobs - n_jobs_in_queue
#else:
# n_to_run = 0
#rest = self.max_njobs_inqueue - njobs_inqueue
#if rest <= 0:
# print('too many jobs in the queue, going back to sleep...')
# continue
stop = len(tasks) #if rest > len(tasks) else rest
#print("Will fire %d jobs" % stop)
for task in tasks[:stop]:
fired = task.start()
if fired:
launched.append(task)
num_launched += 1
if num_launched >= max_nlaunch > 0:
print('num_launched >= max_nlaunch, going back to sleep')
do_exit = True
break
# Update the database.
self.flow.pickle_dump()
return num_launched
def fetch_tasks_to_run(self):
"""
Return the list of tasks that can be submitted.
Empty list if no task has been found.
"""
tasks_to_run = []
for work in self.flow:
tasks_to_run.extend(work.fetch_alltasks_to_run())
return tasks_to_run
class PyFlowSchedulerError(Exception):
"""Exceptions raised by `PyFlowScheduler`."""
class PyFlowScheduler(object):
"""
This object schedules the submission of the tasks in an `AbinitFlow`.
There are two types of errors that might occur during the execution of the jobs:
#. Python exceptions
#. Abinit Errors.
Python exceptions are easy to detect and are usually due to a bug in abinitio or random errors such as IOError.
The set of Abinit Errors is much much broader. It includes wrong input data, segmentation
faults, problems with the resource manager, etc. Abinitio tries to handle the most common cases
but there's still a lot of room for improvement.
Note, in particular, that `PyFlowScheduler` will shutdown automatically if
#. The number of python exceptions is > MAX_NUM_PYEXC
#. The number of Abinit Errors (i.e. the number of tasks whose status is S_ERROR) is > MAX_NUM_ERRORS
#. The number of jobs launched becomes greater than (SAFETY_RATIO * total_number_of_tasks).
#. The scheduler will send an email to the user (specified by mailto) every REMINDME_S seconds.
If the mail cannot be sent, it will shutdown automatically.
This check prevents the scheduler from being trapped in an infinite loop.
"""
# Configuration file.
YAML_FILE = "scheduler.yml"
USER_CONFIG_DIR = os.path.join(os.getenv("HOME"), ".abinit", "abipy")
DEBUG = 0
Error = PyFlowSchedulerError
def __init__(self, **kwargs):
"""
Args:
weeks:
number of weeks to wait
days:
number of days to wait
hours:
number of hours to wait
minutes:
number of minutes to wait
seconds:
number of seconds to wait
verbose:
(int) verbosity level
            max_njobs_inqueue:
Limit on the number of jobs that can be present in the queue
use_dynamic_manager:
True if the task manager must be re-initialized from
file before launching the jobs. Default: False
max_nlaunch:
                Maximum number of tasks launched by rapidfire (default -1 i.e. no limit)
"""
# Options passed to the scheduler.
self.sched_options = AttrDict(
weeks=kwargs.pop("weeks", 0),
days=kwargs.pop("days", 0),
hours=kwargs.pop("hours", 0),
minutes=kwargs.pop("minutes", 0),
seconds=kwargs.pop("seconds", 0),
#start_date=kwargs.pop("start_date", None),
)
if all(not v for v in self.sched_options.values()):
raise self.Error("Wrong set of options passed to the scheduler.")
self.mailto = kwargs.pop("mailto", None)
self.verbose = int(kwargs.pop("verbose", 0))
self.use_dynamic_manager = kwargs.pop("use_dynamic_manager", False)
self.max_njobs_inqueue = kwargs.pop("max_njobs_inqueue", 200)
self.REMINDME_S = float(kwargs.pop("REMINDME_S", 4 * 24 * 3600))
self.MAX_NUM_PYEXCS = int(kwargs.pop("MAX_NUM_PYEXCS", 0))
self.MAX_NUM_ABIERRS = int(kwargs.pop("MAX_NUM_ABIERRS", 0))
self.SAFETY_RATIO = int(kwargs.pop("SAFETY_RATIO", 5))
#self.MAX_ETIME_S = kwargs.pop("MAX_ETIME_S", )
self.max_nlaunch = kwargs.pop("max_nlaunch", -1)
if kwargs:
raise self.Error("Unknown arguments %s" % kwargs)
if has_sched_v3:
from apscheduler.schedulers.blocking import BlockingScheduler
self.sched = BlockingScheduler()
else:
from apscheduler.scheduler import Scheduler
self.sched = Scheduler(standalone=True)
self.nlaunch = 0
self.num_reminders = 1
# Used to keep track of the exceptions raised while the scheduler is running
self.exceptions = collections.deque(maxlen=self.MAX_NUM_PYEXCS + 10)
# Used to push additional info during the execution.
self.history = collections.deque(maxlen=100)
@classmethod
def from_file(cls, filepath):
"""Read the configuration parameters from a Yaml file."""
with open(filepath, "r") as fh:
return cls(**yaml.load(fh))
@classmethod
def from_string(cls, s):
"""Create an istance from string s containing a YAML dictionary."""
stream = cStringIO(s)
stream.seek(0)
return cls(**yaml.load(stream))
@classmethod
def from_user_config(cls):
"""
Initialize the `PyFlowScheduler` from the YAML file 'scheduler.yml'.
Search first in the working directory and then in the configuration
directory of abipy.
Raises:
RuntimeError if file is not found.
"""
# Try in the current directory.
path = os.path.join(os.getcwd(), cls.YAML_FILE)
if os.path.exists(path):
return cls.from_file(path)
# Try in the configuration directory.
path = os.path.join(cls.USER_CONFIG_DIR, cls.YAML_FILE)
if os.path.exists(path):
return cls.from_file(path)
err_msg = "Cannot locate %s neither in current directory nor in %s" % (cls.YAML_FILE, path)
raise cls.Error(err_msg)
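    # Hedged example (not part of the original module): a minimal scheduler.yml.
    # Its keys are passed straight to __init__ via yaml.load; values illustrative.
    #
    #   seconds: 30
    #   mailto: user@example.com
    #   max_njobs_inqueue: 100
    #   max_nlaunch: 5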
def __str__(self):
"""String representation."""
lines = [self.__class__.__name__ + ", Pid: %d" % self.pid]
app = lines.append
app("Scheduler options: %s" % str(self.sched_options))
app(80 * "=")
app(str(self.flow))
return "\n".join(lines)
@property
def pid(self):
"""The pid of the process associated to the scheduler."""
try:
return self._pid
except AttributeError:
self._pid = os.getpid()
return self._pid
@property
def pid_file(self):
"""
Absolute path of the file with the pid.
The file is located in the workdir of the flow
"""
return self._pid_file
@property
def flow(self):
"""`AbinitFlow`."""
return self._flow
@property
def num_excs(self):
"""Number of exceptions raised so far."""
return len(self.exceptions)
def get_delta_etime(self):
"""Returns a `timedelta` object representing with the elapsed time."""
return timedelta(seconds=(time.time() - self.start_time))
def add_flow(self, flow):
"""Add an `AbinitFlow` flow to the scheduler."""
if hasattr(self, "_flow"):
raise self.Error("Only one flow can be added to the scheduler.")
pid_file = os.path.join(flow.workdir, "_PyFlowScheduler.pid")
if os.path.isfile(pid_file):
flow.show_status()
err_msg = ("""
pid_file %s already exists
There are two possibilities:
                1) There's another instance of PyFlowScheduler running
2) The previous scheduler didn't exit in a clean way
To solve case 1:
Kill the previous scheduler (use 'kill pid' where pid is the number reported in the file)
Then you can restart the new scheduler.
To solve case 2:
Remove the pid_file and restart the scheduler.
Exiting""" % pid_file)
raise self.Error(err_msg)
with open(pid_file, "w") as fh:
fh.write(str(self.pid))
self._pid_file = pid_file
self._flow = flow
def start(self):
"""
Starts the scheduler in a new thread. Returns True if success.
In standalone mode, this method will block until there are no more scheduled jobs.
"""
self.history.append("Started on %s" % time.asctime())
self.start_time = time.time()
if has_sched_v3:
self.sched.add_job(self.callback, "interval", **self.sched_options)
else:
self.sched.add_interval_job(self.callback, **self.sched_options)
errors = self.flow.look_before_you_leap()
if errors:
print(errors)
self.exceptions.append(errors)
return False
# Try to run the job immediately. If something goes wrong return without initializing the scheduler.
self._runem_all()
if self.exceptions:
self.cleanup()
self.send_email(msg="Error while trying to run the flow for the first time!\n %s" % self.exceptions)
return False
self.sched.start()
return True
def _runem_all(self):
"""
This function checks the status of all tasks,
tries to fix tasks that went unconverged, abicritical, or queuecritical
and tries to run all the tasks that can be submitted.
"""
excs = []
flow = self.flow
# Allow the manager to be changed at run-time
if self.use_dynamic_manager:
from pymatgen.io.abinitio.tasks import TaskManager
new_manager = TaskManager.from_user_config()
for work in flow:
work.set_manager(new_manager)
nqjobs = flow.get_njobs_in_queue()
if nqjobs is None:
nqjobs = 0
print('Cannot get njobs_inqueue')
if nqjobs >= self.max_njobs_inqueue:
print("Too many jobs in the queue, returning")
return
if self.max_nlaunch == -1:
max_nlaunch = self.max_njobs_inqueue - nqjobs
else:
max_nlaunch = min(self.max_njobs_inqueue - nqjobs, self.max_nlaunch)
# check status
flow.check_status()
flow.show_status()
# fix problems
# Try to restart the unconverged tasks
# TODO: do not fire here but prepare for firing in rapidfire
for task in self.flow.unconverged_tasks:
try:
logger.info("AbinitFlow will try restart task %s" % task)
fired = task.restart()
if fired:
self.nlaunch += 1
max_nlaunch -= 1
if max_nlaunch == 0:
print("Restart: too many jobs in the queue, returning")
flow.pickle_dump()
return
except Exception:
excs.append(straceback())
# moved here from within rapidfire ...
# fix only prepares for restarting, and sets to ready
flow.fix_critical()
# update database
flow.pickle_dump()
#if self.num_restarts == self.max_num_restarts:
# info_msg = "Reached maximum number of restarts. Cannot restart anymore Returning"
# logger.info(info_msg)
# self.history.append(info_msg)
# return 1
# Submit the tasks that are ready.
try:
nlaunch = PyLauncher(flow).rapidfire(max_nlaunch=max_nlaunch, sleep_time=10)
self.nlaunch += nlaunch
if nlaunch:
print("[%s] Number of launches: %d" % (time.asctime(), nlaunch))
except Exception:
excs.append(straceback())
flow.show_status()
if excs:
logger.critical("*** Scheduler exceptions:\n *** %s" % "\n".join(excs))
self.exceptions.extend(excs)
def callback(self):
"""The function that will be executed by the scheduler."""
try:
return self._callback()
except:
# All exceptions raised here will trigger the shutdown!
self.exceptions.append(straceback())
self.shutdown(msg="Exception raised in callback!")
def _callback(self):
"""The actual callback."""
if self.DEBUG:
# Show the number of open file descriptors
print(">>>>> _callback: Number of open file descriptors: %s" % get_open_fds())
#print('before _runem_all in _callback')
self._runem_all()
# Mission accomplished. Shutdown the scheduler.
all_ok = self.flow.all_ok
if self.verbose:
print("all_ok", all_ok)
if all_ok:
self.shutdown(msg="All tasks have reached S_OK. Will shutdown the scheduler and exit")
# Handle failures.
err_msg = ""
# Shall we send a reminder to the user?
delta_etime = self.get_delta_etime()
if delta_etime.total_seconds() > self.num_reminders * self.REMINDME_S:
self.num_reminders += 1
msg = ("Just to remind you that the scheduler with pid %s, flow %s\n has been running for %s " %
(self.pid, self.flow, delta_etime))
retcode = self.send_email(msg, tag="[REMINDER]")
if retcode:
# Cannot send mail, shutdown now!
msg += ("\nThe scheduler tried to send an e-mail to remind the user\n" +
" but send_email returned %d. Aborting now" % retcode)
err_msg += msg
#if delta_etime.total_seconds() > self.MAX_ETIME_S:
# err_msg += "\nExceeded MAX_ETIME_S %s. Will shutdown the scheduler and exit" % self.MAX_ETIME_S
# Too many exceptions. Shutdown the scheduler.
if self.num_excs > self.MAX_NUM_PYEXCS:
msg = "Number of exceptions %s > %s. Will shutdown the scheduler and exit" % (
self.num_excs, self.MAX_NUM_PYEXCS)
err_msg += boxed(msg)
# Paranoid check: disable the scheduler if we have submitted
# too many jobs (it might be due to some bug or other external reasons
# such as race conditions between different callbacks!)
if self.nlaunch > self.SAFETY_RATIO * self.flow.num_tasks:
msg = "Too many jobs launched %d. Total number of tasks = %s, Will shutdown the scheduler and exit" % (
self.nlaunch, self.flow.num_tasks)
err_msg += boxed(msg)
# Count the number of tasks with status == S_ERROR.
if self.flow.num_errored_tasks > self.MAX_NUM_ABIERRS:
msg = "Number of tasks with ERROR status %s > %s. Will shutdown the scheduler and exit" % (
self.flow.num_errored_tasks, self.MAX_NUM_ABIERRS)
err_msg += boxed(msg)
# Count the number of tasks with status == S_UNCONVERGED.
#if self.flow.num_unconverged_tasks:
# # TODO: this is needed to avoid deadlocks, automatic restarting is not available yet
# msg = ("Found %d unconverged tasks."
# "Automatic restarting is not available yet. Will shutdown the scheduler and exit"
# % self.flow.num_unconverged_tasks)
# err_msg += boxed(msg)
#deadlocks = self.detect_deadlocks()
#if deadlocks:
# msg = ("Detected deadlocks in flow. Will shutdown the scheduler and exit"
# % self.flow.num_unconverged_tasks)
# err_msg += boxed(msg)
if err_msg:
# Something wrong. Quit
self.shutdown(err_msg)
return len(self.exceptions)
def cleanup(self):
"""
Cleanup routine: remove the pid file and save the pickle database
"""
try:
os.remove(self.pid_file)
except OSError:
logger.critical("Could not remove pid_file")
pass
# Save the final status of the flow.
self.flow.pickle_dump()
def shutdown(self, msg):
"""Shutdown the scheduler."""
try:
self.cleanup()
#if False and self.flow.has_db:
# try:
# self.flow.db_insert()
# except Exception:
# logger.critical("MongoDb insertion failed.")
self.history.append("Completed on %s" % time.asctime())
self.history.append("Elapsed time %s" % self.get_delta_etime())
if self.DEBUG:
print(">>>>> shutdown: Number of open file descriptors: %s" % get_open_fds())
retcode = self.send_email(msg)
if self.DEBUG:
print("send_mail retcode", retcode)
# Write file with the list of exceptions:
if self.exceptions:
dump_file = os.path.join(self.flow.workdir, "_exceptions")
with open(dump_file, "w") as fh:
fh.writelines(self.exceptions)
fh.write("Shutdown message:\n%s" % msg)
finally:
# Shutdown the scheduler thus allowing the process to exit.
print('this should be the shutdown of the scheduler')
# Unschedule all the jobs before calling shutdown
self.sched.print_jobs()
for job in self.sched.get_jobs():
self.sched.unschedule_job(job)
self.sched.print_jobs()
self.sched.shutdown()
# Uncomment the line below if shutdown does not work!
#os.system("kill -9 %d" % os.getpid())
def send_email(self, msg, tag=None):
"""
Send an e-mail before completing the shutdown.
Returns 0 on success.
"""
try:
return self._send_email(msg, tag)
except:
self.exceptions.append(straceback())
return -2
def _send_email(self, msg, tag):
if self.mailto is None:
return -1
header = msg.splitlines()
app = header.append
app("Submitted on %s" % time.ctime(self.start_time))
app("Completed on %s" % time.asctime())
app("Elapsed time %s" % str(self.get_delta_etime()))
app("Number of errored tasks: %d" % self.flow.num_errored_tasks)
app("Number of unconverged tasks: %d" % self.flow.num_unconverged_tasks)
strio = cStringIO()
strio.writelines("\n".join(header) + 4 * "\n")
# Add the status of the flow.
self.flow.show_status(stream=strio)
if self.exceptions:
# Report the list of exceptions.
strio.writelines(self.exceptions)
if tag is None:
tag = " [ALL OK]" if self.flow.all_ok else " [WARNING]"
return sendmail(subject=self.flow.name + tag, text=strio.getvalue(), mailto=self.mailto)
def sendmail(subject, text, mailto, sender=None):
"""
Sends an e-mail with unix sendmail.
Args:
subject:
String with the subject of the mail.
text:
String with the body of the mail.
mailto:
String or list of strings with the recipients.
sender:
string with the sender address.
If sender is None, username@hostname is used.
Returns:
exit status
"""
def user_at_host():
from socket import gethostname
return os.getlogin() + "@" + gethostname()
# Body of the message.
sender = user_at_host() if sender is None else sender
if is_string(mailto): mailto = [mailto]
from email.mime.text import MIMEText
mail = MIMEText(text)
mail["Subject"] = subject
mail["From"] = sender
mail["To"] = ", ".join(mailto)
msg = mail.as_string()
# sendmail works much better than the python interface.
# Note that sendmail is available only on Unix-like OS.
from subprocess import Popen, PIPE
sendmail = which("sendmail")
if sendmail is None: return -1
p = Popen([sendmail, "-t"], stdin=PIPE, stderr=PIPE)
outdata, errdata = p.communicate(msg)
return len(errdata)
#def test_sendmail():
# retcode = sendmail("sendmail_test", text="hello\nworld", mailto="nobody@nowhere.com")
# print("Retcode", retcode)
|
yanikou19/pymatgen
|
pymatgen/io/abinitio/launcher.py
|
Python
|
mit
| 29,690
|
[
"ABINIT",
"pymatgen"
] |
d41ff5d4f04e00267ac7f30728808b52e09165a477dd694a8b06b123041177e3
|
# pylint: disable=missing-docstring, useless-object-inheritance
from __future__ import print_function
from enum import Enum
class Aaaa(object): # [too-few-public-methods]
def __init__(self):
pass
def meth1(self):
print(self)
def _dontcount(self):
print(self)
# Don't emit for these cases.
class Klass(object):
"""docstring"""
def meth1(self):
"""first"""
def meth2(self):
"""second"""
class EnoughPublicMethods(Klass):
"""We shouldn't emit too-few-public-methods for this."""
class BossMonster(Enum):
"""An enum does not need methods to be useful."""
megashark = 1
octopus = 2
class DumbList:
"""A class can define only special methods."""
def __init__(self, iterable):
self._list = list(iterable)
def __len__(self):
return len(self._list)
def __getitem__(self, index):
return self._list[index]
|
kczapla/pylint
|
pylint/test/functional/too_few_public_methods.py
|
Python
|
gpl-2.0
| 934
|
[
"Octopus"
] |
cc7b672e8ec23d6a0bfe1bb7dc4ab413d88e1f6c983a8da7e830044ac2f20d2f
|
import rhomb
import reactivityModifier
import math
import log
import corr
import random
import os.path
import warnings
import numpy as np
from PIL import Image
from PIL import ImageDraw
class Kagome():
"""Creates a two-dimensional Kagome lattice and all tools for drawing on it."""
def deprecated(func):
"""
This is a decorator which is used to mark functions as deprecated. It will result in a warning being emitted when the function is used.
taken from: https://wiki.python.org/moin/PythonDecoratorLibrary#CA-03ade855b8be998a2a4befd0f5f810b63abcfd7d_3
"""
def new_func(*args, **kwargs):
warnings.warn("Call to deprecated function {}.".format(func.__name__),
category=DeprecationWarning)
print("\nCall to deprecated function: {}.\n".format(func.__name__))
return func(*args, **kwargs)
new_func.__name__ = func.__name__
new_func.__doc__ = func.__doc__
new_func.__dict__.update(func.__dict__)
return new_func
def __init__(self, latticeWidth, latticePoints, imageSize, outputFolder):
"""Constructor
latticeWidth ... int width of rhombs in pixel
latticePoints ... (int, int) lattice points in x and y direction
imageSize ... (int, int) dimension of the resulting output images
outputFolder ... str location of the folder to save images"""
# logging related stuff
self.outputFolder = outputFolder
# check if outputfolder exists and create it if not
if not os.path.exists(self.outputFolder):
os.makedirs(self.outputFolder)
print("created %s" % self.outputFolder)
# create a name for the model
self.modelName = os.path.basename(os.path.normpath(self.outputFolder))
# start the loggers
self.log = log.Logger("Log", self.outputFolder)
self.log.log_text("Program initialized")
self.log_conversion = log.Logger("conversion", self.outputFolder)
self.log.log_text("Conversion log created")
# set pixel dimensions for drawing
self.latticeWidth = latticeWidth
# this is a simple mathematical relation of hexagon width to height
self.latticeHeight = 1/2 * 2 * math.sqrt((latticeWidth / 2) ** 2 -
(latticeWidth / 2 * math.cos(math.radians(60))) ** 2)
# generate the dimensions of the lattice, always even
self.latticePointsX = latticePoints[0]
if self.latticePointsX % 2 == 1: self.latticePointsX += 1
self.latticePointsY = latticePoints[1]
if self.latticePointsY % 2 == 1: self.latticePointsY += 1
# stuff for drawing and image saving
self.image = Image.new('RGB', imageSize, 'white')
self.draw = ImageDraw.Draw(self.image)
self.rhombColor = 'red'
# centering the image on the tiling
self.imageXOffset = int((self.latticePointsX / 2 * self.latticeWidth - self.image.size[0]) / 2)
self.imageYOffset = int((self.latticePointsY * self.latticeHeight - self.image.size[1]) / 2)
# generate rhomb lattice
self.numberAllLatticePoints = 0
self.lattice = self.generate_lattice_array()
for y in range(len(self.lattice)):
for x in range(len(self.lattice[y])):
self.lattice[y][x] = rhomb.Rhomb(x, y, self.latticePointsX, self.latticePointsY)
self.numberAllLatticePoints += 1
self.rhombCount = self.latticePointsY * self.latticePointsX / 2 + self.latticePointsY * self.latticePointsX / 4
self.log.log_text("Lattice created")
self.log.log_text("Created %i rhombs" % self.rhombCount)
def __del__(self):
"""Destructor, cleaning up :) """
self.log.log_text("Destructor called")
def debug_draw_neighbors(self, x, y):
"""
Debug function. Draws all first neighbors.
x ... int x coordinate in the lattice
y ... int y coordinate in the lattice
"""
neighbors = self.lattice[y][x].fn
for t in neighbors:
self.rhomb_at_kagome(t[0], t[1])
def getRhomb(self, x, y):
"""
Gets a rhomb at the specific coordinates. This also ensures the torus like shape of the sheet.
x ... int x coordinate in the lattice
y ... int y coordinate in the lattice
returns Rhomb a rhomb at the given lattice points
"""
# check y
if y < 0:
y = y + self.latticePointsY
else:
y = y % self.latticePointsY
if x >= self.latticePointsX:
x = x % self.latticePointsX
# check x
if y % 2 == 1:
divisor = 2
else:
divisor = 1
if x >= (self.latticePointsX / divisor):
x = x % (self.latticePointsX / divisor)
return self.lattice[y][x]
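# Worked example of the torus wrapping above (illustrative comment, not part
# of the original file): with latticePointsY == 8, getRhomb(x, -1) maps to
# row 7 and getRhomb(x, 8) maps back to row 0, so neighbor lookups never run
# off the edge of the sheet.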
def calculate_Nth_neighbor(self, nMinus1, nMinus2):
"""
Calculates the second and higher neighbors. The order of the neighbors is given by N
nMinus1 ... array of tuples of the N - 1 neighbors
nMinus2 ... array of tuples of the N - 2 neighbors
returns a tuple array of the coordinates of the Nth neighbors
"""
everything = [] # holds all first neighbors of the N - 1 neighbors
toremove = [] # holds all items which should be removed
for t in nMinus2:
# tuples get turned into string for numpy to handle it
toremove.append(str(t))
for t in nMinus1:
# tuples get turned into string for numpy to handle it
toremove.append(str(t))
for t2 in self.getRhomb(t[0], t[1]).fn:
everything.append(str(t2)) # tuples get turned into string for numpy to handle it
everything = np.array(everything)
toremove = np.array(toremove)
reduced = np.setdiff1d(everything, toremove) # this numpy function can't handle tuples
# reversing the string array into a tuple array
complete = np.empty(len(reduced), dtype=object)
for i in range(len(reduced)):
complete[i] = eval(reduced[i])
return complete
def generate_lattice_array(self):
"""
Creates an empty numpy array with all lattice points.
return ... array[array] empty array with correct indices
"""
lattice = np.empty(self.latticePointsY, dtype=object)
for y in range(len(lattice)):
lattice[y] = np.empty(int(self.latticePointsX / (y % 2 + 1)), dtype=object)
return lattice
def kag_to_screen(self, x, y):
"""
Transforms a kagome coordinate to a point on screen.
x ... int x-coordinate of the Kagome lattice point
y ... int y-coordinate of the Kagome lattice point
returns a tuple of (x, y) coordinates to draw on an image.
"""
if y % 2 == 0:
indent = self.latticeWidth / 4
step = self.latticeWidth / 2
else:
indent = 0
step = self.latticeWidth
if (y + 1) % 4 == 0:
indent = self.latticeWidth / 2
return (x * step + indent - self.imageXOffset,
y * self.latticeHeight - self.imageYOffset)
def rhomb_at_kagome(self, x, y):
"""
Draws a rhomb in the correct orientation at a given kagome lattice point.
x ... int x-coordinate of the Kagome lattice point
y ... int y-coordinate of the Kagome lattice point
"""
draw_x, draw_y = self.kag_to_screen(x, y) # converting to drawing coordinates
# figure out the right orientation
if y % 2 == 1:
self.draw.polygon(rhomb.lying(draw_x, draw_y, self.latticeWidth, self.latticeHeight), self.rhombColor)
elif ((y % 2 == 0 and x % 2 == 1 and y % 4 == 0) or
(y % 2 == 0 and x % 2 == 0 and y % 4 == 2)):
self.draw.polygon(rhomb.right(draw_x, draw_y, self.latticeWidth, self.latticeHeight), self.rhombColor)
else:
self.draw.polygon(rhomb.left(draw_x, draw_y, self.latticeWidth, self.latticeHeight), self.rhombColor)
def get_random_point(self):
"""
Generates a random coordinate from the lattice.
return ... (int, int) kagome lattice coordinates
"""
y = random.randint(0, len(self.lattice) - 1)
x = random.randint(0, len(self.lattice[y]) - 1)
return (x, y)
def draw_tiling(self):
"""
Creates an outline overlay of the rhombille tiling.
"""
for y in range(len(self.lattice)):
for x in range(len(self.lattice[y])):
draw_x, draw_y = self.kag_to_screen(x, y) # converting to drawing coordinates
# determine the orientation
if y % 2 == 1:
self.draw.polygon(rhomb.lying(draw_x, draw_y, self.latticeWidth, self.latticeHeight), outline=1)
elif ((y % 2 == 0 and x % 2 == 1 and y % 4 == 0) or
(y % 2 == 0 and x % 2 == 0 and y % 4 == 2)):
self.draw.polygon(rhomb.right(draw_x, draw_y, self.latticeWidth, self.latticeHeight), outline=1)
else:
self.draw.polygon(rhomb.left(draw_x, draw_y, self.latticeWidth, self.latticeHeight), outline=1)
def draw_image(self):
"""
Draws an image of the current state.
"""
for y in range(len(self.lattice)):
for x in range(len(self.lattice[y])):
r = self.lattice[y][x]
if r.reacted:
self.rhomb_at_kagome(r.x, r.y)
self.draw_tiling()
def save_image(self, cycle):
"""
Saves the current image.
cycle ... int number of the image, i.e. position number of the current Monte Carlo cycle.
"""
self.image.save(self.outputFolder + "%s.png" % cycle)
def model2DPropagation(self, reactivityModifiers, MCcycleMax, seeds=0, imageCycle=0):
"""
Run a Monte Carlo Simulation with a given rule set.
reactivityModifiers ... array of ReactivityModifier rule set which is applied to the simulation
MCcycleMax ... int or float; if int, the number of time steps the simulation should run; if float, the simulation stops when the conversion reaches that value
seeds ... int number of randomly created seeds before the model should run
imageCycle ... int determines after how many Monte Carlo iterations an image of the current state should be created and saved, a value of 0 turns it off
"""
# calculating the highest neighbor correlations
maxNeighborOrder = 1
for modifier in reactivityModifiers:
maxNeighborOrder = max(maxNeighborOrder, modifier.neighborOrder)
# calculating higher neighbors of rhombs and building the grid
if maxNeighborOrder > 1:
count = 0
for y in range(len(self.lattice)):
for x in range(len(self.lattice[y])):
count += 1
rhomb = self.getRhomb(x, y)
completeShells = 1 # first neighbors are already known
# run through increasing neighboring shells and fill them with neighbors
while completeShells < maxNeighborOrder:
print("Working on neighbor %i of rhomb %i of %i " % (completeShells + 1, count, self.rhombCount), end='\r')
# special case for the second neighbors
if completeShells == 1:
nMinus1 = rhomb.neighbors[0]
nMinus2 = rhomb.identifier
else:
nMinus1 = rhomb.neighbors[completeShells - 1]
nMinus2 = rhomb.neighbors[completeShells - 2]
# remove duplicate and lower neighbors
nth = self.calculate_Nth_neighbor(nMinus1, nMinus2)
rhomb.neighbors[completeShells] = nth
completeShells += 1
print("\nFinished with neighbors!")
converted = 0
if seeds > 0:
print("Generating seeds")
self.generate_seeds(seeds)
converted += seeds
print("Starting MC simulation...")
runSimulation = True
MCcycle = 0
self.log.log_text("Starting MC simulation")
while runSimulation:
# each run is a single time step
if type(MCcycleMax) == float:
print("Current step: %i, conversion is %0.02f" % (MCcycle, converted / self.numberAllLatticePoints), end='\r')
else:
print("Current step: %i of %i" % (MCcycle + 1, MCcycleMax), end='\r')
# select a rhomb and do stuff with it
x, y = self.get_random_point()
currentRhomb = self.getRhomb(x, y)
if not currentRhomb.reacted:
chanceToReact = 1 # when a photon arrives, it reacts
# applying all modifiers to reactivity
for modifier in reactivityModifiers:
if self.modifierApplies(currentRhomb, modifier):
chanceToReact *= modifier.r
if random.random() <= chanceToReact:
self.lattice[y][x].reacted = True
converted += 1
# save an image after every imageCycle Monte Carlo iterations
if imageCycle > 0:
if MCcycle % imageCycle == 0:
self.log_conversion.log_xy(MCcycle, converted / self.numberAllLatticePoints)
self.image = Image.new('RGB', self.image.size, 'white')
self.draw = ImageDraw.Draw(self.image)
self.draw_image()
self.save_image(MCcycle)
# step up in the Monte Carlo cycle
MCcycle += 1
# check if the simulation should continue or end
if type(MCcycleMax) == float:
if converted / self.numberAllLatticePoints >= MCcycleMax:
runSimulation = False
elif type(MCcycleMax) == int:
if MCcycle >= MCcycleMax:
runSimulation = False
# writing out the last state
self.log_conversion.log_xy(MCcycle, converted / self.numberAllLatticePoints)
self.image = Image.new('RGB', self.image.size, 'white')
self.draw = ImageDraw.Draw(self.image)
self.draw_image()
self.save_image(MCcycle)
print("\nDone!")
self.log.log_text("MC ended")
# ****************************************************************************
# old code snippet about bond breaking
# destroy a reacted dimer but only if there was a change in the crystal
# if random.random() < destroy:
# converted -= 1
# allreacted = []
# for y in range(len(self.lattice)):
# for x in range(len(self.lattice[y])):
# if self.lattice[y][x].reacted:
# allreacted.append((x,y))
# x, y = random.choice(allreacted)
# self.lattice[y][x].reacted = False
# self.getRhomb(x, y).reacted = False
# self.reactionSites[y][x] = False
# ****************************************************************************
def modifierApplies(self, currentRhomb, modifier):
"""
Verifies whether or not a given modifier applies to a given rhomb.
currentRhomb ... rhomb the rhomb for which the reactivity conditions should be tested for
modifier ... ReactivityModifier the rule set which is tested
returns bool whether or not the given rule should be applied
"""
reactedNeighbors, allNeighbors = self.count_reacted_neighbors(currentRhomb, modifier.neighborOrder)
unreactedNeighbors = allNeighbors - reactedNeighbors
# nan means that the modifier does not care about the number of neighbors
return ((math.isnan(modifier.reactedLateralNeighborsRequired) or (modifier.reactedLateralNeighborsRequired <= reactedNeighbors)) and
(math.isnan(modifier.unreactedLateralNeighborsRequired) or (modifier.unreactedLateralNeighborsRequired <= unreactedNeighbors)))
def count_reacted_neighbors(self, rhomb, order):
"""Counts how mean of the neighbors of a given order have reacted.
rhomb ... rhomb center of neighbor finding
order ... order of the nearest neighbor
returns (int, int) a tuple with the number of reacted neighbors and the total amount of neighbors
"""
neighborRhombs = rhomb.neighbors[order - 1]
reactedNeighbors = 0
allNeighbors = 0
# count through the neighbors
for currentRhomb in neighborRhombs:
allNeighbors += 1 # counts all possible neighbors, independent of their state
if self.getRhomb(currentRhomb[0], currentRhomb[1]).reacted:
reactedNeighbors += 1 # counts all reacted neighbors
return reactedNeighbors, allNeighbors
def generate_seeds(self, seeds):
"""
Turns a given number of rhombs at random locations into a reacted state.
seeds ... int number of how many rhombs should be turned into the reacted state
"""
self.rhombColor = 'blue' # change of color to highlight the random seeds
for i in range(seeds):
coords = self.get_random_point()
# set the new state and mark it
self.lattice[coords[1]][coords[0]].reacted = True
self.rhomb_at_kagome(coords[0], coords[1])
self.rhombColor = 'red' # revert color
self.draw_tiling()
self.save_image("start.png")
|
k-eks/AceOfDiamonds
|
kagome_lattice.py
|
Python
|
gpl-3.0
| 17,964
|
[
"CRYSTAL"
] |
c9537f6c2b9f289952449149624fde9a60c794b349c879c52137607a32fd5782
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import unittest_decorators as utx
import espressomd
import espressomd.interactions
import numpy as np
import itertools
@utx.skipIfMissingFeatures("BOND_CONSTRAINT")
class RigidBondTest(ut.TestCase):
def test(self):
target_acc = 1E-3
tol = 1.2 * target_acc
system = espressomd.System(box_l=[10., 10., 10.])
system.cell_system.skin = 0.4
system.time_step = 0.01
system.thermostat.set_langevin(kT=1, gamma=1, seed=42)
rigid_bond = espressomd.interactions.RigidBond(
r=1.2, ptol=1E-3, vtol=target_acc)
system.bonded_inter.add(rigid_bond)
# create polymer
last_p = None
for i in range(5):
p = system.part.add(pos=(i * 1.2, 0, 0))
if last_p is not None:
p.add_bond((rigid_bond, last_p))
last_p = p
system.integrator.run(5000)
# check every bond
p1_iter, p2_iter = itertools.tee(system.part)
next(p2_iter, None) # advance second iterator by 1 step
for p1, p2 in zip(p1_iter, p2_iter):
d = system.distance(p2, p1)
v_d = system.distance_vec(p2, p1)
self.assertAlmostEqual(d, 1.2, delta=tol)
# Velocity projection on distance vector
vel_proj = np.dot(p2.v - p1.v, v_d) / d
self.assertLess(vel_proj, tol)
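# Illustrative sketch (not part of the original test): the same pairwise
# iteration idiom used above, shown on a plain list so it can run without
# ESPResSo installed. itertools.tee duplicates the iterator and the second
# copy is advanced by one element, yielding consecutive (p1, p2) pairs.
def _pairwise_example(seq):
    """Return consecutive pairs [(s0, s1), (s1, s2), ...] from seq."""
    first, second = itertools.tee(seq)
    next(second, None)  # advance the second iterator by one step
    return list(zip(first, second))
assert _pairwise_example([1, 2, 3, 4]) == [(1, 2), (2, 3), (3, 4)]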
if __name__ == "__main__":
ut.main()
|
espressomd/espresso
|
testsuite/python/rigid_bond.py
|
Python
|
gpl-3.0
| 2,148
|
[
"ESPResSo"
] |
700705f1cd431c9c3977835e47da91abbe6c0ed79b0b48880b6a1315920f4946
|
#!/usr/bin/env python
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2018 NIWA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Cylc scheduler server."""
from collections import deque
from logging import DEBUG
import os
from Queue import Empty, Queue
from shutil import copytree, rmtree
from subprocess import Popen, PIPE
import sys
from time import sleep, time
import traceback
import isodatetime.data
import isodatetime.parsers
from parsec.util import printcfg
from cylc.cfgspec.glbl_cfg import glbl_cfg
from cylc.config import SuiteConfig
from cylc.cycling import PointParsingError
from cylc.cycling.loader import get_point, standardise_point_string
from cylc.daemonize import daemonize
from cylc.exceptions import CylcError
import cylc.flags
from cylc.log_diagnosis import LogSpec
from cylc.mp_pool import SuiteProcPool
from cylc.network import PRIVILEGE_LEVELS
from cylc.network.httpserver import HTTPServer
from cylc.state_summary_mgr import StateSummaryMgr
from cylc.suite_db_mgr import SuiteDatabaseManager
from cylc.task_job_logs import JOB_LOG_JOB, get_task_job_log
from cylc.suite_events import (
SuiteEventContext, SuiteEventError, SuiteEventHandler)
from cylc.hostuserutil import get_host, get_user
from cylc.suite_logging import SuiteLog, SUITE_LOG, SUITE_ERR, ERR, LOG
from cylc.suite_srv_files_mgr import (
SuiteSrvFilesManager, SuiteServiceFileError)
from cylc.suite_status import (
KEY_DESCRIPTION, KEY_GROUP, KEY_META, KEY_NAME, KEY_OWNER, KEY_STATES,
KEY_TASKS_BY_STATE, KEY_TITLE, KEY_UPDATE_TIME)
from cylc.taskdef import TaskDef
from cylc.task_id import TaskID
from cylc.task_job_mgr import TaskJobManager
from cylc.task_pool import TaskPool
from cylc.task_proxy import TaskProxy, TaskProxySequenceBoundsError
from cylc.task_state import (
TASK_STATUSES_ACTIVE, TASK_STATUSES_NEVER_ACTIVE, TASK_STATUS_FAILED)
from cylc.templatevars import load_template_vars
from cylc.version import CYLC_VERSION
from cylc.wallclock import (
get_current_time_string, get_seconds_as_interval_string,
get_time_string_from_unix_time as time2str)
from cylc.profiler import Profiler
class SchedulerError(CylcError):
"""Scheduler error."""
pass
class SchedulerStop(CylcError):
"""Scheduler has stopped."""
pass
class Scheduler(object):
"""Cylc scheduler server."""
EVENT_STARTUP = SuiteEventHandler.EVENT_STARTUP
EVENT_SHUTDOWN = SuiteEventHandler.EVENT_SHUTDOWN
EVENT_TIMEOUT = SuiteEventHandler.EVENT_TIMEOUT
EVENT_INACTIVITY_TIMEOUT = SuiteEventHandler.EVENT_INACTIVITY_TIMEOUT
EVENT_STALLED = SuiteEventHandler.EVENT_STALLED
# Intervals in seconds
INTERVAL_MAIN_LOOP = 1.0
INTERVAL_MAIN_LOOP_QUICK = 0.5
INTERVAL_STOP_KILL = 10.0
INTERVAL_STOP_PROCESS_POOL_EMPTY = 0.5
START_MESSAGE_PREFIX = 'Suite starting: '
START_MESSAGE_TMPL = (
START_MESSAGE_PREFIX + 'server=%(host)s:%(port)s pid=%(pid)s')
# Dependency negotiation etc. will run after these commands
PROC_CMDS = (
'release_suite',
'release_tasks',
'kill_tasks',
'reset_task_states',
'spawn_tasks',
'trigger_tasks',
'nudge',
'insert_tasks',
'reload_suite'
)
REF_LOG_TEXTS = (
'triggered off', 'Initial point', 'Start point', 'Final point')
def __init__(self, is_restart, options, args):
self.options = options
self.suite = args[0]
self.profiler = Profiler(self.options.profile_mode)
self.suite_srv_files_mgr = SuiteSrvFilesManager()
try:
self.suite_srv_files_mgr.register(self.suite, options.source)
except SuiteServiceFileError as exc:
sys.exit(exc)
# Register suite if not already done
self.suite_dir = self.suite_srv_files_mgr.get_suite_source_dir(
self.suite)
self.suiterc = self.suite_srv_files_mgr.get_suite_rc(self.suite)
self.suiterc_update_time = None
# For user-defined batch system handlers
sys.path.append(os.path.join(self.suite_dir, 'python'))
self.suite_run_dir = glbl_cfg().get_derived_host_item(
self.suite, 'suite run directory')
self.config = None
self.is_restart = is_restart
self.cli_initial_point_string = None
self.cli_start_point_string = None
start_point_str = None
if len(args) > 1:
start_point_str = args[1]
if getattr(self.options, 'warm', None):
self.cli_start_point_string = start_point_str
else:
self.cli_initial_point_string = start_point_str
self.template_vars = load_template_vars(
self.options.templatevars, self.options.templatevars_file)
self.run_mode = self.options.run_mode
self.owner = get_user()
self.host = get_host()
self.is_stalled = False
self.contact_data = None
# initialize some items in case of early shutdown
# (required in the shutdown() method)
self.state_summary_mgr = None
self.pool = None
self.proc_pool = None
self.task_job_mgr = None
self.task_events_mgr = None
self.suite_event_handler = None
self.httpserver = None
self.port = None
self.command_queue = None
self.message_queue = None
self.ext_trigger_queue = None
self._profile_amounts = {}
self._profile_update_times = {}
self.stop_mode = None
# TODO - stop task should be held by the task pool.
self.stop_task = None
self.stop_point = None
self.stop_clock_time = None # When not None, in Unix time
self.stop_clock_time_string = None # Human-readable format.
self.initial_point = None
self.start_point = None
self.final_point = None
self.pool_hold_point = None
self.suite_timer_timeout = 0.0
self.suite_timer_active = False
self.suite_inactivity_timeout = 0.0
self.already_inactive = False
self.time_next_kill = None
self.already_timed_out = False
self.suite_db_mgr = SuiteDatabaseManager(
self.suite_srv_files_mgr.get_suite_srv_dir(self.suite), # pri_d
os.path.join(self.suite_run_dir, 'log')) # pub_d
self.suite_log = None
self.ref_test_allowed_failures = []
# Last 10 durations (in seconds) of the main loop
self.main_loop_intervals = deque(maxlen=10)
self.can_auto_stop = True
self.previous_profile_point = 0
self.count = 0
self.time_next_fs_check = None
def start(self):
"""Start the server."""
self._start_print_blurb()
glbl_cfg().create_cylc_run_tree(self.suite)
if self.is_restart:
self.suite_db_mgr.restart_upgrade()
try:
detach = not self.options.no_detach
if detach:
daemonize(self)
# Setup the suite log.
SuiteLog.get_inst(self.suite).pimp(detach)
self.proc_pool = SuiteProcPool()
self.httpserver = HTTPServer(self.suite)
self.port = self.httpserver.port
self.configure()
self.profiler.start()
self.run()
except SchedulerStop as exc:
# deliberate stop
self.shutdown(exc)
except SchedulerError as exc:
self.shutdown(exc)
sys.exit(1)
except KeyboardInterrupt as exc:
try:
self.shutdown(str(exc))
except Exception:
# In case of exceptions in the shutdown method itself.
ERR.warning(traceback.format_exc())
sys.exit(1)
except Exception as exc:
ERR.critical(traceback.format_exc())
ERR.error("error caught: cleaning up before exit")
try:
self.shutdown('ERROR: ' + str(exc))
except Exception:
# In case of exceptions in the shutdown method itself
ERR.warning(traceback.format_exc())
if cylc.flags.debug:
raise
else:
sys.exit(1)
else:
# main loop ends (not used?)
self.shutdown()
self.profiler.stop()
@staticmethod
def _start_print_blurb():
"""Print copyright and license information."""
logo = (
" ._. \n"
" | | \n"
"._____._. ._| |_____. \n"
"| .___| | | | | .___| \n"
"| !___| !_! | | !___. \n"
"!_____!___. |_!_____! \n"
" .___! | \n"
" !_____! \n"
)
cylc_license = """
The Cylc Suite Engine [%s]
Copyright (C) 2008-2018 NIWA
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
This program comes with ABSOLUTELY NO WARRANTY;
see `cylc warranty`. It is free software, you
are welcome to redistribute it under certain
conditions; see `cylc conditions`.
""" % CYLC_VERSION
logo_lines = logo.splitlines()
license_lines = cylc_license.splitlines()
lmax = max(len(line) for line in license_lines)
for i, logo_line in enumerate(logo_lines):
print logo_line, ('{0: ^%s}' % lmax).format(license_lines[i])
def configure(self):
"""Configure suite server program."""
self.profiler.log_memory("scheduler.py: start configure")
# Start up essential services
self.proc_pool = SuiteProcPool()
self.suite_log = SuiteLog.get_inst(self.suite)
self.state_summary_mgr = StateSummaryMgr()
self.command_queue = Queue()
self.message_queue = Queue()
self.ext_trigger_queue = Queue()
self.suite_event_handler = SuiteEventHandler(self.proc_pool)
self.task_job_mgr = TaskJobManager(
self.suite, self.proc_pool, self.suite_db_mgr,
self.suite_srv_files_mgr)
self.task_events_mgr = self.task_job_mgr.task_events_mgr
if self.is_restart:
# This logic handles the lack of initial cycle point in "suite.rc".
# Things that can't change on suite reload.
pri_dao = self.suite_db_mgr.get_pri_dao()
pri_dao.select_suite_params(self._load_suite_params_1)
# Configure contact data only after loading UUID string
self.configure_contact()
pri_dao.select_suite_template_vars(self._load_template_vars)
# Take checkpoint and commit immediately so that checkpoint can be
# copied to the public database.
pri_dao.take_checkpoints("restart")
pri_dao.execute_queued_items()
else:
self.configure_contact()
self.profiler.log_memory("scheduler.py: before load_suiterc")
self.load_suiterc()
self.profiler.log_memory("scheduler.py: after load_suiterc")
self.httpserver.connect(self)
self.suite_db_mgr.on_suite_start(self.is_restart)
if self.config.cfg['scheduling']['hold after point']:
self.pool_hold_point = get_point(
self.config.cfg['scheduling']['hold after point'])
if self.options.hold_point_string:
self.pool_hold_point = get_point(
self.options.hold_point_string)
if self.pool_hold_point:
LOG.info("Suite will hold after %s" % self.pool_hold_point)
reqmode = self.config.cfg['cylc']['required run mode']
if reqmode:
if reqmode != self.run_mode:
raise SchedulerError(
'ERROR: this suite requires the %s run mode' % reqmode)
self.task_events_mgr.broadcast_mgr.linearized_ancestors.update(
self.config.get_linearized_ancestors())
self.task_events_mgr.mail_interval = self._get_cylc_conf(
"task event mail interval")
self.task_events_mgr.mail_footer = self._get_events_conf("mail footer")
self.task_events_mgr.suite_url = self.config.cfg['meta']['URL']
self.task_events_mgr.suite_cfg = self.config.cfg['meta']
if self.options.genref or self.options.reftest:
self.configure_reftest()
LOG.info(self.START_MESSAGE_TMPL % {
'host': self.host, 'port': self.httpserver.port,
'pid': os.getpid()})
# Note that the following lines must be present at the top of
# the suite log file for use in reference test runs:
LOG.info('Cylc version: %s' % CYLC_VERSION)
LOG.info('Run mode: %s' % self.run_mode)
LOG.info('Initial point: %s' % self.initial_point)
if self.start_point != self.initial_point:
LOG.info('Start point: %s' % self.start_point)
LOG.info('Final point: %s' % self.final_point)
self.pool = TaskPool(
self.config, self.final_point, self.suite_db_mgr,
self.task_events_mgr)
self.profiler.log_memory("scheduler.py: before load_tasks")
if self.is_restart:
self.load_tasks_for_restart()
else:
self.load_tasks_for_run()
self.profiler.log_memory("scheduler.py: after load_tasks")
self.suite_db_mgr.put_suite_params(self)
self.suite_db_mgr.put_suite_template_vars(self.template_vars)
self.suite_db_mgr.put_runtime_inheritance(self.config)
self.configure_suite_environment()
# Copy local python modules from source to run directory
for sub_dir in ["python", os.path.join("lib", "python")]:
# TODO - eventually drop the deprecated "python" sub-dir.
suite_py = os.path.join(self.suite_dir, sub_dir)
if (os.path.realpath(self.suite_dir) !=
os.path.realpath(self.suite_run_dir) and
os.path.isdir(suite_py)):
suite_run_py = os.path.join(self.suite_run_dir, sub_dir)
try:
rmtree(suite_run_py)
except OSError:
pass
copytree(suite_py, suite_run_py)
self.already_timed_out = False
self.set_suite_timer()
self.already_inactive = False
if self._get_events_conf(self.EVENT_INACTIVITY_TIMEOUT):
self.set_suite_inactivity_timer()
self.profiler.log_memory("scheduler.py: end configure")
def load_tasks_for_run(self):
"""Load tasks for a new run."""
if self.start_point is not None:
if self.options.warm:
LOG.info('Warm Start %s' % self.start_point)
else:
LOG.info('Cold Start %s' % self.start_point)
task_list = self.filter_initial_task_list(
self.config.get_task_name_list())
for name in task_list:
if self.start_point is None:
# No start cycle point at which to load cycling tasks.
continue
try:
self.pool.add_to_runahead_pool(TaskProxy(
self.config.get_taskdef(name), self.start_point,
is_startup=True))
except TaskProxySequenceBoundsError as exc:
LOG.debug(str(exc))
continue
def load_tasks_for_restart(self):
"""Load tasks for restart."""
self.suite_db_mgr.pri_dao.select_suite_params(
self._load_suite_params_2, self.options.checkpoint)
if self.cli_start_point_string:
self.start_point = self.cli_start_point_string
self.suite_db_mgr.pri_dao.select_broadcast_states(
self.task_events_mgr.broadcast_mgr.load_db_broadcast_states,
self.options.checkpoint)
self.suite_db_mgr.pri_dao.select_task_job_run_times(
self._load_task_run_times)
self.suite_db_mgr.pri_dao.select_task_pool_for_restart(
self.pool.load_db_task_pool_for_restart, self.options.checkpoint)
self.suite_db_mgr.pri_dao.select_task_action_timers(
self.pool.load_db_task_action_timers)
# Re-initialise the run directory for user@host for each submitted or
# running task.
# Note: tasks should all be in the runahead pool at this point.
auths = set()
for itask in self.pool.get_rh_tasks():
if itask.state.status in TASK_STATUSES_ACTIVE:
auths.add((itask.task_host, itask.task_owner))
while auths:
for host, owner in auths.copy():
if self.task_job_mgr.task_remote_mgr.remote_init(
host, owner) is not None:
auths.remove((host, owner))
if auths:
sleep(1.0)
# Remote init is done via process pool
self.proc_pool.process()
self.command_poll_tasks()
def _load_suite_params_2(self, row_idx, row):
"""Load previous initial/final cycle point."""
if row_idx == 0:
LOG.info("LOADING suite parameters")
key, value = row
if key == "is_held":
self.pool.is_held = bool(value)
LOG.info("+ hold suite = %s" % (bool(value),))
return
for key_str, self_attr, option_ignore_attr in [
("initial", "start_point", "ignore_start_point"),
("final", "stop_point", "ignore_stop_point")]:
if key != key_str + "_point" or value is None:
continue
# the suite_params table prescribes a start/stop cycle
# (else we take whatever the suite.rc file gives us)
point = get_point(value)
my_point = getattr(self, self_attr)
if getattr(self.options, option_ignore_attr):
# ignore it and take whatever the suite.rc file gives us
if my_point is not None:
ERR.warning(
"I'm ignoring the old " + key_str +
" cycle point as requested,\n"
"but I can't ignore the one set"
" on the command line or in the suite definition.")
elif my_point is not None:
# Given in the suite.rc file
if my_point != point:
ERR.warning(
"old %s cycle point %s, overriding suite.rc %s" %
(key_str, point, my_point))
setattr(self, self_attr, point)
else:
# reinstate from old
setattr(self, self_attr, point)
LOG.info("+ %s cycle point = %s" % (key_str, value))
def _load_task_run_times(self, row_idx, row):
"""Load run times of previously succeeded task jobs."""
if row_idx == 0:
LOG.info("LOADING task run times")
name, run_times_str = row
try:
taskdef = self.config.taskdefs[name]
maxlen = TaskDef.MAX_LEN_ELAPSED_TIMES
for run_time_str in run_times_str.rsplit(",", maxlen)[-maxlen:]:
run_time = int(run_time_str)
taskdef.elapsed_times.append(run_time)
LOG.info("+ %s: %s" % (
name, ",".join(str(s) for s in taskdef.elapsed_times)))
except (KeyError, ValueError, AttributeError):
return
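# Brief worked example of the rsplit idiom above (illustrative comment, not
# part of the original file): if MAX_LEN_ELAPSED_TIMES were 3, then
# "10,20,30,40,50".rsplit(",", 3)[-3:] gives ["30", "40", "50"], i.e. only
# the most recent run times are kept.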
def process_queued_task_messages(self):
"""Handle incoming task messages for each task proxy."""
messages = {}
while self.message_queue.qsize():
try:
task_job, event_time, severity, message = (
self.message_queue.get(block=False))
except Empty:
break
self.message_queue.task_done()
if '/' in task_job: # cycle/task-name/submit-num
cycle, task_name, submit_num = task_job.split('/', 2)
task_id = TaskID.get(task_name, cycle)
submit_num = int(submit_num, 10)
else: # back compat: task-name.cycle
task_id = task_job
submit_num = None
messages.setdefault(task_id, [])
messages[task_id].append(
(submit_num, event_time, severity, message))
# Note on to_poll_tasks: If an incoming message is going to cause a
# reverse change to task state, it is desirable to confirm this by
# polling.
to_poll_tasks = []
for itask in self.pool.get_tasks():
message_items = messages.get(itask.identity)
if message_items is None:
continue
should_poll = False
for submit_num, event_time, severity, message in message_items:
if self.task_events_mgr.process_message(
itask, severity, message, event_time,
self.task_events_mgr.INCOMING_FLAG, submit_num):
should_poll = True
if should_poll:
to_poll_tasks.append(itask)
self.task_job_mgr.poll_task_jobs(
self.suite, to_poll_tasks, poll_succ=True)
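# Illustrative comment (not part of the original file): an incoming
# task_job identifier such as "20180701T0000Z/model/02" splits into cycle
# "20180701T0000Z", task name "model" and submit number 2, while the
# back-compat form is just "model.20180701T0000Z" with no submit number.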
def process_command_queue(self):
"""Process queued commands."""
qsize = self.command_queue.qsize()
if qsize > 0:
log_msg = 'Processing ' + str(qsize) + ' queued command(s)'
else:
return
while True:
try:
name, args, kwargs = self.command_queue.get(False)
except Empty:
break
args_string = ', '.join(str(a) for a in args)
cmdstr = name + '(' + args_string
kwargs_string = ', '.join(
('%s=%s' % (key, value) for key, value in kwargs.items()))
if kwargs_string and args_string:
cmdstr += ', '
cmdstr += kwargs_string + ')'
log_msg += '\n+\t' + cmdstr
try:
n_warnings = getattr(self, "command_%s" % name)(
*args, **kwargs)
except SchedulerStop:
LOG.info('Command succeeded: ' + cmdstr)
raise
except Exception as exc:
# Don't let a bad command bring the suite down.
LOG.warning(traceback.format_exc())
LOG.warning(str(exc))
LOG.warning('Command failed: ' + cmdstr)
else:
if n_warnings:
LOG.info(
'Command succeeded with %s warning(s): %s' %
(n_warnings, cmdstr))
else:
LOG.info('Command succeeded: ' + cmdstr)
cylc.flags.iflag = True
if name in self.PROC_CMDS:
self.task_events_mgr.pflag = True
self.command_queue.task_done()
LOG.info(log_msg)
def _task_type_exists(self, name_or_id):
"""Does a task name or id match a known task type in this suite?"""
name = name_or_id
if TaskID.is_valid_id(name_or_id):
name = TaskID.split(name_or_id)[0]
return name in self.config.get_task_name_list()
@staticmethod
def get_standardised_point_string(point_string):
"""Return a standardised point string.
Used to process incoming command arguments.
"""
try:
point_string = standardise_point_string(point_string)
except PointParsingError as exc:
# (This is only needed to raise a clearer error message).
raise ValueError(
"Invalid cycle point: %s (%s)" % (point_string, exc))
return point_string
def get_standardised_point(self, point_string):
"""Return a standardised point."""
return get_point(self.get_standardised_point_string(point_string))
def get_standardised_taskid(self, task_id):
"""Return task ID with standardised cycle point."""
name, point_string = TaskID.split(task_id)
return TaskID.get(
name, self.get_standardised_point_string(point_string))
def info_get_task_jobfile_path(self, task_id):
"""Return task job file path."""
name, point = TaskID.split(task_id)
return get_task_job_log(
self.suite, point, name, suffix=JOB_LOG_JOB)
def info_get_suite_info(self):
"""Return a dict containing the suite title and description."""
return self.config.cfg['meta']
def info_get_suite_state_summary(self):
"""Return the global, task, and family summary data structures."""
return self.state_summary_mgr.get_state_summary()
def info_get_task_info(self, names):
"""Return info of a task."""
results = {}
for name in names:
try:
results[name] = self.config.describe(name)
except KeyError:
results[name] = {}
return results
def info_get_latest_state(self, client_info, full_mode):
"""Return latest suite state (suitable for a GUI update).
If previous update time is set, return only information since previous
update time. Otherwise, return full information required to populate
the GUI tree and LED views.
Args:
client_info (dict): store 'prev_time', 'prev_err_size'.
full_mode (bool): force full update
Return:
(dict):
cylc_version (str): version of cylc running this suite
full_mode (bool): is this returning a full update?
summary (tuple): (global_summary, task_summary, family_summary)
ancestors (dict): first parent ancestors
ancestors_pruned (dict):
first parent ancestors, without non-task namespaces
descendants (dict): first parent descendants
err_content (str): new content in error log
err_size (int): new size of error log
mean_main_loop_interval (float):
average time interval (seconds) of last 10 main loops
See Also:
info_get_graph_raw
"""
ret = {
'cylc_version': CYLC_VERSION,
'full_mode': full_mode,
}
if full_mode:
client_info['prev_time'] = None
client_info['prev_err_size'] = None
prev_time = client_info.get('prev_time')
if prev_time is None:
full_mode = True
ret['full_mode'] = True
if full_mode or (
self.state_summary_mgr.update_time and
prev_time < self.state_summary_mgr.update_time):
ret['summary'] = self.state_summary_mgr.get_state_summary()
if full_mode or (
self.suiterc_update_time and
prev_time < self.suiterc_update_time):
ret['ancestors'] = self.config.get_first_parent_ancestors()
ret['ancestors_pruned'] = self.config.get_first_parent_ancestors(
pruned=True)
ret['descendants'] = self.config.get_first_parent_descendants()
if full_mode or ERR.update_time and prev_time < ERR.update_time:
ret['err_content'], ret['err_size'] = self.suite_log.get_lines(
SUITE_ERR, client_info.get('prev_err_size'))
client_info['prev_err_size'] = ret['err_size']
client_info['prev_time'] = client_info['time']
if self.main_loop_intervals:
ret['mean_main_loop_interval'] = (
sum(self.main_loop_intervals) / len(self.main_loop_intervals))
return ret
def info_get_graph_raw(self, cto, ctn, group_nodes=None,
ungroup_nodes=None,
ungroup_recursive=False, group_all=False,
ungroup_all=False):
"""Return raw graph."""
return (
self.config.get_graph_raw(
cto, ctn, group_nodes, ungroup_nodes, ungroup_recursive,
group_all, ungroup_all),
self.config.suite_polling_tasks,
self.config.leaves,
self.config.feet)
def info_get_identity(self, privileges):
"""Return suite identity, (description, (states))."""
result = {}
if PRIVILEGE_LEVELS[0] in privileges:
result[KEY_NAME] = self.suite
result[KEY_OWNER] = self.owner
if PRIVILEGE_LEVELS[1] in privileges:
result[KEY_META] = self.config.cfg[KEY_META]
for key in (KEY_TITLE, KEY_DESCRIPTION, KEY_GROUP):
result[key] = self.config.cfg[KEY_META].get(key)
if PRIVILEGE_LEVELS[2] in privileges:
result[KEY_UPDATE_TIME] = self.state_summary_mgr.update_time
result[KEY_STATES] = self.state_summary_mgr.get_state_totals()
result[KEY_TASKS_BY_STATE] = (
self.state_summary_mgr.get_tasks_by_state())
return result
def info_get_task_requisites(self, items, list_prereqs=False):
"""Return prerequisites of a task."""
return self.pool.get_task_requisites(items, list_prereqs=list_prereqs)
def info_ping_task(self, task_id, exists_only=False):
"""Return True if task exists and running."""
task_id = self.get_standardised_taskid(task_id)
return self.pool.ping_task(task_id, exists_only)
def command_set_stop_cleanly(self, kill_active_tasks=False):
"""Stop job submission and set the flag for clean shutdown."""
self._set_stop()
if kill_active_tasks:
self.time_next_kill = time()
def command_stop_now(self, terminate=False):
"""Shutdown immediately."""
if terminate:
self._set_stop(TaskPool.STOP_REQUEST_NOW_NOW)
else:
self._set_stop(TaskPool.STOP_REQUEST_NOW)
def _set_stop(self, stop_mode=None):
"""Set shutdown mode."""
self.proc_pool.set_stopping()
if stop_mode is None:
stop_mode = TaskPool.STOP_REQUEST_CLEAN
self.stop_mode = stop_mode
def command_set_stop_after_point(self, point_string):
"""Set stop after ... point."""
self.set_stop_point(self.get_standardised_point_string(point_string))
def command_set_stop_after_clock_time(self, arg):
"""Set stop after clock time.
format: ISO 8601 compatible or YYYY/MM/DD-HH:mm (backwards comp.)
"""
parser = isodatetime.parsers.TimePointParser()
try:
stop_point = parser.parse(arg)
except ValueError as exc:
try:
stop_point = parser.strptime(arg, "%Y/%m/%d-%H:%M")
except ValueError:
raise exc # Raise the first (prob. more relevant) ValueError.
stop_time_in_epoch_seconds = int(stop_point.get(
"seconds_since_unix_epoch"))
self.set_stop_clock(stop_time_in_epoch_seconds, str(stop_point))
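# Illustrative examples of accepted arguments (comment only, not part of the
# original file): an ISO 8601 time point such as "2018-07-01T12:00Z", or the
# older "2018/07/01-12:00" form; either way the value is converted to
# seconds since the Unix epoch for the stop-clock check.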
def command_set_stop_after_task(self, task_id):
"""Set stop after a task."""
task_id = self.get_standardised_taskid(task_id)
if TaskID.is_valid_id(task_id):
self.set_stop_task(task_id)
def command_release_tasks(self, items):
"""Release tasks."""
return self.pool.release_tasks(items)
def command_poll_tasks(self, items=None, poll_succ=False):
"""Poll pollable tasks or a task/family if options are provided.
Don't poll succeeded tasks unless poll_succ is True.
"""
if self.run_mode == 'simulation':
return
itasks, bad_items = self.pool.filter_task_proxies(items)
self.task_job_mgr.poll_task_jobs(self.suite, itasks,
poll_succ=poll_succ)
return len(bad_items)
def command_kill_tasks(self, items=None):
"""Kill all tasks or a task/family if options are provided."""
itasks, bad_items = self.pool.filter_task_proxies(items)
if self.run_mode == 'simulation':
for itask in itasks:
if itask.state.status in TASK_STATUSES_ACTIVE:
itask.state.reset_state(TASK_STATUS_FAILED)
return len(bad_items)
self.task_job_mgr.kill_task_jobs(self.suite, itasks)
return len(bad_items)
def command_release_suite(self):
"""Release all task proxies in the suite."""
self.release_suite()
def command_hold_tasks(self, items):
"""Hold selected task proxies in the suite."""
return self.pool.hold_tasks(items)
def command_hold_suite(self):
"""Hold all task proxies in the suite."""
self.hold_suite()
def command_hold_after_point_string(self, point_string):
"""Hold tasks AFTER this point (itask.point > point)."""
point = self.get_standardised_point(point_string)
self.hold_suite(point)
LOG.info(
"The suite will pause when all tasks have passed %s" % point)
@staticmethod
def command_set_verbosity(lvl):
"""Remove suite verbosity."""
LOG.logger.setLevel(lvl)
ERR.logger.setLevel(lvl)
cylc.flags.debug = (lvl == DEBUG)
return True, 'OK'
def command_remove_tasks(self, items, spawn=False):
"""Remove tasks."""
return self.pool.remove_tasks(items, spawn)
def command_insert_tasks(self, items, stop_point_string=None,
no_check=False):
"""Insert tasks."""
return self.pool.insert_tasks(items, stop_point_string, no_check)
def command_nudge(self):
"""Cause the task processing loop to be invoked"""
self.task_events_mgr.pflag = True
def command_reload_suite(self):
"""Reload suite configuration."""
LOG.info("Reloading the suite definition.")
old_tasks = set(self.config.get_task_name_list())
self.suite_db_mgr.checkpoint("reload-init")
self.load_suiterc(is_reload=True)
self.task_events_mgr.broadcast_mgr.linearized_ancestors = (
self.config.get_linearized_ancestors())
self.suite_db_mgr.put_runtime_inheritance(self.config)
if self.stop_point is None:
stop_point = self.final_point
else:
stop_point = self.stop_point
self.pool.set_do_reload(self.config, stop_point)
self.task_events_mgr.mail_interval = self._get_cylc_conf(
"task event mail interval")
self.task_events_mgr.mail_footer = self._get_events_conf("mail footer")
# Log tasks that have been added by the reload, removed tasks are
# logged by the TaskPool.
add = set(self.config.get_task_name_list()) - old_tasks
for task in add:
LOG.warning("Added task: '%s'" % (task,))
self.configure_suite_environment()
if self.options.genref or self.options.reftest:
self.configure_reftest(recon=True)
self.suite_db_mgr.put_suite_params(self)
cylc.flags.iflag = True
def set_suite_timer(self):
"""Set suite's timeout timer."""
timeout = self._get_events_conf(self.EVENT_TIMEOUT)
if timeout is None:
return
self.suite_timer_timeout = time() + timeout
if cylc.flags.verbose:
LOG.info("%s suite timer starts NOW: %s" % (
get_seconds_as_interval_string(timeout),
get_current_time_string()))
self.suite_timer_active = True
def set_suite_inactivity_timer(self):
"""Set suite's inactivity timer."""
self.suite_inactivity_timeout = time() + (
self._get_events_conf(self.EVENT_INACTIVITY_TIMEOUT)
)
if cylc.flags.verbose:
LOG.info("%s suite inactivity timer starts NOW: %s" % (
get_seconds_as_interval_string(
self._get_events_conf(self.EVENT_INACTIVITY_TIMEOUT)),
get_current_time_string()))
def configure_contact(self):
"""Create contact file."""
# Make sure another suite of the same name has not started while this
# one is starting
self.suite_srv_files_mgr.detect_old_contact_file(self.suite)
# Get "pid,args" process string with "ps"
pid_str = str(os.getpid())
proc = Popen(
['ps', self.suite_srv_files_mgr.PS_OPTS, pid_str],
stdin=open(os.devnull), stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
ret_code = proc.wait()
process_str = None
for line in out.splitlines():
if line.split(None, 1)[0].strip() == pid_str:
process_str = line.strip()
break
if ret_code or not process_str:
raise SchedulerError(
'ERROR, cannot get process "args" from "ps": %s' % err)
# Write suite contact file.
# Preserve contact data in memory, for regular health check.
mgr = self.suite_srv_files_mgr
contact_data = {
mgr.KEY_API: str(self.httpserver.API),
mgr.KEY_COMMS_PROTOCOL: glbl_cfg().get(
['communication', 'method']),
mgr.KEY_DIR_ON_SUITE_HOST: os.environ['CYLC_DIR'],
mgr.KEY_HOST: self.host,
mgr.KEY_NAME: self.suite,
mgr.KEY_OWNER: self.owner,
mgr.KEY_PORT: str(self.httpserver.port),
mgr.KEY_PROCESS: process_str,
mgr.KEY_SSH_USE_LOGIN_SHELL: str(glbl_cfg().get_host_item(
'use login shell')),
mgr.KEY_SUITE_RUN_DIR_ON_SUITE_HOST: self.suite_run_dir,
mgr.KEY_TASK_MSG_MAX_TRIES: str(glbl_cfg().get(
['task messaging', 'maximum number of tries'])),
mgr.KEY_TASK_MSG_RETRY_INTVL: str(float(glbl_cfg().get(
['task messaging', 'retry interval']))),
mgr.KEY_TASK_MSG_TIMEOUT: str(float(glbl_cfg().get(
['task messaging', 'connection timeout']))),
mgr.KEY_UUID: self.task_job_mgr.task_remote_mgr.uuid_str,
mgr.KEY_VERSION: CYLC_VERSION}
try:
mgr.dump_contact_file(self.suite, contact_data)
except IOError as exc:
raise SchedulerError(
'ERROR, cannot write suite contact file: %s: %s' %
(mgr.get_contact_file(self.suite), exc))
else:
self.contact_data = contact_data
def load_suiterc(self, is_reload=False):
"""Load and log the suite definition."""
self.config = SuiteConfig(
self.suite, self.suiterc, self.template_vars,
run_mode=self.run_mode,
cli_initial_point_string=self.cli_initial_point_string,
cli_start_point_string=self.cli_start_point_string,
cli_final_point_string=self.options.final_point_string,
is_reload=is_reload,
mem_log_func=self.profiler.log_memory,
output_fname=os.path.join(
self.suite_run_dir,
self.suite_srv_files_mgr.FILE_BASE_SUITE_RC + '.processed'),
)
self.suiterc_update_time = time()
# Dump the loaded suiterc for future reference.
cfg_logdir = glbl_cfg().get_derived_host_item(
self.suite, 'suite config log directory')
time_str = get_current_time_string(
override_use_utc=True, use_basic_format=True,
display_sub_seconds=False
)
if is_reload:
load_type = "reload"
elif self.is_restart:
load_type = "restart"
else:
load_type = "run"
base_name = "%s-%s.rc" % (time_str, load_type)
file_name = os.path.join(cfg_logdir, base_name)
try:
with open(file_name, "wb") as handle:
handle.write("# cylc-version: %s\n" % CYLC_VERSION)
printcfg(self.config.cfg, none_str=None, handle=handle)
except IOError as exc:
ERR.error(str(exc))
raise SchedulerError("Unable to log the loaded suite definition")
# Initial and final cycle times - command line takes precedence.
# self.config already alters the 'initial cycle point' for CLI.
self.initial_point = self.config.initial_point
self.start_point = self.config.start_point
self.final_point = get_point(
self.options.final_point_string or
self.config.cfg['scheduling']['final cycle point']
)
if self.final_point is not None:
self.final_point.standardise()
if not self.initial_point and not self.is_restart:
ERR.warning('No initial cycle point provided - no cycling tasks '
'will be loaded.')
if self.run_mode != self.config.run_mode:
self.run_mode = self.config.run_mode
def _load_suite_params_1(self, _, row):
"""Load previous initial cycle point or (warm) start cycle point.
        For restart, these may be missing from "suite.rc", but may have been
        specified as a command line argument on cold/warm start.
"""
key, value = row
if key == 'initial_point':
self.cli_initial_point_string = value
self.task_events_mgr.pflag = True
elif key in ['start_point', 'warm_point']:
# 'warm_point' for back compat <= 7.6.X
self.cli_start_point_string = value
self.task_events_mgr.pflag = True
elif key == 'uuid_str':
self.task_job_mgr.task_remote_mgr.uuid_str = str(value)
def _load_template_vars(self, _, row):
"""Load suite start up template variables."""
key, value = row
# Command line argument takes precedence
if key not in self.template_vars:
self.template_vars[key] = value
def configure_suite_environment(self):
"""Configure suite environment."""
# Pass static cylc and suite variables to job script generation code
self.task_job_mgr.job_file_writer.set_suite_env({
'CYLC_UTC': str(cylc.flags.utc),
'CYLC_DEBUG': str(cylc.flags.debug).lower(),
'CYLC_VERBOSE': str(cylc.flags.verbose).lower(),
'CYLC_SUITE_NAME': self.suite,
'CYLC_CYCLING_MODE': str(cylc.flags.cycling_mode),
'CYLC_SUITE_INITIAL_CYCLE_POINT': str(self.initial_point),
'CYLC_SUITE_FINAL_CYCLE_POINT': str(self.final_point),
})
# Make suite vars available to [cylc][environment]:
for var, val in self.task_job_mgr.job_file_writer.suite_env.items():
os.environ[var] = val
        # Set local values of variables that are potentially task-specific
# due to different directory paths on different task hosts. These
# are overridden by tasks prior to job submission, but in
# principle they could be needed locally by event handlers:
for var, val in [
('CYLC_SUITE_RUN_DIR', self.suite_run_dir),
('CYLC_SUITE_LOG_DIR', self.suite_log.get_dir()),
('CYLC_SUITE_WORK_DIR', glbl_cfg().get_derived_host_item(
self.suite, 'suite work directory')),
('CYLC_SUITE_SHARE_DIR', glbl_cfg().get_derived_host_item(
self.suite, 'suite share directory')),
('CYLC_SUITE_DEF_PATH', self.suite_dir)]:
os.environ[var] = val
# (global config auto expands environment variables in local paths)
cenv = self.config.cfg['cylc']['environment'].copy()
for var, val in cenv.items():
cenv[var] = os.path.expandvars(val)
# path to suite bin directory for suite and event handlers
cenv['PATH'] = os.pathsep.join([
os.path.join(self.suite_dir, 'bin'), os.environ['PATH']])
# and to suite event handlers in this process.
for var, val in cenv.items():
os.environ[var] = val
def configure_reftest(self, recon=False):
"""Configure the reference test."""
if self.options.genref:
self.config.cfg['cylc']['log resolved dependencies'] = True
elif self.options.reftest:
rtc = self.config.cfg['cylc']['reference test']
req = rtc['required run mode']
if req and req != self.run_mode:
raise SchedulerError(
'ERROR: suite allows only ' + req + ' reference tests')
handlers = self._get_events_conf('shutdown handler')
if handlers:
ERR.warning('shutdown handlers replaced by reference test')
self.config.cfg['cylc']['events']['shutdown handler'] = [
rtc['suite shutdown event handler']]
self.config.cfg['cylc']['log resolved dependencies'] = True
self.config.cfg['cylc']['events'][
'abort if shutdown handler fails'] = True
if not recon:
spec = LogSpec(os.path.join(self.config.fdir, 'reference.log'))
self.initial_point = get_point(spec.get_initial_point_string())
self.start_point = get_point(
spec.get_start_point_string()) or self.initial_point
self.final_point = get_point(spec.get_final_point_string())
self.ref_test_allowed_failures = rtc['expected task failures']
if (not rtc['allow task failures'] and
not self.ref_test_allowed_failures):
self.config.cfg['cylc']['abort if any task fails'] = True
self.config.cfg['cylc']['events']['abort on timeout'] = True
timeout = rtc[self.run_mode + ' mode suite timeout']
if not timeout:
raise SchedulerError(
'ERROR: timeout not defined for %s reference tests' % (
self.run_mode))
self.config.cfg['cylc']['events'][self.EVENT_TIMEOUT] = (
timeout)
self.config.cfg['cylc']['events']['reset timer'] = False
def run_event_handlers(self, event, reason):
"""Run a suite event handler.
Run suite event hooks in simulation and dummy mode ONLY if enabled.
"""
try:
if (self.run_mode in ['simulation', 'dummy'] and
self.config.cfg['cylc']['simulation'][
'disable suite event handlers']):
return
except KeyError:
pass
try:
self.suite_event_handler.handle(self.config, SuiteEventContext(
event, reason, self.suite, self.owner, self.host,
self.httpserver.port))
except SuiteEventError as exc:
if event == self.EVENT_SHUTDOWN and self.options.reftest:
LOG.error('SUITE REFERENCE TEST FAILED')
raise SchedulerError(exc.args[0])
else:
if event == self.EVENT_SHUTDOWN and self.options.reftest:
LOG.info('SUITE REFERENCE TEST PASSED')
def initialise_scheduler(self):
"""Prelude to the main scheduler loop.
Determines whether suite is held or should be held.
Determines whether suite can be auto shutdown.
Begins profile logs if needed.
"""
if self.pool_hold_point is not None:
self.hold_suite(self.pool_hold_point)
if self.options.start_held:
LOG.info("Held on start-up (no tasks will be submitted)")
self.hold_suite()
self.run_event_handlers(self.EVENT_STARTUP, 'suite starting')
self.profiler.log_memory("scheduler.py: begin run while loop")
self.time_next_fs_check = None
cylc.flags.iflag = True
if self.options.profile_mode:
self.previous_profile_point = 0
self.count = 0
self.can_auto_stop = (
not self.config.cfg['cylc']['disable automatic shutdown'] and
not self.options.no_auto_shutdown)
def process_task_pool(self):
"""Process ALL TASKS whenever something has changed that might
require renegotiation of dependencies, etc"""
if cylc.flags.debug:
LOG.debug("BEGIN TASK PROCESSING")
time0 = time()
if (self._get_events_conf(self.EVENT_INACTIVITY_TIMEOUT) and
self._get_events_conf('reset inactivity timer')):
self.set_suite_inactivity_timer()
self.pool.match_dependencies()
if self.stop_mode is None:
itasks = self.pool.get_ready_tasks()
if itasks:
cylc.flags.iflag = True
done_tasks = self.task_job_mgr.submit_task_jobs(
self.suite, itasks, self.run_mode == 'simulation')
if self.config.cfg['cylc']['log resolved dependencies']:
for itask in done_tasks:
deps = itask.state.get_resolved_dependencies()
LOG.info('triggered off %s' % deps, itask=itask)
for meth in [
self.pool.spawn_all_tasks,
self.pool.remove_spent_tasks,
self.pool.remove_suiciding_tasks]:
if meth():
cylc.flags.iflag = True
self.task_events_mgr.broadcast_mgr.expire_broadcast(
self.pool.get_min_point())
if cylc.flags.debug:
LOG.debug("END TASK PROCESSING (took %s seconds)" %
(time() - time0))
def process_suite_db_queue(self):
"""Update suite DB."""
try:
self.suite_db_mgr.process_queued_ops()
except OSError as err:
if cylc.flags.debug:
ERR.debug(traceback.format_exc())
raise SchedulerError(str(err))
def database_health_check(self):
"""If public database is stuck, blast it away by copying the content
of the private database into it."""
try:
self.suite_db_mgr.recover_pub_from_pri()
except (IOError, OSError) as exc:
# Something has to be very wrong here, so stop the suite
raise SchedulerError(str(exc))
def late_tasks_check(self):
"""Report tasks that are never active and are late."""
now = time()
for itask in self.pool.get_tasks():
if (not itask.is_late and itask.get_late_time() and
itask.state.status in TASK_STATUSES_NEVER_ACTIVE and
now > itask.get_late_time()):
msg = '%s (late-time=%s)' % (
self.task_events_mgr.EVENT_LATE,
time2str(itask.get_late_time()))
itask.is_late = True
LOG.warning(msg, itask=itask)
self.task_events_mgr.setup_event_handlers(
itask, self.task_events_mgr.EVENT_LATE, msg)
self.suite_db_mgr.put_insert_task_late_flags(itask)
def timeout_check(self):
"""Check suite and task timers."""
self.check_suite_timer()
if self._get_events_conf(self.EVENT_INACTIVITY_TIMEOUT):
self.check_suite_inactive()
# check submission and execution timeout and polling timers
if self.run_mode != 'simulation':
self.task_job_mgr.check_task_jobs(self.suite, self.pool)
def suite_shutdown(self):
"""Determines if the suite can be shutdown yet."""
if (self.config.cfg['cylc']['abort if any task fails'] and
self.pool.any_task_failed()):
# Task failure + abort if any task fails
self._set_stop(TaskPool.STOP_AUTO_ON_TASK_FAILURE)
elif self.options.reftest and self.ref_test_allowed_failures:
            # In reference test mode and unexpected failures occurred
bad_tasks = []
for itask in self.pool.get_failed_tasks():
if itask.identity not in self.ref_test_allowed_failures:
bad_tasks.append(itask)
if bad_tasks:
sys.stderr.write(
'Failed task(s) not in allowed failures list:\n')
for itask in bad_tasks:
sys.stderr.write("\t%s\n" % itask.identity)
self._set_stop(TaskPool.STOP_AUTO_ON_TASK_FAILURE)
# Can suite shut down automatically?
if self.stop_mode is None and (
self.stop_clock_done() or self.stop_task_done() or
self.can_auto_stop and self.pool.check_auto_shutdown()):
self._set_stop(TaskPool.STOP_AUTO)
# Is the suite ready to shut down now?
if self.pool.can_stop(self.stop_mode):
self.update_state_summary()
self.proc_pool.close()
if self.stop_mode != TaskPool.STOP_REQUEST_NOW_NOW:
# Wait for process pool to complete,
# unless --now --now is requested
stop_process_pool_empty_msg = (
"Waiting for the command process pool to empty" +
" for shutdown")
while self.proc_pool.is_not_done():
sleep(self.INTERVAL_STOP_PROCESS_POOL_EMPTY)
if stop_process_pool_empty_msg:
LOG.info(stop_process_pool_empty_msg)
stop_process_pool_empty_msg = None
self.proc_pool.process()
self.process_command_queue()
if self.options.profile_mode:
self.profiler.log_memory(
"scheduler.py: end main loop (total loops %d): %s" %
(self.count, get_current_time_string()))
if self.stop_mode == TaskPool.STOP_AUTO_ON_TASK_FAILURE:
raise SchedulerError(self.stop_mode)
else:
raise SchedulerStop(self.stop_mode)
elif (self.time_next_kill is not None and
time() > self.time_next_kill):
self.command_poll_tasks()
self.command_kill_tasks()
self.time_next_kill = time() + self.INTERVAL_STOP_KILL
def suite_health_check(self, has_changes):
"""Health check.
Suite run directory still there.
Suite contact file has the right info.
"""
if self.stop_mode is None and not has_changes:
self.check_suite_stalled()
now = time()
if self.time_next_fs_check is None or now > self.time_next_fs_check:
if not os.path.exists(self.suite_run_dir):
raise SchedulerError(
"%s: suite run directory not found" % self.suite_run_dir)
try:
contact_data = self.suite_srv_files_mgr.load_contact_file(
self.suite)
if contact_data != self.contact_data:
raise AssertionError()
except (AssertionError, IOError, ValueError,
SuiteServiceFileError):
ERR.critical(traceback.format_exc())
exc = SchedulerError(
("%s: suite contact file corrupted/modified and" +
" may be left") %
self.suite_srv_files_mgr.get_contact_file(self.suite))
raise exc
self.time_next_fs_check = (
now + self._get_cylc_conf('health check interval'))
def update_profiler_logs(self, tinit):
"""Update info for profiler."""
now = time()
self._update_profile_info("scheduler loop dt (s)", now - tinit,
amount_format="%.3f")
self._update_cpu_usage()
if now - self.previous_profile_point >= 60:
# Only get this every minute.
self.previous_profile_point = now
self.profiler.log_memory("scheduler.py: loop #%d: %s" % (
self.count, get_current_time_string()))
self.count += 1
def run(self):
"""Main loop."""
self.initialise_scheduler()
while True: # MAIN LOOP
tinit = time()
if self.pool.do_reload:
self.pool.reload_taskdefs()
self.suite_db_mgr.checkpoint("reload-done")
cylc.flags.iflag = True
self.process_command_queue()
if self.pool.release_runahead_tasks():
cylc.flags.iflag = True
self.task_events_mgr.pflag = True
self.proc_pool.process()
# PROCESS ALL TASKS whenever something has changed that might
# require renegotiation of dependencies, etc.
if self.should_process_tasks():
self.process_task_pool()
self.late_tasks_check()
self.process_queued_task_messages()
self.process_command_queue()
self.task_events_mgr.process_events(self)
# Update database
self.suite_db_mgr.put_task_event_timers(self.task_events_mgr)
has_changes = cylc.flags.iflag
if cylc.flags.iflag:
self.suite_db_mgr.put_task_pool(self.pool)
self.update_state_summary() # Will reset cylc.flags.iflag
self.process_suite_db_queue()
# If public database is stuck, blast it away by copying the content
# of the private database into it.
self.database_health_check()
# Shutdown suite if timeouts have occurred
self.timeout_check()
# Does the suite need to shutdown on task failure?
self.suite_shutdown()
# Suite health checks
self.suite_health_check(has_changes)
if self.options.profile_mode:
self.update_profiler_logs(tinit)
# Sleep a bit for things to catch up.
# Quick sleep if there are items pending in process pool.
# (Should probably use quick sleep logic for other queues?)
elapsed = time() - tinit
quick_mode = self.proc_pool.is_not_done()
if (elapsed >= self.INTERVAL_MAIN_LOOP or
quick_mode and elapsed >= self.INTERVAL_MAIN_LOOP_QUICK):
# Main loop has taken quite a bit to get through
# Still yield control to other threads by sleep(0.0)
sleep(0.0)
elif quick_mode:
sleep(self.INTERVAL_MAIN_LOOP_QUICK - elapsed)
else:
sleep(self.INTERVAL_MAIN_LOOP - elapsed)
# Record latest main loop interval
self.main_loop_intervals.append(time() - tinit)
# END MAIN LOOP
def update_state_summary(self):
"""Update state summary, e.g. for GUI."""
self.state_summary_mgr.update(self)
cylc.flags.iflag = False
self.is_stalled = False
if self.suite_timer_active:
self.suite_timer_active = False
if cylc.flags.verbose:
LOG.info("%s suite timer stopped NOW: %s" % (
get_seconds_as_interval_string(
self._get_events_conf(self.EVENT_TIMEOUT)),
get_current_time_string()))
def check_suite_timer(self):
"""Check if suite has timed out or not."""
if (self._get_events_conf(self.EVENT_TIMEOUT) is None or
self.already_timed_out or not self.is_stalled):
return
if time() > self.suite_timer_timeout:
self.already_timed_out = True
message = 'suite timed out after %s' % (
get_seconds_as_interval_string(
self._get_events_conf(self.EVENT_TIMEOUT))
)
LOG.warning(message)
self.run_event_handlers(self.EVENT_TIMEOUT, message)
if self._get_events_conf('abort on timeout'):
raise SchedulerError('Abort on suite timeout is set')
def check_suite_inactive(self):
"""Check if suite is inactive or not."""
if self.already_inactive:
return
if time() > self.suite_inactivity_timeout:
self.already_inactive = True
message = 'suite timed out after inactivity for %s' % (
get_seconds_as_interval_string(
self._get_events_conf(self.EVENT_INACTIVITY_TIMEOUT)))
LOG.warning(message)
self.run_event_handlers(self.EVENT_INACTIVITY_TIMEOUT, message)
if self._get_events_conf('abort on inactivity'):
raise SchedulerError('Abort on suite inactivity is set')
def check_suite_stalled(self):
"""Check if suite is stalled or not."""
if self.is_stalled: # already reported
return
self.is_stalled = self.pool.is_stalled()
if self.is_stalled:
message = 'suite stalled'
LOG.warning(message)
self.run_event_handlers(self.EVENT_STALLED, message)
self.pool.report_stalled_task_deps()
if self._get_events_conf('abort on stalled'):
raise SchedulerError('Abort on suite stalled is set')
# Start suite timeout timer
if self._get_events_conf(self.EVENT_TIMEOUT):
self.set_suite_timer()
def should_process_tasks(self):
"""Return True if waiting tasks are ready."""
# do we need to do a pass through the main task processing loop?
process = False
if self.task_events_mgr.pflag:
# This flag is turned on by commands that change task state
process = True
self.task_events_mgr.pflag = False # reset
if self.task_job_mgr.task_remote_mgr.ready:
# This flag is turned on when a host init/select command completes
process = True
self.task_job_mgr.task_remote_mgr.ready = False # reset
broadcast_mgr = self.task_events_mgr.broadcast_mgr
broadcast_mgr.add_ext_triggers(self.ext_trigger_queue)
now = time()
for itask in self.pool.get_tasks():
# External trigger matching and task expiry must be done
# regardless, so they need to be in separate "if ..." blocks.
if broadcast_mgr.match_ext_trigger(itask):
process = True
if self.pool.set_expired_task(itask, now):
process = True
if itask.is_ready(now):
process = True
if self.run_mode == 'simulation' and self.pool.sim_time_check(
self.message_queue):
process = True
return process
def shutdown(self, reason=None):
"""Shutdown the suite."""
msg = "Suite shutting down"
if isinstance(reason, CylcError):
msg += ' - %s' % reason.args[0]
if isinstance(reason, SchedulerError):
sys.stderr.write(msg + '\n')
reason = reason.args[0]
elif reason:
msg += ' - %s' % reason
LOG.info(msg)
if self.options.genref:
try:
handle = open(
os.path.join(self.config.fdir, 'reference.log'), 'wb')
for line in open(
self.suite_log.get_log_path(SUITE_LOG)):
if any(text in line for text in self.REF_LOG_TEXTS):
handle.write(line)
handle.close()
except IOError as exc:
ERR.error(str(exc))
if self.proc_pool:
if self.proc_pool.is_not_done():
# e.g. KeyboardInterrupt
self.proc_pool.terminate()
self.proc_pool.process()
if self.pool is not None:
self.pool.warn_stop_orphans()
try:
self.suite_db_mgr.put_task_event_timers(self.task_events_mgr)
self.suite_db_mgr.put_task_pool(self.pool)
except Exception as exc:
ERR.error(str(exc))
if self.httpserver:
self.httpserver.shutdown()
# Flush errors and info before removing suite contact file
sys.stdout.flush()
sys.stderr.flush()
if self.contact_data:
fname = self.suite_srv_files_mgr.get_contact_file(self.suite)
try:
os.unlink(fname)
except OSError as exc:
ERR.warning("failed to remove suite contact file: %s\n%s\n" % (
fname, exc))
if self.task_job_mgr:
self.task_job_mgr.task_remote_mgr.remote_tidy()
# disconnect from suite-db, stop db queue
try:
self.suite_db_mgr.process_queued_ops()
self.suite_db_mgr.on_suite_shutdown()
except StandardError as exc:
ERR.error(str(exc))
# The getattr() calls and if tests below are used in case the
# suite is not fully configured before the shutdown is called.
if getattr(self, "config", None) is not None:
# run shutdown handlers
self.run_event_handlers(self.EVENT_SHUTDOWN, str(reason))
LOG.info("DONE") # main thread exit
def set_stop_point(self, stop_point_string):
"""Set stop point."""
stop_point = get_point(stop_point_string)
self.stop_point = stop_point
LOG.info("Setting stop cycle point: %s" % stop_point_string)
self.pool.set_stop_point(self.stop_point)
def set_stop_clock(self, unix_time, date_time_string):
"""Set stop clock time."""
LOG.info("Setting stop clock time: %s (unix time: %s)" % (
date_time_string, unix_time))
self.stop_clock_time = unix_time
self.stop_clock_time_string = date_time_string
def set_stop_task(self, task_id):
"""Set stop after a task."""
name = TaskID.split(task_id)[0]
if name in self.config.get_task_name_list():
task_id = self.get_standardised_taskid(task_id)
LOG.info("Setting stop task: " + task_id)
self.stop_task = task_id
else:
LOG.warning("Requested stop task name does not exist: %s" % name)
def stop_task_done(self):
"""Return True if stop task has succeeded."""
if self.stop_task and self.pool.task_succeeded(self.stop_task):
LOG.info("Stop task %s finished" % self.stop_task)
return True
else:
return False
def hold_suite(self, point=None):
"""Hold all tasks in suite."""
if point is None:
self.pool.hold_all_tasks()
sdm = self.suite_db_mgr
sdm.db_inserts_map[sdm.TABLE_SUITE_PARAMS].append(
{"key": "is_held", "value": 1})
else:
LOG.info("Setting suite hold cycle point: " + str(point))
self.pool.set_hold_point(point)
def release_suite(self):
"""Release (un-hold) all tasks in suite."""
if self.pool.is_held:
LOG.info("RELEASE: new tasks will be queued when ready")
self.pool.set_hold_point(None)
self.pool.release_all_tasks()
sdm = self.suite_db_mgr
sdm.db_deletes_map[sdm.TABLE_SUITE_PARAMS].append({"key": "is_held"})
def paused(self):
"""Is the suite paused?"""
return self.pool.is_held
def command_trigger_tasks(self, items, back_out=False):
"""Trigger tasks."""
return self.pool.trigger_tasks(items, back_out)
def command_dry_run_tasks(self, items, check_syntax=True):
"""Dry-run tasks, e.g. edit run."""
itasks, bad_items = self.pool.filter_task_proxies(items)
n_warnings = len(bad_items)
if len(itasks) > 1:
LOG.warning("Unique task match not found: %s" % items)
return n_warnings + 1
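        # Keep cycling the process pool until the matched task has either
        # been prepared for the dry run or rejected.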
while self.stop_mode is None:
prep_tasks, bad_tasks = self.task_job_mgr.prep_submit_task_jobs(
self.suite, [itasks[0]], dry_run=True,
check_syntax=check_syntax)
if itasks[0] in prep_tasks:
return n_warnings
elif itasks[0] in bad_tasks:
return n_warnings + 1
else:
self.proc_pool.process()
sleep(self.INTERVAL_MAIN_LOOP_QUICK)
def command_reset_task_states(self, items, state=None, outputs=None):
"""Reset the state of tasks."""
return self.pool.reset_task_states(items, state, outputs)
def command_spawn_tasks(self, items):
"""Force spawn task successors."""
return self.pool.spawn_tasks(items)
def command_take_checkpoints(self, items):
"""Insert current task_pool, etc to checkpoints tables."""
return self.suite_db_mgr.checkpoint(items[0])
def filter_initial_task_list(self, inlist):
"""Return list of initial tasks after applying a filter."""
included_by_rc = self.config.cfg[
'scheduling']['special tasks']['include at start-up']
excluded_by_rc = self.config.cfg[
'scheduling']['special tasks']['exclude at start-up']
outlist = []
for name in inlist:
if name in excluded_by_rc:
continue
if len(included_by_rc) > 0:
if name not in included_by_rc:
continue
outlist.append(name)
return outlist
def stop_clock_done(self):
"""Return True if wall clock stop time reached."""
if self.stop_clock_time is not None and time() > self.stop_clock_time:
time_point = (
isodatetime.data.get_timepoint_from_seconds_since_unix_epoch(
self.stop_clock_time
)
)
LOG.info("Wall clock stop time reached: %s" % time_point)
self.stop_clock_time = None
return True
else:
return False
def _update_profile_info(self, category, amount, amount_format="%s"):
"""Update the 1, 5, 15 minute dt averages for a given category."""
now = time()
self._profile_amounts.setdefault(category, [])
amounts = self._profile_amounts[category]
amounts.append((now, amount))
self._profile_update_times.setdefault(category, None)
last_update = self._profile_update_times[category]
if last_update is not None and now < last_update + 60:
return
self._profile_update_times[category] = now
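        # Recompute the 1, 5 and 15 minute rolling averages, discarding
        # samples older than 15 minutes.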
averages = {1: [], 5: [], 15: []}
for then, amount in list(amounts):
age = (now - then) / 60.0
if age > 15:
amounts.remove((then, amount))
continue
for minute_num in averages.keys():
if age <= minute_num:
averages[minute_num].append(amount)
output_text = "PROFILE: %s:" % category
for minute_num, minute_amounts in sorted(averages.items()):
averages[minute_num] = sum(minute_amounts) / len(minute_amounts)
output_text += (" %d: " + amount_format) % (
minute_num, averages[minute_num])
LOG.info(output_text)
def _update_cpu_usage(self):
"""Obtain CPU usage statistics."""
proc = Popen(
["ps", "-o%cpu= ", str(os.getpid())],
stdin=open(os.devnull), stdout=PIPE)
try:
cpu_frac = float(proc.communicate()[0])
except (TypeError, OSError, IOError, ValueError) as exc:
LOG.warning("Cannot get CPU % statistics: %s" % exc)
return
self._update_profile_info("CPU %", cpu_frac, amount_format="%.1f")
def _get_cylc_conf(self, key, default=None):
"""Return a named setting under [cylc] from suite.rc or global.rc."""
for getter in [self.config.cfg['cylc'], glbl_cfg().get(['cylc'])]:
try:
value = getter[key]
except KeyError:
pass
else:
if value is not None:
return value
return default
def _get_events_conf(self, key, default=None):
"""Return a named [cylc][[events]] configuration."""
return self.suite_event_handler.get_events_conf(
self.config, key, default)
| jonnyhtw/cylc | lib/cylc/scheduler.py | Python | gpl-3.0 | 72,789 | ["BLAST"] | 6d224f952c48d2cdebeb826cb24eec19f826dab921f0caa3bd7af251ae371241 |
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`http` module enables OpenLP to retrieve scripture from bible websites.
"""
import os
import logging
import re
import socket
import urllib.request, urllib.parse, urllib.error
from html.parser import HTMLParseError
from bs4 import BeautifulSoup, NavigableString, Tag
from openlp.core.lib import Registry, translate
from openlp.core.lib.ui import critical_error_message_box
from openlp.core.utils import get_web_page
from openlp.plugins.bibles.lib import SearchResults
from openlp.plugins.bibles.lib.db import BibleDB, BiblesResourcesDB, Book
CLEANER_REGEX = re.compile(r' |<br />|\'\+\'')
FIX_PUNKCTUATION_REGEX = re.compile(r'[ ]+([.,;])')
REDUCE_SPACES_REGEX = re.compile(r'[ ]{2,}')
UGLY_CHARS = {
'\u2014': ' - ',
'\u2018': '\'',
'\u2019': '\'',
'\u201c': '"',
'\u201d': '"',
' ': ' '
}
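# Bibleserver encodes the verse reference in an element's CSS class (e.g. "v01003016 verse");
# group 3 of this pattern is used as the verse number.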
VERSE_NUMBER_REGEX = re.compile(r'v(\d{1,2})(\d{3})(\d{3}) verse.*')
log = logging.getLogger(__name__)
class BGExtract(object):
"""
Extract verses from BibleGateway
"""
def __init__(self, proxy_url=None):
log.debug('BGExtract.init("%s")', proxy_url)
self.proxy_url = proxy_url
socket.setdefaulttimeout(30)
def _remove_elements(self, parent, tag, class_=None):
"""
Remove a particular element from the BeautifulSoup tree.
``parent``
The element from which items need to be removed.
``tag``
            A string of the tag type, e.g. "div"
``class_``
An HTML class attribute for further qualification.
"""
if class_:
all_tags = parent.find_all(tag, class_)
else:
all_tags = parent.find_all(tag)
for element in all_tags:
element.extract()
def _extract_verse(self, tag):
"""
Extract a verse (or part of a verse) from a tag.
``tag``
The BeautifulSoup Tag element with the stuff we want.
"""
if isinstance(tag, NavigableString):
return None, str(tag)
elif tag.get('class')[0] == "versenum" or tag.get('class')[0] == 'versenum mid-line':
verse = str(tag.string).replace('[', '').replace(']', '').strip()
return verse, None
elif tag.get('class')[0] == 'chapternum':
verse = '1'
return verse, None
else:
verse = None
text = ''
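            # Recurse into the child nodes, picking up the verse number and concatenating any text fragments.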
for child in tag.contents:
c_verse, c_text = self._extract_verse(child)
if c_verse:
verse = c_verse
if text and c_text:
text += c_text
elif c_text is not None:
text = c_text
return verse, text
def _clean_soup(self, tag):
"""
Remove all the rubbish from the HTML page.
``tag``
The base tag within which we want to remove stuff.
"""
self._remove_elements(tag, 'sup', 'crossreference')
self._remove_elements(tag, 'sup', 'footnote')
self._remove_elements(tag, 'div', 'footnotes')
self._remove_elements(tag, 'div', 'crossrefs')
self._remove_elements(tag, 'h3')
self._remove_elements(tag, 'h4')
self._remove_elements(tag, 'h5')
def _extract_verses(self, tags):
"""
Extract all the verses from a pre-prepared list of HTML tags.
``tags``
A list of BeautifulSoup Tag elements.
"""
verses = []
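        # Process the tags in reverse document order: text that follows a verse
        # number accumulates in current_text and is attached once that verse number is reached.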
tags = tags[::-1]
current_text = ''
for tag in tags:
verse = None
text = ''
for child in tag.contents:
c_verse, c_text = self._extract_verse(child)
if c_verse:
verse = c_verse
if text and c_text:
text += c_text
elif c_text is not None:
text = c_text
if not verse:
current_text = text + ' ' + current_text
else:
text += ' ' + current_text
current_text = ''
if text:
for old, new in UGLY_CHARS.items():
text = text.replace(old, new)
text = ' '.join(text.split())
if verse and text:
verse = verse.strip()
try:
verse = int(verse)
except ValueError:
verse_parts = verse.split('-')
if len(verse_parts) > 1:
verse = int(verse_parts[0])
except TypeError:
log.warn('Illegal verse number: %s', str(verse))
verses.append((verse, text))
verse_list = {}
for verse, text in verses[::-1]:
verse_list[verse] = text
return verse_list
def _extract_verses_old(self, div):
"""
        Use the old style of parsing for those Bibles on BG which mysteriously have not been migrated to the new
        (still broken) HTML.
``div``
The parent div.
"""
verse_list = {}
# Cater for inconsistent mark up in the first verse of a chapter.
first_verse = div.find('versenum')
if first_verse and first_verse.contents:
verse_list[1] = str(first_verse.contents[0])
for verse in div('sup', 'versenum'):
raw_verse_num = verse.next_element
clean_verse_num = 0
            # Not all verses exist in all translations and may or may not be represented by a verse number. If they
            # are not, that is fine; if they are, the number will probably be in a format that breaks int(). We will
            # then have no idea what garbage may be sucked into the verse text, so if we do not get a clean int()
            # we ignore the verse completely.
try:
clean_verse_num = int(str(raw_verse_num))
except ValueError:
verse_parts = str(raw_verse_num).split('-')
if len(verse_parts) > 1:
clean_verse_num = int(verse_parts[0])
except TypeError:
log.warn('Illegal verse number: %s', str(raw_verse_num))
if clean_verse_num:
verse_text = raw_verse_num.next_element
part = raw_verse_num.next_element.next_element
while not (isinstance(part, Tag) and part.get('class')[0] == 'versenum'):
# While we are still in the same verse grab all the text.
if isinstance(part, NavigableString):
verse_text += part
if isinstance(part.next_element, Tag) and part.next_element.name == 'div':
# Run out of verses so stop.
break
part = part.next_element
verse_list[clean_verse_num] = str(verse_text)
return verse_list
def get_bible_chapter(self, version, book_name, chapter):
"""
Access and decode Bibles via the BibleGateway website.
``version``
The version of the Bible like 31 for New International version.
``book_name``
Name of the Book.
``chapter``
Chapter number.
"""
log.debug('BGExtract.get_bible_chapter("%s", "%s", "%s")', version, book_name, chapter)
url_book_name = urllib.parse.quote(book_name.encode("utf-8"))
url_params = 'search=%s+%s&version=%s' % (url_book_name, chapter, version)
soup = get_soup_for_bible_ref(
'http://www.biblegateway.com/passage/?%s' % url_params,
pre_parse_regex=r'<meta name.*?/>', pre_parse_substitute='')
if not soup:
return None
div = soup.find('div', 'result-text-style-normal')
self._clean_soup(div)
span_list = div.find_all('span', 'text')
log.debug('Span list: %s', span_list)
if not span_list:
# If we don't get any spans then we must have the old HTML format
verse_list = self._extract_verses_old(div)
else:
verse_list = self._extract_verses(span_list)
if not verse_list:
log.debug('No content found in the BibleGateway response.')
send_error_message('parse')
return None
return SearchResults(book_name, chapter, verse_list)
def get_books_from_http(self, version):
"""
        Load a list of all books a Bible contains from the BibleGateway website.
``version``
The version of the Bible like NIV for New International Version
"""
log.debug('BGExtract.get_books_from_http("%s")', version)
url_params = urllib.parse.urlencode({'action': 'getVersionInfo', 'vid': '%s' % version})
reference_url = 'http://www.biblegateway.com/versions/?%s#books' % url_params
page = get_web_page(reference_url)
if not page:
send_error_message('download')
return None
page_source = page.read()
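        # Pages are normally UTF-8; fall back to cp1251 for pages that fail to decode.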
try:
page_source = str(page_source, 'utf8')
except UnicodeDecodeError:
page_source = str(page_source, 'cp1251')
try:
soup = BeautifulSoup(page_source)
except HTMLParseError:
log.error('BeautifulSoup could not parse the Bible page.')
send_error_message('parse')
return None
if not soup:
send_error_message('parse')
return None
self.application.process_events()
content = soup.find('table', 'infotable')
if content:
content = content.find_all('tr')
if not content:
log.error('No books found in the Biblegateway response.')
send_error_message('parse')
return None
books = []
for book in content:
book = book.find('td')
if book:
books.append(book.contents[0])
return books
def _get_application(self):
"""
        Adds the OpenLP application object to the class dynamically.
Windows needs to access the application in a dynamic manner.
"""
if os.name == 'nt':
return Registry().get('application')
else:
if not hasattr(self, '_application'):
self._application = Registry().get('application')
return self._application
application = property(_get_application)
class BSExtract(object):
"""
Extract verses from Bibleserver.com
"""
def __init__(self, proxy_url=None):
log.debug('BSExtract.init("%s")', proxy_url)
self.proxy_url = proxy_url
socket.setdefaulttimeout(30)
def get_bible_chapter(self, version, book_name, chapter):
"""
Access and decode bibles via Bibleserver mobile website
``version``
The version of the bible like NIV for New International Version
``book_name``
Text name of bible book e.g. Genesis, 1. John, 1John or Offenbarung
``chapter``
Chapter number
"""
log.debug('BSExtract.get_bible_chapter("%s", "%s", "%s")', version, book_name, chapter)
url_version = urllib.parse.quote(version.encode("utf-8"))
url_book_name = urllib.parse.quote(book_name.encode("utf-8"))
chapter_url = 'http://m.bibleserver.com/text/%s/%s%d' % (url_version, url_book_name, chapter)
header = ('Accept-Language', 'en')
soup = get_soup_for_bible_ref(chapter_url, header)
if not soup:
return None
self.application.process_events()
content = soup.find('div', 'content')
if not content:
log.error('No verses found in the Bibleserver response.')
send_error_message('parse')
return None
content = content.find('div').find_all('div')
verses = {}
for verse in content:
self.application.process_events()
versenumber = int(VERSE_NUMBER_REGEX.sub(r'\3', ' '.join(verse['class'])))
verses[versenumber] = verse.contents[1].rstrip('\n')
return SearchResults(book_name, chapter, verses)
def get_books_from_http(self, version):
"""
        Load a list of all books a Bible contains from the Bibleserver mobile website.
``version``
The version of the Bible like NIV for New International Version
"""
log.debug('BSExtract.get_books_from_http("%s")', version)
url_version = urllib.parse.quote(version.encode("utf-8"))
chapter_url = 'http://m.bibleserver.com/overlay/selectBook?translation=%s' % (url_version)
soup = get_soup_for_bible_ref(chapter_url)
if not soup:
return None
content = soup.find('ul')
if not content:
log.error('No books found in the Bibleserver response.')
send_error_message('parse')
return None
content = content.find_all('li')
return [book.contents[0].contents[0] for book in content]
def _get_application(self):
"""
        Adds the OpenLP application object to the class dynamically.
Windows needs to access the application in a dynamic manner.
"""
if os.name == 'nt':
return Registry().get('application')
else:
if not hasattr(self, '_application'):
self._application = Registry().get('application')
return self._application
application = property(_get_application)
class CWExtract(object):
"""
Extract verses from CrossWalk/BibleStudyTools
"""
def __init__(self, proxy_url=None):
log.debug('CWExtract.init("%s")', proxy_url)
self.proxy_url = proxy_url
socket.setdefaulttimeout(30)
def get_bible_chapter(self, version, book_name, chapter):
"""
Access and decode bibles via the Crosswalk website
``version``
The version of the Bible like niv for New International Version
``book_name``
            Text name of the book in English, e.g. 'gen' for Genesis
``chapter``
Chapter number
"""
log.debug('CWExtract.get_bible_chapter("%s", "%s", "%s")', version, book_name, chapter)
url_book_name = book_name.replace(' ', '-')
url_book_name = url_book_name.lower()
url_book_name = urllib.parse.quote(url_book_name.encode("utf-8"))
chapter_url = 'http://www.biblestudytools.com/%s/%s/%s.html' % (version, url_book_name, chapter)
soup = get_soup_for_bible_ref(chapter_url)
if not soup:
return None
self.application.process_events()
html_verses = soup.find_all('span', 'versetext')
if not html_verses:
log.error('No verses found in the CrossWalk response.')
send_error_message('parse')
return None
verses = {}
for verse in html_verses:
self.application.process_events()
verse_number = int(verse.contents[0].contents[0])
verse_text = ''
for part in verse.contents:
self.application.process_events()
if isinstance(part, NavigableString):
verse_text += part
elif part and part.attrMap and \
(part.attrMap['class'] == 'WordsOfChrist' or part.attrMap['class'] == 'strongs'):
for subpart in part.contents:
self.application.process_events()
if isinstance(subpart, NavigableString):
verse_text += subpart
elif subpart and subpart.attrMap and subpart.attrMap['class'] == 'strongs':
for subsub in subpart.contents:
self.application.process_events()
if isinstance(subsub, NavigableString):
verse_text += subsub
self.application.process_events()
# Fix up leading and trailing spaces, multiple spaces, and spaces between text and , and .
verse_text = verse_text.strip('\n\r\t ')
verse_text = REDUCE_SPACES_REGEX.sub(' ', verse_text)
verse_text = FIX_PUNKCTUATION_REGEX.sub(r'\1', verse_text)
verses[verse_number] = verse_text
return SearchResults(book_name, chapter, verses)
def get_books_from_http(self, version):
"""
        Load a list of all books a Bible contains from the Crosswalk website.
``version``
The version of the bible like NIV for New International Version
"""
log.debug('CWExtract.get_books_from_http("%s")', version)
chapter_url = 'http://www.biblestudytools.com/%s/' % (version)
soup = get_soup_for_bible_ref(chapter_url)
if not soup:
return None
content = soup.find('div', {'class': 'Body'})
content = content.find('ul', {'class': 'parent'})
if not content:
log.error('No books found in the Crosswalk response.')
send_error_message('parse')
return None
content = content.find_all('li')
books = []
for book in content:
book = book.find('a')
books.append(book.contents[0])
return books
def _get_application(self):
"""
        Adds the OpenLP application object to the class dynamically.
Windows needs to access the application in a dynamic manner.
"""
if os.name == 'nt':
return Registry().get('application')
else:
if not hasattr(self, '_application'):
self._application = Registry().get('application')
return self._application
application = property(_get_application)
class HTTPBible(BibleDB):
log.info('%s HTTPBible loaded', __name__)
def __init__(self, parent, **kwargs):
"""
Finds all the bibles defined for the system. Creates an Interface Object for each bible containing connection
information.
Throws Exception if no Bibles are found.
Init confirms the bible exists and stores the database path.
"""
BibleDB.__init__(self, parent, **kwargs)
self.download_source = kwargs['download_source']
self.download_name = kwargs['download_name']
# TODO: Clean up proxy stuff. We probably want one global proxy per connection type (HTTP and HTTPS) at most.
self.proxy_server = None
self.proxy_username = None
self.proxy_password = None
if 'path' in kwargs:
self.path = kwargs['path']
if 'proxy_server' in kwargs:
self.proxy_server = kwargs['proxy_server']
if 'proxy_username' in kwargs:
self.proxy_username = kwargs['proxy_username']
if 'proxy_password' in kwargs:
self.proxy_password = kwargs['proxy_password']
def do_import(self, bible_name=None):
"""
Run the import. This method overrides the parent class method. Returns ``True`` on success, ``False`` on
failure.
"""
self.wizard.progress_bar.setMaximum(68)
self.wizard.increment_progress_bar(translate('BiblesPlugin.HTTPBible', 'Registering Bible and loading books...'))
self.save_meta('download_source', self.download_source)
self.save_meta('download_name', self.download_name)
if self.proxy_server:
self.save_meta('proxy_server', self.proxy_server)
if self.proxy_username:
# Store the proxy userid.
self.save_meta('proxy_username', self.proxy_username)
if self.proxy_password:
# Store the proxy password.
self.save_meta('proxy_password', self.proxy_password)
if self.download_source.lower() == 'crosswalk':
handler = CWExtract(self.proxy_server)
elif self.download_source.lower() == 'biblegateway':
handler = BGExtract(self.proxy_server)
elif self.download_source.lower() == 'bibleserver':
handler = BSExtract(self.proxy_server)
books = handler.get_books_from_http(self.download_name)
if not books:
log.exception('Importing books from %s - download name: "%s" '\
'failed' % (self.download_source, self.download_name))
return False
self.wizard.progress_bar.setMaximum(len(books) + 2)
self.wizard.increment_progress_bar(translate( 'BiblesPlugin.HTTPBible', 'Registering Language...'))
bible = BiblesResourcesDB.get_webbible(self.download_name, self.download_source.lower())
if bible['language_id']:
language_id = bible['language_id']
self.save_meta('language_id', language_id)
else:
language_id = self.get_language(bible_name)
if not language_id:
log.exception('Importing books from %s failed' % self.filename)
return False
for book in books:
if self.stop_import_flag:
break
self.wizard.increment_progress_bar(translate(
'BiblesPlugin.HTTPBible', 'Importing %s...', 'Importing <book name>...') % book)
book_ref_id = self.get_book_ref_id_by_name(book, len(books), language_id)
if not book_ref_id:
log.exception('Importing books from %s - download name: "%s" '\
'failed' % (self.download_source, self.download_name))
return False
book_details = BiblesResourcesDB.get_book_by_id(book_ref_id)
log.debug('Book details: Name:%s; id:%s; testament_id:%s',
book, book_ref_id, book_details['testament_id'])
self.create_book(book, book_ref_id, book_details['testament_id'])
if self.stop_import_flag:
return False
else:
return True
def get_verses(self, reference_list, show_error=True):
"""
A reimplementation of the ``BibleDB.get_verses`` method, this one is specifically for web Bibles. It first
checks to see if the particular chapter exists in the DB, and if not it pulls it from the web. If the chapter
DOES exist, it simply pulls the verses from the DB using the ancestor method.
``reference_list``
This is the list of references the media manager item wants. It is a list of tuples, with the following
format::
(book_reference_id, chapter, start_verse, end_verse)
Therefore, when you are looking for multiple items, simply break them up into references like this, bundle
them into a list. This function then runs through the list, and returns an amalgamated list of ``Verse``
objects. For example::
[(u'35', 1, 1, 1), (u'35', 2, 2, 3)]
"""
log.debug('HTTPBible.get_verses("%s")', reference_list)
for reference in reference_list:
book_id = reference[0]
db_book = self.get_book_by_book_ref_id(book_id)
if not db_book:
if show_error:
critical_error_message_box(
translate('BiblesPlugin', 'No Book Found'),
translate('BiblesPlugin', 'No matching book could be found in this Bible. Check that you have '
'spelled the name of the book correctly.'))
return []
book = db_book.name
if BibleDB.get_verse_count(self, book_id, reference[1]) == 0:
self.application.set_busy_cursor()
search_results = self.get_chapter(book, reference[1])
if search_results and search_results.has_verse_list():
                    ## We have found a book of the Bible; let's check to see
                    ## if it was there. By reusing the returned book name
                    ## we get the correct book. For example it is possible
                    ## to request 'ac' and get 'Acts' back.
book_name = search_results.book
self.application.process_events()
# Check to see if book/chapter exists.
db_book = self.get_book(book_name)
self.create_chapter(db_book.id, search_results.chapter, search_results.verse_list)
self.application.process_events()
self.application.set_normal_cursor()
self.application.process_events()
return BibleDB.get_verses(self, reference_list, show_error)
def get_chapter(self, book, chapter):
"""
Receive the request and call the relevant handler methods.
"""
log.debug('HTTPBible.get_chapter("%s", "%s")', book, chapter)
log.debug('source = %s', self.download_source)
if self.download_source.lower() == 'crosswalk':
handler = CWExtract(self.proxy_server)
elif self.download_source.lower() == 'biblegateway':
handler = BGExtract(self.proxy_server)
elif self.download_source.lower() == 'bibleserver':
handler = BSExtract(self.proxy_server)
return handler.get_bible_chapter(self.download_name, book, chapter)
def get_books(self):
"""
Return the list of books.
"""
log.debug('HTTPBible.get_books("%s")', Book.name)
return self.get_all_objects(Book, order_by_ref=Book.id)
def get_chapter_count(self, book):
"""
Return the number of chapters in a particular book.
``book``
The book object to get the chapter count for.
"""
log.debug('HTTPBible.get_chapter_count("%s")', book.name)
return BiblesResourcesDB.get_chapter_count(book.book_reference_id)
def get_verse_count(self, book_id, chapter):
"""
Return the number of verses for the specified chapter and book.
``book``
The name of the book.
``chapter``
The chapter whose verses are being counted.
"""
log.debug('HTTPBible.get_verse_count("%s", %s)', book_id, chapter)
return BiblesResourcesDB.get_verse_count(book_id, chapter)
def _get_application(self):
"""
        Adds the OpenLP application object to the class dynamically.
Windows needs to access the application in a dynamic manner.
"""
if os.name == 'nt':
return Registry().get('application')
else:
if not hasattr(self, '_application'):
self._application = Registry().get('application')
return self._application
application = property(_get_application)
def get_soup_for_bible_ref(reference_url, header=None, pre_parse_regex=None, pre_parse_substitute=None):
"""
Gets a webpage and returns a parsed and optionally cleaned soup or None.
``reference_url``
The URL to obtain the soup from.
``header``
An optional HTTP header to pass to the bible web server.
``pre_parse_regex``
A regular expression to run on the webpage. Allows manipulation of the webpage before passing to BeautifulSoup
for parsing.
``pre_parse_substitute``
The text to replace any matches to the regular expression with.
"""
if not reference_url:
return None
page = get_web_page(reference_url, header, True)
if not page:
send_error_message('download')
return None
page_source = page.read()
if pre_parse_regex and pre_parse_substitute is not None:
page_source = re.sub(pre_parse_regex, pre_parse_substitute, page_source)
soup = None
try:
soup = BeautifulSoup(page_source)
CLEANER_REGEX.sub('', str(soup))
except HTMLParseError:
log.exception('BeautifulSoup could not parse the bible page.')
if not soup:
send_error_message('parse')
return None
Registry().get('application').process_events()
return soup
def send_error_message(error_type):
"""
Send a standard error message informing the user of an issue.
``error_type``
        The type of error that occurred for the issue.
"""
if error_type == 'download':
critical_error_message_box(
translate('BiblesPlugin.HTTPBible', 'Download Error'),
translate('BiblesPlugin.HTTPBible', 'There was a problem downloading your verse selection. Please check '
'your Internet connection, and if this error continues to occur please consider reporting a bug.'))
elif error_type == 'parse':
critical_error_message_box(
translate('BiblesPlugin.HTTPBible', 'Parse Error'),
translate('BiblesPlugin.HTTPBible', 'There was a problem extracting your verse selection. If this error '
'continues to occur please consider reporting a bug.'))
| marmyshev/item_title | openlp/plugins/bibles/lib/http.py | Python | gpl-2.0 | 31,053 | ["Brian"] | 25a839847d10e12c29f03e068cac25610caaee59c687b9443208cc9a177c1031 |
"""
Instructor Dashboard Views
"""
import logging
import datetime
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
import uuid
import pytz
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from edxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.http import Http404, HttpResponseServerError
from django.conf import settings
from util.json_request import JsonResponse
from mock import patch
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from openedx.core.lib.xblock_utils import wrap_xblock
from xmodule.html_module import HtmlDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.tabs import CourseTab
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from courseware.access import has_access
from courseware.courses import get_course_by_id, get_studio_url
from django_comment_client.utils import has_forum_access
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR
from student.models import CourseEnrollment
from shoppingcart.models import Coupon, PaidCourseRegistration, CourseRegCodeItem
from course_modes.models import CourseMode, CourseModesArchive
from student.roles import CourseFinanceAdminRole, CourseSalesAdminRole
from certificates.models import CertificateGenerationConfiguration
from certificates import api as certs_api
from util.date_utils import get_default_time_display
from class_dashboard.dashboard_data import get_section_display_name, get_array_section_has_problem
from .tools import get_units_with_due_date, title_or_url, bulk_email_is_enabled_for_course
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger(__name__)
class InstructorDashboardTab(CourseTab):
"""
Defines the Instructor Dashboard view type that is shown as a course tab.
"""
type = "instructor"
title = ugettext_noop('Instructor')
view_name = "instructor_dashboard"
is_dynamic = True # The "Instructor" tab is instead dynamically added when it is enabled
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument,redefined-outer-name
"""
Returns true if the specified user has staff access.
"""
return bool(user and has_access(user, 'staff', course, course.id))
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard_2(request, course_id):
""" Display the instructor dashboard for a course. """
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
log.error(u"Unable to find course with course key %s while loading the Instructor Dashboard.", course_id)
return HttpResponseServerError()
course = get_course_by_id(course_key, depth=0)
access = {
'admin': request.user.is_staff,
'instructor': bool(has_access(request.user, 'instructor', course)),
'finance_admin': CourseFinanceAdminRole(course_key).has_user(request.user),
'sales_admin': CourseSalesAdminRole(course_key).has_user(request.user),
'staff': bool(has_access(request.user, 'staff', course)),
'forum_admin': has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR),
}
if not access['staff']:
raise Http404()
is_white_label = CourseMode.is_white_label(course_key)
sections = [
_section_course_info(course, access),
_section_membership(course, access, is_white_label),
_section_cohort_management(course, access),
_section_student_admin(course, access),
_section_data_download(course, access),
]
analytics_dashboard_message = None
if settings.ANALYTICS_DASHBOARD_URL:
# Construct a URL to the external analytics dashboard
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link_start = "<a href=\"{}\" target=\"_blank\">".format(analytics_dashboard_url)
analytics_dashboard_message = _(
"To gain insights into student enrollment and participation {link_start}"
"visit {analytics_dashboard_name}, our new course analytics product{link_end}."
)
analytics_dashboard_message = analytics_dashboard_message.format(
link_start=link_start, link_end="</a>", analytics_dashboard_name=settings.ANALYTICS_DASHBOARD_NAME)
# Temporarily show the "Analytics" section until we have a better way of linking to Insights
sections.append(_section_analytics(course, access))
# Check if there is corresponding entry in the CourseMode Table related to the Instructor Dashboard course
course_mode_has_price = False
paid_modes = CourseMode.paid_modes_for_course(course_key)
if len(paid_modes) == 1:
course_mode_has_price = True
elif len(paid_modes) > 1:
log.error(
u"Course %s has %s course modes with payment options. Course must only have "
u"one paid course mode to enable eCommerce options.",
unicode(course_key), len(paid_modes)
)
if settings.FEATURES.get('INDIVIDUAL_DUE_DATES') and access['instructor']:
sections.insert(3, _section_extensions(course))
# Gate access to course email by feature flag & by course-specific authorization
if bulk_email_is_enabled_for_course(course_key):
sections.append(_section_send_email(course, access))
    # Gate access to the Metrics tab by feature flag and staff authorization
if settings.FEATURES['CLASS_DASHBOARD'] and access['staff']:
sections.append(_section_metrics(course, access))
# Gate access to Ecommerce tab
if course_mode_has_price and (access['finance_admin'] or access['sales_admin']):
sections.append(_section_e_commerce(course, access, paid_modes[0], is_white_label, is_white_label))
# Gate access to Proctoring tab
# only global staff (user.is_staff) is allowed to see this tab
can_see_proctoring = (
settings.FEATURES.get('ENABLE_PROCTORED_EXAMS', False) and
course.enable_proctored_exams and
request.user.is_staff
)
if can_see_proctoring:
sections.append(_section_proctoring(course, access))
# Certificates panel
# This is used to generate example certificates
# and enable self-generated certificates for a course.
certs_enabled = CertificateGenerationConfiguration.current().enabled
if certs_enabled and access['admin']:
sections.append(_section_certificates(course))
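    # Some dashboard buttons are disabled for large courses, where the underlying queries would be too expensive.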
disable_buttons = not _is_small_course(course_key)
context = {
'course': course,
'old_dashboard_url': reverse('instructor_dashboard_legacy', kwargs={'course_id': unicode(course_key)}),
'studio_url': get_studio_url(course, 'course'),
'sections': sections,
'disable_buttons': disable_buttons,
'analytics_dashboard_message': analytics_dashboard_message
}
return render_to_response('instructor/instructor_dashboard_2/instructor_dashboard_2.html', context)
## Section functions starting with _section return a dictionary of section data.
## The dictionary must include at least {
## 'section_key': 'circus_expo'
## 'section_display_name': 'Circus Expo'
## }
## section_key will be used as a css attribute, javascript tie-in, and template import filename.
## section_display_name will be used to generate link titles in the nav bar.
def _section_e_commerce(course, access, paid_mode, coupons_enabled, reports_enabled):
""" Provide data for the corresponding dashboard section """
course_key = course.id
coupons = Coupon.objects.filter(course_id=course_key).order_by('-is_active')
course_price = paid_mode.min_price
total_amount = None
if access['finance_admin']:
single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_key)
bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_key)
total_amount = single_purchase_total + bulk_purchase_total
section_data = {
'section_key': 'e-commerce',
'section_display_name': _('E-Commerce'),
'access': access,
'course_id': unicode(course_key),
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'ajax_remove_coupon_url': reverse('remove_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_get_coupon_info': reverse('get_coupon_info', kwargs={'course_id': unicode(course_key)}),
'get_user_invoice_preference_url': reverse('get_user_invoice_preference', kwargs={'course_id': unicode(course_key)}),
'sale_validation_url': reverse('sale_validation', kwargs={'course_id': unicode(course_key)}),
'ajax_update_coupon': reverse('update_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_add_coupon': reverse('add_coupon', kwargs={'course_id': unicode(course_key)}),
'get_sale_records_url': reverse('get_sale_records', kwargs={'course_id': unicode(course_key)}),
'get_sale_order_records_url': reverse('get_sale_order_records', kwargs={'course_id': unicode(course_key)}),
'instructor_url': reverse('instructor_dashboard', kwargs={'course_id': unicode(course_key)}),
'get_registration_code_csv_url': reverse('get_registration_codes', kwargs={'course_id': unicode(course_key)}),
'generate_registration_code_csv_url': reverse('generate_registration_codes', kwargs={'course_id': unicode(course_key)}),
'active_registration_code_csv_url': reverse('active_registration_codes', kwargs={'course_id': unicode(course_key)}),
'spent_registration_code_csv_url': reverse('spent_registration_codes', kwargs={'course_id': unicode(course_key)}),
'set_course_mode_url': reverse('set_course_mode_price', kwargs={'course_id': unicode(course_key)}),
'download_coupon_codes_url': reverse('get_coupon_codes', kwargs={'course_id': unicode(course_key)}),
'enrollment_report_url': reverse('get_enrollment_report', kwargs={'course_id': unicode(course_key)}),
'exec_summary_report_url': reverse('get_exec_summary_report', kwargs={'course_id': unicode(course_key)}),
'list_financial_report_downloads_url': reverse('list_financial_report_downloads',
kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'look_up_registration_code': reverse('look_up_registration_code', kwargs={'course_id': unicode(course_key)}),
'coupons': coupons,
'sales_admin': access['sales_admin'],
'coupons_enabled': coupons_enabled,
'reports_enabled': reports_enabled,
'course_price': course_price,
'total_amount': total_amount
}
return section_data
def _section_proctoring(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'proctoring',
'section_display_name': _('Proctoring'),
'access': access,
'course_id': unicode(course_key)
}
return section_data
def _section_certificates(course):
"""Section information for the certificates panel.
The certificates panel allows global staff to generate
example certificates and enable self-generated certificates
for a course.
Arguments:
course (Course)
Returns:
dict
"""
example_cert_status = None
html_cert_enabled = certs_api.has_html_certificates_enabled(course.id, course)
if html_cert_enabled:
can_enable_for_course = True
else:
example_cert_status = certs_api.example_certificates_status(course.id)
# Allow the user to enable self-generated certificates for students
# *only* once a set of example certificates has been successfully generated.
# If certificates have been misconfigured for the course (for example, if
# the PDF template hasn't been uploaded yet), then we don't want
# to turn on self-generated certificates for students!
can_enable_for_course = (
example_cert_status is not None and
all(
cert_status['status'] == 'success'
for cert_status in example_cert_status
)
)
instructor_generation_enabled = settings.FEATURES.get('CERTIFICATES_INSTRUCTOR_GENERATION', False)
return {
'section_key': 'certificates',
'section_display_name': _('Certificates'),
'example_certificate_status': example_cert_status,
'can_enable_for_course': can_enable_for_course,
'enabled_for_course': certs_api.cert_generation_enabled(course.id),
'instructor_generation_enabled': instructor_generation_enabled,
'html_cert_enabled': html_cert_enabled,
'urls': {
'generate_example_certificates': reverse(
'generate_example_certificates',
kwargs={'course_id': course.id}
),
'enable_certificate_generation': reverse(
'enable_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_generation': reverse(
'start_certificate_generation',
kwargs={'course_id': course.id}
),
'list_instructor_tasks_url': reverse(
'list_instructor_tasks',
kwargs={'course_id': course.id}
),
}
}
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_POST
@login_required
def set_course_mode_price(request, course_id):
"""
Set the new course price and add a new entry to the CourseModesArchive table.
"""
try:
course_price = int(request.POST['course_price'])
except ValueError:
return JsonResponse(
{'message': _("Please Enter the numeric value for the course price")},
status=400) # status code 400: Bad Request
currency = request.POST['currency']
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course_honor_mode = CourseMode.objects.filter(mode_slug='honor', course_id=course_key)
if not course_honor_mode:
return JsonResponse(
{'message': _("CourseMode with the mode slug({mode_slug}) DoesNotExist").format(mode_slug='honor')},
status=400) # status code 400: Bad Request
CourseModesArchive.objects.create(
course_id=course_id, mode_slug='honor', mode_display_name='Honor Code Certificate',
min_price=getattr(course_honor_mode[0], 'min_price'), currency=getattr(course_honor_mode[0], 'currency'),
expiration_datetime=datetime.datetime.now(pytz.utc), expiration_date=datetime.date.today()
)
course_honor_mode.update(
min_price=course_price,
currency=currency
)
return JsonResponse({'message': _("CourseMode price updated successfully")})
def _section_course_info(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'course_info',
'section_display_name': _('Course Info'),
'access': access,
'course_id': course_key,
'course_display_name': course.display_name,
'has_started': course.has_started(),
'has_ended': course.has_ended(),
'start_date': get_default_time_display(course.start),
'end_date': get_default_time_display(course.end) or _('No end date set'),
'num_sections': len(course.children),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
}
if settings.FEATURES.get('DISPLAY_ANALYTICS_ENROLLMENTS'):
section_data['enrollment_count'] = CourseEnrollment.objects.enrollment_counts(course_key)
if settings.ANALYTICS_DASHBOARD_URL:
dashboard_link = _get_dashboard_link(course_key)
message = _("Enrollment data is now available in {dashboard_link}.").format(dashboard_link=dashboard_link)
section_data['enrollment_message'] = message
if settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'):
section_data['detailed_gitlogs_url'] = reverse('gitlogs_detail', kwargs={'course_id': unicode(course_key)})
try:
sorted_cutoffs = sorted(course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True)
advance = lambda memo, (letter, score): "{}: {}, ".format(letter, score) + memo
section_data['grade_cutoffs'] = reduce(advance, sorted_cutoffs, "")[:-2]
except Exception: # pylint: disable=broad-except
section_data['grade_cutoffs'] = "Not Available"
# section_data['offline_grades'] = offline_grades_available(course_key)
try:
section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]
except Exception: # pylint: disable=broad-except
section_data['course_errors'] = [('Error fetching errors', '')]
return section_data
def _section_membership(course, access, is_white_label):
""" Provide data for the corresponding dashboard section """
course_key = course.id
ccx_enabled = settings.FEATURES.get('CUSTOM_COURSES_EDX', False) and course.enable_ccx
section_data = {
'section_key': 'membership',
'section_display_name': _('Membership'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'is_white_label': is_white_label,
'enroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'unenroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'upload_student_csv_button_url': reverse('register_and_enroll_students', kwargs={'course_id': unicode(course_key)}),
'modify_beta_testers_button_url': reverse('bulk_beta_modify_access', kwargs={'course_id': unicode(course_key)}),
'list_course_role_members_url': reverse('list_course_role_members', kwargs={'course_id': unicode(course_key)}),
'modify_access_url': reverse('modify_access', kwargs={'course_id': unicode(course_key)}),
'list_forum_members_url': reverse('list_forum_members', kwargs={'course_id': unicode(course_key)}),
'update_forum_role_membership_url': reverse('update_forum_role_membership', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_cohort_management(course, access):
""" Provide data for the corresponding cohort management section """
course_key = course.id
section_data = {
'section_key': 'cohort_management',
'section_display_name': _('Cohorts'),
'access': access,
'course_cohort_settings_url': reverse(
'course_cohort_settings',
kwargs={'course_key_string': unicode(course_key)}
),
'cohorts_url': reverse('cohorts', kwargs={'course_key_string': unicode(course_key)}),
'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs={'course_id': unicode(course_key)}),
'discussion_topics_url': reverse('cohort_discussion_topics', kwargs={'course_key_string': unicode(course_key)}),
}
return section_data
def _is_small_course(course_key):
""" Compares against MAX_ENROLLMENT_INSTR_BUTTONS to determine if course enrollment is considered small. """
is_small_course = False
enrollment_count = CourseEnrollment.objects.num_enrolled_in(course_key)
max_enrollment_for_buttons = settings.FEATURES.get("MAX_ENROLLMENT_INSTR_BUTTONS")
if max_enrollment_for_buttons is not None:
is_small_course = enrollment_count <= max_enrollment_for_buttons
return is_small_course
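# Hedged example (editorial addition): with FEATURES['MAX_ENROLLMENT_INSTR_BUTTONS']
# set to 200, a course with 150 enrollments is "small" (True) and the dashboard
# buttons stay enabled, while 201 enrollments returns False and the view above sets
# disable_buttons; if the setting is absent the helper always returns False.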
def _section_student_admin(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
is_small_course = _is_small_course(course_key)
section_data = {
'section_key': 'student_admin',
'section_display_name': _('Student Admin'),
'access': access,
'is_small_course': is_small_course,
'get_student_progress_url_url': reverse('get_student_progress_url', kwargs={'course_id': unicode(course_key)}),
'enrollment_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_url': reverse('reset_student_attempts', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_for_entrance_exam_url': reverse(
'reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'rescore_problem_url': reverse('rescore_problem', kwargs={'course_id': unicode(course_key)}),
'rescore_entrance_exam_url': reverse('rescore_entrance_exam', kwargs={'course_id': unicode(course_key)}),
'student_can_skip_entrance_exam_url': reverse(
'mark_student_can_skip_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_entrace_exam_instructor_tasks_url': reverse('list_entrance_exam_instructor_tasks',
kwargs={'course_id': unicode(course_key)}),
'spoc_gradebook_url': reverse('spoc_gradebook', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_extensions(course):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'extensions',
'section_display_name': _('Extensions'),
'units_with_due_dates': [(title_or_url(unit), unicode(unit.location))
for unit in get_units_with_due_date(course)],
'change_due_date_url': reverse('change_due_date', kwargs={'course_id': unicode(course.id)}),
'reset_due_date_url': reverse('reset_due_date', kwargs={'course_id': unicode(course.id)}),
'show_unit_extensions_url': reverse('show_unit_extensions', kwargs={'course_id': unicode(course.id)}),
'show_student_extensions_url': reverse('show_student_extensions', kwargs={'course_id': unicode(course.id)}),
}
return section_data
def _section_data_download(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
show_proctored_report_button = (
settings.FEATURES.get('ENABLE_PROCTORED_EXAMS', False) and
course.enable_proctored_exams
)
section_data = {
'section_key': 'data_download',
'section_display_name': _('Data Download'),
'access': access,
'show_generate_proctored_exam_report_button': show_proctored_report_button,
'get_problem_responses_url': reverse('get_problem_responses', kwargs={'course_id': unicode(course_key)}),
'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': unicode(course_key)}),
'get_students_features_url': reverse('get_students_features', kwargs={'course_id': unicode(course_key)}),
'get_students_who_may_enroll_url': reverse(
'get_students_who_may_enroll', kwargs={'course_id': unicode(course_key)}
),
'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': unicode(course_key)}),
'list_proctored_results_url': reverse('get_proctored_exam_results', kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': unicode(course_key)}),
'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': unicode(course_key)}),
'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def null_applicable_aside_types(block): # pylint: disable=unused-argument
"""
get_aside method for monkey-patching into applicable_aside_types
while rendering an HtmlDescriptor for email text editing. This returns
an empty list.
"""
return []
def _section_send_email(course, access):
""" Provide data for the corresponding bulk email section """
course_key = course.id
# Monkey-patch applicable_aside_types to return no asides for the duration of this render
with patch.object(course.runtime, 'applicable_aside_types', null_applicable_aside_types):
# This HtmlDescriptor is only being used to generate a nice text editor.
html_module = HtmlDescriptor(
course.system,
DictFieldData({'data': ''}),
ScopeIds(None, None, None, course_key.make_usage_key('html', 'fake'))
)
fragment = course.system.render(html_module, 'studio_view')
fragment = wrap_xblock(
'LmsRuntime', html_module, 'studio_view', fragment, None,
extra_data={"course-id": unicode(course_key)},
usage_id_serializer=lambda usage_id: quote_slashes(unicode(usage_id)),
# Generate a new request_token here at random, because this module isn't connected to any other
# xblock rendering.
request_token=uuid.uuid1().get_hex()
)
email_editor = fragment.content
section_data = {
'section_key': 'send_email',
'section_display_name': _('Email'),
'access': access,
'send_email': reverse('send_email', kwargs={'course_id': unicode(course_key)}),
'editor': email_editor,
'list_instructor_tasks_url': reverse(
'list_instructor_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_background_tasks_url': reverse(
'list_background_email_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_content_history_url': reverse(
'list_email_content', kwargs={'course_id': unicode(course_key)}
),
}
return section_data
def _get_dashboard_link(course_key):
""" Construct a URL to the external analytics dashboard """
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link = u"<a href=\"{0}\" target=\"_blank\">{1}</a>".format(analytics_dashboard_url,
settings.ANALYTICS_DASHBOARD_NAME)
return link
def _section_analytics(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link_start = "<a href=\"{}\" target=\"_blank\">".format(analytics_dashboard_url)
insights_message = _("For analytics about your course, go to {analytics_dashboard_name}.")
insights_message = insights_message.format(
analytics_dashboard_name='{0}{1}</a>'.format(link_start, settings.ANALYTICS_DASHBOARD_NAME)
)
section_data = {
'section_key': 'instructor_analytics',
'section_display_name': _('Analytics'),
'access': access,
'insights_message': insights_message,
}
return section_data
def _section_metrics(course, access):
"""Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'metrics',
'section_display_name': _('Metrics'),
'access': access,
'course_id': unicode(course_key),
'sub_section_display_name': get_section_display_name(course_key),
'section_has_problem': get_array_section_has_problem(course_key),
'get_students_opened_subsection_url': reverse('get_students_opened_subsection'),
'get_students_problem_grades_url': reverse('get_students_problem_grades'),
'post_metrics_data_csv_url': reverse('post_metrics_data_csv'),
}
return section_data
|
don-github/edx-platform
|
lms/djangoapps/instructor/views/instructor_dashboard.py
|
Python
|
agpl-3.0
| 28,494
|
[
"VisIt"
] |
bc40e046ee6b669b636cf745aaafbcf0e332da57be33ec68850f731f94a9d9d9
|
# Orca
#
# Copyright 2008 Sun Microsystems Inc.
# Copyright 2012 Igalia, S.L.
#
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Output logger for regression testing."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2012 Igalia, S.L."
__license__ = "LGPL"
import io
import logging
class Logger:
def __init__(self):
self._logs = {}
def getLogNames(self):
return self._logs.keys()
def newLog(self, name, level=logging.INFO):
log = logging.getLogger(name)
log.setLevel(level)
handler = logging.StreamHandler(io.StringIO())
handler.setFormatter(logging.Formatter('%(message)s'))
log.addHandler(handler)
self._logs[name] = handler.stream
return log
def clearLog(self, name):
stream = self._logs.get(name)
if stream:
stream.truncate(0)
stream.seek(0)
def getLogContent(self, name):
stream = self._logs.get(name)
if stream:
return stream.getvalue()
return ""
def shutdown(self):
for name in self._logs.keys():
stream = self._logs.get(name)
stream.close()
_logger = Logger()
def getLogger():
return _logger
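# Hedged usage sketch (editorial addition, not part of the original Orca
# module): the round trip supported by Logger is to create a named log,
# write to it, read the captured text back, then clear or shut it down.
# The log name "example" is a hypothetical placeholder.
def _example_logger_usage():
    logger = getLogger()
    log = logger.newLog("example")
    log.info("first line")
    captured = logger.getLogContent("example")   # -> "first line\n"
    logger.clearLog("example")
    return captured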
|
ruibarreira/linuxtrail
|
usr/lib/python3/dist-packages/orca/logger.py
|
Python
|
gpl-3.0
| 2,014
|
[
"ORCA"
] |
8cd56add5b06c951b2a0d94f5ed731fb2ffdd0611d9274024e10dc586a50323e
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import math
import numpy
from . import _ni_support
from . import _nd_image
from scipy.misc import doccer
from scipy._lib._version import NumpyVersion
__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
'prewitt', 'sobel', 'generic_laplace', 'laplace',
'gaussian_laplace', 'generic_gradient_magnitude',
'gaussian_gradient_magnitude', 'correlate', 'convolve',
'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
'maximum_filter1d', 'minimum_filter', 'maximum_filter',
'rank_filter', 'median_filter', 'percentile_filter',
'generic_filter1d', 'generic_filter']
_input_doc = \
"""input : array_like
Input array to filter."""
_axis_doc = \
"""axis : int, optional
The axis of `input` along which to calculate. Default is -1."""
_output_doc = \
"""output : array, optional
The `output` parameter passes an array in which to store the
filter output."""
_size_foot_doc = \
"""size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either `size` or `footprint` must be defined. `size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust `size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `size` is 2, then the actual size used is
(2,2,2).
"""
_mode_doc = \
"""mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'"""
_cval_doc = \
"""cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0"""
_origin_doc = \
"""origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0.0."""
_extra_arguments_doc = \
"""extra_arguments : sequence, optional
Sequence of extra positional arguments to pass to passed function"""
_extra_keywords_doc = \
"""extra_keywords : dict, optional
dict of extra keyword arguments to pass to passed function"""
docdict = {
'input': _input_doc,
'axis': _axis_doc,
'output': _output_doc,
'size_foot': _size_foot_doc,
'mode': _mode_doc,
'cval': _cval_doc,
'origin': _origin_doc,
'extra_arguments': _extra_arguments_doc,
'extra_keywords': _extra_keywords_doc,
}
docfiller = doccer.filldoc(docdict)
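# Editorial sketch (hedged; not part of the original scipy source): the
# ``docfiller`` decorator created above substitutes the ``%(name)s``
# placeholders in each decorated function's docstring with the entries of
# ``docdict``.  Ignoring doccer's indentation handling, a minimal equivalent
# of that mechanism is:
def _docfiller_sketch(func):
    if func.__doc__:
        func.__doc__ = func.__doc__ % docdict
    return func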
@docfiller
def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a one-dimensional correlation along the given axis.
The lines of the array along the given axis are correlated with the
given weights.
Parameters
----------
%(input)s
weights : array
One-dimensional sequence of numbers.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
weights = numpy.asarray(weights, dtype=numpy.float64)
if weights.ndim != 1 or weights.shape[0] < 1:
raise RuntimeError('no filter weights given')
if not weights.flags.contiguous:
weights = weights.copy()
axis = _ni_support._check_axis(axis, input.ndim)
if (len(weights) // 2 + origin < 0) or (len(weights) // 2 +
origin > len(weights)):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate1d(input, weights, axis, output, mode, cval,
origin)
return return_value
@docfiller
def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a one-dimensional convolution along the given axis.
The lines of the array along the given axis are convolved with the
given weights.
Parameters
----------
%(input)s
weights : ndarray
One-dimensional sequence of numbers.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
convolve1d : ndarray
Convolved array with same shape as input
"""
weights = weights[::-1]
origin = -origin
if not len(weights) & 1:
origin -= 1
return correlate1d(input, weights, axis, output, mode, cval, origin)
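# Hedged illustration (editorial addition, not in the original module): as the
# body above shows, 1-D convolution is just correlation with the weights
# reversed (and the origin mirrored, with an extra shift for even-length
# kernels).  For a symmetric odd-length kernel the two therefore agree:
def _convolve_matches_correlate_sketch():
    data = [1.0, 2.0, 4.0, 8.0]
    kernel = [1.0, 2.0, 1.0]            # symmetric, odd length
    return numpy.allclose(convolve1d(data, kernel),
                          correlate1d(data, kernel))   # expected: True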
@docfiller
def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""One-dimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar
standard deviation for Gaussian kernel
%(axis)s
order : {0, 1, 2, 3}, optional
An order of 0 corresponds to convolution with a Gaussian
kernel. An order of 1, 2, or 3 corresponds to convolution with
the first, second or third derivatives of a Gaussian. Higher
order derivatives are not implemented
%(output)s
%(mode)s
%(cval)s
truncate : float, optional
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter1d : ndarray
"""
if order not in range(4):
raise ValueError('Order outside 0..3 not implemented')
sd = float(sigma)
# make the radius of the filter equal to truncate standard deviations
lw = int(truncate * sd + 0.5)
weights = [0.0] * (2 * lw + 1)
weights[lw] = 1.0
sum = 1.0
sd = sd * sd
# calculate the kernel:
for ii in range(1, lw + 1):
tmp = math.exp(-0.5 * float(ii * ii) / sd)
weights[lw + ii] = tmp
weights[lw - ii] = tmp
sum += 2.0 * tmp
for ii in range(2 * lw + 1):
weights[ii] /= sum
# implement first, second and third order derivatives:
if order == 1: # first derivative
weights[lw] = 0.0
for ii in range(1, lw + 1):
x = float(ii)
tmp = -x / sd * weights[lw + ii]
weights[lw + ii] = -tmp
weights[lw - ii] = tmp
elif order == 2: # second derivative
weights[lw] *= -1.0 / sd
for ii in range(1, lw + 1):
x = float(ii)
tmp = (x * x / sd - 1.0) * weights[lw + ii] / sd
weights[lw + ii] = tmp
weights[lw - ii] = tmp
elif order == 3: # third derivative
weights[lw] = 0.0
sd2 = sd * sd
for ii in range(1, lw + 1):
x = float(ii)
tmp = (3.0 - x * x / sd) * x * weights[lw + ii] / sd2
weights[lw + ii] = -tmp
weights[lw - ii] = tmp
return correlate1d(input, weights, axis, output, mode, cval, 0)
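# Hedged illustration (editorial addition, not in the original module): the
# order-0 kernel built above is a truncated, normalised Gaussian.  This
# standalone helper reproduces the same weights and shows that they sum to 1.
def _gaussian_kernel_sketch(sigma=1.0, truncate=4.0):
    lw = int(truncate * sigma + 0.5)                      # filter radius, as above
    weights = [math.exp(-0.5 * (ii / sigma) ** 2) for ii in range(-lw, lw + 1)]
    total = sum(weights)
    return [w / total for w in weights]                   # normalised kernel, sums to ~1.0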
@docfiller
def gaussian_filter(input, sigma, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""Multidimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
Standard deviation for Gaussian kernel. The standard
deviations of the Gaussian filter are given for each axis as a
sequence, or as a single number, in which case it is equal for
all axes.
order : {0, 1, 2, 3} or sequence from same set, optional
The order of the filter along each axis is given as a sequence
of integers, or as a single number. An order of 0 corresponds
to convolution with a Gaussian kernel. An order of 1, 2, or 3
corresponds to convolution with the first, second or third
derivatives of a Gaussian. Higher order derivatives are not
implemented
%(output)s
%(mode)s
%(cval)s
truncate : float
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
The multidimensional filter is implemented as a sequence of
one-dimensional convolution filters. The intermediate arrays are
stored in the same data type as the output. Therefore, for output
types with a limited precision, the results may be imprecise
because intermediate results may be stored with insufficient
precision.
"""
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
orders = _ni_support._normalize_sequence(order, input.ndim)
if not set(orders).issubset(set(range(4))):
raise ValueError('Order outside 0..3 not implemented')
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sigmas[ii], orders[ii])
for ii in range(len(axes)) if sigmas[ii] > 1e-15]
if len(axes) > 0:
for axis, sigma, order in axes:
gaussian_filter1d(input, sigma, axis, order, output,
mode, cval, truncate)
input = output
else:
output[...] = input[...]
return return_value
@docfiller
def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Prewitt filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode)s
%(cval)s
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output, return_value = _ni_support._get_output(output, input)
correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 1, 1], ii, output, mode, cval, 0,)
return return_value
@docfiller
def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Sobel filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode)s
%(cval)s
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output, return_value = _ni_support._get_output(output, input)
correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 2, 1], ii, output, mode, cval, 0)
return return_value
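# Hedged illustration (editorial addition, not in the original module): the
# Sobel filter above is separable -- a derivative pass [-1, 0, 1] along the
# chosen axis and a smoothing pass [1, 2, 1] along every other axis -- so in
# two dimensions its effective correlation kernel is simply the outer product:
def _sobel_kernel_2d_sketch():
    smooth = numpy.array([1, 2, 1])
    deriv = numpy.array([-1, 0, 1])
    return numpy.outer(smooth, deriv)    # the classic 3x3 Sobel kernel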
@docfiller
def generic_laplace(input, derivative2, output=None, mode="reflect",
cval=0.0,
extra_arguments=(),
extra_keywords = None):
"""N-dimensional Laplace filter using a provided second derivative function
Parameters
----------
%(input)s
derivative2 : callable
Callable with the following signature::
derivative2(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
%(output)s
%(mode)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
derivative2(input, axes[0], output, mode, cval,
*extra_arguments, **extra_keywords)
for ii in range(1, len(axes)):
tmp = derivative2(input, axes[ii], output.dtype, mode, cval,
*extra_arguments, **extra_keywords)
output += tmp
else:
output[...] = input[...]
return return_value
@docfiller
def laplace(input, output=None, mode="reflect", cval=0.0):
"""N-dimensional Laplace filter based on approximate second derivatives.
Parameters
----------
%(input)s
%(output)s
%(mode)s
%(cval)s
"""
def derivative2(input, axis, output, mode, cval):
return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
return generic_laplace(input, derivative2, output, mode, cval)
@docfiller
def gaussian_laplace(input, sigma, output=None, mode="reflect",
cval=0.0, **kwargs):
"""Multidimensional Laplace filter using gaussian second derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
it is equal for all axes.
%(output)s
%(mode)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
"""
input = numpy.asarray(input)
def derivative2(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 2
return gaussian_filter(input, sigma, order, output, mode, cval,
**kwargs)
return generic_laplace(input, derivative2, output, mode, cval,
extra_arguments=(sigma,),
extra_keywords=kwargs)
@docfiller
def generic_gradient_magnitude(input, derivative, output=None,
mode="reflect", cval=0.0,
extra_arguments=(), extra_keywords = None):
"""Gradient magnitude using a provided gradient function.
Parameters
----------
%(input)s
derivative : callable
Callable with the following signature::
derivative(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
`derivative` can assume that `input` and `output` are ndarrays.
Note that the output from `derivative` is modified inplace;
be careful to copy important inputs before returning them.
%(output)s
%(mode)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
derivative(input, axes[0], output, mode, cval,
*extra_arguments, **extra_keywords)
numpy.multiply(output, output, output)
for ii in range(1, len(axes)):
tmp = derivative(input, axes[ii], output.dtype, mode, cval,
*extra_arguments, **extra_keywords)
numpy.multiply(tmp, tmp, tmp)
output += tmp
# This allows the sqrt to work with a different default casting
numpy.sqrt(output, output, casting='unsafe')
else:
output[...] = input[...]
return return_value
@docfiller
def gaussian_gradient_magnitude(input, sigma, output=None,
mode="reflect", cval=0.0, **kwargs):
"""Multidimensional gradient magnitude using Gaussian derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
it is equal for all axes.
%(output)s
%(mode)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
"""
input = numpy.asarray(input)
def derivative(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 1
return gaussian_filter(input, sigma, order, output, mode,
cval, **kwargs)
return generic_gradient_magnitude(input, derivative, output, mode,
cval, extra_arguments=(sigma,),
extra_keywords=kwargs)
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
convolution):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
weights = numpy.asarray(weights, dtype=numpy.float64)
wshape = [ii for ii in weights.shape if ii > 0]
if len(wshape) != input.ndim:
raise RuntimeError('filter weights array has incorrect shape.')
if convolution:
weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
for ii in range(len(origins)):
origins[ii] = -origins[ii]
if not weights.shape[ii] & 1:
origins[ii] -= 1
for origin, lenw in zip(origins, wshape):
if (lenw // 2 + origin < 0) or (lenw // 2 + origin > lenw):
raise ValueError('invalid origin')
if not weights.flags.contiguous:
weights = weights.copy()
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate(input, weights, output, mode, cval, origins)
return return_value
@docfiller
def correlate(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multi-dimensional correlation.
The array is correlated with the given kernel.
Parameters
----------
input : array-like
input array to filter
weights : ndarray
array of weights, same number of dimensions as input
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
origin : scalar, optional
The ``origin`` parameter controls the placement of the filter.
Default 0
See Also
--------
convolve : Convolve an image with a kernel.
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, False)
@docfiller
def convolve(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multidimensional convolution.
The array is convolved with the given kernel.
Parameters
----------
input : array_like
Input array to filter.
weights : array_like
Array of weights, same number of dimensions as input
output : ndarray, optional
The `output` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
the `mode` parameter determines how the array borders are
handled. For 'constant' mode, values beyond borders are set to be
`cval`. Default is 'reflect'.
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0
origin : array_like, optional
The `origin` parameter controls the placement of the filter,
relative to the centre of the current element of the input.
Default of 0 is equivalent to ``(0,)*input.ndim``.
Returns
-------
result : ndarray
The result of convolution of `input` with `weights`.
See Also
--------
correlate : Correlate an image with a kernel.
Notes
-----
Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where
W is the `weights` kernel,
j is the n-D spatial index over :math:`W`,
I is the `input` and k is the coordinate of the center of
W, specified by `origin` in the input parameters.
Examples
--------
Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
because in this case borders (i.e. where the `weights` kernel, centered
on any one value, extends beyond an edge of `input`) are treated as zeros.
>>> a = np.array([[1, 2, 0, 0],
... [5, 3, 0, 4],
... [0, 0, 0, 7],
... [9, 3, 0, 0]])
>>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
>>> from scipy import ndimage
>>> ndimage.convolve(a, k, mode='constant', cval=0.0)
array([[11, 10, 7, 4],
[10, 3, 11, 11],
[15, 12, 14, 7],
[12, 3, 7, 0]])
Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
with 1.0's (and then extracting only the original region of the result).
>>> ndimage.convolve(a, k, mode='constant', cval=1.0)
array([[13, 11, 8, 7],
[11, 3, 11, 14],
[16, 12, 14, 10],
[15, 6, 10, 5]])
With ``mode='reflect'`` (the default), outer values are reflected at the
edge of `input` to fill in missing values.
>>> b = np.array([[2, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0,1,0], [0,1,0], [0,1,0]])
>>> ndimage.convolve(b, k, mode='reflect')
array([[5, 0, 0],
[3, 0, 0],
[1, 0, 0]])
This includes diagonally at the corners.
>>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
>>> ndimage.convolve(b, k)
array([[4, 2, 0],
[3, 2, 0],
[1, 1, 0]])
With ``mode='nearest'``, the single value nearest to each edge of
`input` is repeated as many times as needed to match the overlapping
`weights`.
>>> c = np.array([[2, 0, 1],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0]])
>>> ndimage.convolve(c, k, mode='nearest')
array([[7, 0, 3],
[5, 0, 2],
[3, 0, 1]])
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, True)
@docfiller
def uniform_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional uniform filter along the given axis.
The lines of the array along the given axis are filtered with a
uniform filter of given size.
Parameters
----------
%(input)s
size : int
length of uniform filter
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
origin)
return return_value
@docfiller
def uniform_filter(input, size=3, output=None, mode="reflect",
cval=0.0, origin=0):
"""Multi-dimensional uniform filter.
Parameters
----------
%(input)s
size : int or sequence of ints, optional
The sizes of the uniform filter are given for each axis as a
sequence, or as a single number, in which case the size is
equal for all axes.
%(output)s
%(mode)s
%(cval)s
%(origin)s
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional uniform filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
sizes = _ni_support._normalize_sequence(size, input.ndim)
origins = _ni_support._normalize_sequence(origin, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if len(axes) > 0:
for axis, size, origin in axes:
uniform_filter1d(input, int(size), axis, output, mode,
cval, origin)
input = output
else:
output[...] = input[...]
return return_value
@docfiller
def minimum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional minimum filter along the given axis.
The lines of the array along the given axis are filtered with a
minimum filter of given size.
Parameters
----------
%(input)s
size : int
length along which to calculate 1D minimum
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Notes
-----
This function implements the MINLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 1)
return return_value
@docfiller
def maximum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional maximum filter along the given axis.
The lines of the array along the given axis are filtered with a
maximum filter of given size.
Parameters
----------
%(input)s
size : int
Length along which to calculate the 1-D maximum.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
maximum1d : ndarray, None
Maximum-filtered array with same shape as input.
None if `output` is not None
Notes
-----
This function implements the MAXLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 0)
return return_value
def _min_or_max_filter(input, size, footprint, structure, output, mode,
cval, origin, minimum):
if structure is None:
if footprint is None:
if size is None:
raise RuntimeError("no footprint provided")
separable = True
else:
footprint = numpy.asarray(footprint)
footprint = footprint.astype(bool)
if numpy.alltrue(numpy.ravel(footprint), axis=0):
size = footprint.shape
footprint = None
separable = True
else:
separable = False
else:
structure = numpy.asarray(structure, dtype=numpy.float64)
separable = False
if footprint is None:
footprint = numpy.ones(structure.shape, bool)
else:
footprint = numpy.asarray(footprint)
footprint = footprint.astype(bool)
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
origins = _ni_support._normalize_sequence(origin, input.ndim)
if separable:
sizes = _ni_support._normalize_sequence(size, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if minimum:
filter_ = minimum_filter1d
else:
filter_ = maximum_filter1d
if len(axes) > 0:
for axis, size, origin in axes:
filter_(input, int(size), axis, output, mode, cval, origin)
input = output
else:
output[...] = input[...]
else:
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
if structure is not None:
if len(structure.shape) != input.ndim:
raise RuntimeError('structure array has incorrect shape')
if not structure.flags.contiguous:
structure = structure.copy()
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter(input, footprint, structure, output,
mode, cval, origins, minimum)
return return_value
@docfiller
def minimum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculates a multi-dimensional minimum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 1)
@docfiller
def maximum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculates a multi-dimensional maximum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 0)
@docfiller
def _rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0, operation='rank'):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
filter_size = numpy.where(footprint, 1, 0).sum()
if operation == 'median':
rank = filter_size // 2
elif operation == 'percentile':
percentile = rank
if percentile < 0.0:
percentile += 100.0
if percentile < 0 or percentile > 100:
raise RuntimeError('invalid percentile')
if percentile == 100.0:
rank = filter_size - 1
else:
rank = int(float(filter_size) * percentile / 100.0)
if rank < 0:
rank += filter_size
if rank < 0 or rank >= filter_size:
raise RuntimeError('rank not within filter footprint size')
if rank == 0:
return minimum_filter(input, None, footprint, output, mode, cval,
origins)
elif rank == filter_size - 1:
return maximum_filter(input, None, footprint, output, mode, cval,
origins)
else:
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.rank_filter(input, rank, footprint, output, mode, cval,
origins)
return return_value
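# Hedged worked example (editorial addition, not in the original module): for a
# full 3x3 footprint there are 9 values in each window, so the helper above maps
#   operation='median'  -> rank = 9 // 2 = 4            (the 5th smallest value)
#   percentile = 25     -> rank = int(9 * 25 / 100) = 2
#   percentile = 100    -> rank = 9 - 1 = 8             (the maximum)
# and the extreme ranks 0 and 8 are dispatched to minimum_filter / maximum_filter.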
@docfiller
def rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculates a multi-dimensional rank filter.
Parameters
----------
%(input)s
rank : int
The rank parameter may be less than zero, i.e., rank = -1
indicates the largest element.
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _rank_filter(input, rank, size, footprint, output, mode, cval,
origin, 'rank')
@docfiller
def median_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""
Calculates a multidimensional median filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
median_filter : ndarray
Filtered array with the same shape as `input`.
"""
return _rank_filter(input, 0, size, footprint, output, mode, cval,
origin, 'median')
@docfiller
def percentile_filter(input, percentile, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""Calculates a multi-dimensional percentile filter.
Parameters
----------
%(input)s
percentile : scalar
The percentile parameter may be less than zero, i.e.,
percentile = -20 equals percentile = 80
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _rank_filter(input, percentile, size, footprint, output, mode,
cval, origin, 'percentile')
@docfiller
def generic_filter1d(input, function, filter_size, axis=-1,
output=None, mode="reflect", cval=0.0, origin=0,
extra_arguments=(), extra_keywords = None):
"""Calculate a one-dimensional filter along the given axis.
`generic_filter1d` iterates over the lines of the array, calling the
given function at each line. The function receives two arguments: the
input line and the output line. The input and output lines are 1D
double arrays. The input line is extended appropriately according
to the filter size and origin. The output line must be modified
in-place with the result.
Parameters
----------
%(input)s
function : callable
Function to apply along given axis.
filter_size : scalar
Length of the filter.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if filter_size < 1:
raise RuntimeError('invalid filter size')
axis = _ni_support._check_axis(axis, input.ndim)
if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >=
filter_size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter1d(input, function, filter_size, axis, output,
mode, cval, origin, extra_arguments, extra_keywords)
return return_value
@docfiller
def generic_filter(input, function, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0,
extra_arguments=(), extra_keywords = None):
"""Calculates a multi-dimensional filter using the given function.
At each element the provided function is called. The input values
within the filter footprint at that element are passed to the function
as a 1D array of double values.
Parameters
----------
%(input)s
function : callable
Function to apply at each element.
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint)
footprint = footprint.astype(bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter(input, function, footprint, output, mode,
cval, origins, extra_arguments, extra_keywords)
return return_value
|
chatcannon/scipy
|
scipy/ndimage/filters.py
|
Python
|
bsd-3-clause
| 40,512
|
[
"Gaussian"
] |
7cae5bb21baaea3118ce79fcd9accde07d716c3291695abc064e4c269fa2f757
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyCrossmap(PythonPackage, SourceforgePackage):
"""CrossMap is a program for convenient conversion of genome coordinates
(or annotation files) between different assemblies"""
homepage = "http://crossmap.sourceforge.net/"
sourceforge_mirror_path = "crossmap/CrossMap-0.3.3.tar.gz"
version('0.3.9', sha256='e20a4653e9fc313ac0f5a6cfc37b42e83c3cf2b42f9483706cfb9ec9ff72c74c')
version('0.3.3', sha256='56d99fd606e13e399b83438953d0d89fc281df1c1e8e47eed7d773e7ec9c88f8')
version('0.2.9', sha256='57243ee5051352c93088874c797ceac0426f249704ba897360fb628b3365d0af', deprecated=True)
depends_on('python@3:', type=('build', 'run'), when='@0.3.0:')
depends_on('python@2.7:2.8', type=('build', 'run'), when='@:0.2.9')
depends_on('py-setuptools', type='build')
depends_on('py-cython@0.17:', type=('build', 'run'))
depends_on('py-pysam', type=('build', 'run'))
depends_on('py-bx-python', type=('build', 'run'))
depends_on('py-pybigwig', type=('build', 'run'), when='@0.3.0:')
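# Hedged usage note (editorial addition, not part of the original package): with
# the constraints above, installing py-crossmap@0.3.9 resolves python@3: and
# py-pybigwig, while the deprecated 0.2.9 release instead pins python@2.7:2.8
# and drops the py-pybigwig dependency.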
|
LLNL/spack
|
var/spack/repos/builtin/packages/py-crossmap/package.py
|
Python
|
lgpl-2.1
| 1,249
|
[
"pysam"
] |
f05e58daa96f24fcfc47918a8562039b92078124fd2fc423f8d94120f6672100
|
from collections import defaultdict, Counter
from dark.utils import median
from dark.filter import ReadSetFilter
from dark.intervals import ReadIntervals
def titleCounts(readsAlignments):
"""
Count the number of times each title in a readsAlignments instance is
matched. This is useful for rapidly discovering what titles were matched
and with what frequency.
@param readsAlignments: A L{dark.alignments.ReadsAlignments} instance.
@return: A C{dict} whose keys are titles and whose values are the integer
counts of the number of reads that matched that title.
"""
titles = defaultdict(int)
for readAlignments in readsAlignments:
for alignment in readAlignments:
titles[alignment.subjectTitle] += 1
return titles
class TitleAlignment(object):
"""
Hold information about a read's HSPs for a title alignment.
@param read: The C{Read} that aligned.
@param hsps: A C{list} of L{dark.hsp.HSP} (or subclass) instances.
"""
def __init__(self, read, hsps):
self.read = read
self.hsps = hsps
class TitleAlignments(list):
"""
Holds information about a list of alignments against a sequence.
@param subjectTitle: The C{str} title of the sequence the read matched
against.
@param subjectLength: The C{int} length of the sequence the read matched
against.
"""
def __init__(self, subjectTitle, subjectLength):
# TODO: Do we need the title in here?
self.subjectTitle = subjectTitle
self.subjectLength = subjectLength
def addAlignment(self, alignment):
"""
Add an alignment to the list of alignments that matched this title.
@param alignment: A L{TitleAlignment} instance.
"""
self.append(alignment)
def reads(self):
"""
Find the set of reads matching this title.
@return: A generator that yields C{dark.reads.Read} instances (or one
of its subclasses).
"""
return (alignment.read for alignment in self)
def readCount(self):
"""
Find out how many reads aligned to this title.
@return: The C{int} number of reads that aligned to this title.
"""
return len(self)
def hspCount(self):
"""
How many HSPs were there in total for all the alignments to a title.
@return: The C{int} number of HSPs for the alignments to this title.
"""
return sum(len(alignment.hsps) for alignment in self)
def readIds(self):
"""
Find the set of read ids that matched the title.
@return: A C{set} of read ids that aligned to this title.
"""
return set(alignment.read.id for alignment in self)
def hsps(self):
"""
Get all HSPs for the alignments to a title.
@return: A generator yielding L{dark.hsp.HSP} instances.
"""
return (hsp for alignment in self for hsp in alignment.hsps)
def bestHsp(self):
"""
Find the HSP with the best score.
@raise ValueError: If there are no HSPs.
@return: The C{dark.hsp.HSP} instance (or a subclass) with the best
score.
"""
return max(hsp for hsp in self.hsps())
def worstHsp(self):
"""
Find the HSP with the worst score.
@raise ValueError: If there are no HSPs.
@return: The C{dark.hsp.HSP} instance (or a subclass) with the worst
score.
"""
return min(hsp for hsp in self.hsps())
def hasScoreBetterThan(self, score):
"""
Is there an HSP with a score better than a given value?
@return: A C{bool}, C{True} if there is at least one HSP in the
alignments for this title with a score better than C{score}.
"""
# Note: Do not assume that HSPs in an alignment are sorted in
# decreasing order (as they are in BLAST output). If we could
# assume that, we could just check the first HSP in each alignment.
for hsp in self.hsps():
if hsp.betterThan(score):
return True
return False
def medianScore(self):
"""
Find the median score for the HSPs in the alignments that match
this title.
@raise ValueError: If there are no HSPs.
@return: The C{float} median score of HSPs in alignments matching the
title.
"""
return median([hsp.score.score for hsp in self.hsps()])
def coverage(self):
"""
Get the fraction of this title sequence that is matched by its reads.
@return: The C{float} fraction of the title sequence matched by its
reads.
"""
intervals = ReadIntervals(self.subjectLength)
for hsp in self.hsps():
intervals.add(hsp.subjectStart, hsp.subjectEnd)
return intervals.coverage()
def coverageCounts(self):
"""
For each location in the title sequence, return a count of how many
times that location is covered by a read.
"""
intervals = ReadIntervals(self.subjectLength)
for hsp in self.hsps():
intervals.add(hsp.subjectStart, hsp.subjectEnd)
return intervals.coverageCounts()
def coverageInfo(self):
"""
Return information about the bases found at each location in our title
sequence.
@return: A C{dict} whose keys are C{int} subject offsets and whose
values are unsorted lists of (score, base) 2-tuples, giving all the
bases from reads that matched the subject at subject location,
along with the bit score of the matching read.
"""
result = defaultdict(list)
for titleAlignment in self:
for hsp in titleAlignment.hsps:
score = hsp.score.score
for (subjectOffset, base, _) in titleAlignment.read.walkHSP(
hsp, includeWhiskers=False):
result[subjectOffset].append((score, base))
return result
def residueCounts(self, convertCaseTo='upper'):
"""
Count residue frequencies at all sequence locations matched by reads.
@param convertCaseTo: A C{str}, 'upper', 'lower', or 'none'.
If 'none', case will not be converted (both the upper and lower
case string of a residue will be present in the result if they are
present in the read - usually due to low complexity masking).
@return: A C{dict} whose keys are C{int} offsets into the title
sequence and whose values are C{Counters} with the residue as keys
and the count of that residue at that location as values.
"""
if convertCaseTo == 'none':
convert = lambda x: x
elif convertCaseTo == 'lower':
convert = str.lower
elif convertCaseTo == 'upper':
convert = str.upper
else:
raise ValueError(
"convertCaseTo must be one of 'none', 'lower', or 'upper'")
counts = defaultdict(Counter)
for titleAlignment in self:
read = titleAlignment.read
for hsp in titleAlignment.hsps:
for (subjectOffset, residue, inMatch) in read.walkHSP(hsp):
counts[subjectOffset][convert(residue)] += 1
return counts
def summary(self):
"""
Summarize the alignments for this subject.
@return: A C{dict} with C{str} keys:
bestScore: The C{float} best score of the matching reads.
coverage: The C{float} fraction of the subject genome that is
matched by at least one read.
hspCount: The C{int} number of hsps that match the subject.
medianScore: The C{float} median score of the matching reads.
readCount: The C{int} number of reads that match the subject.
subjectLength: The C{int} length of the subject.
subjectTitle: The C{str} title of the subject.
"""
return {
'bestScore': self.bestHsp().score.score,
'coverage': self.coverage(),
'hspCount': self.hspCount(),
'medianScore': self.medianScore(),
'readCount': self.readCount(),
'subjectLength': self.subjectLength,
'subjectTitle': self.subjectTitle,
}
class TitlesAlignments(dict):
"""
Holds (as a dictionary) a set of titles, each with its alignments.
@param readsAlignments: A L{dark.alignments.ReadsAlignments} instance.
@param scoreClass: A class to hold and compare scores. If C{None},
the score class from readsAlignments will be used.
@param readSetFilter: An instance of dark.filter.ReadSetFilter, or C{None}.
This can be used to pass a previously used title filter for ongoing
use in filtering.
@param importReadsAlignmentsTitles: If C{True}, titles from
C{readsAlignments} will be added to self. This argument is only used
by the filtering function to make a new instance without reading its
titles.
"""
def __init__(self, readsAlignments, scoreClass=None, readSetFilter=None,
importReadsAlignmentsTitles=True):
dict.__init__(self)
self.readsAlignments = readsAlignments
self.scoreClass = scoreClass or readsAlignments.scoreClass
self.readSetFilter = readSetFilter
if importReadsAlignmentsTitles:
for readAlignments in readsAlignments:
for alignment in readAlignments:
title = alignment.subjectTitle
try:
titleAlignments = self[title]
except KeyError:
titleAlignments = self[title] = TitleAlignments(
title, alignment.subjectLength)
titleAlignments.addAlignment(
TitleAlignment(readAlignments.read, alignment.hsps))
def addTitle(self, title, titleAlignments):
"""
Add a new title to self.
@param title: A C{str} title.
@param titleAlignments: An instance of L{TitleAlignments}.
@raises KeyError: If the title is already present.
"""
if title in self:
raise KeyError('Title %r already present in TitlesAlignments '
'instance.' % title)
else:
self[title] = titleAlignments
def filter(self, minMatchingReads=None, minMedianScore=None,
withScoreBetterThan=None, minNewReads=None, minCoverage=None,
maxTitles=None, sortOn='maxScore'):
"""
Filter the titles in self to create another TitlesAlignments.
@param minMatchingReads: titles that are matched by fewer reads
are unacceptable.
@param minMedianScore: sequences that are matched with a median
bit score that is less are unacceptable.
@param withScoreBetterThan: if the best score for a title is not
as good as this value, the title is not acceptable.
@param minNewReads: The C{float} fraction of its reads by which a new
title's read set must differ from the read sets of all previously
seen titles in order for this title to be considered acceptably
different (and therefore interesting).
@param minCoverage: The C{float} minimum fraction of the title sequence
that must be matched by at least one read.
@param maxTitles: A non-negative C{int} maximum number of titles to
keep. If more titles than this are present, titles will be sorted
(according to C{sortOn}) and only the best will be retained.
@param sortOn: A C{str} attribute to sort on, used only if C{maxTitles}
is not C{None}. See the C{sortTitles} method below for the legal
values.
@raise: C{ValueError} if C{maxTitles} is less than zero or the value of
C{sortOn} is unknown.
@return: A new L{TitlesAlignments} instance containing only the
matching titles.
"""
# Use a ReadSetFilter only if we're checking that read sets are
# sufficiently new.
if minNewReads is None:
readSetFilter = None
else:
if self.readSetFilter is None:
self.readSetFilter = ReadSetFilter(minNewReads)
readSetFilter = self.readSetFilter
result = TitlesAlignments(
self.readsAlignments, self.scoreClass, self.readSetFilter,
importReadsAlignmentsTitles=False)
if maxTitles is not None and len(self) > maxTitles:
if maxTitles < 0:
raise ValueError('maxTitles (%r) cannot be negative.' %
maxTitles)
else:
# There are too many titles. Make a sorted list of them so
# we loop through them (below) in the desired order and can
# break when/if we've reached the maximum. We can't just
# take the first maxTitles titles from the sorted list now,
# as some of those titles might later be discarded by the
# filter and then we'd return a result with fewer titles
# than we should.
titles = self.sortTitles(sortOn)
else:
titles = self.keys()
for title in titles:
# Test max titles up front, as it may be zero.
if maxTitles is not None and len(result) == maxTitles:
break
titleAlignments = self[title]
if (minMatchingReads is not None and
titleAlignments.readCount() < minMatchingReads):
continue
# To compare the median score with another score, we must
# convert both values to instances of the score class used in
# this data set so they can be compared without us needing to
# know if numerically greater scores are considered better or
# not.
if (minMedianScore is not None and
self.scoreClass(titleAlignments.medianScore()) <
self.scoreClass(minMedianScore)):
continue
if (withScoreBetterThan is not None and not
titleAlignments.hasScoreBetterThan(withScoreBetterThan)):
continue
if (minCoverage is not None and
titleAlignments.coverage() < minCoverage):
continue
if (readSetFilter and not
readSetFilter.accept(title, titleAlignments)):
continue
result.addTitle(title, titleAlignments)
return result
def hsps(self):
"""
Get all HSPs for all the alignments for all titles.
@return: A generator yielding L{dark.hsp.HSP} instances.
"""
return (hsp for titleAlignments in self.values()
for alignment in titleAlignments for hsp in alignment.hsps)
def sortTitles(self, by):
"""
Sort titles by a given attribute and then by title.
@param by: A C{str}, one of 'length', 'maxScore', 'medianScore',
'readCount', or 'title'.
@raise ValueError: If an unknown C{by} value is given.
@return: A sorted C{list} of titles.
"""
# First sort titles by the secondary key, which is always the title.
titles = sorted(iter(self))
# Then sort on the primary key (if any).
if by == 'length':
return sorted(
titles, reverse=True,
key=lambda title: self[title].subjectLength)
if by == 'maxScore':
return sorted(
titles, reverse=True, key=lambda title: self[title].bestHsp())
if by == 'medianScore':
return sorted(
titles, reverse=True,
key=lambda title: self.scoreClass(self[title].medianScore()))
if by == 'readCount':
return sorted(
titles, reverse=True,
key=lambda title: self[title].readCount())
if by == 'title':
return titles
raise ValueError('Sort attribute must be one of "length", "maxScore", '
'"medianScore", "readCount", "title".')
def summary(self, sortOn=None):
"""
Summarize all the alignments for this title.
@param sortOn: A C{str} attribute to sort titles on. One of 'length',
'maxScore', 'medianScore', 'readCount', or 'title'.
@raise ValueError: If an unknown C{sortOn} value is given.
@return: A generator that yields C{dict} instances as produced by
C{TitleAlignments} (see class earlier in this file), sorted by
C{sortOn}.
"""
titles = self if sortOn is None else self.sortTitles(sortOn)
for title in titles:
yield self[title].summary()
def tabSeparatedSummary(self, sortOn=None):
"""
Summarize all the alignments for this title as multi-line string with
TAB-separated values on each line.
@param sortOn: A C{str} attribute to sort titles on. One of 'length',
'maxScore', 'medianScore', 'readCount', or 'title'.
@raise ValueError: If an unknown C{sortOn} value is given.
@return: A newline-separated C{str}, each line with a summary of a
title. Each summary line is TAB-separated.
"""
# The order of the fields returned here is somewhat arbitrary. The
# subject titles are last because they are so variable in length.
# Putting them last makes it more likely that the initial columns in
# printed output will be easier to read down.
#
# Note that post-processing scripts will be relying on the field
# ordering here. So you can't just add fields. It's probably safe
# to add them at the end, but be careful / think.
#
# A TAB-separated file can easily be read by awk using e.g.,
# awk 'BEGIN {FS = "\t"} ...'
result = []
for titleSummary in self.summary(sortOn):
result.append('\t'.join([
'%(coverage)f',
'%(medianScore)f',
'%(bestScore)f',
'%(readCount)d',
'%(hspCount)d',
'%(subjectLength)d',
'%(subjectTitle)s',
]) % titleSummary)
return '\n'.join(result)
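# ---------------------------------------------------------------------------
# Editor's note: minimal usage sketch, not part of the original module. It
# assumes `someReadsAlignments` is a dark.alignments.ReadsAlignments instance
# obtained elsewhere (e.g. from parsed BLAST output); the filter parameter
# values below are illustrative only.
def _exampleTitlesSummary(someReadsAlignments):
    # Group alignments by matched title, keep only sufficiently interesting
    # titles, and print one TAB-separated summary line per title.
    titlesAlignments = TitlesAlignments(someReadsAlignments)
    filtered = titlesAlignments.filter(minMatchingReads=5, minCoverage=0.1,
                                       maxTitles=20, sortOn='maxScore')
    print(filtered.tabSeparatedSummary(sortOn='readCount'))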
|
bamueh/dark-matter
|
dark/titles.py
|
Python
|
mit
| 18,728
|
[
"BLAST"
] |
8537bd9165fe09d5cebed08c742c7ee592abf9e285f4a2d795c492466610f77f
|
#!/usr/bin/env python
# coding: utf-8
"""
Created on Nov 12, 2011
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Nov 12, 2011"
import itertools
import argparse
from pymatgen.io.vasp import Incar
from pymatgen.util.string_utils import str_aligned
parser = argparse.ArgumentParser(description='''Convenient INCAR diff.
Author: Shyue Ping Ong
Version: 1.0
Last updated: Oct 26 2011''')
parser.add_argument('incar_file', metavar='filename', type=str, nargs=2,
help='files to process')
args = parser.parse_args()
filepath1 = args.incar_file[0]
filepath2 = args.incar_file[1]
incar1 = Incar.from_file(filepath1)
incar2 = Incar.from_file(filepath2)
def format_lists(v):
if isinstance(v, (tuple, list)):
return " ".join(["%d*%.2f" % (len(tuple(group)), i)
for (i, group) in itertools.groupby(v)])
return v
d = incar1.diff(incar2)
output = [['SAME PARAMS', '', '']]
output.append(['---------------', '', ''])
output.extend([(k, format_lists(d['Same'][k]), format_lists(d['Same'][k]))
for k in sorted(d['Same'].keys()) if k != "SYSTEM"])
output.append(['', '', ''])
output.append(['DIFFERENT PARAMS', '', ''])
output.append(['----------------', '', ''])
output.extend([(k, format_lists(d['Different'][k]['INCAR1']),
format_lists(d['Different'][k]['INCAR2']))
for k in sorted(d['Different'].keys()) if k != "SYSTEM"])
print str_aligned(output, ['', filepath1, filepath2])
|
rousseab/pymatgen
|
scripts/diff_incar.py
|
Python
|
mit
| 1,616
|
[
"VASP",
"pymatgen"
] |
ac05386fd219d663a0db9e45a13598a78afd91251fd70599514dfd310315ed00
|
from rdflib.namespace import Namespace
import csv
import re
import traceback
from ..utils import normalize_cell_name
from ..channel import Channel, ExpressionPattern
from ..evidence import Evidence
from ..muscle import Muscle
from ..network import Network
from ..neuron import Neuron
from ..website import Website
from ..worm import Worm
from .common_data import DS_NS, TRANS_NS
from .csv_ds import CSVDataSource, CSVDataTranslator
from .data_with_evidence_ds import DataWithEvidenceDataSource
class WormbaseTextMatchCSVDataSource(CSVDataSource):
rdf_namespace = Namespace(DS_NS['WormbaseTextMatchCSVDataSource#'])
def __init__(self, cell_type, initial_cell_column, **kwargs):
"""
Parameters
----------
cell_type : type
The type of cell to generate
initial_cell_column : int
The index of the first column with a cell name
"""
super(WormbaseTextMatchCSVDataSource, self).__init__(**kwargs)
self.cell_type = cell_type
self.initial_cell_column = initial_cell_column
class WormbaseIonChannelCSVDataSource(CSVDataSource):
rdf_namespace = Namespace(DS_NS['WormbaseIonChannelCSVDataSource#'])
csv_header = ['channel_name',
'gene_name',
'gene_WB_ID',
'expression_pattern',
'description']
class WormbaseIonChannelCSVTranslator(CSVDataTranslator):
input_type = WormbaseIonChannelCSVDataSource
output_type = DataWithEvidenceDataSource
translator_identifier = TRANS_NS.WormbaseIonChannelCSVTranslator
def translate(self, data_source):
res = self.make_new_output((data_source,))
try:
with res.evidence_context(Evidence=Evidence, Website=Website) as ctx:
doc = ctx.Website(key="wormbase", url="http://Wormbase.org", title="WormBase")
doc_ctx = res.data_context_for(document=doc)
ctx.Evidence(reference=doc, supports=doc_ctx.rdf_object)
with open(data_source.csv_file_name.onedef(), 'r') as csvfile:
next(csvfile, None)
csvreader = csv.reader(csvfile, skipinitialspace=True)
with doc_ctx(Channel=Channel,
ExpressionPattern=ExpressionPattern) as ctx:
for line in csvreader:
channel_name = normalize_cell_name(line[0]).upper()
gene_name = line[1].upper()
gene_WB_ID = line[2].upper()
expression_pattern = line[3]
description = line[4]
c = ctx.Channel(name=str(channel_name))
c.gene_name(gene_name)
c.gene_WB_ID(gene_WB_ID)
c.description(description)
patterns = expression_pattern.split(r' | ')
regex = re.compile(r' *\[([^\]]+)\] *(.*) *')
matches = [regex.match(pat) for pat in patterns]
patterns = [ctx.ExpressionPattern(wormbaseid=m.group(1),
description=m.group(2))
for m in matches if m is not None]
for pat in patterns:
c.expression_pattern(pat)
except Exception:
traceback.print_exc()
return res
class WormbaseTextMatchCSVTranslator(CSVDataTranslator):
input_type = WormbaseTextMatchCSVDataSource
output_type = DataWithEvidenceDataSource
translator_identifier = TRANS_NS.WormbaseTextMatchCSVTranslator
def translate(self, data_source):
initcol = data_source.initial_cell_column
ctype = data_source.cell_type
res = self.make_new_output((data_source,))
try:
with res.evidence_context(Evidence=Evidence, Website=Website) as ctx:
doc = ctx.Website(key="wormbase", url="http://Wormbase.org", title="WormBase")
doc_ctx = res.data_context_for(document=doc)
ctx.Evidence(reference=doc, supports=doc_ctx.rdf_object)
with open(data_source.csv_file_name.onedef(), 'r') as f:
reader = csv.reader(f, delimiter='\t')
header = self.skip_to_header(reader)
with doc_ctx(Channel=Channel, CType=ctype) as ctx:
for row in reader:
cells = self.extract_cell_names(header,
initcol,
row)
ch = ctx.Channel(name=str(row[0]))
for cell in cells:
m = ctx.CType(name=str(cell))
ch.appearsIn(m)
except Exception:
traceback.print_exc()
return res
def skip_to_header(self, reader):
rows = 0
for row in reader:
if rows == 3:
return row
rows += 1
return None
def extract_cell_names(self, header, initial_cell_column, row):
res = []
cols = 0
for col in row:
if cols > initial_cell_column:
if col == '1' or col == '2':
res.append(header[cols])
cols += 1
return res
class WormBaseCSVDataSource(CSVDataSource):
rdf_namespace = Namespace(DS_NS['MuscleCSVDataSource#'])
csv_header = ["Cell",
"Lineage Name",
"Description",
"Total count of identified adult-only hermaphrodite cells",
"Total count of adult-only male cells",
"Neurons (no male-specific cells)",
"Neurons (male-specific)",
"Body wall muscles",
"Pharynx muscles",
"Other muscles",
"Other adult-only cells in the hermaphrodite",
"Other adult-only hermaphrodite-specific cells (not present in males)",
"Motor neurons related to body wall muscles",
"Embryonic cells not present in adult",
"Male-specific cells",
"Male-specific adult-only cells",
"Cells with non-unique name",
"",
"VirtualWorm blender model names",
"WormBase ID",
"Synonyms"]
class MuscleWormBaseCSVTranslator(CSVDataTranslator):
input_type = WormBaseCSVDataSource
output_type = DataWithEvidenceDataSource
translator_identifier = TRANS_NS.MuscleWormBaseCSVTranslator
def translate(self, data_source):
""" Upload muscles and the neurons that connect to them """
res = self.make_new_output((data_source,))
with open(data_source.csv_file_name.onedef()) as csvfile:
csvreader = csv.reader(csvfile)
# TODO: Improve this evidence by going back to the actual research
# by using the wormbase REST API in addition to or instead of the CSV file
with res.evidence_context(Evidence=Evidence, Website=Website) as ctx:
doc = ctx.Website(key="wormbase", url="http://Wormbase.org", title="WormBase")
doc_ctx = res.data_context_for(document=doc)
ctx.Evidence(reference=doc, supports=doc_ctx.rdf_object)
with doc_ctx(Worm=Worm, Muscle=Muscle) as ctx:
w = ctx.Worm()
for num, line in enumerate(csvreader):
if num < 4: # skip rows with no data
continue
                        if line[7] == '1' or line[8] == '1' or line[9] == '1':  # muscles are marked in these columns
muscle_name = normalize_cell_name(line[0]).upper()
m = ctx.Muscle(name=muscle_name)
w.muscle(m)
return res
class NeuronWormBaseCSVTranslator(CSVDataTranslator):
input_type = WormBaseCSVDataSource
output_type = DataWithEvidenceDataSource
translator_identifier = TRANS_NS.NeuronWormBaseCSVTranslator
def translate(self, data_source):
res = self.make_new_output((data_source,))
# TODO: Improve this evidence by going back to the actual research
# by using the wormbase REST API in addition to or instead of the CSV file
with res.evidence_context(Evidence=Evidence, Website=Website) as ctx:
doc = ctx.Website(key="wormbase", url="http://Wormbase.org", title="WormBase")
doc_ctx = res.data_context_for(document=doc)
ctx.Evidence(reference=doc, supports=doc_ctx.rdf_object)
with doc_ctx(Worm=Worm, Network=Network, Neuron=Neuron) as ctx:
w = ctx.Worm()
n = ctx.Network()
n.worm(w)
with open(data_source.csv_file_name.onedef()) as csvfile:
csvreader = csv.reader(csvfile)
for num, line in enumerate(csvreader):
if num < 4: # skip rows with no data
continue
if line[5] == '1': # neurons marked in this column
neuron_name = normalize_cell_name(line[0]).upper()
n.neuron(ctx.Neuron(name=neuron_name))
return res
__yarom_mapped_classes__ = (WormbaseTextMatchCSVDataSource,
WormbaseIonChannelCSVDataSource,
WormbaseIonChannelCSVTranslator,
WormbaseTextMatchCSVTranslator,
WormBaseCSVDataSource,
MuscleWormBaseCSVTranslator,
NeuronWormBaseCSVTranslator)
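# ---------------------------------------------------------------------------
# Editor's note: hypothetical usage sketch, not part of the original module.
# A translator is driven by giving it an instance of its input_type data
# source and calling translate(); how that data source is constructed (CSV
# file name, key, ...) depends on the surrounding PyOpenWorm configuration,
# so only the call pattern is shown:
#
#     translator = WormbaseIonChannelCSVTranslator()
#     result = translator.translate(ion_channel_data_source)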
|
gsarma/PyOpenWorm
|
PyOpenWorm/data_trans/wormbase.py
|
Python
|
mit
| 9,822
|
[
"NEURON"
] |
7f727d1e8d34a5f27f9a54a3350877a910346fc1e5b6832c5ae18695b3c6ac2d
|
#!/usr/bin/env python
from distutils.core import setup
setup(
name="housepy",
version="0.0.1",
description="Personal utility library for Python 3",
author="Brian House",
url="https://github.com/brianhouse/housepy",
py_modules=['housepy'],
package_dir={'housepy': ""},
packages=['housepy'],
install_requires=[
'beautifulsoup4>=4.4.1',
'boto>=2.39.0',
'cairocffi>=0.7.2',
'cffi>=1.5.2',
'cycler>=0.10.0',
'Jinja2>=2.8',
'Markdown>=2.6.6',
'MarkupSafe>=0.23',
'matplotlib>=1.5.1',
'numpy>=1.11.0',
'Pillow>=3.1.1',
'PyAudio>=0.2.9',
'pycrypto>=2.6.1',
'pyglet>=1.2.4',
'pymongo>=3.2.2',
'pyparsing>=2.1.1',
'pyserial>=3.0.1',
'python-dateutil>=2.5.2',
'python-geohash>=0.8.5',
'python-rtmidi==0.5b1',
'pytz>=2016.3',
'PyYAML>=3.11',
'requests>=2.9.1',
'scipy>=0.17.0',
'six>=1.10.0',
'tinys3>=0.1.11',
'tornado>=4.3',
],
)
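# Editor's note (illustrative, not part of the original file): with this
# setup.py at the repository root, the library would typically be installed
# into the current environment with
#     pip install .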
|
brianhouse/housepy
|
setup.py
|
Python
|
mit
| 1,078
|
[
"Brian"
] |
a095f8c2262278a3b9c605500fae3d93748e4551f68c4d90249158cc93e7a727
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-06-12 19:13:30
# @Last modified by: Brian Cherinka
# @Last Modified time: 2017-06-12 19:23:07
from __future__ import print_function, division, absolute_import
from marvin.core import marvin_pickle
import pytest
import os
data = dict(a=7, b=[10, 2])
class TestMarvinPickle(object):
def test_specify_path(self, temp_scratch):
file = temp_scratch.join('tmp_testMarvinPickleSpecifyPath.pck')
path_out = marvin_pickle.save(obj=data, path=str(file), overwrite=False)
assert file.check() is True
revived_data = marvin_pickle.restore(path_out)
assert data == revived_data
def test_overwrite_true(self, temp_scratch):
file = temp_scratch.join('tmp_testMarvinPickleOverwriteTrue.pck')
open(str(file), 'a').close()
path_out = marvin_pickle.save(obj=data, path=str(file), overwrite=True)
assert file.check() is True
revived_data = marvin_pickle.restore(path_out)
assert data == revived_data
def test_delete_on_restore(self, temp_scratch):
file = temp_scratch.join('tmp_testMarvinPickleDeleteOnRestore.pck')
path_out = marvin_pickle.save(obj=data, path=str(file), overwrite=False)
assert file.check() is True
revived_data = marvin_pickle.restore(path_out, delete=True)
assert data == revived_data
assert os.path.isfile(str(file)) is False
|
sdss/marvin
|
tests/core/test_marvin_pickle.py
|
Python
|
bsd-3-clause
| 1,519
|
[
"Brian"
] |
57179036a1de2c3dfe18cdee66ab198defbc0ab5d7af42154d8ba8aa1b464574
|
# BurnMan - a lower mantle toolkit
# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
#system libs:
import numpy
import scipy.optimize as opt
import scipy.integrate as integrate
import math
import matplotlib.pyplot as pyplot
#own libs:
import os, sys
if not os.path.exists('burnman') and os.path.exists('../burnman'):
sys.path.insert(1,os.path.abspath('..'))
sys.path.insert(1,os.path.abspath('.'))
import burnman
import geotherm
from tools import *
# TODO: add up weight percent and check <100 and tell them how much
molar_mass = {'Fe':55.845/1000., 'Mg':24.305/1000., 'O':15.999/1000., 'Al':26.982/1000., 'Ca':40.078/1000., 'Si':28.085/1000.} # kg/mol
Av = 6.022141e23 # Avogadro constant in 1/mol
boltzmann_constant = 1.3806503e-23 # in m^2 kg s^-2 K^-1
gas_constant = Av * boltzmann_constant # in J mol^-1 K^-1
lower_mantle_mass = 4.043e24*.75 # in kg
# convert weight percentage (amount, 1.00 = 100%) of a given element to the number of atoms of that element in the lower mantle
def weight_pct_to_mol(element, amount):
return amount * lower_mantle_mass / molar_mass[element] * Av
def calculate_phase_percents(inp):
"""
Converts given weight percentages into the requisite percent of each phase
in mols and also returns the fraction of perovskite versus ferropericlase,
    assuming all of the silicon goes into the perovskite phase
and with any remaining Fe or Mg going into the oxide phase.
Input:
inp={'Mg': ..., 'Fe': ..., ...} # in weight percent
Returns:
phase_per={'fp': ..., 'pv': ...} # as a fraction
rel_mol_per={'MgO: ..., 'FeO': ..., ...} # in mols
"""
names = {'Mg':'MgO','Fe':'FeO','Si':'SiO2', 'Ca':'Ca', 'Al':'Al'}
rel_mol_per = {}
out = {}
for a in inp:
out[names[a]] = weight_pct_to_mol(a,inp[a])
norm = out['MgO']+out['FeO']
for a in inp:
rel_mol_per[names[a]] = out[names[a]]/norm
frac_mol_SiO2 = rel_mol_per['SiO2']
phase_per={'fp':(1.-frac_mol_SiO2),'pv':frac_mol_SiO2}
return phase_per,rel_mol_per
def part_coef_calc(inp2,StartP,EndP,deltaP):
    a = [] #partition coefficient of Fe in fp
    b = [] #partition coefficient of Fe in pv
Pressure= []
Temperature=[]
counter = 0
def calculate_partition_coefficient(pressure, temperature, components, initial_distribution_coefficient):
""" calculate the partition coefficient given [...] initial_distribution_coefficient is known as Kd_0 """
frac_mol_FeO = components['FeO']
frac_mol_SiO2 = components['SiO2']
Kd_0 = initial_distribution_coefficient
delV = 2.e-7 #in m^3/mol, average taken from Nakajima et al 2012, JGR
rs = ((25.e9-pressure)*(delV)/(gas_constant*temperature))+math.log(Kd_0) #eq 5 Nakajima et al 2012
    K = math.exp(rs) #The exchange coefficient at P and T
num_to_sqrt = (-4.*frac_mol_FeO*(K-1.)*K*frac_mol_SiO2)+(pow(1.+(frac_mol_FeO*(K-1))+((K-1.)*frac_mol_SiO2),2.))
b = (-1. + frac_mol_FeO - (frac_mol_FeO*K)+frac_mol_SiO2 - (frac_mol_SiO2*K) + math.sqrt(num_to_sqrt)) \
/ (2.*frac_mol_SiO2*(1.-K))
a = b /(((1.-b)*K)+b)
return (a,b) #a is partition coefficient array with P for mw, b is pcarray for pv
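# Editor's note (illustration, not in the original file): the exchange
# coefficient computed above is
#     K = exp( (25.e9 - P) * delV / (gas_constant * T) + ln(Kd_0) )
# i.e. eq. 5 of Nakajima et al. (2012); a and b then follow from solving the
# resulting quadratic for the Fe partitioning between ferropericlase (a) and
# perovskite (b).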
# test some composition (Javoy 2010, Table 6, PLoM)
if __name__ == "__main__":
inp1 = {'Mg':0.213, 'Fe': 0.0626, 'Si':0.242, 'Ca':0., 'Al':0.} # wt%
phase_per,rel_mol_per = calculate_phase_percents(inp1)
StartP = 23.83 #in GPa
EndP = 110.0
deltaP = 1.
#P,T,a,b,frac_mol_pv,frac_mol_mw = part_coef_calc(inp2,StartP,EndP,deltaP)
gt = lambda p: geotherm.brown_shankland(p)
pressure = StartP
temperature = gt([StartP,])
calculate_partition_coefficient(pressure, temperature, rel_mol_per, 0.5)
#part_coef_calc(inp2,StartP,EndP,deltaP)
#print inp1
#print inp2
#print t
|
tjhei/burnman_old2
|
burnman/partitioning.py
|
Python
|
gpl-2.0
| 3,876
|
[
"Avogadro"
] |
4ba6e6994b7f086c6d11c82af451c6453083d58ca605508389838934f0cd74c0
|
########################################################################
# File : Utilities.py
# Author : Federico Stagni
########################################################################
"""
Utilities for Transformation system
"""
import ast
import random
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Core.Utilities.SiteSEMapping import getSitesForSE
# from DIRAC.Core.Utilities.Time import timeThis
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
__RCSID__ = "$Id$"
class PluginUtilities(object):
"""
Utility class used by plugins
"""
def __init__(self, plugin='Standard', transClient=None, dataManager=None, fc=None,
debug=False, transInThread=None, transID=None):
"""
c'tor
Setting defaults
"""
# clients
if transClient is None:
self.transClient = TransformationClient()
else:
self.transClient = transClient
if dataManager is None:
self.dm = DataManager()
else:
self.dm = dataManager
if fc is None:
self.fc = FileCatalog()
else:
self.fc = fc
self.dmsHelper = DMSHelpers()
self.plugin = plugin
self.transID = transID
self.params = {}
self.groupSize = 0
self.maxFiles = 0
self.cachedLFNSize = {}
self.transString = ''
self.debug = debug
self.seConfig = {}
if transInThread is None:
self.transInThread = {}
else:
self.transInThread = transInThread
self.log = gLogger.getSubLogger(plugin)
def logVerbose(self, message, param=''):
""" logger helper """
if self.debug:
self.log.info('(V)' + self.transString + message, param)
else:
self.log.verbose(self.transString + message, param)
def logDebug(self, message, param=''):
""" logger helper """
self.log.debug(self.transString + message, param)
def logInfo(self, message, param=''):
""" logger helper """
self.log.info(self.transString + message, param)
def logWarn(self, message, param=''):
""" logger helper """
self.log.warn(self.transString + message, param)
def logError(self, message, param=''):
""" logger helper """
self.log.error(self.transString + message, param)
def logException(self, message, param='', lException=False):
""" logger helper """
self.log.exception(self.transString + message, param, lException)
def setParameters(self, params):
""" Set the transformation parameters and extract transID """
self.params = params
self.transID = params['TransformationID']
self.transString = self.transInThread.get(self.transID, ' [NoThread] [%d] ' % self.transID)
# @timeThis
def groupByReplicas(self, files, status):
"""
Generates tasks based on the location of the input data
:param dict fileReplicas:
{'/this/is/at.1': ['SE1'],
'/this/is/at.12': ['SE1', 'SE2'],
'/this/is/at.2': ['SE2'],
'/this/is/at_123': ['SE1', 'SE2', 'SE3'],
'/this/is/at_23': ['SE2', 'SE3'],
'/this/is/at_4': ['SE4']}
"""
tasks = []
nTasks = 0
if not files:
return S_OK(tasks)
files = dict(files)
# Parameters
if not self.groupSize:
self.groupSize = self.getPluginParam('GroupSize', 10)
flush = (status == 'Flush')
self.logVerbose("groupByReplicas: %d files, groupSize %d, flush %s" % (len(files), self.groupSize, flush))
# Consider files by groups of SEs, a file is only in one group
# Then consider files site by site, but a file can now be at more than one site
for groupSE in (True, False):
if not files:
break
seFiles = getFileGroups(files, groupSE=groupSE)
self.logDebug("fileGroups set: ", seFiles)
for replicaSE in sortSEs(seFiles):
lfns = seFiles[replicaSE]
if lfns:
tasksLfns = breakListIntoChunks(lfns, self.groupSize)
lfnsInTasks = []
for taskLfns in tasksLfns:
if flush or (len(taskLfns) >= self.groupSize):
tasks.append((replicaSE, taskLfns))
lfnsInTasks += taskLfns
# In case the file was at more than one site, remove it from the other sites' list
# Remove files from global list
for lfn in lfnsInTasks:
files.pop(lfn)
if not groupSE:
# Remove files from other SEs
for se in [se for se in seFiles if se != replicaSE]:
seFiles[se] = [lfn for lfn in seFiles[se] if lfn not in lfnsInTasks]
self.logVerbose("groupByReplicas: %d tasks created (groupSE %s)" % (len(tasks) - nTasks, str(groupSE)),
"%d files not included in tasks" % len(files))
nTasks = len(tasks)
return S_OK(tasks)
def createTasksBySize(self, lfns, replicaSE, fileSizes=None, flush=False):
"""
Split files in groups according to the size and create tasks for a given SE
"""
tasks = []
if fileSizes is None:
fileSizes = self._getFileSize(lfns).get('Value')
if fileSizes is None:
self.logWarn('Error getting file sizes, no tasks created')
return tasks
taskLfns = []
taskSize = 0
if not self.groupSize:
# input size in GB converted to bytes
self.groupSize = float(self.getPluginParam('GroupSize', 1.)) * 1000 * 1000 * 1000
if not self.maxFiles:
      # FIXME: prepare for changing the name of the ambiguous CS option
self.maxFiles = self.getPluginParam('MaxFilesPerTask', self.getPluginParam('MaxFiles', 100))
lfns = sorted(lfns, key=fileSizes.get)
for lfn in lfns:
size = fileSizes.get(lfn, 0)
if size:
if size > self.groupSize:
tasks.append((replicaSE, [lfn]))
else:
taskSize += size
taskLfns.append(lfn)
if (taskSize > self.groupSize) or (len(taskLfns) >= self.maxFiles):
tasks.append((replicaSE, taskLfns))
taskLfns = []
taskSize = 0
if flush and taskLfns:
tasks.append((replicaSE, taskLfns))
if not tasks and not flush and taskLfns:
self.logVerbose('Not enough data to create a task, and flush not set (%d bytes for groupSize %d)' %
(taskSize, self.groupSize))
return tasks
# @timeThis
def groupBySize(self, files, status):
"""
Generate a task for a given amount of data
"""
tasks = []
nTasks = 0
if not len(files):
return S_OK(tasks)
files = dict(files)
# Parameters
if not self.groupSize:
# input size in GB converted to bytes
self.groupSize = float(self.getPluginParam('GroupSize', 1)) * 1000 * 1000 * 1000
flush = (status == 'Flush')
self.logVerbose("groupBySize: %d files, groupSize: %d, flush: %s" % (len(files), self.groupSize, flush))
# Get the file sizes
res = self._getFileSize(files.keys())
if not res['OK']:
return res
fileSizes = res['Value']
for groupSE in (True, False):
if not files:
break
seFiles = getFileGroups(files, groupSE=groupSE)
for replicaSE in sorted(seFiles) if groupSE else sortSEs(seFiles):
lfns = seFiles[replicaSE]
newTasks = self.createTasksBySize(lfns, replicaSE, fileSizes=fileSizes, flush=flush)
lfnsInTasks = []
for task in newTasks:
lfnsInTasks += task[1]
tasks += newTasks
# Remove the selected files from the size cache
self.clearCachedFileSize(lfnsInTasks)
if not groupSE:
# Remove files from other SEs
for se in [se for se in seFiles if se != replicaSE]:
seFiles[se] = [lfn for lfn in seFiles[se] if lfn not in lfnsInTasks]
# Remove files from global list
for lfn in lfnsInTasks:
files.pop(lfn)
self.logVerbose("groupBySize: %d tasks created with groupSE %s" % (len(tasks) - nTasks, str(groupSE)))
self.logVerbose("groupBySize: %d files have not been included in tasks" % len(files))
nTasks = len(tasks)
self.logVerbose("Grouped %d files by size" % len(files))
return S_OK(tasks)
def getExistingCounters(self, normalise=False, requestedSites=[]):
res = self.transClient.getCounters('TransformationFiles', ['UsedSE'],
{'TransformationID': self.params['TransformationID']})
if not res['OK']:
return res
usageDict = {}
for usedDict, count in res['Value']:
usedSE = usedDict['UsedSE']
if usedSE != 'Unknown':
usageDict[usedSE] = count
if requestedSites:
siteDict = {}
for se, count in usageDict.items():
res = getSitesForSE(se)
if not res['OK']:
return res
for site in res['Value']:
if site in requestedSites:
siteDict[site] = count
usageDict = siteDict.copy()
if normalise:
usageDict = self._normaliseShares(usageDict)
return S_OK(usageDict)
# @timeThis
def _getFileSize(self, lfns):
""" Get file size from a cache, if not from the catalog
#FIXME: have to fill the cachedLFNSize!
"""
lfns = list(lfns)
cachedLFNSize = dict(self.cachedLFNSize)
fileSizes = {}
for lfn in [lfn for lfn in lfns if lfn in cachedLFNSize]:
fileSizes[lfn] = cachedLFNSize[lfn]
self.logDebug("Found cache hit for File size for %d files out of %d" % (len(fileSizes), len(lfns)))
lfns = [lfn for lfn in lfns if lfn not in cachedLFNSize]
if lfns:
fileSizes = self._getFileSizeFromCatalog(lfns, fileSizes)
if not fileSizes['OK']:
self.logError(fileSizes['Message'])
return fileSizes
fileSizes = fileSizes['Value']
return S_OK(fileSizes)
# @timeThis
def _getFileSizeFromCatalog(self, lfns, fileSizes):
"""
Get file size from the catalog
"""
lfns = list(lfns)
fileSizes = dict(fileSizes)
res = self.fc.getFileSize(lfns)
if not res['OK']:
return S_ERROR("Failed to get sizes for all files: %s" % res['Message'])
if res['Value']['Failed']:
errorReason = sorted(set(res['Value']['Failed'].values()))
self.logWarn("Failed to get sizes for %d files:" % len(res['Value']['Failed']), errorReason)
fileSizes.update(res['Value']['Successful'])
self.cachedLFNSize.update((res['Value']['Successful']))
self.logVerbose("Got size of %d files from catalog" % len(lfns))
return S_OK(fileSizes)
def clearCachedFileSize(self, lfns):
""" Utility function
"""
for lfn in [lfn for lfn in lfns if lfn in self.cachedLFNSize]:
self.cachedLFNSize.pop(lfn)
def getPluginParam(self, name, default=None):
""" Get plugin parameters using specific settings or settings defined in the CS
Caution: the type returned is that of the default value
"""
# get the value of a parameter looking 1st in the CS
if default != None:
valueType = type(default)
else:
valueType = None
# First look at a generic value...
optionPath = "TransformationPlugins/%s" % (name)
value = Operations().getValue(optionPath, None)
self.logVerbose("Default plugin param %s: '%s'" % (optionPath, value))
# Then look at a plugin-specific value
optionPath = "TransformationPlugins/%s/%s" % (self.plugin, name)
value = Operations().getValue(optionPath, value)
self.logVerbose("Specific plugin param %s: '%s'" % (optionPath, value))
if value != None:
default = value
# Finally look at a transformation-specific parameter
value = self.params.get(name, default)
self.logVerbose("Transformation plugin param %s: '%s'. Convert to %s" % (name, value, str(valueType)))
if valueType and not isinstance(value, valueType):
if valueType is list:
try:
value = ast.literal_eval(value) if value and value != 'None' else []
except ValueError:
value = [val for val in value.replace(' ', '').split(',') if val]
elif valueType is int:
value = int(value)
elif valueType is float:
value = float(value)
elif valueType is bool:
if value in ('False', 'No', 'None', None, 0):
value = False
else:
value = bool(value)
elif valueType is not str:
self.logWarn("Unknown parameter type (%s) for %s, passed as string" % (str(valueType), name))
self.logVerbose("Final plugin param %s: '%s'" % (name, value))
return value
@staticmethod
def _normaliseShares(originalShares):
""" Normalize shares to 1 """
total = sum(float(share) for share in originalShares.values())
return dict([(site, 100. * float(share) / total if total else 0.) for site, share in originalShares.items()])
def uniqueSEs(self, ses):
""" return a list of SEs that are not physically the same """
newSEs = []
for se in ses:
if not self.isSameSEInList(se, newSEs):
newSEs.append(se)
return newSEs
def isSameSE(self, se1, se2):
""" Check if 2 SEs are indeed the same """
if se1 == se2:
return True
for se in (se1, se2):
if se not in self.seConfig:
self.seConfig[se] = {}
res = StorageElement(se).getStorageParameters(protocol='srm')
if res['OK']:
params = res['Value']
for item in ('Host', 'Path'):
self.seConfig[se][item] = params[item].replace('t1d1', 't0d1')
else:
self.logError("Error getting StorageElement parameters for %s" % se, res['Message'])
return self.seConfig[se1] == self.seConfig[se2]
def isSameSEInList(self, se1, seList):
""" Check if an SE is the same as any in a list """
if se1 in seList:
return True
for se in seList:
if self.isSameSE(se1, se):
return True
return False
def closerSEs(self, existingSEs, targetSEs, local=False):
""" Order the targetSEs such that the first ones are closer to existingSEs. Keep all elements in targetSEs
"""
setTarget = set(targetSEs)
sameSEs = set([se1 for se1 in setTarget for se2 in existingSEs if self.isSameSE(se1, se2)])
targetSEs = setTarget - set(sameSEs)
if targetSEs:
# Some SEs are left, look for sites
existingSites = [self.dmsHelper.getLocalSiteForSE(se).get('Value')
for se in existingSEs if not self.dmsHelper.isSEArchive(se)]
existingSites = set([site for site in existingSites if site])
closeSEs = set([se for se in targetSEs
if self.dmsHelper.getLocalSiteForSE(se).get('Value') in existingSites])
# print existingSEs, existingSites, targetSEs, closeSEs
otherSEs = targetSEs - closeSEs
targetSEs = list(closeSEs)
random.shuffle(targetSEs)
if not local and otherSEs:
otherSEs = list(otherSEs)
random.shuffle(otherSEs)
targetSEs += otherSEs
else:
targetSEs = []
return (targetSEs + list(sameSEs)) if not local else targetSEs
def getFileGroups(fileReplicas, groupSE=True):
"""
Group files by set of SEs
:param dict fileReplicas:
{'/this/is/at.1': ['SE1'],
'/this/is/at.12': ['SE1', 'SE2'],
'/this/is/at.2': ['SE2'],
'/this/is/at_123': ['SE1', 'SE2', 'SE3'],
'/this/is/at_23': ['SE2', 'SE3'],
'/this/is/at_4': ['SE4']}
If groupSE == False, group by SE, in which case a file can be in more than one element
"""
fileGroups = {}
for lfn, replicas in fileReplicas.items():
if not replicas:
continue
replicas = sorted(list(set(replicas)))
if not groupSE or len(replicas) == 1:
for rep in replicas:
fileGroups.setdefault(rep, []).append(lfn)
else:
replicaSEs = ','.join(replicas)
fileGroups.setdefault(replicaSEs, []).append(lfn)
return fileGroups
def sortSEs(ses):
""" Returnes an ordered list of SEs, disk first """
seSvcClass = {}
for se in ses:
if len(se.split(',')) != 1:
return sorted(ses)
if se not in seSvcClass:
seSvcClass[se] = StorageElement(se).status()['DiskSE']
diskSEs = [se for se in ses if seSvcClass[se]]
tapeSEs = [se for se in ses if se not in diskSEs]
return sorted(diskSEs) + sorted(tapeSEs)
def sortExistingSEs(lfnSEs, lfns=None):
""" Sort SEs according to the number of files in each (most first)
"""
seFrequency = {}
archiveSEs = []
if not lfns:
lfns = lfnSEs.keys()
else:
lfns = [lfn for lfn in lfns if lfn in lfnSEs]
for lfn in lfns:
existingSEs = lfnSEs[lfn]
archiveSEs += [s for s in existingSEs if isArchive(s) and s not in archiveSEs]
for se in [s for s in existingSEs if not isFailover(s) and s not in archiveSEs]:
seFrequency[se] = seFrequency.setdefault(se, 0) + 1
sortedSEs = seFrequency.keys()
# sort SEs in reverse order of frequency
sortedSEs.sort(key=seFrequency.get, reverse=True)
# add the archive SEs at the end
return sortedSEs + archiveSEs
def isArchive(se):
""" Is the SE an archive """
return DMSHelpers().isSEArchive(se)
def isFailover(se):
""" Is the SE a failover SE """
return DMSHelpers().isSEFailover(se)
def getActiveSEs(seList, access='Write'):
""" Utility function - uses the StorageElement cached status
"""
return [se for se in seList if StorageElement(se).status().get(access, False)]
|
Andrew-McNab-UK/DIRAC
|
TransformationSystem/Client/Utilities.py
|
Python
|
gpl-3.0
| 17,682
|
[
"DIRAC"
] |
8757bd2ee0d43d18833945946789de2f0f7cd7f2cdb72f9337ac305e8e9a0ad8
|
#-------------------------------------------------------------------------------
# Name: netcdf_builder
# Purpose: Selection of functions to open, create and manage netCDF
# objects and files. These routines have been developed and
# tested with netCDF3 and netCDF4 file formats.
#
# Author: Matt Paget, Edward King
#
# Created: 24 March 2011
# Copyright: 2011-2014 CSIRO (Commonwealth Science and Industry Research
# Organisation, Australia).
# License: This software is open source under the CSIRO BSD (3 clause)
# License variant as provided in the accompanying LICENSE file or
# available from
# https://stash.csiro.au/projects/CMAR_RS/repos/netcdf-tools/browse/LICENSE.
# By continuing, you acknowledge that you have read and you
# accept and will abide by the terms of the License.
#
# Updates:
# 24 Mar 2011 Initial demonstration of the idea.
# 16 Nov 2011 Separated the code into logical components and made the netCDF
# object the primary object to be passed between routines.
# Generalised the nc3_add_data() routine to accept slices (or
# slice-like elements) and thus give the user more control over
# where data is placed in the NetCDF variable.
# 22 Nov 2011 Added optional timeunit argument to nc3_set_timelatlon().
# Some minor changes to the comments.
# 10 Feb 2013 Renamed to netcdf_builder.py
# Changed the 'nc3' prefix of all routines to 'nc'. Retained
# 'nc3*' function names for backward compatibility.
# Refreshed routines to work with the netCDF4-python package.
# Updated add variables commands to add a _FillValue by default.
# Added mode keyword in call to Dataset for permission parameter
# in nc_open.
# Added zlib option to nc_set_var.
# 11 Apr 2013 Added chunksizes option to nc_set_var.
# 17 Apr 2013 Wrapped import of OrderedDict in a try statement.
# Rearranged default order of dimension attributes.
# Added a check for changing the _FillValue attribute of a
# variable in nc_set_attributes.
# 12 Oct 2013 Added axis attribute to time, latitude and longitude dimensions
# in nc_set_timelatlon (not strictly required but handy for
# completeness).
# Removed 'nc_' prefix from each function name but retained
# previous function names as alias functions at the bottom of the
# script.
# Changed default format to NETCDF4_CLASSIC in ncopen().
# 19 Mar 2014 Added add_bounds() function for adding the CF bounds attribute
# to a dimension and the associated bounds array to a new
# variable.
# Added warning comment to add_data() docstring.
# 15 Aug 2014 Added type normalisation to get_attributes - now as an
# internal function called for all global and variable attributes
# and switchable via a boolean parameter. Previously, variable
# attributes were normalised.
# 12 May 2015 Added _ncversion() and _setattr() functions to provide a
# wrapper for the bug described at
# https://code.google.com/p/netcdf4-python/issues/detail?id=110
# A warning message is printed about the bug if the netCDF C
# library version is < 4.2 and an AttributeError occurred when
# setting the attribute value. The AttributeError is then raised.
# The library bug may not be cause of the error but if it is then
# the message should prove helpful.
# 13 May 2015 Implemented the work-around, in addition to printing the warning
# message.
#-------------------------------------------------------------------------------
# All functions, except ncopen(), operate on the netCDF object that is
# returned from ncopen(). The functions contain "standard" operations for
# creating a netCDF file, defining dimensions, adding data and adding/removing
# metadata. If additional operations are required then the netCDF4 package
# routines can be used directly. In which case, the functions here can be
# considered as examples of using the netCDF4 package routines.
#
# One limitation on returning the netCDF object from ncopen() is that the
# corresponding filename is not retained with the object. If the filename was
# added as an object attribute it would become a global attribute in the
# resulting file. Other possible work-arounds, such as managing a filename
# object attribute separately or creating a class that inherits from the
# netCDF object class, would create a non-standard netCDF implementation.
# So it is instead left to the user to retain and manage the filename.
#
# The netCDF4 package uses NumPy arrays and data types to manage data. As
# such it is difficult to exclude NumPy entirely when working with the
# netCDF4 package. If NumPy is not available on your system then you *may*
# be able to use this code as a guide and customise your own routines.
#
# Requires:
# NumPy
# OrderedDict
# https://code.google.com/p/netcdf4-python/
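# ---------------------------------------------------------------------------
# Editor's note: minimal usage sketch, not part of the original module. It
# relies only on functions defined further down in this file (ncopen,
# set_timelatlon, set_variable, set_attributes, ncclose); the file name,
# grid size and attribute values are illustrative.
def _example_build(fname='example.nc'):
    # Create a file with an unlimited time dimension and a 681 x 841
    # latitude/longitude grid, define a compressed float variable with a
    # fill value, attach some metadata and flush everything to disk.
    ncobj = ncopen(fname, permission='w')
    set_timelatlon(ncobj, None, 681, 841)
    set_variable(ncobj, 'temperature', fill=-999.0, zlib=True)
    set_attributes(ncobj, {'title': 'example', 'temperature:units': 'K'})
    ncclose(ncobj)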
import netCDF4
import numpy as np
import os, re
try: from collections import OrderedDict
except ImportError:
try: from ordereddict import OrderedDict
except ImportError:
print "Require OrderedDict, https://pypi.python.org/pypi/ordereddict"
raise
def ncopen(fname, permission='a', format='NETCDF4_CLASSIC'):
"""
Return a netCDF object.
Default permission is 'a' for appending.
"""
if permission == 'w':
ncobj = netCDF4.Dataset(fname,mode=permission,format=format)
else:
# Format will be deduced by the netCDF modules
ncobj = netCDF4.Dataset(fname,mode=permission)
return ncobj
def ncclose(ncobj):
"""
Close a netCDF object.
This is required to write the final state of the netCDF object to disk.
"""
ncobj.close()
def _ncversion(v=None):
"""Return the netCDF C library version. If v is None the full version
number string is returned. If v is a full version number string the
[major].[minor] number is returned as float"""
if v is None:
return str(netCDF4.getlibversion()).split()[0] # Full string version
v = v.split('.')
if len(v) == 1: v.append('0')
return float('.'.join(v[0:2])) # [major].[minor] number as a float
def _setattr(obj, name, val):
"""Local wrapper for the standard python function setattr() to handle the
bug described at
https://code.google.com/p/netcdf4-python/issues/detail?id=110
which is fixed in netCDF C library >= 4.2"""
vers = _ncversion()
if _ncversion(vers) >= 4.2:
setattr(obj, name, val)
return
try:
setattr(obj, name, val)
except AttributeError as e:
print "WARNING: A bug in your netCDF C library version (" + vers + ") may mean that updating an attribute with a value that has a larger size than the current value may cause a library crash. See https://code.google.com/p/netcdf4-python/issues/detail?id=110 for details. We'll attempt to apply the work-around. Best option, however, is to upgrade your netCDF C library."
# Ok, we'll fix it, need a temporary variable name
import uuid
tmp = str(uuid.uuid4()).split('-')[0]
cnt = 0
while (not re.match('[a-z]', tmp)) or \
(getattr(obj, tmp, None) is not None):
tmp = str(uuid.uuid4()).split('-')[0]
cnt = cnt + 1
if cnt > 10:
print 'Failed to create a unique temporary attribute name'
raise e
# Apply the fix
print 'Applying set attribute work-around'
setattr(obj, tmp, tmp)
setattr(obj, name, val)
delattr(obj, tmp)
def _normalise(d, verbose=None):
"""Normalise value types from numpy to regular types.
"""
for k,v in d.iteritems():
if verbose: print 'Attribute type and value ('+k+'):', type(v), v
if not isinstance(v,str) and not isinstance(v,unicode):
# Its probably a numpy dtype thanks to the netCDF* module.
# Need to convert it to a standard python type for JSON.
# May need to implement more type checks here.
# Clues:
# isinstance(y,np.float32)
# np.issubdtype(y,float)
# val = str(val)
try:
if re.search('\'numpy\.', str(type(v))):
v = v.tolist() # works for numpy arrays and scalars
else:
v = str(v)
if verbose: print ' Converted to type:', type(v)
except TypeError:
print 'Conversion error ('+k+'):', type(v), v
pass
d[k] = v
return d
def get_attributes(ncobj, verbose=None, normalise=True):
"""
Copy the global and variable attributes from a netCDF object to an
OrderedDict. This is a little like 'ncdump -h' (without the formatting).
Global attributes are keyed in the OrderedDict by the attribute name.
Variable attributes are keyed in the OrderedDict by the variable name and
attribute name separated by a colon, i.e. variable:attribute.
Normalise means that some NumPy types returned from the netCDF module are
converted to equivalent regular types.
Notes from the netCDF module:
The ncattrs method of a Dataset or Variable instance can be used to
retrieve the names of all the netCDF attributes.
The __dict__ attribute of a Dataset or Variable instance provides all
the netCDF attribute name/value pairs in an OrderedDict.
ncobj.dimensions.iteritems()
ncobj.variables
ncobj.ncattrs()
ncobj.__dict__
"""
d = OrderedDict()
# Get the global attributes
d.update( ncobj.__dict__ )
# Iterate through each Dimension and Variable, pre-pending the dimension
# or variable name to the name of each attribute
for name,var in ncobj.variables.iteritems():
for att,val in var.__dict__.iteritems():
d.update( {name+':'+att : val} )
if normalise:
d = _normalise(d, verbose)
return d
def set_attributes(ncobj, ncdict, delval='DELETE'):
"""
Copy attribute names and values from a dict (or OrderedDict) to a netCDF
object.
Global attributes are keyed in the OrderedDict by the attribute name.
Variable attributes are keyed in the OrderedDict by the variable name and
attribute name separated by a colon, i.e. variable:attribute.
If any value is equal to delval then, if the corresponding attribute exists
in the netCDF object, the corresponding attribute is removed from the
netCDF object. The default value of delval is 'DELETE'. For example,
nc3_set_attributes(ncobj, {'temperature:missing_value':'DELETE'})
will delete the missing_value attribute from the temperature variable.
A ValueError exception is raised if a key refers to a variable name that
is not defined in the netCDF object.
"""
# Add metadata attributes
for k in ncdict.keys():
p = k.partition(':')
if p[1]=="":
# Key is a global attribute
if ncdict[k]==delval:
delattr(ncobj, p[0])
else:
_setattr(ncobj, p[0], ncdict[k])
elif p[0] in ncobj.variables:
# Key is a variable attribute
if ncdict[k]==delval:
delattr(ncobj.variables[p[0]], p[2])
elif p[2] == "_FillValue":
# Its ok to have _FillValue in the dict as long as it has
# the same value as the variable's attribute
if getattr(ncobj.variables[p[0]], p[2]) != ncdict[k]:
print "Warning: As of netcdf4-python version 0.9.2, _FillValue can only be set when the variable is created (see http://netcdf4-python.googlecode.com/svn/trunk/Changelog). The only way to change the _FillValue would be to copy the array and create a new variable."
raise AttributeError("Can not change "+k)
else:
_setattr(ncobj.variables[p[0]], p[2], ncdict[k])
else:
raise ValueError("Variable name in dict does not match any variable names in the netcdf object:", p[0])
#print "Updated attributes in netcdf object"
def set_timelatlon(ncobj, ntime, nlat, nlon, timeunit=None):
"""
Create a skeleton 3-D netCDF object with time, latitude and longitude
dimensions and corresponding dimension variables (but no data in the
dimension variables). The dimension variables have 'long_name',
'standard_name' and 'units' attributes defined.
Inputs 'ntime', 'nlat' and 'nlon' are the number of elements for the time,
latitude and longitude vector dimensions, respectively.
A length of None or 0 (zero) creates an unlimited dimension.
The default unit for time is: 'days since 1800-01-01 00:00:00.0'.
The time unit should be in a Udunits format. The time unit and calendar
(default = gregorian) are used by add_time() to encode a list of
datetime objects.
The skeleton object can be customised with the netCDF4 module methods. See
http://netcdf4-python.googlecode.com/svn/trunk/docs/netCDF4-module.html
To write data to the dimension variables see add_time() and add_data().
Recommended ordering of dimensions is:
time, height or depth (Z), latitude (Y), longitude (X).
Any other dimensions should be defined before (placed to the left of) the
spatio-temporal coordinates.
Examples of adding data to dimensions:
latitudes[:] = numpy.linspace(-10,-44,681)
longitudes[:] = numpy.linspace(112,154,841)
dates = [datetime(2011,2,1)]
times[:] = netCDF4.date2num(dates,units=times.units,calendar=times.calendar)
"""
if timeunit==None:
timeunit = 'days since 1800-01-01 00:00:00.0'
# Dimensions can be renamed with the 'renameDimension' method of the file
ncobj.createDimension('time',ntime)
ncobj.createDimension('latitude',nlat)
ncobj.createDimension('longitude',nlon)
times = ncobj.createVariable('time','f8',('time',))
latitudes = ncobj.createVariable('latitude','f8',('latitude',))
longitudes = ncobj.createVariable('longitude','f8',('longitude',))
latitudes.long_name = 'latitude'
latitudes.standard_name = 'latitude'
latitudes.units = 'degrees_north'
latitudes.axis = 'Y'
longitudes.long_name = 'longitude'
longitudes.standard_name = 'longitude'
longitudes.units = 'degrees_east'
longitudes.axis = 'X'
times.long_name = 'time'
times.standard_name = 'time'
times.units = timeunit
times.calendar = 'gregorian'
times.axis = 'T'
def show_dimensions(ncobj):
"""
Print the dimension names, lengths and whether they are unlimited.
"""
print '{0:10} {1:7} {2}'.format("DimName","Length","IsUnlimited")
for dim,obj in ncobj.dimensions.iteritems():
print '{0:10} {1:<7d} {2!s}'.format(dim,len(obj),obj.isunlimited())
def set_variable(ncobj, varname, dtype='f4', dims=None, chunksize=None, fill=None, zlib=False, **kwargs):
"""
Define (create) a variable in a netCDF object. No data is written to the
variable yet. Give the variable's dimensions as a tuple of dimension names.
Dimensions must have been previously created with ncobj.createDimension
(e.g. see set_timelatlon()).
Recommended ordering of dimensions is:
time, height or depth (Z), latitude (Y), longitude (X).
Any other dimensions should be defined before (placed to the left of) the
spatio-temporal coordinates.
To create a scalar variable, use an empty tuple for the dimensions.
Variables can be renamed with the 'renameVariable' method of the netCDF
object.
Specify compression with zlib=True (default = False).
Specify the chunksize with a sequence (tuple, list) of the same length
as dims (i.e., the number of dimensions) where each element of chunksize
corresponds to the size of the chunk along the corresponding dimension.
There are some tips and tricks associated with chunking - see
http://data.auscover.org.au/node/73 for an overview.
The default behaviour is to create a floating-point (f4) variable
with dimensions ('time','latitude','longitude'), with no chunking and
no compression.
"""
if dims is None:
dims = ('time','latitude','longitude')
return ncobj.createVariable(varname, dtype, dimensions=dims,
chunksizes=chunksize, fill_value=fill, zlib=zlib, **kwargs)
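# Illustrative sketch (assumed usage): define a chunked, compressed variable on
# the default ('time','latitude','longitude') dimensions. The variable name,
# chunk sizes and fill value are examples only.
#   rain = set_variable(nc, 'rain', dtype='f4',
#                       chunksize=(1, 681, 841), fill=-999.0, zlib=True)
#   rain.units = 'mm day-1'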
def add_time(ncobj, datetime_list, timevar='time'):
"""
    Add time data to the time dimension variable. This routine is separate from
    add_data() because date/time data is encoded in a special way
    according to the units and calendar associated with the time dimension.
    datetime_list is a list of datetime objects.
The time variable should have already been defined with units and calendar
attributes.
Examples from:
http://netcdf4-python.googlecode.com/svn/trunk/docs/netCDF4-module.html
from datetime import datetime, timedelta
datetime_list = [datetime(2001,3,1)+n*timedelta(hours=12) for n in range(...)]
from netCDF3 import num2date
dates = num2date(nctime[:],units=nctime.units,calendar=nctime.calendar)
"""
nctime = ncobj.variables[timevar]
nctime[:] = netCDF4.date2num(datetime_list,
units=nctime.units,
calendar=nctime.calendar)
def add_bounds(ncobj, dimname, bounds, bndname=None):
"""Add a bounds array of data to the netCDF object.
Bounds array can be a list, tuple or NumPy array.
A bounds array gives the values of the vertices corresponding to a dimension
variable (see the CF documentation for more information). The dimension
variable requires an attribute called 'bounds', which references a variable
that contains the bounds array. The bounds array has the same shape as the
    corresponding dimension, with an extra trailing dimension for the number of vertices.
This function:
- Adds a 'bounds' attribute to the dimension variable if required.
    If a bounds attribute exists then its value will be used for the bounds
variable (bndname). Otherwise if a bndname is given then this will be
used. Otherwise the default bndname will be '_bounds' appended to the
dimension name.
- If the bounds variable exists then a ValueError will be raised if its
shape does not match the bounds array.
- If the bounds variable does not exist then it will be created. If so
    an extra dimension is required for the number of vertices. Any existing
dimension of the right size will be used. Otherwise a new dimension
will be created. The new dimension's name will be 'nv' (number of
vertices), unless this dimension name is already used in which case
'_nv' appended to the dimension name will be used instead.
- Lastly, the bounds array is written to the bounds variable. If the
corresponding dimension is time (name = 'time' or dim.axis = 't') then
the bounds array will be written as date2num data.
"""
# Convert bounds data to a numpy array if needed
if isinstance(bounds,(list,tuple)):
bounds = np.array(bounds)
bndshp = bounds.shape # tuple
nverts = bndshp[-1]
# Get variable object of the dimension
dimobj = ncobj.variables[dimname]
# Get/set the name of corresponding bounds variable
if 'bounds' in dimobj.ncattrs():
bndname = dimobj.bounds
else:
if bndname is None: bndname = dimname+'_bounds'
dimobj.bounds = bndname
# Get/set the variable object of the bounds variable
if bndname in ncobj.variables:
bndobj = ncobj.variables[bndname]
if bndobj.shape != bndshp:
            raise ValueError('Existing bounds variable shape does not '+ \
                             'match data:', bndname, bndobj.shape, bndshp)
else:
# Need a number of vertices dimension
nvname = None
for k,v in ncobj.dimensions.items():
if len(v) == nverts:
nvname = k
break
if nvname is None:
nvname = 'nv'
if nvname in ncobj.dimensions:
nvname = dimname + '_nv' # Change it if there's a conflict
ncobj.createDimension(nvname,nverts)
# Create the bounds variable
bndobj = ncobj.createVariable(bndname, dimobj.dtype,
dimobj.dimensions+(nvname,))
# Is this a time dimension
istime = False
if dimname.lower() == 'time':
istime = True
elif 'axis' in dimobj.ncattrs():
if dimobj.axis.lower() == 't':
istime = True
# Add the bounds array
if istime:
bndobj[:] = netCDF4.date2num(bounds, units=dimobj.units,
calendar=dimobj.calendar)
else:
bndobj[:] = bounds
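# Illustrative sketch (assumed usage): attach cell bounds to the latitude
# dimension variable. The half-degree cell width is an example value.
#   lats = nc.variables['latitude'][:]
#   lat_bnds = np.column_stack((lats - 0.25, lats + 0.25))   # shape (nlat, 2)
#   add_bounds(nc, 'latitude', lat_bnds)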
def add_data(ncobj, varname, data, index=None):
"""
*****
THIS FUNCTION DOES NOT WORK AS EXPECTED (at least last time it was tested).
You are better off using the netCDF4 module directly, i.e.,
ncobj.variables[varname][:] = numpy_array
or to add 2D array to a 3D variable,
ncobj.variables[varname][0,:,:] = numpy_array
*****
Add variable data to the given variable name.
Data can be a list, tuple or NumPy array.
In general, the size and shape of data must correspond to the size and
shape of the variable, except when:
- There are dimension(s) of size 1 defined for the variable, in which
case these dimension(s) don't need to be included in data.
- There are unlimited dimension(s) defined for the variable, in which
case these dimension(s) of the data can be of any size.
It is also possible to add data to a subset of the variable via slices
(ranges and strides) passed in through the index parameter. The only
caveat to using this feature is that the size and shape suggested by index
must match the size and shape of data and follow the "In general..." rules
for the variable as given above.
This is potentially a very complicated feature. The netCDF module
(netCDF4.Variable) will fail if the variable and data dimensions and
index slices are not compatible. It is difficult to catch or manipulate
the inputs to satisfy the netCDF module due to the variety and flexibility
of this feature. Experimentation before production is highly recommended.
Index is a sequence (list,tuple) of slice objects, lists, tuples, integers
and/or strings. Each element of the sequence is internally converted to a
slice object.
For best results, a sequence of slice objects is recommended so that you
have explicit control over where the data is placed within the variable.
The other element types are provided for convenience.
A slice(None,None,None) object is equivalent to filling the variable with
the data along the corresponding dimension. Thus, the default behaviour
(with index not given) is to fill the variable with data.
Integers are taken to be the start index. The end index is then chosen to
match the data array. This feature only works if the number of data
    dimensions matches the number of variable dimensions. Otherwise it's too
hard to guess which data dimension the index integer refers to.
If an integer element is provided in index, a ValueError will be raised if
the number of data and variable dimensions do not match.
Strings are probably only really useful for testing. Strings are of the
form "start:stop:stride" with any missing element chosen to be None, i.e.:
'' or ':' -> slice(None,None,None)
'2' or '2:' -> slice(2,None,None)
':2' -> slice(None,2,None)
'::2' -> slice(None,None,2)
'2:4' -> slice(2,4,None)
"""
# Get the variable object and its shape
var = ncobj.variables[varname]
vshp = var.shape # tuple
    # If data is a list then first convert it to a numpy array so that
    # the shape can be properly interrogated
if isinstance(data,(list,tuple)):
data = np.array(data)
dshp = data.shape # tuple
# Fill dshp if required
# Fill index if required
# Not quite but close:
# Loop through vshp, check each dim for either size=1 or unlimited
# if unlimited, dshp[i]=dshp[i] and index[i]='' or ':'
# if size=1, dshp[i]=1=vshp[i] and index[i]='' or ':'
# else
# if dshp[i] exists dshp[i]=dshp[i]
# else dshp[i]=vshp[i]
# if index[i] exists index[i]=index[i]
# else index[i]='' or ':'
if index is None:
index = ('',) * len(vshp)
range = [] # List of slice objects, one per dimension
for i,x in enumerate(index):
if isinstance(x,slice):
range.append(x)
elif isinstance(x,(tuple,list)):
range.append(slice(x))
elif isinstance(x,int):
# Assume x is start index and we slice to the corresponding
# shape of data
if len(vshp)==len(dshp):
# dshp must be same size as vshp for this to work!
range.append( slice(x,x+dshp[i]) )
else:
raise ValueError("Number of dimensions for the data and variable do not match, so I can't guess which data dimension this index refers to. Be explicit with the index range in a slice or string")
elif isinstance(x,str):
            # Assume it's some sort of start:stop:stride string
p = x.split(':')
for j in [0,1,2]:
if j<len(p):
if p[j]=='': p[j]=None
else: p[j]=int(p[j])
else:
p.append(None)
range.append(slice(p[0],p[1],p[2]))
else:
raise TypeError("Index element is not a valid type: ",x,type(x))
# Try to add the data to the variable.
    # netCDF4.Variable will complain if dimensions and size are not valid
var[range] = data
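# Illustrative sketch (assumed usage): given the caveats in the docstring
# above, writing directly through the netCDF4 variable object is usually
# simpler. The shapes below match the example ('time','latitude','longitude')
# variable and are illustrative only.
#   one_day = np.zeros((681, 841))            # a single time slice of data
#   nc.variables['rain'][0, :, :] = one_day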
# Alias functions to support some back compatibility with code that imported
# earlier versions of this file.
def nc_open(*args, **kwargs): return ncopen(*args, **kwargs)
def nc_close(*args, **kwargs): return ncclose(*args, **kwargs)
def nc_get_attributes(*args, **kwargs): return get_attributes(*args, **kwargs)
def nc_set_attributes(*args, **kwargs): return set_attributes(*args, **kwargs)
def nc_set_timelatlon(*args, **kwargs): return set_timelatlon(*args, **kwargs)
def nc_show_dims(*args, **kwargs): return show_dimensions(*args, **kwargs)
def nc_set_var(*args, **kwargs): return set_variable(*args, **kwargs)
def nc_add_time(*args, **kwargs): return add_time(*args, **kwargs)
def nc_add_data(*args, **kwargs): return add_data(*args, **kwargs)
def nc3_open(*args, **kwargs): return ncopen(*args, **kwargs)
def nc3_close(*args, **kwargs): return ncclose(*args, **kwargs)
def nc3_get_attributes(*args, **kwargs): return get_attributes(*args, **kwargs)
def nc3_set_attributes(*args, **kwargs): return set_attributes(*args, **kwargs)
def nc3_set_timelatlon(*args, **kwargs): return set_timelatlon(*args, **kwargs)
def nc3_show_dims(*args, **kwargs): return show_dimensions(*args, **kwargs)
def nc3_set_var(*args, **kwargs): return set_variable(*args, **kwargs)
def nc3_add_time(*args, **kwargs): return add_time(*args, **kwargs)
def nc3_add_data(*args, **kwargs): return add_data(*args, **kwargs)
|
GeoscienceAustralia/gdf
|
gdf/netcdf_builder.py
|
Python
|
apache-2.0
| 27,813
|
[
"NetCDF"
] |
655491ba4e98edb9ee3fa0fd33a780d5725eb3c93e227ee1b1bdcf5ff4b5b9c7
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RBiocfilecache(RPackage):
"""Manage Files Across Sessions
This package creates a persistent on-disk cache of files that the user
can add, update, and retrieve. It is useful for managing resources (such
as custom Txdb objects) that are costly or difficult to create, web
resources, and data files used across sessions."""
homepage = "https://bioconductor.org/packages/BiocFileCache"
git = "https://git.bioconductor.org/packages/BiocFileCache.git"
version('1.14.0', commit='cdcde4b59ae73dda12aa225948dbd0a058d9be6d')
version('1.8.0', commit='0e3542b6aae849b01240d8055a48da1b267bd5a0')
version('1.6.0', commit='c2de6c1cdef6294e5d0adea31e4ebf25865742ba')
version('1.4.0', commit='a2c473d17f78899c7899b9638faea8c30735eb80')
version('1.2.3', commit='d78bf5b46c8a329f5ddef879fe51230444bc42f8')
version('1.0.1', commit='dbf4e8dd4d8d9f475066cd033481efe95c56df75')
depends_on('r@3.4.0:', type=('build', 'run'))
depends_on('r-dplyr', type=('build', 'run'))
depends_on('r-dbplyr@1.0.0:', when='@1.2.3:', type=('build', 'run'))
depends_on('r-rsqlite', type=('build', 'run'))
depends_on('r-dbi', type=('build', 'run'))
depends_on('r-rappdirs', type=('build', 'run'))
depends_on('r-curl', when='@1.6.0:', type=('build', 'run'))
depends_on('r-httr', type=('build', 'run'))
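    # Example only (not part of the recipe): with this package available in a
    # Spack repository, it could be installed with a command such as
    #   spack install r-biocfilecache@1.14.0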
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-biocfilecache/package.py
|
Python
|
lgpl-2.1
| 1,588
|
[
"Bioconductor"
] |
3ddcfdf11ccb8369c776b36afb356544284adc269a11a19807d468bf2c0dc387
|
from pyfcm import FCMNotification
import feedparser
''' === important constants === '''
tags = ['A Song of Ice and Fire', 'Ace Attorney', 'Ace of Diamond', 'Addams Family', 'Agents of SHIELD', 'Aldnoah.Zero',
'Alex Rider', 'Alias', 'Almost Human', 'American Horror Story', 'Angel: The Series', 'Animorphs', 'Arrow (TV 2012)',
'Arslan Senki', 'Artemis Fowl', 'As the World Turns', 'Assassination Classroom', 'Assassin\'s Creed',
'Attack on Titan', 'Avatar (TV)', 'Avatar: Legend of Korra', 'Avatar: The Last Airbender', 'Avengers (Marvel)',
'Baby-Sitters Club', 'Babylon 5 & related', 'Baccano!', 'Band of Brothers', 'Batman', 'Battlestar Galactica',
'Being Human (UK)', 'Being Human (US/Canada)', 'Big Hero 6', 'Big Windup!', 'BioShock', 'Black Butler',
'Black Widow', 'Blue Exorcist', 'Boondock Saints', 'Borderlands', 'Breaking Bad', 'Brokeback Mountain',
'Brooklyn Nine-Nine', 'Buffy the Vampire Slayer', 'Bungou Stray Dogs', 'Burn Notice', 'CSI', 'Call of Duty',
'Canadian 6 Degrees', 'Captain America', 'Captive Prince', 'Cardcaptor Sakura', 'Carmilla', 'Check Please!',
'Chronicles of Narnia', 'Code Geass', 'Community (TV)', 'Criminal Minds', 'Crisis Core: Final Fantasy VII',
'D.Gray-man', 'DCU', 'DRAMAtical Murder', 'Dangan Ronpa', 'Danny Phantom', 'Days Of Our Lives', 'Deadpool',
'Death Note', 'Descendants', 'Destiny (Video Game)', 'Detective Conan', 'Devil May Cry', 'Digimon', 'Discworld',
'Dishonored (Video Games)', 'Disney', 'Divergent', 'Dollhouse', 'Downtown Abbey', 'Dr. Who & related', 'Dragon Age',
'Dragon Ball', 'Dungeons & Dragons', 'Durarara!!', 'Elder Scrolls V: Skyrim', 'Elementary (TV)', 'Emmerdale',
'Eureka (TV)', 'Fairy Tail', 'Fake News', 'Faking It (TV 2014)', 'Fallout (Video Games)', 'Fangirl',
'Fast and Furious', 'Fate/stay night', 'Final Fantasy I', 'Final Fantasy II', 'Final Fantasy III',
'Final Fantasy IV', 'Final Fantasy IX', 'Final Fantasy V', 'Final Fantasy VI', 'Final Fantasy VII',
'Final Fantasy VIII', 'Final Fantasy X', 'Final Fantasy X & Final Fantasy X-2', 'Final Fantasy X-2',
'Final Fantasy XI', 'Final Fantasy XII', 'Final Fantasy XIII series', 'Final Fantasy XIV', 'Final Fantasy XV',
'Fire Emblem: Awakening', 'Fire Emblem: Fates', 'Fire Emblem: Path of Radiance/Radiant Dawn',
'Five Nights at Freddy\'s', 'Free!', 'Fringe (TV)', 'Fruits Basket', 'Fullmetal Alchemist', 'Game of Thrones',
'Gangsta.', 'Gekkan Shoujo Nozaki-kun', 'General Hospital', 'Generation Kill', 'Ghostbusters', 'Gilmore Girls',
'Gintama', 'Girl Genius', 'Girl Meets World', 'Glee', 'Good Omens', 'Gotham', 'Grand Theft Auto', 'Gravity Falls',
'Green Arrow', 'Green Lantern', 'Grey\'s Anatomy', 'Grimm (TV)', 'Guardians of Childhood & related',
'Guardians of the Galaxy', 'Gundam & related', 'Haikyuu!!', 'Halo & related', 'Hamilton', 'Hannibal Lecter',
'Harry Potter', 'Hatoful Boyfriend', 'Hawkeye (Comics)', 'Hellsing', 'Hetalia: Axis Powers', 'High School Musical',
'Highlander', 'Hikaru no Go', 'Homeland', 'Homestuck', 'Hornblower (TV)', 'House M.D.', 'How I Met Your Mother',
'How to Get Away With Murder', 'How to Train Your Dragon', 'Hunger Games', 'Hunter x Hunter', 'In the Flesh (TV)',
'Infernal Devices', 'Inspector Morse & related', 'InuYasha', 'Invader Zim', 'Iron Man', 'James Bond',
'Jessica Jones (TV)', 'JoJo\'s Bizarre Adventure', 'Jupiter Ascending (2015)', 'Jurassic Park', 'Justice League',
'K', 'Kagerou Project', 'Kamen Rider', 'Karneval', 'Katekyou Hitman Reborn!', 'Kid Icarus', 'Kill la Kill',
'Kim Possible (Cartoon)', 'Kingdom Hearts', 'Kingsman: The Secret Service', 'Kuroko\'s Basketball', 'Labyrinth (1986)',
'Law & Order: SVU', 'League of Legends', 'Legend of the Seeker', 'Les Miserables', 'Leverage', 'Lewis (TV)',
'Life is Strange', 'Life on Mars & related', 'Lord of the Rings', 'Lost Girl', 'Love Live! School Idol Project',
'MS Paint Adventures', 'Magi', 'Major Crimes (TV)', 'Maleficent', 'Marvel', 'Marvel (Movies)',
'Marvel Cinematic Universe', 'Mass Effect', 'Mega Man', 'Mekakucity Actors', 'Metal Gear', 'Metalopocalypse',
'Mighty Morphin Power Rangers', 'Minecraft', 'Miraculous Ladybug', 'Miss Fisher\'s Murder Mysteries',
'Mission: Impossible', 'Mortal Instruments', 'Mortal Kombat', 'My Hero Academia', 'My Little Pony', 'My Mad Fat Diary',
'NCIS', 'Naruto', 'Natsume\'s Book of Friends', 'Neon Genesis Evangelion', 'Nightwing (Comics)', 'No. 6', 'Noragami',
'Numb3rs', 'Once Upon a Time (TV)', 'One Piece', 'One-Punch Man', 'Orphan Black (TV)', 'Osomatsu-san',
'Ouran High School Host Club', 'Outlander & related', 'Outsiders (Ambiguous)', 'Overwatch', 'Pacific Rim',
'Pandora Hearts', 'Paranatural', 'Parks and Recreation', 'Penny Dreadful', 'Percy Jackson and the Olympians & related',
'Person of Interest', 'Persona 3', 'Persona 4', 'Persona 5', 'Phantom of the Opera', 'Pirates of the Caribbean',
'Pokemon', 'Pokemon Adventures', 'Portal (Video Game)', 'Power Rangers', 'Pretty Little Liars', 'Pride and Prejudice',
'Primeval', 'Prince of Tennis', 'Princess Tutu', 'Prison Break', 'Professor Layton series', 'Psycho-Pass',
'Puella Magi Madoka Magica', 'Queer as Folk (US)', 'Ranma 1/2', 'Raven Cycle', 'Red Dwarf', 'Red Robin (Comics)',
'Reign (TV)', 'Resident Evil', 'Revolution (TV)', 'Revolutionary Girl Utena', 'Rick and Morty', 'Rizzoli & Isles',
'Robin Hood', 'Rune Factory (Video Games)', 'Rurouni Kenshin', 'Sailor Moon', 'Saint Seiya', 'Saints Row', 'Saiyuki',
'Sanctuary (TV)', 'Sengoku Basara', 'Sense8 (TV)', 'Seraph of the End', 'Shadowhunter Chronicles', 'Shakespeare',
'Shameless (US)', 'Sherlock Holmes & related', 'Silent Hill', 'Silicon Valley', 'Simon Snow', 'Smallville',
'Sonic the Hedgehog', 'Sons of Anarchy', 'Soul Eater', 'South Park', 'Spartacus (TV)', 'Spider-Man',
'Stand Still Stay Silent', 'Star Trek', 'Star Wars', 'Star vs. the Forces of Evil', 'Starchy & Hutch', 'Stargate',
'Steven Universe', 'Suikoden', 'Superman', 'Supernatural', 'Tales of Graces', 'Tales of Symphonia', 'Tales of Vesperia',
'Tales of Xilla', 'Tales of Zestiria', 'Tales of the Abyss', 'Team Fortress 2', 'Teen Titans', 'Teen Wolf (TV)',
'Teenage Mutant Ninja Turtles', 'Terminator', 'Terror in Resonance', 'The 100', 'The A-Team', 'The Almighty Johnsons',
'The Big Bang Theory', 'The Blacklist', 'The Devil Wears Prada', 'The Dresden Files', 'The Eagle (Ambiguous)',
'The Evil Within (Video Game)', 'The Flash', 'The Following', 'The Fosters (TV 2013)', 'The Good Wife (TV)',
'The Hobbit', 'The Incredible Hulk', 'The Legend of Zelda', 'The Librarians (TV 2014)', 'The Losers (2010)',
'The Magnificent Seven (TV)', 'The Man From UNCLE.', 'The Martian', 'The Maze Runner', 'The Mentalist',
'The Mindy Project', 'The Newsroom (US TV)', 'The Office (US)', 'The Originals (TV)', 'The Pacific (TV)',
'The Professionals', 'The Sandman (Comics)', 'The Sentinel', 'The Silmarillion', 'The Vampire Diaries',
'The Walking Dead & related', 'The West Wing', 'The Witcher', 'Thor', 'Three Musketeers', 'Thunderbirds',
'Tin Man (2007)', 'Tokyo Ghoul', 'Torchwood', 'Tortall', 'Touhou Project', 'Touken Ranbu', 'Transformers', 'Tron',
'True Blood', 'True Detective', 'Tsubasa: Reservoir Chronicles', 'Under the Red Hood', 'Undertale',
'Until Dawn (Video Game)', 'Uta no Prince-sama', 'Vampire Knight', 'Veronica Mars', 'Vikings (TV)',
'Voltron: Legendary Defender', 'Warcraft', 'Warriors', 'Watchmen', 'Winter Soldier (Comics)', 'Wonder Woman',
'X-men', 'Xena: Warrior Princess', 'Yona of the Dawn', 'Young Justice', 'Yowamushi Pedal', 'Yu Yu Hakusho',
'Yu-Gi-Oh! (series)', 'Zero Escape (Video Games)', 'xxxHoLic']
keys = [2007008,
1034737,
1486363,
108038,
879346,
2631558,
114706,
206112,
882899,
303506,
934,
131276,
587792,
69214,
131285,
18886,
911149,
56917,
721553,
1091121,
166591,
65,
727114,
957664,
230714,
13314,
9151,
236208,
7473,
352740,
448892,
2009747,
873654,
37215,
155456,
604153,
247670,
223663,
133445,
142166,
1968629,
1099950,
1346,
4594871,
10213,
86438,
37210,
582433,
578887,
3516977,
4186,
5405707,
1147379,
287734,
34408,
775667,
9892,
14088,
2870,
390,
3634928,
1633246,
47474,
199121,
662604,
925984,
5154319,
816757,
964594,
21944,
481231,
131997,
10788043,
154229,
263316,
14136,
170944,
105412,
827055,
19877,
579568,
90773,
309473,
440981,
152669,
261760,
55873,
25575,
1781878,
870188,
1109537,
157745,
274395,
24854,
24864,
14198,
14036,
13892,
14205,
14042,
8210,
10795,
10596,
103329,
13855,
24865,
287,
448770,
711325,
933850,
782522,
5493837,
172695,
2730336,
865923,
10406377,
3103,
2954180,
242462,
423753,
2632233,
26671,
3165,
215082,
884,
62494,
21914,
721159,
13154,
114591,
2818614,
5443632,
524391,
848190,
5455680,
114,
299272,
930011,
777706,
624039,
758208,
7205540,
6637939,
735779,
136512,
431982,
484757,
266,
12845,
305075,
434477,
3549,
290625,
117807,
9775,
272,
10313,
3046868,
125633,
452309,
22959,
813142,
850435,
1100985,
13184,
43537,
739806,
380372,
5466945,
1487281,
4212458,
3409358,
4945534,
614804,
491649,
796030,
15830,
244259,
939813,
1146205,
63584,
4182,
3883994,
519363,
14621,
1014,
483836,
54428,
102330,
5280,
262567,
4153523,
249183,
741433,
144106,
796047,
129342,
603432,
554058,
1348854,
7266,
96137,
414093,
49548,
215080,
1749187,
28625,
18810,
11263,
254648,
582724,
500110,
110196,
116265,
701010,
3828398,
17199,
763581,
951,
13999,
309920,
112929,
525714,
479394,
1327257,
1747,
379999,
10767,
1199183,
883659,
7048385,
482937,
2943377,
1553899,
3406514,
872785,
20555,
1124470,
46414,
1909262,
658827,
287761,
13896,
13665,
4231202,
233118,
223664,
448284,
61493,
83491,
541,
126098,
6762709,
5009,
94259,
3045,
4917,
111275,
644159,
226819,
1039,
21571,
628622,
1196,
258664,
1157470,
104798,
594995,
95038,
1357661,
129781,
101645,
535185,
2737,
2529660,
59203,
362807,
5074,
10565,
59739,
5381302,
3094364,
850338,
202578,
245573,
105692,
4089308,
1772539,
1109601,
187,
451725,
22404,
14184,
35537,
291621,
130638,
3697829,
1801,
101375,
4076489,
11778,
135886,
1197580,
21128,
431213,
27,
470458,
28057,
5324,
520763,
3535484,
3699,
44716,
4945690,
258526,
1064218,
109503,
2486805,
4585787,
1343765,
304320,
775959,
1129372,
54476,
726686,
627240,
3134381,
2850393,
719523,
919104,
1001902,
541478,
730108,
1387845,
3605093,
126504,
78546,
5882645,
5439781,
3163988,
12984,
566569,
512641,
4405,
813205,
4124108,
1673,
774727,
273,
230931,
2233012,
6906220,
450,
299357,
625992,
230438,
105286,
33061,
2129537,
203,
115633,
257095,
4122599,
219012,
217263,
9902,
1322077,
11007,
153784,
6541412,
6302834,
398188,
11974,
1582930,
781463,
10104017,
827052,
116711,
103350,
722204,
1283234,
250093,
4794,
694407,
4878889,
1149757,
941637,
1346123,
765738,
4039]
push_service = FCMNotification(api_key="<omitted>")
''' === reading/writing functions === '''
def strip_newlines(strings):
stripped = list()
for string in strings:
stripped.append(string.rstrip('\n'))
return stripped
def read_files():
last_checked = list()
last_id = list()
with open('/home/richard/FCM/last_checked.txt', 'r') as last_checked_file, open('/home/richard/FCM/last_id.txt', 'r') as last_id_file:
last_checked = strip_newlines(last_checked_file.readlines())
last_id = strip_newlines(last_id_file.readlines())
last_checked_file.close()
last_id_file.close()
return last_checked, last_id
def write_files(new_checked, new_ids):
    with open('/home/richard/FCM/last_checked.txt', 'w') as last_checked_file, open('/home/richard/FCM/last_id.txt', 'w') as last_id_file:
        for checked in new_checked:
            last_checked_file.write('%s\n' % checked)
        for _id in new_ids:
            last_id_file.write('%s\n' % _id)
''' === extracting functions === '''
def extract_ships(string):
if '<li>Relationships:' in string:
if '<li>Additional Tags:' in string:
temp = extract_helper(string, '<li>Relationships: <a', '<li>Additional Tags:')
else:
temp = extract_helper(string, '<li>Relationships: <a', '</ul>')
return repeat_extract(temp, '\">', '</a>')
else:
return "no relationships"
def extract_helper(string, startRef='', endRef=''):
start = string.find(startRef) + len(startRef)
end = string.find(endRef, start)
if startRef == '': return string[:-end]
if endRef == '': return string[start:]
return string[start:end]
def repeat_extract(string, startRef, endRef):
extracted = extract_helper(string, startRef, endRef)
temp = extract_helper(string, endRef)
while startRef in temp:
extracted += (", %s" % extract_helper(temp, startRef, endRef))
temp = extract_helper(temp, endRef)
return extracted
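# Illustrative sketch (assumed input, not from the original script): how the
# helpers above pull relationship tags out of an AO3 feed summary.
#   summary = ('<li>Relationships: <a href="/t/1">Ship A</a>, '
#              '<a href="/t/2">Ship B</a></li><li>Additional Tags: ...</li>')
#   extract_ships(summary)    # -> 'Ship A, Ship B'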
def char_helper(s):
    string = s.replace('&amp;', '&')
    string = string.replace('&#39;', '\'')
    string = string.replace('&quot;', '"')
return string
''' === feed checking function === '''
def check_feed(index):
print("in check_feed for index %d" % index)
url = 'https://archiveofourown.org/tags/%s/feed.atom' % str(keys[index])
_id = ''
if (len(last_id) > index):
_id = last_id[index] # ok even if last_id[index] is empty b/c this is ONLY about assignment
checked = ''
if (len(last_checked) > index) and (last_checked[index] != ''):
checked = last_checked[index]
try:
parsed = feedparser.parse(url, modified=checked)
except:
print("unable to parse feed: %s" % url)
return checked, _id
else:
try:
parsed = feedparser.parse(url)
except:
print("unable to parse feed: %s" % url)
return checked, _id
print("status %d" % parsed.status)
if parsed.status == 304:
# should be impossible for there not to be a last_checked or last_id in this case
# b/c that could only happen if the feed is empty...
return checked, _id
last_checked_pos = len(parsed.entries)-1
for i in range(0, len(parsed.entries)): # excludes end index
if parsed.entries[i].id == _id:
last_checked_pos = i
print("last_checked_pos: %d" % last_checked_pos)
# b/c sometimes the feed doesn't give 304 even when there isn't a new fic
if last_checked_pos == 0:
return checked, _id
for n in range(0, last_checked_pos):
title = char_helper(parsed.entries[n].title)
ships = char_helper(extract_ships(parsed.entries[n].summary))
message = "New in %s: %s - %s" % (tags[index], title, ships)
message = message.encode('ascii', 'ignore')
print(message)
payload = { 'title': title, 'link': parsed.entries[n].link }
push_service.notify_topic_subscribers(topic_name=str(keys[index]), message_body=message, data_message=payload)
try:
checked = parsed.modified
except:
print("no last-modified header?")
pass
return checked, parsed.entries[0].id
''' === do the thing === '''
new_checked = list()
new_ids = list()
if (len(tags) == len(keys)): # a safeguard
last_checked, last_id = read_files()
for index in range(0, len(tags)):
modified, newest_id = check_feed(index=index)
new_checked.append(modified)
new_ids.append(newest_id)
print('\n\n\n') # so that the output log is easier to read
    write_files(new_checked, new_ids)
else:
print("length of tags is not equal to length of keys")
|
michelleran/FicFeed
|
server/fcm.py
|
Python
|
mit
| 15,355
|
[
"Galaxy"
] |
defed5f5cb2bb385edab88e946ba18c7a4e3c077875a851b609c5c50d0e6e406
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import qcdb
def true_false_decorator(compare_fn, *args, **kwargs):
"""Turns `compare_fn` that returns `None` on success and raises
`qcdb.TestComparisonError` on failure into a function that returns
True/False, suitable for assertions in pytest.
"""
def true_false_wrapper(*args, **kwargs):
try:
compare_fn(*args, **kwargs)
except qcdb.TestComparisonError as err:
return False
else:
return True
return true_false_wrapper
compare_values = true_false_decorator(qcdb.compare_values)
compare_strings = true_false_decorator(qcdb.compare_strings)
compare_integers = true_false_decorator(qcdb.compare_integers)
compare_matrices = true_false_decorator(qcdb.compare_matrices)
compare_arrays = true_false_decorator(qcdb.compare_arrays)
compare_dicts = true_false_decorator(qcdb.compare_dicts)
compare_molrecs = true_false_decorator(qcdb.compare_molrecs)
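# Illustrative usage sketch (added, not part of the original module), assuming
# the qcdb comparators follow the (expected, computed, digits, label)
# convention:
#   assert compare_values(0.0, 1.0e-8, 6, "energy matches reference")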
|
psi4/psi4
|
psi4/driver/qcdb/pytest/utils.py
|
Python
|
lgpl-3.0
| 1,848
|
[
"Psi4"
] |
dce4abfd6e68cd93eee5a2131028c148e37d98df2a5dddec581ce42a4013fde5
|
#
# Copyright (C) 2001 greg Landrum
#
# unit testing code for the composite model COM server
from __future__ import print_function
from rdkit import RDConfig
import unittest
from rdkit.ML.Composite import Composite
from win32com.client import Dispatch
from Numeric import *
class TestCase(unittest.TestCase):
def setUp(self):
print('\n%s: '%self.shortDescription(),end='')
def testConnect(self):
" connecting to COM server "
ok = 1
try:
c = Dispatch('RD.Composite')
except Exception:
ok = 0
assert ok and c is not None, 'connection to COM server failed'
def testLoad(self):
" loading a composite "
c = Dispatch('RD.Composite')
ok = 1
try:
c.LoadComposite(RDConfig.RDCodeDir+'/ml/composite/test_data/composite_base.pkl')
except Exception:
ok = 0
assert ok, 'LoadComposite failed'
def testNames(self):
" testing descriptor names "
c = Dispatch('RD.Composite')
c.LoadComposite(RDConfig.RDCodeDir+'/ml/composite/test_data/composite_base.pkl')
names = c.GetDescriptorNames()
expectedNames = ('composition', 'max_atomic', 'has3d', 'has4d', 'has5d',
'elconc', 'atvol', 'isferro')
assert names==expectedNames, 'GetDescriptorNames failed'
def testInputOrder(self):
" testing input order "
c = Dispatch('RD.Composite')
c.LoadComposite(RDConfig.RDCodeDir+'/ml/composite/test_data/composite_base.pkl')
names = c.GetDescriptorNames()
ok = 1
try:
c.SetInputOrder(names)
except Exception:
ok = 0
assert ok,'SetInputOrder failed'
def testClassify(self):
" testing classification "
argV = ['CrPt3','fcc','AuCu3',58.09549962,36,4,0.228898,2.219,1,3.67481803894, 1, 0, 1, 0.619669341609, 14.523874905]
nameV = ['composition','Structure','Structure_Type','Volume',
'Electrons_Per_Unit','Atoms_Per_Unit','Hardness','DOS_Ef',
'isferro','max_atomic', 'has3d', 'has4d', 'has5d',
'elconc', 'atvol']
c = Dispatch('RD.Composite')
c.LoadComposite(RDConfig.RDCodeDir+'/ml/composite/test_data/composite_base.pkl')
c.SetInputOrder(nameV)
res = c.ClassifyExample(argV)
expected = [1,1.0]
assert res[0] == expected[0],'bad prediction'
assert res[1] == expected[1],'bad confidence'
def TestSuite():
suite = unittest.TestSuite()
suite.addTest(TestCase('testConnect'))
suite.addTest(TestCase('testLoad'))
suite.addTest(TestCase('testNames'))
suite.addTest(TestCase('testInputOrder'))
suite.addTest(TestCase('testClassify'))
return suite
if __name__ == '__main__':
suite = TestSuite()
unittest.TextTestRunner().run(suite)
|
adalke/rdkit
|
rdkit/ML/Composite/UnitTestCOMServer.py
|
Python
|
bsd-3-clause
| 2,667
|
[
"RDKit"
] |
8873175491027e9682c91e30a3a24d549ea04b4f33b0c5d772f11dff7c767296
|
from typing import Any, Tuple, Type
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.bijectors import Exp
from tensorflow_probability.python.distributions import Uniform
import gpflow
from gpflow.base import PriorOn
from gpflow.config import set_default_float
from gpflow.utilities import to_default_float
np.random.seed(1)
class Datum:
X = 10 * np.random.randn(5, 1)
Y = 10 * np.random.randn(5, 1)
lengthscale = 3.3
def test_gpr_objective_equivalence() -> None:
"""
In Maximum Likelihood Estimation (MLE), i.e. when there are no priors on
the parameters, the objective should not depend on any transforms on the
parameters.
We use GPR as a simple model that has an objective.
"""
data = (Datum.X, Datum.Y)
l_value = Datum.lengthscale
l_variable = tf.Variable(l_value, dtype=gpflow.default_float(), trainable=True)
m1 = gpflow.models.GPR(data, kernel=gpflow.kernels.SquaredExponential(lengthscales=l_value))
m2 = gpflow.models.GPR(data, kernel=gpflow.kernels.SquaredExponential())
m2.kernel.lengthscales = gpflow.Parameter(l_variable, transform=None)
assert np.allclose(
m1.kernel.lengthscales.numpy(), m2.kernel.lengthscales.numpy()
) # consistency check
assert np.allclose(
m1.log_marginal_likelihood().numpy(), m2.log_marginal_likelihood().numpy()
), "MLE objective should not depend on Parameter transform"
def test_log_prior_with_no_prior() -> None:
"""
A parameter without any prior should have zero log-prior,
even if it has a transform to constrain it.
"""
param = gpflow.Parameter(5.3, transform=gpflow.utilities.positive())
assert param.log_prior_density().numpy() == 0.0
def test_log_prior_for_uniform_prior() -> None:
"""
If we assign a Uniform prior to a parameter, we should not expect the value of the prior density
to change with the parameter value, even if it has a transform associated with it.
"""
uniform_prior = Uniform(low=np.float64(0), high=np.float64(100))
param = gpflow.Parameter(1.0, transform=gpflow.utilities.positive(), prior=uniform_prior)
low_value = param.log_prior_density().numpy()
param.assign(10.0)
high_value = param.log_prior_density().numpy()
assert np.isclose(low_value, high_value)
def test_log_prior_on_unconstrained() -> None:
"""
A parameter with an Exp transform, and a uniform prior on its unconstrained, should have a
prior in the constrained space that scales as 1/value.
"""
initial_value = 1.0
scale_factor = 10.0
uniform_prior = Uniform(low=np.float64(0), high=np.float64(100))
param = gpflow.Parameter(
initial_value,
transform=Exp(),
prior=uniform_prior,
prior_on=PriorOn.UNCONSTRAINED,
)
low_value = param.log_prior_density().numpy()
param.assign(scale_factor * initial_value)
high_value = param.log_prior_density().numpy()
assert np.isclose(low_value, high_value + np.log(scale_factor))
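# Brief sketch of the change-of-variables reasoning behind the test above
# (added commentary, not from the original test): if the unconstrained value
# x = log(y) has a locally uniform density, the constrained density is
#   p(y) = p_x(log y) * |d(log y)/dy| = const / y,
# so log p(y0) - log p(s * y0) = log(s), with s = scale_factor = 10 here.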
class DummyModel(gpflow.models.BayesianModel):
value = 3.3
log_scale = 0.4
def __init__(self, with_transform: bool) -> None:
super().__init__()
prior = tfp.distributions.Normal(to_default_float(1.0), to_default_float(1.0))
scale = np.exp(self.log_scale)
if with_transform:
transform = tfp.bijectors.Shift(to_default_float(0.0))(
tfp.bijectors.Scale(to_default_float(scale))
)
else:
transform = None
self.theta = gpflow.Parameter(self.value, prior=prior, transform=transform)
def maximum_log_likelihood_objective(self, *args: Any, **kwargs: Any) -> tf.Tensor:
assert not args
assert not kwargs
return (self.theta + 5) ** 2
def test_map_invariance_to_transform() -> None:
m1 = DummyModel(with_transform=True)
m2 = DummyModel(with_transform=False)
assert np.allclose(
m1.log_posterior_density().numpy(), m2.log_posterior_density().numpy()
), "log posterior density should not be affected by a transform"
def get_gpmc_model_params() -> Tuple[Any, ...]:
kernel = gpflow.kernels.Matern32()
likelihood = gpflow.likelihoods.Gaussian()
data = [np.random.randn(5, 1), np.random.randn(5, 1)]
return data, kernel, likelihood
@pytest.mark.parametrize(
"model_class, args",
[
(gpflow.models.GPMC, get_gpmc_model_params()),
# (gpflow.models.SGPMC, get_SGPMC_model_params()) # Fails due to inducing_variable=None bug
],
)
def test_v_prior_dtypes(model_class: Type[Any], args: Tuple[Any, ...]) -> None:
with gpflow.config.as_context():
set_default_float(np.float32)
m = model_class(*args)
assert m.V.prior.dtype == np.float32
set_default_float(np.float64)
m = model_class(*args)
assert m.V.prior.dtype == np.float64
|
GPflow/GPflow
|
tests/gpflow/test_base_prior.py
|
Python
|
apache-2.0
| 4,957
|
[
"Gaussian"
] |
f3f33e13c1d7663e0fef6d1ceb4d14451aecd96ad812493a605f446036a3df3a
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
***************************************
espressopp.standard_system.LennardJones
***************************************
.. function:: espressopp.standard_system.LennardJones(num_particles, box, rc, skin, dt, epsilon, sigma, shift, temperature, xyzfilename, xyzrfilename)
:param num_particles:
:param box: (default: (000))
:param rc: (default: 1.12246)
:param skin: (default: 0.3)
:param dt: (default: 0.005)
:param epsilon: (default: 1.0)
:param sigma: (default: 1.0)
:param shift: (default: 'auto')
:param temperature: (default: None)
:param xyzfilename: (default: None)
:param xyzrfilename: (default: None)
:type num_particles:
:type box:
:type rc: real
:type skin: real
:type dt: real
:type epsilon: real
:type sigma: real
:type shift:
:type temperature:
:type xyzfilename:
:type xyzrfilename:
return a random Lennard-Jones system and integrator;
if temperature is not None, a Langevin thermostat set to that temperature is added (gamma is 1.0)
"""
import espressopp
import mpi4py.MPI as MPI
import sys
def LennardJones(num_particles, box=(0,0,0), rc=1.12246, skin=0.3, dt=0.005, epsilon=1.0, sigma=1.0, shift='auto', temperature=None, xyzfilename=None, xyzrfilename=None):
if xyzfilename and xyzrfilename:
print "ERROR: only one of xyzfilename (only xyz data) or xyzrfilename (additional particle radius data) can be provided."
sys.exit(1)
if xyzrfilename:
pidf, typef, xposf, yposf, zposf, xvelf, yvelf, zvelf, Lxf, Lyf, Lzf, radiusf = espressopp.tools.readxyzr(xyzrfilename)
box = (Lxf, Lyf, Lzf)
num_particles = len(pidf)
elif xyzfilename:
pidf, typef, xposf, yposf, zposf, xvelf, yvelf, zvelf, Lxf, Lyf, Lzf = espressopp.tools.readxyz(xyzfilename)
box = (Lxf, Lyf, Lzf)
num_particles = len(pidf)
else:
        if box[0]<=0 or box[1]<=0 or box[2]<=0:
            print "WARNING: no valid box size specified, box size set to (100,100,100) !"
            box = (100, 100, 100)
system = espressopp.System()
system.rng = espressopp.esutil.RNG()
system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
system.skin = skin
nodeGrid = espressopp.tools.decomp.nodeGrid(MPI.COMM_WORLD.size)
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, rc, skin)
system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)
interaction = espressopp.interaction.VerletListLennardJones(espressopp.VerletList(system, cutoff=rc))
interaction.setPotential(type1=0, type2=0, potential=espressopp.interaction.LennardJones(epsilon, sigma, rc, shift))
system.addInteraction(interaction)
integrator = espressopp.integrator.VelocityVerlet(system)
integrator.dt = dt
if (temperature != None):
thermostat = espressopp.integrator.LangevinThermostat(system)
thermostat.gamma = 1.0
thermostat.temperature = temperature
integrator.addExtension(thermostat)
mass = 1.0
if xyzrfilename:
new_particles = []
props = ['id', 'type', 'mass', 'pos', 'v', 'radius']
for idx in xrange(num_particles):
part = [ pidf[idx], typef[idx], mass,
espressopp.Real3D(xposf[idx],yposf[idx],zposf[idx]),
espressopp.Real3D(xvelf[idx],yvelf[idx],zvelf[idx]),
radiusf[idx] ]
new_particles.append(part)
if idx % 1000 == 0:
system.storage.addParticles(new_particles, *props)
system.storage.decompose()
new_particles = []
system.storage.addParticles(new_particles, *props)
system.storage.decompose()
elif xyzfilename:
new_particles = []
props = ['id', 'type', 'mass', 'pos', 'v']
for idx in xrange(num_particles):
part = [ pidf[idx], typef[idx], mass,
espressopp.Real3D(xposf[idx],yposf[idx],zposf[idx]),
espressopp.Real3D(xvelf[idx],yvelf[idx],zvelf[idx])]
new_particles.append(part)
if idx % 1000 == 0:
system.storage.addParticles(new_particles, *props)
system.storage.decompose()
new_particles = []
system.storage.addParticles(new_particles, *props)
system.storage.decompose()
else:
props = ['id', 'type', 'mass', 'pos', 'v']
new_particles = []
pid = 1
while pid <= num_particles:
type = 0
mass = 1.0
pos = system.bc.getRandomPos()
vel = espressopp.Real3D(0.0, 0.0, 0.0)
part = [pid, type, mass, pos, vel]
new_particles.append(part)
if pid % 1000 == 0:
system.storage.addParticles(new_particles, *props)
system.storage.decompose()
new_particles = []
pid += 1
system.storage.addParticles(new_particles, *props)
system.storage.decompose()
return system, integrator
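# Illustrative sketch (assumed usage, not part of this module): build a small
# thermostatted system and integrate a few steps. Particle count, box size and
# temperature are example values.
#   system, integrator = LennardJones(1000, box=(10, 10, 10), temperature=1.0)
#   integrator.run(100)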
|
fedepad/espressopp
|
src/standard_system/LennardJones.py
|
Python
|
gpl-3.0
| 5,605
|
[
"ESPResSo"
] |
0f74aab96fb743f615d7de2e99a76d9a9ac824c30272455d966ee2fec8d5d513
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Aug 26, 2012"
import unittest
from pymatgen.util.string import formula_double_format, latexify, \
latexify_spacegroup, transformation_to_string, htmlify, unicodeify
class FuncTest(unittest.TestCase):
def test_latexify(self):
self.assertEqual(latexify("Li3Fe2(PO4)3"),
"Li$_{3}$Fe$_{2}$(PO$_{4}$)$_{3}$")
self.assertEqual(latexify("Li0.2Na0.8Cl"),
"Li$_{0.2}$Na$_{0.8}$Cl")
def test_latexify_spacegroup(self):
self.assertEqual(latexify_spacegroup("Fd-3m"), "Fd$\\overline{3}$m")
self.assertEqual(latexify_spacegroup("P2_1/c"), "P2$_{1}$/c")
def test_htmlify(self):
self.assertEqual(htmlify("Li3Fe2(PO4)3"),
"Li<sub>3</sub>Fe<sub>2</sub>(PO<sub>4</sub>)<sub>3</sub>")
self.assertEqual(htmlify("Li0.2Na0.8Cl"),
"Li<sub>0.2</sub>Na<sub>0.8</sub>Cl")
def test_unicodeify(self):
self.assertEqual(unicodeify("Li3Fe2(PO4)3"),
"Li₃Fe₂(PO₄)₃")
self.assertRaises(ValueError, unicodeify,
"Li0.2Na0.8Cl")
def test_formula_double_format(self):
self.assertEqual(formula_double_format(1.00), "")
self.assertEqual(formula_double_format(2.00), "2")
self.assertEqual(formula_double_format(2.10), "2.1")
self.assertEqual(formula_double_format(2.10000000002), "2.1")
def test_transformation_to_string(self):
m = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
t = [0, 0, 0]
s = 'x,y,z'
ms = 'mx,my,mz'
abc = 'a,b,c'
self.assertEqual(s, transformation_to_string(m, t))
self.assertEqual(ms, transformation_to_string(m, t, c='m'))
self.assertEqual(abc, transformation_to_string(m, t, components=('a', 'b', 'c')))
m = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
t = [11, 12, 13]
s = 'x+2y+3z+11,4x+5y+6z+12,7x+8y+9z+13'
self.assertEqual(s, transformation_to_string(m, t))
m = [[-1 / 2, -2 / 3, -3 / 4], [-5 / 6, -6 / 7, -7 / 8], [-8 / 9, -9 / 10, -10 / 11]]
t = [-11 / 12, -12 / 13, -13 / 14]
s = '-x/2-2y/3-3z/4-11/12,-5x/6-6y/7-7z/8-12/13,-8x/9-9y/10-10z/11-13/14'
self.assertEqual(s, transformation_to_string(m, t))
if __name__ == "__main__":
unittest.main()
|
nisse3000/pymatgen
|
pymatgen/util/tests/test_string_utils.py
|
Python
|
mit
| 2,686
|
[
"pymatgen"
] |
17a68d2ad1650f21a1ac5f4422bdf1e58c99149bea5af0056cd7e207e47ce895
|
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
from td.peakdetect import peakdetect
NM2EV = 1240.6691
class Spectrum:
def __init__(self, name, excited_states, gs_energy=None, nm_range=None):
self.name = name
self.excited_states = excited_states
wavelengths = [es.l for es in self.es]
if nm_range:
self.nm_range = np.array(nm_range)
else:
self.nm_range = np.array((int(min(wavelengths))-25,
int(max(wavelengths))+100))
"""
NM2EV = 1240.6691
self.eV_range = NM2EV / self.nm_range
"""
self.gs_energy = gs_energy
@property
def es(self):
return self.excited_states
def gauss_uv_band(self, x, osc, x_i):
return (1.3062974e8 * osc / (1e7 / 3099.6) *
np.exp(-((1. / x - 1. / x_i) / (1. / 3099.6))**2))
@property
def nm(self):
return self.broaden(*self.nm_range)
@property
def eV(self):
in_nm, osc_nm = self.nm
in_eV = in_nm
in_eV[:,0] = NM2EV / in_nm[:,0]
osc_in_eV = osc_nm
osc_in_eV[:,0] = NM2EV / osc_in_eV[:,0]
return in_eV, osc_in_eV
def broaden(self, from_nm, to_nm):
# According to:
# http://www.gaussian.com/g_whitepap/tn_uvvisplot.htm
# wave lengths and oscillator strengths
# E(eV) = 1240.6691 eV * nm / l(nm)
NM2EV = 1240.6691
osc_nm = np.array([(es.l, es.f) for es in self.excited_states])
x = np.arange(from_nm, to_nm, 0.5)
spectrum = list()
for l in x:
spectrum.append(np.sum([self.gauss_uv_band(l, osc, l_i)
for l_i, osc in osc_nm]))
spectrum = np.array(spectrum)
spectrum_norm = spectrum / spectrum.max()
in_nm = np.stack((x, spectrum, spectrum_norm), axis=-1)
return in_nm, osc_nm
"""
if e2f:
spectrum /= 40490.05867167
if not nnorm:
spectrum = spectrum / spectrum.max()
"""
"""
Used for printing also the gauss bands of the
n-highest transitions
# Sort by f
fli_sorted = sorted(fli, key=lambda tpl: -tpl[0])
highest_fs = fli_sorted[:15]
print
x = np.arange(200, 600, 0.5)
highest_bands = list()
for f, l_i in highest_fs:
band = lambda l: gauss_uv_band(l, f, l_i)
calced_band = band(x)
calced_band = band(x) / calced_band.max() * f * 3
highest_bands.append(calced_band)
#for xi, ai in zip(x, calced_band):
# print xi, ai
#print
highest_bands_headers = tuple(['"l_i = {} nm, f = {}"'.format(
l_i, f) for f, l_i in highest_fs])
headers = ('"l in nm"', "Sum") + highest_bands_headers
wargel = zip(x, spectrum, *highest_bands)
print tabulate(wargel, headers=headers, tablefmt="plain")
"""
"""
def plot_eV(self, title="", with_peaks=False):
in_eV, osc_eV = self.eV
unit = "eV"
self.plot(in_eV, osc_eV, unit, title=title, reverse_x=True,
with_peaks=with_peaks)
def plot_nm(self, title="", with_peaks=False):
in_nm, osc_nm = self.nm
unit = "nm"
self.plot(in_nm, osc_nm, unit, title=title, with_peaks=with_peaks)
def plot(self, conv_spectrum, osc, unit, title="", reverse_x=False,
with_peaks=None):
fig, ax1 = plt.subplots()
fig.suptitle(title)
xlabel = "E / {}".format(unit)
ax1.plot(conv_spectrum[:,0], conv_spectrum[:,1])
if with_peaks:
peak_inds = self.get_peak_inds(conv_spectrum)
peaks = conv_spectrum[peak_inds]
for i, peak in enumerate(peaks):
energy, epsilon = peak[:2]
print("{:2d}:\tλ={:5.0f} {}, ε={:8.0f}".format(i, energy,
unit, epsilon))
xytext = peak[:2] * (1, 1.05)
ax1.annotate("{}".format(i), xy=peak[:2], xytext=xytext,
horizontalalignment="center")
ax1.plot(peaks[:,0], peaks[:,1], "ro")
if reverse_x:
from_x, to_x = ax1.get_xlim()
ax1.set_xlim(to_x, from_x)
ax1.set_xlabel(xlabel)
ax1.set_ylabel("ε / mol cm⁻¹ l⁻¹")
ax2 = ax1.twinx()
ax2.stem(osc[:,0], osc[:,1], markerfmt=" ", basefmt=" ")
from_y2, to_y2 = ax2.get_ylim()
to_y2 = max(to_y2, 0.5)
ax2.set_ylim(from_y2, to_y2)
ax2.set_ylabel("f")
plt.show()
"""
def get_peak_inds(self, conv_spectrum, lookahead=25):
conv_spectrum_ys = conv_spectrum[:,1]
max_peaks, min_peaks = peakdetect(conv_spectrum_ys, lookahead=lookahead)
return np.array(max_peaks)[:,0].astype(int)
def write_nm(self):
in_nm, _ = self.nm
nm_name = f"{self.name}_nm.dat"
np.savetxt(nm_name, in_nm)
def __str__(self):
return f"Spectrum({len(self.excited_states)} states)"
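# Illustrative sketch (assumed usage, not part of this module): any object with
# .l (wavelength in nm) and .f (oscillator strength) attributes can act as an
# excited state; the namedtuple below is a stand-in, not the project's own class.
#   from collections import namedtuple
#   ExcitedState = namedtuple("ExcitedState", "l f")
#   states = [ExcitedState(l=320.0, f=0.15), ExcitedState(l=410.0, f=0.05)]
#   spec = Spectrum("example", states)
#   spec.write_nm()    # writes example_nm.dat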
|
eljost/td
|
td/Spectrum.py
|
Python
|
gpl-3.0
| 5,178
|
[
"Gaussian"
] |
6454753b6d592f34f92eeeb2c4989424a687fae92cafc945689b6a2fdaa1ca67
|
from mock import *
from gp_unittest import *
from gppylib.operations.package import IsVersionCompatible
class IsVersionCompatibleTestCase(GpTestCase):
def setUp(self):
self.gppkg_mock_values = \
{'main_rpm': 'plperl-1.1-2.x86_64.rpm',
'postupdate': [],
'pkgname': 'plperl',
'description': 'some description.',
'postinstall': [{'Master': "some reason to restart database"}],
'postuninstall': [],
'abspath': 'plperl-ossv5.12.4_pv1.3_gpdb4.3-rhel5-x86_64.gppkg',
'preinstall': [],
'version': 'ossv5.12.4_pv1.2_gpdb4.3',
'pkg': 'plperl-ossv5.12.4_pv1.3_gpdb4.3-rhel5-x86_64.gppkg',
'dependencies': [],
'file_list': ['deps',
'gppkg_spec.yml',
'plperl-1.1-2.x86_64.rpm'],
'gpdbversion': Mock(),
'preuninstall': [],
'os': 'rhel5',
'architecture': 'x86_64'}
self.apply_patches([
patch('gppylib.operations.package.logger',
return_value=Mock(spec=['log', 'info', 'debug', 'error'])),
])
self.mock_logger = self.mock_objs[0]
def _is_requires_orca_logged(self, gppkg_name, log_messages):
return ('Greenplum Database requires orca version of '
'%s' % gppkg_name in log_messages)
@patch('gppylib.operations.package.GpVersion',
return_value=Mock(version=[4, 3, 10, 0]))
def test__execute_reports_incompatability(self, mock_gpversion):
logger = self.mock_logger
gppkg_mock_values = self.gppkg_mock_values
gppkg = Mock(**gppkg_mock_values)
subject = IsVersionCompatible(gppkg)
subject.execute()
gppkg_name = 'plperl-ossv5.12.4_pv1.3_gpdb4.3-rhel5-x86_64.gppkg'
# call object is a tuple of method name and arg list tuple
log_messages = [args[1][0] for args in logger.method_calls]
self.assertTrue(self._is_requires_orca_logged(gppkg_name,
log_messages))
@patch('gppylib.operations.package.GpVersion',
return_value=Mock(version=[4, 3, 3, 0]))
def test__execute_reports_compatability_with_older_version(self,
mock_gpversion):
logger = self.mock_logger
gppkg_mock_values = self.gppkg_mock_values
gppkg = Mock(**gppkg_mock_values)
subject = IsVersionCompatible(gppkg)
subject.execute()
gppkg_name = 'plperl-ossv5.12.4_pv1.3_gpdb4.3-rhel5-x86_64.gppkg'
# call object is a tuple of method name and arg list tuple
log_messages = [args[1][0] for args in logger.method_calls]
self.assertFalse(self._is_requires_orca_logged(gppkg_name,
log_messages))
def test__execute_compatible(self):
logger = self.mock_logger
gppkg_name = 'plperl-ossv5.12.4_pv1.3_gpdb4.3orca-rhel5-x86_64.gppkg'
modified_gppkg_mock_values = \
{'abspath': gppkg_name,
'version': 'ossv5.12.4_pv1.2_gpdb4.3orca',
'pkg': gppkg_name}
gppkg_mock_values = self.gppkg_mock_values
gppkg_mock_values.update(**modified_gppkg_mock_values)
gppkg = Mock(**gppkg_mock_values)
subject = IsVersionCompatible(gppkg)
subject.execute()
log_messages = [args[1][0] for args in logger.method_calls]
self.assertFalse(self._is_requires_orca_logged(gppkg_name,
log_messages))
if __name__ == '__main__':
run_tests()
|
lintzc/gpdb
|
gpMgmt/bin/gppylib/test/unit/test_unit_package.py
|
Python
|
apache-2.0
| 3,737
|
[
"ORCA"
] |
3f2e9f3c48a73d9a1817b0f5bbd1832e17cf800c0ccd56b4d66346afe4322017
|
# -*- coding: utf-8 -*-
import datetime
import json
import mock
import pytest
import requests_mock
from constance.test import override_config
from django.conf import settings
from django.contrib.sites.models import Site
from django.core import mail
from django.template.loader import render_to_string
from django.utils.six.moves import html_parser
from django.utils.six.moves.urllib.parse import parse_qs, urlencode, urlparse
from pyquery import PyQuery as pq
from waffle.testutils import override_flag, override_switch
from kuma.core.templatetags.jinja_helpers import add_utm
from kuma.core.tests import (assert_no_cache_header,
assert_shared_cache_header, get_user)
from kuma.core.urlresolvers import reverse
from kuma.core.utils import to_html
from kuma.spam.constants import (
SPAM_CHECKS_FLAG, SPAM_SUBMISSIONS_FLAG, VERIFY_URL)
from kuma.users.tests import UserTestCase
from . import (create_document_tree, document, make_translation,
new_document_data, normalize_html, revision, WikiTestCase)
from .conftest import ks_toolbox
from ..content import get_seo_description
from ..events import EditDocumentEvent, EditDocumentInTreeEvent
from ..forms import MIDAIR_COLLISION
from ..models import Document, RevisionIP
from ..templatetags.jinja_helpers import get_compare_url
class ViewTests(UserTestCase, WikiTestCase):
fixtures = UserTestCase.fixtures + ['wiki/documents.json']
def test_json_view(self):
"""bug 875349"""
expected_tags = sorted(['foo', 'bar', 'baz'])
expected_review_tags = sorted(['tech', 'editorial'])
doc = Document.objects.get(pk=1)
doc.tags.set(*expected_tags)
doc.current_revision.review_tags.set(*expected_review_tags)
url = reverse('wiki.json')
resp = self.client.get(url, {'title': 'an article title'})
assert resp.status_code == 200
assert_shared_cache_header(resp)
data = json.loads(resp.content)
assert data['slug'] == 'article-title'
result_tags = sorted([str(x) for x in data['tags']])
assert result_tags == expected_tags
result_review_tags = sorted([str(x) for x in data['review_tags']])
assert result_review_tags == expected_review_tags
url = reverse('wiki.json_slug', args=('article-title',))
with override_switch('application_ACAO', True):
resp = self.client.get(url)
assert resp.status_code == 200
assert_shared_cache_header(resp)
assert resp['Access-Control-Allow-Origin'] == '*'
data = json.loads(resp.content)
assert data['title'] == 'an article title'
assert 'translations' in data
result_tags = sorted([str(x) for x in data['tags']])
assert result_tags == expected_tags
result_review_tags = sorted([str(x) for x in data['review_tags']])
assert result_review_tags == expected_review_tags
def test_toc_view(self):
slug = 'toc_test_doc'
html = '<h2>Head 2</h2><h3>Head 3</h3>'
doc = document(title='blah', slug=slug, html=html, save=True,
locale=settings.WIKI_DEFAULT_LANGUAGE)
revision(document=doc, content=html, is_approved=True, save=True)
url = reverse('wiki.toc', args=[slug])
with override_switch('application_ACAO', True):
resp = self.client.get(url)
assert resp.status_code == 200
assert_shared_cache_header(resp)
assert resp['Access-Control-Allow-Origin'] == '*'
assert normalize_html(resp.content) == normalize_html(
'<ol><li><a href="#Head_2" rel="internal">Head 2</a></ol>'
)
@override_switch('application_ACAO', True)
def test_children_view(self):
"""bug 875349"""
test_content = '<p>Test <a href="http://example.com">Summary</a></p>'
def _make_doc(title, slug, parent=None, is_redir=False):
doc = document(title=title,
slug=slug,
save=True,
is_redirect=is_redir)
if is_redir:
content = 'REDIRECT <a class="redirect" href="/en-US/blah">Blah</a>'
else:
content = test_content
revision(document=doc,
content=test_content,
summary=get_seo_description(
test_content,
strip_markup=False),
save=True)
doc.html = content
if parent:
doc.parent_topic = parent
doc.save()
return doc
root_doc = _make_doc('Root', 'Root')
child_doc_1 = _make_doc('Child 1', 'Root/Child_1', root_doc)
_make_doc('Grandchild 1', 'Root/Child_1/Grandchild_1', child_doc_1)
grandchild_doc_2 = _make_doc('Grandchild 2',
'Root/Child_1/Grandchild_2',
child_doc_1)
_make_doc('Great Grandchild 1',
'Root/Child_1/Grandchild_2/Great_Grand_Child_1',
grandchild_doc_2)
_make_doc('Child 2', 'Root/Child_2', root_doc)
_make_doc('Child 3', 'Root/Child_3', root_doc, True)
for expand in (True, False):
url = reverse('wiki.children', args=['Root'])
if expand:
url = '%s?expand' % url
resp = self.client.get(url)
assert resp.status_code == 200
assert_shared_cache_header(resp)
assert resp['Access-Control-Allow-Origin'] == '*'
json_obj = json.loads(resp.content)
# Basic structure creation testing
assert json_obj['slug'] == 'Root'
if not expand:
assert 'summary' not in json_obj
else:
assert (json_obj['summary'] ==
'Test <a href="http://example.com">Summary</a>')
assert 'tags' in json_obj
assert 'review_tags' in json_obj
assert len(json_obj['subpages']) == 2
assert len(json_obj['subpages'][0]['subpages']) == 2
assert (json_obj['subpages'][0]['subpages'][1]['title'] ==
'Grandchild 2')
# Depth parameter testing
def _depth_test(depth, aught):
url = (reverse('wiki.children', args=['Root']) +
'?depth=' + str(depth))
resp = self.client.get(url)
assert resp.status_code == 200
assert_shared_cache_header(resp)
assert resp['Access-Control-Allow-Origin'] == '*'
json_obj = json.loads(resp.content)
assert (len(json_obj['subpages'][0]['subpages'][1]['subpages']) ==
aught)
_depth_test(2, 0)
_depth_test(3, 1)
_depth_test(6, 1)
# Sorting test
sort_root_doc = _make_doc('Sort Root', 'Sort_Root')
_make_doc('B Child', 'Sort_Root/B_Child', sort_root_doc)
_make_doc('A Child', 'Sort_Root/A_Child', sort_root_doc)
resp = self.client.get(reverse('wiki.children', args=['Sort_Root']))
assert resp.status_code == 200
assert_shared_cache_header(resp)
assert resp['Access-Control-Allow-Origin'] == '*'
json_obj = json.loads(resp.content)
assert json_obj['subpages'][0]['title'] == 'A Child'
# Test if we are serving an error json if document does not exist
no_doc_url = reverse('wiki.children', args=['nonexistentDocument'])
resp = self.client.get(no_doc_url)
assert resp.status_code == 200
assert_shared_cache_header(resp)
assert resp['Access-Control-Allow-Origin'] == '*'
assert (json.loads(resp.content) ==
{'error': 'Document does not exist.'})
# Test error json if document is a redirect
_make_doc('Old Name', 'Old Name', is_redir=True)
redirect_doc_url = reverse('wiki.children', args=['Old Name'])
resp = self.client.get(redirect_doc_url)
assert resp.status_code == 200
assert_shared_cache_header(resp)
assert resp['Access-Control-Allow-Origin'] == '*'
assert json.loads(resp.content) == {'error': 'Document has moved.'}
def test_summary_view(self):
"""The ?summary option should restrict document view to summary"""
rev = revision(is_approved=True, save=True, content="""
<p>Foo bar <a href="http://example.com">baz</a></p>
<p>Quux xyzzy</p>
""")
resp = self.client.get('%s?raw&summary' %
rev.document.get_absolute_url())
assert resp.status_code == 200
assert_shared_cache_header(resp)
assert resp.content == b'Foo bar <a href="http://example.com">baz</a>'
@mock.patch('waffle.flag_is_active', return_value=True)
@mock.patch('kuma.wiki.jobs.DocumentContributorsJob.get', return_value=[
{'id': 1, 'username': 'ringo', 'email': 'ringo@apple.co.uk'},
{'id': 2, 'username': 'john', 'email': 'lennon@apple.co.uk'},
])
def test_footer_contributors(self, get_contributors, flag_is_active):
get_contributors.return_value = [
{'id': 1, 'username': 'ringo', 'email': 'ringo@apple.co.uk'},
{'id': 2, 'username': 'john', 'email': 'lennon@apple.co.uk'},
]
flag_is_active.return_value = True
rev = revision(is_approved=True, save=True, content='some content')
resp = self.client.get(rev.document.get_absolute_url())
assert resp.status_code == 200
assert_shared_cache_header(resp)
page = pq(resp.content)
contributors = (page.find(":contains('Contributors to this page')")
.parents('.contributors-sub'))
# just checking if the contributor link is rendered
assert len(contributors.find('a')) == 2
def test_revision_view_bleached_content(self):
"""Bug 821988: Revision content should be cleaned with bleach"""
rev = revision(is_approved=True, save=True, content="""
<a href="#" onload=alert(3)>Hahaha</a>
<svg><svg onload=alert(3);>
""")
resp = self.client.get(rev.get_absolute_url())
page = pq(resp.content)
ct = to_html(page.find('#wikiArticle'))
assert '<svg>' not in ct
assert '<a href="#">Hahaha</a>' in ct
def test_article_revision_content(self):
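# The revision detail page should show both the revision source and the
# rendered content, with the rendered content expanded and the source
# collapsed by default.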
doc = document(title='Testing Article', slug='Article', save=True)
r = revision(save=True, document=doc, is_approved=True)
resp = self.client.get(r.get_absolute_url())
page = pq(resp.content)
assert b'Revision Source' in resp.content
assert b'Revision Content' in resp.content
assert 'open' == page.find('#wikiArticle').parent().attr('open')
assert page.find('#doc-source').parent().attr('open') is None
class ReadOnlyTests(UserTestCase, WikiTestCase):
"""Tests readonly scenarios"""
fixtures = UserTestCase.fixtures + ['wiki/documents.json']
def setUp(self):
super(ReadOnlyTests, self).setUp()
rev = revision(is_approved=True, save=True)
self.edit_url = reverse('wiki.edit', args=[rev.document.slug])
def test_everyone(self):
""" kumaediting: everyone, kumabanned: none """
self.kumaediting_flag.everyone = True
self.kumaediting_flag.save()
self.client.login(username='testuser', password='testpass')
resp = self.client.get(self.edit_url)
assert resp.status_code == 200
assert resp['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(resp)
def test_superusers_only(self):
""" kumaediting: superusers, kumabanned: none """
self.kumaediting_flag.everyone = None
self.kumaediting_flag.superusers = True
self.kumaediting_flag.save()
self.client.login(username='testuser', password='testpass')
resp = self.client.get(self.edit_url)
assert resp.status_code == 403
assert b'The wiki is in read-only mode.' in resp.content
assert_no_cache_header(resp)
self.client.logout()
self.client.login(username='admin', password='testpass')
resp = self.client.get(self.edit_url)
assert resp.status_code == 200
assert resp['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(resp)
class KumascriptIntegrationTests(UserTestCase, WikiTestCase):
"""
Tests for usage of the kumascript service.
Note that these tests really just check whether or not the service was
used, and are not integration tests meant to exercise the real service.
"""
def setUp(self):
super(KumascriptIntegrationTests, self).setUp()
self.rev = revision(is_approved=True, save=True, content="TEST CONTENT")
self.doc = self.rev.document
self.doc.tags.set('foo', 'bar', 'baz')
self.url = self.doc.get_absolute_url()
# TODO: upgrade mock to 0.8.0 so we can do this.
# self.mock_kumascript_get = (
# mock.patch('kuma.wiki.kumascript.get'))
# self.mock_kumascript_get.return_value = self.doc.html
def tearDown(self):
super(KumascriptIntegrationTests, self).tearDown()
# TODO: upgrade mock to 0.8.0 so we can do this.
# self.mock_kumascript_get.stop()
@override_config(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_basic_view(self, mock_kumascript_get):
"""When kumascript timeout is non-zero, the service should be used"""
mock_kumascript_get.return_value = (self.doc.html, None)
self.client.get(self.url, follow=False)
assert mock_kumascript_get.called, "kumascript should have been used"
@override_config(KUMASCRIPT_TIMEOUT=0.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_disabled(self, mock_kumascript_get):
"""When disabled, the kumascript service should not be used"""
mock_kumascript_get.return_value = (self.doc.html, None)
self.client.get(self.url, follow=False)
assert not mock_kumascript_get.called, "kumascript should not have been used"
@override_config(KUMASCRIPT_TIMEOUT=0.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_disabled_rendering(self, mock_kumascript_get):
"""When disabled, the kumascript service should not be used
in rendering"""
mock_kumascript_get.return_value = (self.doc.html, None)
self.doc.schedule_rendering('max-age=0')
assert not mock_kumascript_get.called, "kumascript should not have been used"
@override_config(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_nomacros(self, mock_kumascript_get):
mock_kumascript_get.return_value = (self.doc.html, None)
self.client.get('%s?nomacros' % self.url, follow=False)
assert not mock_kumascript_get.called, "kumascript should not have been used"
@override_config(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_raw(self, mock_kumascript_get):
mock_kumascript_get.return_value = (self.doc.html, None)
self.client.get('%s?raw' % self.url, follow=False)
assert not mock_kumascript_get.called, "kumascript should not have been used"
@override_config(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_raw_macros(self, mock_kumascript_get):
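# Requesting raw source with macros enabled should still go through kumascript.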
mock_kumascript_get.return_value = (self.doc.html, None)
self.client.get('%s?raw&macros' % self.url, follow=False)
assert mock_kumascript_get.called, "kumascript should have been used"
@override_config(KUMASCRIPT_TIMEOUT=1.0, KUMASCRIPT_MAX_AGE=1234)
@requests_mock.mock()
def test_ua_max_age_zero(self, mock_requests):
"""
Authenticated users can request a zero max-age for kumascript
"""
mock_requests.get(
requests_mock.ANY,
[
dict(content='HELLO WORLD'),
ks_toolbox().macros_response,
]
)
self.client.get(self.url, follow=False, HTTP_CACHE_CONTROL='max-age=0')
assert ('max-age=1234' ==
mock_requests.request_history[0].headers['Cache-Control'])
self.client.login(username='admin', password='testpass')
self.client.get(self.url, follow=False,
HTTP_CACHE_CONTROL='max-age=0')
assert ('max-age=0' ==
mock_requests.request_history[1].headers['Cache-Control'])
@override_config(KUMASCRIPT_TIMEOUT=1.0, KUMASCRIPT_MAX_AGE=1234)
@requests_mock.mock()
def test_ua_no_cache(self, mock_requests):
"""
Authenticated users can request no-cache for kumascript
"""
mock_requests.get(
requests_mock.ANY,
[
dict(content='HELLO WORLD'),
ks_toolbox().macros_response,
]
)
self.client.get(self.url, follow=False, HTTP_CACHE_CONTROL='no-cache')
assert ('max-age=1234' ==
mock_requests.request_history[0].headers['Cache-Control'])
self.client.login(username='admin', password='testpass')
self.client.get(self.url, follow=False, HTTP_CACHE_CONTROL='no-cache')
assert ('no-cache' ==
mock_requests.request_history[1].headers['Cache-Control'])
@override_config(KUMASCRIPT_TIMEOUT=1.0, KUMASCRIPT_MAX_AGE=1234)
@requests_mock.mock()
def test_conditional_get(self, mock_requests):
"""
Ensure conditional GET in requests to kumascript work as expected
"""
expected_etag = "8675309JENNY"
expected_modified = "Wed, 14 Mar 2012 22:29:17 GMT"
expected_content = "HELLO THERE, WORLD"
mock_requests.get(
requests_mock.ANY, [
{
'content': expected_content,
'headers': {
'etag': expected_etag,
'last-modified': expected_modified,
'age': '456',
}
},
{
'content': expected_content,
'headers': {
'etag': expected_etag,
'last-modified': expected_modified,
'age': '456',
},
},
{
'content': expected_content,
'status_code': 304,
'headers': {
'etag': expected_etag,
'last-modified': expected_modified,
'age': '123',
},
}
]
)
# First request to let the view cache etag / last-modified
self.client.get(self.url)
# Clear rendered_html to force another request.
self.doc.rendered_html = ''
self.doc.save()
# Second request to verify the view sends them back
response = self.client.get(self.url)
assert (expected_etag ==
mock_requests.request_history[1].headers['If-None-Match'])
assert (expected_modified ==
mock_requests.request_history[1].headers['If-Modified-Since'])
# Third request to verify content was cached and served on a 304
response = self.client.get(self.url)
assert expected_content in response.content
@override_config(KUMASCRIPT_TIMEOUT=1.0, KUMASCRIPT_MAX_AGE=600)
@requests_mock.mock()
def test_preview_nonascii(self, mock_requests):
"""POSTing non-ascii to kumascript should encode to utf8"""
content = u'Français'
mock_requests.post(requests_mock.ANY, content=content.encode('utf8'))
self.client.login(username='admin', password='testpass')
resp = self.client.post(reverse('wiki.preview'), {'content': content})
assert_no_cache_header(resp)
# No UnicodeDecodeError
mock_requests.request_history[0].body.decode('utf8')
@override_config(KUMASCRIPT_TIMEOUT=1.0, KUMASCRIPT_MAX_AGE=600)
@mock.patch('kuma.wiki.kumascript.post')
def test_dont_render_previews_for_deferred_docs(self, mock_post):
"""
When a user previews a document with deferred rendering,
we want to force the preview to skip the kumascript POST,
so that big previews can't use up too many kumascript connections.
bug 1197971
"""
self.doc.defer_rendering = True
self.doc.save()
mock_post.side_effect = Exception("Should not be called")
self.client.login(username='admin', password='testpass')
resp = self.client.post(reverse('wiki.preview'),
{'doc_id': self.doc.id})
assert_no_cache_header(resp)
class DocumentSEOTests(UserTestCase, WikiTestCase):
"""Tests for the document seo logic"""
# NOTE(djf): In the past, we included the title of the "SEO Root"
# document in the title of every page, and this test tested for it.
# That feature has since been removed, and this test now verifies that
# the SEO root title does *not* appear in document titles.
def test_no_seo_title(self):
self.client.login(username='admin', password='testpass')
# Utility to make a quick doc
def _make_doc(title, aught_titles, slug):
doc = document(save=True, slug=slug, title=title,
locale=settings.WIKI_DEFAULT_LANGUAGE)
revision(save=True, document=doc)
response = self.client.get(reverse('wiki.document', args=[slug]))
page = pq(response.content)
assert page.find('head > title').text() in aught_titles
# Test nested document titles
_make_doc('One', ['One | MDN'], 'one')
_make_doc('Two', ['Two | MDN'], 'one/two')
_make_doc('Three', ['Three | MDN'], 'one/two/three')
_make_doc(u'Special Φ Char',
[u'Special \u03a6 Char | MDN',
u'Special \xce\xa6 Char | MDN'],
'one/two/special_char')
# Additional tests for /Web/* changes
_make_doc('Firefox OS', ['Firefox OS | MDN'], 'firefox_os')
_make_doc('Email App', ['Email App | MDN'],
'firefox_os/email_app')
_make_doc('Web', ['Web | MDN'], 'Web')
_make_doc('HTML', ['HTML | MDN'], 'Web/html')
_make_doc('Fieldset', ['Fieldset | MDN'], 'Web/html/fieldset')
_make_doc('Legend', ['Legend | MDN'],
'Web/html/fieldset/legend')
def test_seo_script(self):
self.client.login(username='admin', password='testpass')
def make_page_and_compare_seo(slug, content, aught_preview):
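# Create a document with the given content, then check that its meta
# description matches the expected SEO summary.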
# Create the doc
data = new_document_data()
data.update({'title': 'blah', 'slug': slug, 'content': content})
response = self.client.post(reverse('wiki.create'), data)
assert 302 == response.status_code
# Connect to newly created page
response = self.client.get(reverse('wiki.document', args=[slug]))
page = pq(response.content)
meta_content = page.find('meta[name=description]').attr('content')
assert str(meta_content) == str(aught_preview)
# Test pages - very basic
good = 'This is the content which should be chosen, man.'
make_page_and_compare_seo('one', '<p>' + good + '</p>', good)
# No content, no seo
make_page_and_compare_seo('two', 'blahblahblahblah<br />', None)
# No summary, no seo
make_page_and_compare_seo('three', '<div><p>You cant see me</p></div>',
None)
# Warning paragraph ignored
make_page_and_compare_seo('four',
'<div class="geckoVersion">'
'<p>No no no</p></div><p>yes yes yes</p>',
'yes yes yes')
# Warning paragraph ignored, first one chosen if multiple matches
make_page_and_compare_seo('five',
'<div class="geckoVersion"><p>No no no</p>'
'</div><p>yes yes yes</p>'
'<p>ignore ignore ignore</p>',
'yes yes yes')
# Don't take legacy crumbs
make_page_and_compare_seo('six', u'<p>« CSS</p><p>I am me!</p>',
'I am me!')
# Take the seoSummary class'd element
make_page_and_compare_seo('seven',
u'<p>I could be taken</p>'
'<p class="seoSummary">I should be though</p>',
'I should be though')
# Two summaries append
make_page_and_compare_seo('eight',
u'<p>I could be taken</p>'
'<p class="seoSummary">a</p>'
'<p class="seoSummary">b</p>',
'a b')
# No brackets
make_page_and_compare_seo('nine',
u'<p>I <em>am</em> awesome.'
' <a href="blah">A link</a> is also <cool></p>',
u'I am awesome. A link is also cool')
class DocumentEditingTests(UserTestCase, WikiTestCase):
"""Tests for the document-editing view"""
def test_editor_safety_filter(self):
"""Safety filter should be applied before rendering editor
bug 821986
"""
self.client.login(username='admin', password='testpass')
r = revision(save=True, content="""
<svg><circle onload=confirm(3)>
""")
args = [r.document.slug]
urls = (
reverse('wiki.edit', args=args),
'%s?tolocale=%s' % (reverse('wiki.translate', args=args), 'fr')
)
for url in urls:
page = pq(self.client.get(url).content)
editor_src = page.find('#id_content').text()
assert 'onload' not in editor_src
def test_create_on_404(self):
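# Visiting a missing document as an editor should redirect to the create-page
# flow; raw/include/nocreate requests should 404 instead.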
self.client.login(username='admin', password='testpass')
# Create the parent page.
rev = revision(is_approved=True, save=True)
# Establish attribs of child page.
local_slug = 'Some_New_Title'
slug = '%s/%s' % (rev.document.slug, local_slug)
url = reverse('wiki.document', args=[slug])
# Ensure redirect to create new page on attempt to visit non-existent
# child page.
resp = self.client.get(url)
assert resp.status_code == 302
assert_no_cache_header(resp)
assert 'public' not in resp['Cache-Control']
assert 's-maxage' not in resp['Cache-Control']
assert 'docs/new' in resp['Location']
assert ('slug=%s' % local_slug) in resp['Location']
# Ensure real 404 for visit to non-existent page with params common to
# kumascript and raw content API.
for p_name in ('raw', 'include', 'nocreate'):
sub_url = '%s?%s=1' % (url, p_name)
resp = self.client.get(sub_url)
assert resp.status_code == 404
# Ensure root level documents work, not just children
response = self.client.get(reverse('wiki.document', args=['noExist']))
assert response.status_code == 302
assert 'public' not in response['Cache-Control']
assert 'no-cache' in response['Cache-Control']
assert 'docs/new' in response['Location']
response = self.client.get(reverse('wiki.document',
args=['Template:NoExist']))
assert response.status_code == 302
assert 'public' not in response['Cache-Control']
assert 'no-cache' in response['Cache-Control']
assert 'docs/new' in response['Location']
def test_creating_child_of_redirect(self):
"""
When trying to create a child of a redirect, the new child's parent
should be the document the redirect points to.
"""
self.client.login(username='admin', password='testpass')
rev = revision(is_approved=True, save=True)
doc = rev.document
doc_first_slug = doc.slug
# Move the document to new slug
doc._move_tree(new_slug="moved_doc")
# Try to create a child with the old slug
child_full_slug = doc_first_slug + "/" + "children_document"
url = reverse('wiki.document', args=[child_full_slug])
response = self.client.get(url)
assert response.status_code == 302
assert 'public' not in response['Cache-Control']
assert 'no-cache' in response['Cache-Control']
assert 'docs/new' in response['Location']
# The parent id in the query string should still be the original
# document's id, because moving leaves behind a redirect document at
# the old slug that points to the moved document.
parameters = parse_qs(urlparse(response['Location']).query)
assert parameters['parent'][0] == str(doc.id)
def test_child_of_redirect_to_non_document(self):
"""Return a 404 when accessing the child of a non-document redirect."""
self.client.login(username='admin', password='testpass')
content = '<p>REDIRECT <a class="redirect" href="/">MDN</a></p>'
rev = revision(content=content, is_approved=True, save=True)
doc = rev.document
assert doc.is_redirect
assert doc.get_redirect_url() == '/'
assert doc.get_redirect_document() is None
doc_url = doc.get_absolute_url()
response = self.client.get(doc_url)
assert response.status_code == 301
assert response['Location'] == '/'
subpage_url = doc_url + '/SubPage'
response = self.client.get(subpage_url)
assert response.status_code == 404
@pytest.mark.retitle
def test_retitling_solo_doc(self):
""" Editing just title of non-parent doc:
* Changes title
* Doesn't cause errors
* Doesn't create redirect
"""
# Not testing slug changes separately; the model tests cover those plus
# slug+title changes. If title changes work in the view, the rest
# should also.
self.client.login(username='admin', password='testpass')
new_title = 'Some New Title'
rev = revision(is_approved=True, save=True)
doc = rev.document
old_title = doc.title
data = new_document_data()
data.update({'title': new_title,
'form-type': 'rev'})
data['slug'] = ''
url = reverse('wiki.edit', args=[doc.slug])
response = self.client.post(url, data)
assert response.status_code == 302
assert response['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(response)
assert (Document.objects.get(slug=doc.slug, locale=doc.locale).title ==
new_title)
assert not Document.objects.filter(title=old_title).exists()
@pytest.mark.retitle
def test_retitling_parent_doc(self):
""" Editing just title of parent doc:
* Changes title
* Doesn't cause errors
* Doesn't create redirect
"""
# Not testing slug changes separately; the model tests cover those plus
# slug+title changes. If title changes work in the view, the rest
# should also.
self.client.login(username='admin', password='testpass')
# create parent doc & rev along with child doc & rev
d = document(title='parent', save=True)
revision(document=d, content='parent', save=True)
d2 = document(title='child', parent_topic=d, save=True)
revision(document=d2, content='child', save=True)
old_title = d.title
new_title = 'Some New Title'
data = new_document_data()
data.update({'title': new_title,
'form-type': 'rev'})
data['slug'] = ''
url = reverse('wiki.edit', args=[d.slug])
response = self.client.post(url, data)
assert response.status_code == 302
assert response['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(response)
assert (Document.objects.get(slug=d.slug, locale=d.locale).title ==
new_title)
assert not Document.objects.filter(title=old_title).exists()
def test_slug_change_ignored_for_iframe(self):
"""When the title of an article is edited in an iframe, the change is
ignored."""
self.client.login(username='admin', password='testpass')
new_slug = 'some_new_slug'
rev = revision(is_approved=True, save=True)
old_slug = rev.document.slug
data = new_document_data()
data.update({'title': rev.document.title,
'slug': new_slug,
'form': 'rev'})
response = self.client.post('%s?iframe=1' %
reverse('wiki.edit',
args=[rev.document.slug]),
data)
assert response.status_code == 200
assert response['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(response)
assert (Document.objects.get(slug=rev.document.slug,
locale=rev.document.locale).slug ==
old_slug)
assert "REDIRECT" not in Document.objects.get(slug=old_slug).html
@pytest.mark.clobber
def test_slug_collision_errors(self):
"""When an attempt is made to retitle an article and another with that
title already exists, there should be form errors"""
self.client.login(username='admin', password='testpass')
exist_slug = "existing-doc"
# Create a new doc.
data = new_document_data()
data.update({"slug": exist_slug})
resp = self.client.post(reverse('wiki.create'), data)
assert resp.status_code == 302
# Create another new doc.
data = new_document_data()
data.update({"slug": 'some-new-title'})
resp = self.client.post(reverse('wiki.create'), data)
assert resp.status_code == 302
# Now, post an update with duplicate slug
data.update({
'form-type': 'rev',
'slug': exist_slug
})
resp = self.client.post(reverse('wiki.edit', args=['some-new-title']),
data)
assert resp.status_code == 200
assert resp['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(resp)
p = pq(resp.content)
assert p.find('.errorlist').length > 0
assert p.find('.errorlist a[href="#id_slug"]').length > 0
@pytest.mark.clobber
def test_redirect_can_be_clobbered(self):
"""When an attempt is made to retitle an article, and another article
with that title exists but is a redirect, there should be no errors and
the redirect should be replaced."""
self.client.login(username='admin', password='testpass')
exist_title = "Existing doc"
exist_slug = "existing-doc"
changed_title = 'Changed title'
changed_slug = 'changed-title'
# Create a new doc.
data = new_document_data()
data.update({"title": exist_title, "slug": exist_slug})
resp = self.client.post(reverse('wiki.create'), data)
assert resp.status_code == 302
# Change title and slug
data.update({'form-type': 'rev',
'title': changed_title,
'slug': changed_slug})
resp = self.client.post(reverse('wiki.edit', args=[exist_slug]),
data)
assert resp.status_code == 302
assert resp['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(resp)
# Change title and slug back to originals, clobbering the redirect
data.update({'form-type': 'rev',
'title': exist_title,
'slug': exist_slug})
resp = self.client.post(reverse('wiki.edit', args=[changed_slug]),
data)
assert resp.status_code == 302
def test_slug_revamp(self):
self.client.login(username='admin', password='testpass')
# Test that slugs with the same "specific" slug but at different levels
# in the hierarchy are validated properly upon submission.
# Create base doc
parent_doc = document(title='Length',
slug='length',
is_localizable=True,
locale=settings.WIKI_DEFAULT_LANGUAGE)
parent_doc.save()
r = revision(document=parent_doc)
r.save()
# Create child, try to use same slug, should work
child_data = new_document_data()
child_data['title'] = 'Child Length'
child_data['slug'] = 'length'
child_data['content'] = 'This is the content'
child_data['is_localizable'] = True
child_url = (reverse('wiki.create') +
'?parent=' +
str(parent_doc.id))
response = self.client.post(child_url, child_data)
assert response.status_code == 302
# grab new revision ID
child = Document.objects.get(locale='en-US', slug='length/length')
rev_id = child.current_revision.id
self.assertRedirects(response,
reverse('wiki.document', args=['length/length']))
# Editing newly created child "length/length" doesn't cause errors
child_data['form-type'] = 'rev'
child_data['slug'] = ''
edit_url = reverse('wiki.edit', args=['length/length'])
response = self.client.post(edit_url, child_data)
assert response.status_code == 302
assert response['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(response)
url = reverse('wiki.document', args=['length/length'])
params = {'rev_saved': rev_id}
url = '%s?%s' % (url, urlencode(params))
self.assertRedirects(response, url)
# Creating a new translation of parent and child
# named "length" and "length/length" respectively
# doesn't cause errors
child_data['form-type'] = 'both'
child_data['slug'] = 'length'
translate_url = reverse('wiki.document', args=[child_data['slug']])
response = self.client.post(translate_url + '$translate?tolocale=es',
child_data)
assert 302 == response.status_code
url = reverse('wiki.document', args=[child_data['slug']], locale='es')
params = {'rev_saved': ''}
url = '%s?%s' % (url, urlencode(params))
self.assertRedirects(response, url)
translate_url = reverse('wiki.document', args=['length/length'])
response = self.client.post(translate_url + '$translate?tolocale=es',
child_data)
assert 302 == response.status_code
slug = 'length/' + child_data['slug']
url = reverse('wiki.document', args=[slug], locale='es')
params = {'rev_saved': ''}
url = '%s?%s' % (url, urlencode(params))
self.assertRedirects(response, url)
def test_translate_keeps_topical_parent(self):
self.client.login(username='admin', password='testpass')
en_doc, de_doc = make_translation()
en_child_doc = document(parent_topic=en_doc, slug='en-child',
save=True)
en_child_rev = revision(document=en_child_doc, save=True)
de_child_doc = document(parent_topic=de_doc, locale='de',
slug='de-child', parent=en_child_doc,
save=True)
revision(document=de_child_doc, save=True)
post_data = {}
post_data['slug'] = de_child_doc.slug
post_data['title'] = 'New title'
post_data['form'] = 'both'
post_data['content'] = 'New translation'
post_data['tolocale'] = 'de'
post_data['toc_depth'] = 0
post_data['based_on'] = en_child_rev.id
post_data['parent_id'] = en_child_doc.id
translate_url = reverse('wiki.edit',
args=[de_child_doc.slug],
locale='de')
response = self.client.post(translate_url, post_data)
assert response.status_code == 302
assert response['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(response)
de_child_doc = Document.objects.get(locale='de', slug='de-child')
assert en_child_doc == de_child_doc.parent
assert de_doc == de_child_doc.parent_topic
assert 'New translation' == de_child_doc.current_revision.content
def test_translate_keeps_toc_depth(self):
self.client.login(username='admin', password='testpass')
locale = settings.WIKI_DEFAULT_LANGUAGE
original_slug = 'eng-doc'
foreign_locale = 'es'
foreign_slug = 'es-doc'
en_doc = document(title='Eng Doc', slug=original_slug,
is_localizable=True, locale=locale)
en_doc.save()
r = revision(document=en_doc, toc_depth=1)
r.save()
post_data = new_document_data()
post_data['title'] = 'ES Doc'
post_data['slug'] = foreign_slug
post_data['content'] = 'This is the content'
post_data['is_localizable'] = True
post_data['form'] = 'both'
post_data['toc_depth'] = r.toc_depth
translate_url = reverse('wiki.document', args=[original_slug])
translate_url += '$translate?tolocale=' + foreign_locale
response = self.client.post(translate_url, post_data)
doc_url = reverse('wiki.document', args=[foreign_slug], locale=foreign_locale)
params = {'rev_saved': ''}
doc_url = '%s?%s' % (doc_url, urlencode(params))
self.assertRedirects(response, doc_url)
es_d = Document.objects.get(locale=foreign_locale, slug=foreign_slug)
assert r.toc_depth == es_d.current_revision.toc_depth
def test_translate_rebuilds_source_json(self):
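# Creating a translation should regenerate the English document's JSON so the
# new locale appears in its 'translations' list.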
self.client.login(username='admin', password='testpass')
# Create an English original and a Spanish translation.
en_slug = 'en-doc'
es_locale = 'es'
es_slug = 'es-doc'
en_doc = document(title='EN Doc',
slug=en_slug,
is_localizable=True)
en_doc.save()
en_doc.render()
en_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=en_slug)
json.loads(en_doc.json)
r = revision(document=en_doc)
r.save()
translation_data = new_document_data()
translation_data['title'] = 'ES Doc'
translation_data['slug'] = es_slug
translation_data['content'] = 'This is the content'
translation_data['is_localizable'] = False
translation_data['form'] = 'both'
translate_url = reverse('wiki.document', args=[en_slug])
translate_url += '$translate?tolocale=' + es_locale
response = self.client.post(translate_url, translation_data)
# Sanity check to make sure the translation succeeded.
doc_url = reverse('wiki.document', args=[es_slug], locale=es_locale)
params = {'rev_saved': ''}
doc_url = '%s?%s' % (doc_url, urlencode(params))
self.assertRedirects(response, doc_url)
es_doc = Document.objects.get(locale=es_locale,
slug=es_slug)
es_doc.render()
new_en_json = json.loads(Document.objects.get(pk=en_doc.pk).json)
assert 'translations' in new_en_json
assert (translation_data['title'] in
[t['title'] for t in new_en_json['translations']])
es_translation_json = [t for t in new_en_json['translations'] if
t['title'] == translation_data['title']][0]
assert (es_translation_json['last_edit'] ==
es_doc.current_revision.created.isoformat())
def test_slug_translate(self):
"""Editing a translated doc keeps the correct slug"""
self.client.login(username='admin', password='testpass')
# Settings
original_slug = 'eng-doc'
child_slug = 'child-eng-doc'
foreign_locale = 'es'
foreign_slug = 'es-doc'
foreign_child_slug = 'child-es-doc'
# Create the one-level English Doc
en_doc = document(title='Eng Doc',
slug=original_slug,
is_localizable=True)
en_doc.save()
r = revision(document=en_doc)
r.save()
# Translate to ES
parent_data = new_document_data()
parent_data['title'] = 'ES Doc'
parent_data['slug'] = foreign_slug
parent_data['content'] = 'This is the content'
parent_data['is_localizable'] = True
parent_data['form'] = 'both'
translate_url = reverse('wiki.document', args=[original_slug])
translate_url += '$translate?tolocale=' + foreign_locale
response = self.client.post(translate_url, parent_data)
doc_url = reverse('wiki.document', args=[foreign_slug], locale=foreign_locale)
params = {'rev_saved': ''}
doc_url = '%s?%s' % (doc_url, urlencode(params))
self.assertRedirects(response, doc_url)
# Go to edit the translation, ensure the slug is correct
response = self.client.get(reverse('wiki.edit',
args=[foreign_slug],
locale=foreign_locale))
page = pq(response.content)
assert page.find('input[name=slug]')[0].value == foreign_slug
# Create an English child now
en_doc = document(title='Child Eng Doc',
slug=original_slug + '/' + child_slug,
is_localizable=True,
locale=settings.WIKI_DEFAULT_LANGUAGE,
parent_topic=en_doc)
en_doc.save()
r = revision(document=en_doc)
r.save()
# Translate to ES
child_data = new_document_data()
child_data['title'] = 'ES Child Doc'
child_data['slug'] = foreign_child_slug
child_data['content'] = 'This is the content'
child_data['is_localizable'] = True
child_data['form'] = 'both'
translate_url = reverse('wiki.document',
args=[original_slug + '/' + child_slug])
translate_url += '$translate?tolocale=' + foreign_locale
response = self.client.post(translate_url, child_data)
slug = foreign_slug + '/' + child_data['slug']
doc_url = reverse('wiki.document', args=[slug], locale=foreign_locale)
params = {'rev_saved': ''}
doc_url = '%s?%s' % (doc_url, urlencode(params))
self.assertRedirects(response, doc_url)
def test_restore_translation_source(self):
"""Edit a localized article without an English parent allows user to
set translation parent."""
# Create english doc
self.client.login(username='admin', password='testpass')
data = new_document_data()
self.client.post(reverse('wiki.create'), data)
en_d = Document.objects.get(locale=data['locale'], slug=data['slug'])
# Create french doc
data.update({'locale': 'fr',
'title': 'A Tést Articlé',
'content': "C'ést bon."})
self.client.post(reverse('wiki.create', locale='fr'), data)
fr_d = Document.objects.get(locale=data['locale'], slug=data['slug'])
# Check edit doc page for choose parent box
url = reverse('wiki.edit', args=[fr_d.slug], locale='fr')
response = self.client.get(url)
assert response.status_code == 200
assert response['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(response)
assert pq(response.content)('li.metadata-choose-parent')
# Set the parent
data.update({'form-type': 'rev', 'parent_id': en_d.id})
resp = self.client.post(url, data)
assert resp.status_code == 302
assert resp['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(resp)
assert 'fr/docs/a-test-article' in resp['Location']
# Check the languages drop-down
resp = self.client.get(resp['Location'])
translations = pq(resp.content)('ul#translations li')
assert 'English (US)' in translations.text()
def test_translation_source(self):
"""Allow users to change "translation source" settings"""
self.client.login(username='admin', password='testpass')
data = new_document_data()
self.client.post(reverse('wiki.create'), data)
parent = Document.objects.get(locale=data['locale'], slug=data['slug'])
data.update({'title': 'Another Test Article',
'content': "Yahoooo!",
'parent_id': parent.id})
self.client.post(reverse('wiki.create'), data)
child = Document.objects.get(locale=data['locale'], slug=data['slug'])
url = reverse('wiki.edit', args=[child.slug])
response = self.client.get(url)
assert response.status_code == 200
assert response['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(response)
content = pq(response.content)
assert content('li.metadata-choose-parent')
assert str(parent.id) in to_html(content)
@pytest.mark.tags
def test_tags_while_document_update(self):
self.client.login(username='admin', password='testpass')
ts1 = ('JavaScript', 'AJAX', 'DOM')
ts2 = ('XML', 'JSON')
# Create a revision with some tags
rev = revision(save=True, tags=','.join(ts1))
doc = rev.document
# Update the document with some other tags
data = new_document_data()
data.update({'form-type': 'rev', 'tags': ', '.join(ts2)})
response = self.client.post(
reverse('wiki.edit', args=[doc.slug]), data)
assert response.status_code == 302
assert response['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(response)
# Check that only the most recently added tags are associated with the document
doc_tags = doc.tags.all().values_list('name', flat=True)
assert sorted(doc_tags) == sorted(ts2)
@pytest.mark.tags
def test_tags_showing_correctly_after_doc_update(self):
"""After any update to the document, the new tags should show correctly"""
self.client.login(username='admin', password='testpass')
ts1 = ('JavaScript', 'AJAX', 'DOM')
ts2 = ('XML', 'JSON')
rev = revision(save=True, tags=','.join(ts1))
doc = rev.document
# Update the document with some other tags
data = new_document_data()
del data['slug']
data.update({'form-type': 'rev', 'tags': ', '.join(ts2)})
response = self.client.post(
reverse('wiki.edit', args=[doc.slug]), data)
assert response.status_code == 302
assert response['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(response)
# Check document is showing the new tags
response = self.client.get(doc.get_absolute_url(), follow=True)
assert response.status_code == 200
page = pq(response.content)
response_tags = page.find('.tags li a').contents()
assert response_tags == sorted(ts2)
@pytest.mark.review_tags
@mock.patch.object(Site.objects, 'get_current')
def test_review_tags(self, get_current):
"""Review tags can be managed on document revisions"""
get_current.return_value.domain = 'su.mo.com'
self.client.login(username='admin', password='testpass')
# Create a new doc with one review tag
data = new_document_data()
data.update({'review_tags': ['technical']})
response = self.client.post(reverse('wiki.create'), data)
assert response.status_code == 302
# Ensure there's now a doc with that expected tag in its newest
# revision
doc = Document.objects.get(slug="a-test-article")
rev = doc.revisions.order_by('-id').all()[0]
review_tags = [x.name for x in rev.review_tags.all()]
assert review_tags == ['technical']
# Now, post an update with two tags
data.update({
'form-type': 'rev',
'review_tags': ['editorial', 'technical'],
})
response = self.client.post(reverse('wiki.edit', args=[doc.slug]),
data)
assert response.status_code == 302
assert_no_cache_header(response)
# Ensure the doc's newest revision has both tags.
doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug="a-test-article")
rev = doc.revisions.order_by('-id').all()[0]
review_tags = [x.name for x in rev.review_tags.all()]
review_tags.sort()
assert review_tags == ['editorial', 'technical']
# Now, ensure that the review form appears for the review tags.
response = self.client.get(reverse('wiki.document', args=[doc.slug]),
data)
assert response.status_code == 200
# Since the client is logged-in, the response should not be cached.
assert_no_cache_header(response)
page = pq(response.content)
assert page.find('.page-meta.reviews').length == 1
assert page.find('#id_request_technical').length == 1
assert page.find('#id_request_editorial').length == 1
doc_entry = '<entry><title>{}</title>'.format(doc.title)
doc_selector = "ul.document-list li a:contains('{}')".format(doc.title)
# Ensure the page appears on the listing pages
response = self.client.get(reverse('wiki.list_review'))
assert response.status_code == 200
assert_shared_cache_header(response)
assert pq(response.content).find(doc_selector).length == 1
response = self.client.get(reverse('wiki.list_review_tag',
args=('technical',)))
assert response.status_code == 200
assert_shared_cache_header(response)
assert pq(response.content).find(doc_selector).length == 1
response = self.client.get(reverse('wiki.list_review_tag',
args=('editorial',)))
assert response.status_code == 200
assert_shared_cache_header(response)
assert pq(response.content).find(doc_selector).length == 1
# Also, ensure that the page appears in the proper feeds
# HACK: Too lazy to parse the XML. Lazy lazy.
response = self.client.get(reverse('wiki.feeds.list_review',
args=('atom',)))
assert doc_entry.encode('utf-8') in response.content
response = self.client.get(reverse('wiki.feeds.list_review_tag',
args=('atom', 'technical', )))
assert doc_entry.encode('utf-8') in response.content
response = self.client.get(reverse('wiki.feeds.list_review_tag',
args=('atom', 'editorial', )))
assert doc_entry.encode('utf-8') in response.content
# Post an edit that removes the technical review tag.
data.update({
'form-type': 'rev',
'review_tags': ['editorial', ]
})
response = self.client.post(reverse('wiki.edit',
args=[doc.slug]), data)
# Ensure only one of the tags' warning boxes appears, now.
response = self.client.get(reverse('wiki.document',
args=[doc.slug]), data)
page = pq(response.content)
assert page.find('.page-meta.reviews').length == 1
assert page.find('#id_request_technical').length == 0
assert page.find('#id_request_editorial').length == 1
# Ensure the page appears on the listing pages
response = self.client.get(reverse('wiki.list_review'))
assert response.status_code == 200
assert_shared_cache_header(response)
assert pq(response.content).find(doc_selector).length == 1
response = self.client.get(reverse('wiki.list_review_tag',
args=('technical',)))
assert response.status_code == 200
assert_shared_cache_header(response)
assert pq(response.content).find(doc_selector).length == 0
response = self.client.get(reverse('wiki.list_review_tag',
args=('editorial',)))
assert response.status_code == 200
assert_shared_cache_header(response)
assert pq(response.content).find(doc_selector).length == 1
# Also, ensure that the page appears in the proper feeds
# HACK: Too lazy to parse the XML. Lazy lazy.
response = self.client.get(reverse('wiki.feeds.list_review',
args=('atom',)))
assert doc_entry in response.content
response = self.client.get(reverse('wiki.feeds.list_review_tag',
args=('atom', 'technical', )))
assert doc_entry not in response.content
response = self.client.get(reverse('wiki.feeds.list_review_tag',
args=('atom', 'editorial', )))
assert doc_entry in response.content
@pytest.mark.review_tags
def test_quick_review(self):
"""Test the quick-review button."""
self.client.login(username='admin', password='testpass')
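# Each case posts the quick-review form with the given params and checks which
# review tags remain and which "review completed" messages are recorded.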
test_data = [
{
'params': {'request_technical': 1},
'expected_tags': ['technical'],
'name': 'technical',
'message_contains': [
'Editorial review completed.',
]
},
{
'params': {'request_editorial': 1},
'expected_tags': ['editorial'],
'name': 'editorial',
'message_contains': [
'Technical review completed.',
]
},
{
'params': {},
'expected_tags': [],
'name': 'editorial-technical',
'message_contains': [
'Technical review completed.',
'Editorial review completed.',
]
}
]
for data_dict in test_data:
slug = 'test-quick-review-%s' % data_dict['name']
data = new_document_data()
data.update({'review_tags': ['editorial', 'technical'], 'slug': slug})
resp = self.client.post(reverse('wiki.create'), data)
doc = Document.objects.get(slug=slug)
rev = doc.revisions.order_by('-id').all()[0]
review_url = reverse('wiki.quick_review',
args=[doc.slug])
params = dict(data_dict['params'], revision_id=rev.id)
resp = self.client.post(review_url, params)
assert resp.status_code == 302
assert_no_cache_header(resp)
doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=slug)
rev = doc.revisions.order_by('-id').all()[0]
review_tags = [x.name for x in rev.review_tags.all()]
review_tags.sort()
for expected_str in data_dict['message_contains']:
assert expected_str in rev.summary
assert expected_str in rev.comment
assert review_tags == data_dict['expected_tags']
@pytest.mark.midair
def test_edit_midair_collisions(self, is_ajax=False, translate_locale=None):
"""Tests midair collisions for non-ajax submissions."""
self.client.login(username='admin', password='testpass')
# Post a new document.
data = new_document_data()
resp = self.client.post(reverse('wiki.create'), data)
doc = Document.objects.get(slug=data['slug'])
# This is the url to post new revisions for the rest of this test
posting_url = reverse('wiki.edit', args=[doc.slug])
# Edit #1 starts...
resp = self.client.get(
reverse('wiki.edit', args=[doc.slug])
)
page = pq(resp.content)
rev_id1 = page.find('input[name="current_rev"]').attr('value')
# Edit #2 starts...
resp = self.client.get(
reverse('wiki.edit', args=[doc.slug])
)
page = pq(resp.content)
rev_id2 = page.find('input[name="current_rev"]').attr('value')
# Update data for the POST we are about to attempt
data.update({
'form-type': 'rev',
'content': 'This edit got there first',
'current_rev': rev_id2
})
# If this is a translation test, then create a translation and a
# revision on it. Then update data.
if translate_locale:
translation = document(parent=doc, locale=translate_locale, save=True)
translation_rev = revision(
document=translation,
based_on=translation.parent.current_or_latest_revision(),
save=True
)
rev_id1 = rev_id2 = translation_rev.id
posting_url = reverse(
'wiki.edit',
args=[translation_rev.document.slug],
locale=translate_locale
)
data.update({
'title': translation.title,
'locale': translation.locale,
'slug': translation.slug,
'current_rev': rev_id2
})
# Edit #2 submits successfully
if is_ajax:
resp = self.client.post(
posting_url,
data, HTTP_X_REQUESTED_WITH='XMLHttpRequest'
)
assert resp.status_code == 200
assert not json.loads(resp.content)['error']
else:
resp = self.client.post(posting_url, data)
assert resp.status_code == 302
assert resp['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(resp)
# Edit #1 submits, but receives a mid-aired notification
data.update({
'form-type': 'rev',
'content': 'This edit gets mid-aired',
'current_rev': rev_id1
})
if is_ajax:
resp = self.client.post(
posting_url,
data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest'
)
else:
resp = self.client.post(posting_url, data)
# The url of the document's history
locale = translate_locale if translate_locale else doc.locale
doc_path = translation.slug if translate_locale else doc.slug
history_url = reverse(
'wiki.document_revisions', kwargs={'document_path': doc_path}, locale=locale
)
# The midair collision error, with the document url
midair_collission_error = (unicode(
MIDAIR_COLLISION) % {'url': history_url}
).encode('utf-8')
if is_ajax:
location_of_error = json.loads(resp.content)['error_message']
else:
# If this is not an ajax post, then the error comes back in escaped
# html. We unescape the resp.content, but not all of it, since that
# causes ascii errors.
start_of_error = resp.content.index(midair_collission_error[0:20])
# Add some extra characters to the end, since the unescaped length
# is a little less than the escaped length
end_of_error = start_of_error + len(midair_collission_error) + 20
location_of_error = html_parser.HTMLParser().unescape(
resp.content[start_of_error: end_of_error]
)
assert midair_collission_error in location_of_error
@pytest.mark.midair
def test_edit_midair_collisions_ajax(self):
"""Tests midair collisions for ajax submissions."""
self.test_edit_midair_collisions(is_ajax=True)
@override_flag(SPAM_SUBMISSIONS_FLAG, active=True)
@override_flag(SPAM_CHECKS_FLAG, active=True)
@override_config(AKISMET_KEY='dashboard')
@requests_mock.mock()
@mock.patch('kuma.spam.akismet.Akismet.check_comment')
def test_edit_spam_ajax(self, mock_requests, mock_akismet_method, translate_locale=None):
"""Tests attempted spam edits that occur on Ajax POSTs."""
# Note: Akismet is enabled by the Flag overrides
mock_requests.post(VERIFY_URL, content='valid')
# The return value of akismet.check_comment is set to True
mock_akismet_method.return_value = True
# self.client.login(username='admin', password='testpass')
self.client.login(username='testuser', password='testpass')
# Create a new document.
doc = document(save=True)
data = new_document_data()
# Create a revision on the document
revision(save=True, document=doc)
# This is the url to post new revisions for the rest of this test
posting_url = reverse('wiki.edit', args=[doc.slug])
# If this is a translation test, then create a translation and a revision on it
if translate_locale:
data['locale'] = translate_locale
translation = document(
parent=doc,
locale=translate_locale,
save=True
)
translation_rev = revision(
document=translation,
based_on=translation.parent.current_or_latest_revision(),
save=True
)
# rev_id = translation_rev.id
posting_url = reverse(
'wiki.edit',
args=[translation_rev.document.slug],
locale=translate_locale
)
# Get the rev id
resp = self.client.get(posting_url)
page = pq(resp.content)
rev_id = page.find('input[name="current_rev"]').attr('value')
# Edit submits
data.update({
'form-type': 'rev',
'content': 'Spam content',
'current_rev': rev_id
})
resp = self.client.post(
posting_url,
data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest'
)
spam_message = render_to_string('wiki/includes/spam_error.html')
assert spam_message in json.loads(resp.content)['error_message']
def test_multiple_edits_ajax(self, translate_locale=None):
"""Tests multiple sequential attempted valid edits that occur as Ajax POSTs."""
self.client.login(username='admin', password='testpass')
# Post a new document.
data = new_document_data()
resp = self.client.post(reverse('wiki.create'), data)
doc = Document.objects.get(slug=data['slug'])
# This is the url to post new revisions for the rest of this test
if translate_locale:
posting_url = reverse('wiki.edit', args=[doc.slug], locale=translate_locale)
else:
posting_url = reverse('wiki.edit', args=[doc.slug])
if translate_locale:
# Post a new translation on doc
translate_url = reverse(
'wiki.translate',
args=[data['slug']]
) + '?tolocale={}'.format(translate_locale)
self.client.post(translate_url, data, follow=True)
data.update({'locale': translate_locale})
# Edit #1
resp = self.client.get(posting_url)
page = pq(resp.content)
rev_id1 = page.find('input[name="current_rev"]').attr('value')
# Edit #1 submits successfully
data.update({
'form-type': 'rev',
'content': 'Edit #1',
'current_rev': rev_id1
})
resp1 = self.client.post(
posting_url,
data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest'
)
# Edit #2
resp = self.client.get(posting_url)
page = pq(resp.content)
rev_id2 = page.find('input[name="current_rev"]').attr('value')
# Edit #2 submits successfully
data.update({
'form-type': 'rev',
'content': 'Edit #2',
'current_rev': rev_id2
})
resp2 = self.client.post(
posting_url,
data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest'
)
# For Ajax requests the response is a JsonResponse
for resp in [resp1, resp2]:
assert not json.loads(resp.content)['error']
assert 'error_message' not in json.loads(resp.content).keys()
def test_multiple_translation_edits_ajax(self):
"""Tests multiple sequential valid transalation edits that occur as Ajax POSTs."""
self.test_multiple_edits_ajax(translate_locale='es')
# Test that translation edits hit the same failure cases as well
def test_translation_midair_collission(self):
"""Tests midair collisions for non-ajax translation revisions."""
self.test_edit_midair_collisions(is_ajax=False, translate_locale='az')
def test_translation_midair_collission_ajax(self):
"""Tests midair collisions for ajax translation revisions."""
self.test_edit_midair_collisions(is_ajax=True, translate_locale='af')
def test_translation_spam_ajax(self):
"""Tests attempted translation spam edits that occur on Ajax POSTs."""
self.test_edit_spam_ajax(translate_locale='ru')
@pytest.mark.toc
def test_toc_toggle_off(self):
"""Toggling of table of contents in revisions"""
self.client.login(username='admin', password='testpass')
rev = revision(is_approved=True, save=True)
doc = rev.document
data = new_document_data()
assert Document.objects.get(slug=doc.slug, locale=doc.locale).show_toc
data['form-type'] = 'rev'
data['toc_depth'] = 0
data['slug'] = doc.slug
data['title'] = doc.title
resp = self.client.post(reverse('wiki.edit', args=[doc.slug]), data)
assert resp.status_code == 302
assert resp['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(resp)
doc = Document.objects.get(slug=doc.slug, locale=doc.locale)
assert doc.current_revision.toc_depth == 0
@pytest.mark.toc
def test_toc_toggle_on(self):
"""Toggling of table of contents in revisions"""
self.client.login(username='admin', password='testpass')
rev = revision(is_approved=True, save=True)
new_r = revision(document=rev.document, content=rev.content,
toc_depth=0, is_approved=True)
new_r.save()
assert not Document.objects.get(slug=rev.document.slug,
locale=rev.document.locale).show_toc
data = new_document_data()
data['form-type'] = 'rev'
data['slug'] = rev.document.slug
data['title'] = rev.document.title
resp = self.client.post(reverse('wiki.edit', args=[rev.document.slug]),
data)
assert resp.status_code == 302
assert resp['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(resp)
assert Document.objects.get(slug=rev.document.slug,
locale=rev.document.locale).show_toc
def test_parent_topic(self):
"""Selection of a parent topic when creating a document."""
# TODO: Do we need this test? This seems broken in that the
# parent specified via the parent topic doesn't get its
# slug prepended to the new document's slug, as happens
# when specifying the parent via the URL.
self.client.login(username='admin', password='testpass')
doc = document(title='HTML8')
doc.save()
rev = revision(document=doc)
rev.save()
data = new_document_data()
data['title'] = 'Replicated local storage'
data['parent_topic'] = doc.id
resp = self.client.post(reverse('wiki.create'), data)
assert resp.status_code == 302
assert resp['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(resp)
assert doc.children.count() == 1
assert doc.children.all()[0].title == 'Replicated local storage'
def test_repair_breadcrumbs(self):
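# Repairing breadcrumbs should re-link the French document's topical parents
# to its French counterparts, mirroring the English hierarchy.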
english_top = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
title='English top',
save=True)
english_mid = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
title='English mid',
parent_topic=english_top,
save=True)
english_bottom = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
title='English bottom',
parent_topic=english_mid,
save=True)
french_top = document(locale='fr',
title='French top',
parent=english_top,
save=True)
french_mid = document(locale='fr',
title='French mid',
parent=english_mid,
parent_topic=english_mid,
save=True)
french_bottom = document(locale='fr',
title='French bottom',
parent=english_bottom,
parent_topic=english_bottom,
save=True)
self.client.login(username='admin', password='testpass')
resp = self.client.get(reverse('wiki.repair_breadcrumbs',
args=[french_bottom.slug],
locale='fr'))
assert resp.status_code == 302
assert_no_cache_header(resp)
assert french_bottom.get_absolute_url() in resp['Location']
french_bottom_fixed = Document.objects.get(locale='fr',
title=french_bottom.title)
assert french_mid.id == french_bottom_fixed.parent_topic.id
assert (french_top.id ==
french_bottom_fixed.parent_topic.parent_topic.id)
def test_translate_on_edit(self):
d1 = document(title="Doc1", locale=settings.WIKI_DEFAULT_LANGUAGE,
save=True)
revision(document=d1, save=True)
d2 = document(title="TransDoc1", locale='de', parent=d1, save=True)
revision(document=d2, save=True)
self.client.login(username='admin', password='testpass')
url = reverse('wiki.edit', args=(d2.slug,), locale=d2.locale)
resp = self.client.get(url)
assert resp.status_code == 200
assert resp['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(resp)
def test_discard_location(self):
"""Testing that the 'discard' HREF goes to the correct place when it's
explicitly and implicitly set"""
self.client.login(username='admin', password='testpass')
def _create_doc(slug, locale):
doc = document(slug=slug, is_localizable=True, locale=locale)
doc.save()
r = revision(document=doc)
r.save()
return doc
# Test that the 'discard' button on an edit goes to the original page
doc = _create_doc('testdiscarddoc', settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.get(reverse('wiki.edit', args=[doc.slug]))
assert (pq(response.content).find('.btn-discard').attr('href') ==
reverse('wiki.document', args=[doc.slug]))
# Test that the 'discard' button on a new translation goes
# to the en-US page
response = self.client.get(reverse('wiki.translate', args=[doc.slug]),
{'tolocale': 'es'})
assert (pq(response.content).find('.btn-discard').attr('href') ==
reverse('wiki.document', args=[doc.slug]))
# Test that the 'discard' button on an existing translation goes
# to the 'es' page
foreign_doc = _create_doc('testdiscarddoc', 'es')
response = self.client.get(reverse('wiki.edit',
args=[foreign_doc.slug],
locale=foreign_doc.locale))
assert (pq(response.content).find('.btn-discard').attr('href') ==
reverse('wiki.document', args=[foreign_doc.slug],
locale=foreign_doc.locale))
@override_config(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get',
return_value=('lorem ipsum dolor sit amet', None))
def test_revert(self, mock_kumascript_get):
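# Reverting to an earlier revision should restore its content, record the
# revert comment, and re-render through kumascript.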
self.client.login(username='admin', password='testpass')
data = new_document_data()
data['title'] = 'A Test Article For Reverting'
data['slug'] = 'test-article-for-reverting'
response = self.client.post(reverse('wiki.create'), data)
doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug='test-article-for-reverting')
rev = doc.revisions.order_by('-id').all()[0]
data['content'] = 'Not lorem ipsum anymore'
data['comment'] = 'Nobody likes Latin anyway'
response = self.client.post(reverse('wiki.edit',
args=[doc.slug]), data)
mock_kumascript_get.reset_mock()
response = self.client.post(reverse('wiki.revert_document',
args=[doc.slug, rev.id]),
{'revert': True, 'comment': 'Blah blah'})
assert response.status_code == 302
assert_no_cache_header(response)
assert mock_kumascript_get.called, "kumascript should have been used"
rev = doc.revisions.order_by('-id').all()[0]
assert rev.content == 'lorem ipsum dolor sit amet'
assert 'Blah blah' in rev.comment
mock_kumascript_get.reset_mock()
rev = doc.revisions.order_by('-id').all()[1]
response = self.client.post(reverse('wiki.revert_document',
args=[doc.slug, rev.id]),
{'revert': True})
assert response.status_code == 302
rev = doc.revisions.order_by('-id').all()[0]
assert ': ' not in rev.comment
assert mock_kumascript_get.called, "kumascript should have been used"
def test_revert_moved(self):
doc = document(slug='move-me', save=True)
rev = revision(document=doc, save=True)
prev_rev_id = rev.id
doc._move_tree('moved-doc')
self.client.login(username='admin', password='testpass')
resp = self.client.post(reverse('wiki.revert_document',
args=[doc.slug, prev_rev_id]))
assert resp.status_code == 200
assert_no_cache_header(resp)
assert b'cannot revert a document that has been moved' in resp.content
def test_store_revision_ip(self):
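# Revision IP, user agent, and referrer should only be recorded when the
# 'store_revision_ips' switch is active.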
self.client.login(username='testuser', password='testpass')
data = new_document_data()
slug = 'test-article-for-storing-revision-ip'
data.update({'title': 'A Test Article For Storing Revision IP',
'slug': slug})
self.client.post(reverse('wiki.create'), data)
doc = Document.objects.get(locale='en-US', slug=slug)
data.update({'form-type': 'rev',
'content': 'This revision should NOT record IP',
'comment': 'This revision should NOT record IP'})
resp = self.client.post(reverse('wiki.edit', args=[doc.slug]),
data,
HTTP_USER_AGENT='Mozilla Firefox',
HTTP_REFERER='http://localhost/')
assert resp.status_code == 302
assert resp['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(resp)
assert RevisionIP.objects.all().count() == 0
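# With the 'store_revision_ips' switch enabled, the IP address, user
# agent and referrer of the edit should be recorded.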
data.update({'content': 'Store the IP address for the revision.',
'comment': 'Store the IP address for the revision.'})
with override_switch('store_revision_ips', True):
self.client.post(reverse('wiki.edit', args=[doc.slug]),
data,
HTTP_USER_AGENT='Mozilla Firefox',
HTTP_REFERER='http://localhost/')
assert RevisionIP.objects.all().count() == 1
rev = doc.revisions.order_by('-id').all()[0]
rev_ip = RevisionIP.objects.get(revision=rev)
assert rev_ip.ip == '127.0.0.1'
assert rev_ip.user_agent == 'Mozilla Firefox'
assert rev_ip.referrer == 'http://localhost/'
@pytest.mark.edit_emails
def test_email_for_first_edits(self):
self.client.login(username='testuser', password='testpass')
data = new_document_data()
slug = 'test-article-for-first-edit-emails'
data.update({'title': 'A Test Article For First Edit Emails',
'slug': slug})
self.client.post(reverse('wiki.create'), data)
assert len(mail.outbox) == 1
doc = Document.objects.get(
locale=settings.WIKI_DEFAULT_LANGUAGE, slug=slug)
data.update({'form-type': 'rev',
'content': 'This edit should not send an email',
'comment': 'This edit should not send an email'})
resp = self.client.post(reverse('wiki.edit', args=[doc.slug]), data)
assert resp.status_code == 302
assert resp['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(resp)
assert len(mail.outbox) == 1
self.client.login(username='admin', password='testpass')
data.update({'content': 'Admin first edit should send an email',
'comment': 'Admin first edit should send an email'})
self.client.post(reverse('wiki.edit',
args=[doc.slug]),
data)
assert len(mail.outbox) == 2
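# Each first-edit notification should identify the editor and the
# document via the custom X-Kuma-* headers checked below.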
def _check_message_for_headers(message, username):
assert "%s made their first edit" % username in message.subject
assert message.extra_headers == {
'X-Kuma-Document-Url': doc.get_full_url(),
'X-Kuma-Editor-Username': username,
'X-Kuma-Document-Locale': doc.locale,
'X-Kuma-Document-Title': doc.title
}
testuser_message = mail.outbox[0]
admin_message = mail.outbox[1]
_check_message_for_headers(testuser_message, 'testuser')
_check_message_for_headers(admin_message, 'admin')
def test_email_for_watched_edits(self):
"""
When a user edits a watched document, we should send an email to users
who are watching it.
"""
self.client.login(username='testuser', password='testpass')
data = new_document_data()
rev = revision(save=True)
previous_rev = rev.previous
testuser2 = get_user(username='testuser2')
EditDocumentEvent.notify(testuser2, rev.document)
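# testuser2 now watches this document, so the edit below (made by
# testuser) should trigger exactly one notification email.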
data.update({'form-type': 'rev',
'slug': rev.document.slug,
'title': rev.document.title,
'content': 'This edit should send an email',
'comment': 'This edit should send an email'})
resp = self.client.post(reverse('wiki.edit', args=[rev.document.slug]),
data)
assert resp.status_code == 302
assert resp['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(resp)
assert len(mail.outbox) == 1
message = mail.outbox[0]
assert testuser2.email in message.to
assert str(rev.document.title) in message.body
assert 'sub-articles' not in message.body
# Test that the compare URL points to the right revisions
rev = Document.objects.get(pk=rev.document_id).current_revision
assert rev.id != previous_rev
assert (add_utm(get_compare_url(rev.document, rev.previous.id, rev.id),
'Wiki Doc Edits')
in message.body)
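# The compare link in the message should run from the previous
# revision to the newly saved one (built by get_compare_url and
# tagged with UTM parameters by add_utm).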
# Subscribe another user and assert 2 emails sent this time
mail.outbox = []
testuser01 = get_user(username='testuser01')
EditDocumentEvent.notify(testuser01, rev.document)
data.update({'form-type': 'rev',
'slug': rev.document.slug,
'content': 'This edit should send 2 emails',
'comment': 'This edit should send 2 emails'})
self.client.post(reverse('wiki.edit',
args=[rev.document.slug]),
data)
assert len(mail.outbox) == 2
message = mail.outbox[0]
assert testuser2.email in message.to
assert rev.document.title in message.body
assert 'sub-articles' not in message.body
message = mail.outbox[1]
assert testuser01.email in message.to
assert rev.document.title in message.body
assert 'sub-articles' not in message.body
@pytest.mark.edit_emails
def test_email_for_child_edit_in_watched_tree(self):
"""
When a user edits a child document in a watched document tree, we
should send an email to users who are watching the tree.
"""
root_doc, child_doc, grandchild_doc = create_document_tree()
testuser2 = get_user(username='testuser2')
EditDocumentInTreeEvent.notify(testuser2, root_doc)
self.client.login(username='testuser', password='testpass')
data = new_document_data()
data.update({'form-type': 'rev',
'slug': child_doc.slug,
'content': 'This edit should send an email',
'comment': 'This edit should send an email'})
resp = self.client.post(reverse('wiki.edit', args=[child_doc.slug]),
data)
assert resp.status_code == 302
assert resp['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(resp)
assert len(mail.outbox) == 1
message = mail.outbox[0]
assert testuser2.email in message.to
assert 'sub-articles' in message.body
@pytest.mark.edit_emails
def test_email_for_grandchild_edit_in_watched_tree(self):
"""
When a user edits a grandchild document in a watched document tree, we
should send an email to users who are watching the tree.
"""
root_doc, child_doc, grandchild_doc = create_document_tree()
testuser2 = get_user(username='testuser2')
EditDocumentInTreeEvent.notify(testuser2, root_doc)
self.client.login(username='testuser', password='testpass')
data = new_document_data()
data.update({'form-type': 'rev',
'slug': grandchild_doc.slug,
'content': 'This edit should send an email',
'comment': 'This edit should send an email'})
self.client.post(reverse('wiki.edit',
args=[grandchild_doc.slug]),
data)
assert len(mail.outbox) == 1
message = mail.outbox[0]
assert testuser2.email in message.to
assert 'sub-articles' in message.body
@pytest.mark.edit_emails
def test_single_email_when_watching_doc_and_tree(self):
"""
When a user edits a watched document in a watched document tree, we
should only send a single email to users who are watching both the
document and the tree.
"""
root_doc, child_doc, grandchild_doc = create_document_tree()
testuser2 = get_user(username='testuser2')
EditDocumentInTreeEvent.notify(testuser2, root_doc)
EditDocumentEvent.notify(testuser2, child_doc)
self.client.login(username='testuser', password='testpass')
data = new_document_data()
data.update({'form-type': 'rev',
'slug': child_doc.slug,
'content': 'This edit should send an email',
'comment': 'This edit should send an email'})
self.client.post(reverse('wiki.edit',
args=[child_doc.slug]),
data)
assert len(mail.outbox) == 1
message = mail.outbox[0]
assert testuser2.email in message.to
class SectionEditingResourceTests(UserTestCase, WikiTestCase):
def test_raw_source(self):
"""The raw source for a document can be requested"""
self.client.login(username='admin', password='testpass')
rev = revision(is_approved=True, save=True, content="""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
expected = """
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
"""
with override_switch('application_ACAO', True):
response = self.client.get('%s?raw=true' %
reverse('wiki.document',
args=[rev.document.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert response.status_code == 200
# Since the client is logged-in, the response should not be cached.
assert_no_cache_header(response)
assert response['Access-Control-Allow-Origin'] == '*'
assert normalize_html(expected) == normalize_html(response.content)
def test_raw_editor_safety_filter(self):
"""Safety filter should be applied before rendering editor
bug 821986
"""
self.client.login(username='admin', password='testpass')
rev = revision(is_approved=True, save=True, content="""
<p onload=alert(3)>FOO</p>
<svg><circle onload=confirm(3)>HI THERE</circle></svg>
""")
response = self.client.get('%s?raw=true' %
reverse('wiki.document',
args=[rev.document.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert response.status_code == 200
# Since the client is logged-in, the response should not be cached.
assert_no_cache_header(response)
assert b'<p onload=' not in response.content
assert b'<circle onload=' not in response.content
def test_raw_with_editing_links_source(self):
"""The raw source for a document can be requested, with section editing
links"""
self.client.login(username='admin', password='testpass')
rev = revision(is_approved=True, save=True, content="""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
expected = """
<h1 id="s1"><a class="edit-section" data-section-id="s1" data-section-src-url="/en-US/docs/%(slug)s?raw=true&section=s1" href="/en-US/docs/%(slug)s$edit?edit_links=true&section=s1" title="Edit section">Edit</a>s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2"><a class="edit-section" data-section-id="s2" data-section-src-url="/en-US/docs/%(slug)s?raw=true&section=s2" href="/en-US/docs/%(slug)s$edit?edit_links=true&section=s2" title="Edit section">Edit</a>s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3"><a class="edit-section" data-section-id="s3" data-section-src-url="/en-US/docs/%(slug)s?raw=true&section=s3" href="/en-US/docs/%(slug)s$edit?edit_links=true&section=s3" title="Edit section">Edit</a>s3</h1>
<p>test</p>
<p>test</p>
""" % {'slug': rev.document.slug}
response = self.client.get('%s?raw=true&edit_links=true' %
reverse('wiki.document',
args=[rev.document.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert response.status_code == 200
# Since the client is logged-in, the response should not be cached.
assert_no_cache_header(response)
assert normalize_html(expected) == normalize_html(response.content)
def test_raw_section_source(self):
"""The raw source for a document section can be requested"""
self.client.login(username='admin', password='testpass')
rev = revision(is_approved=True, save=True, content="""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
expected = """
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
"""
response = self.client.get('%s?section=s2&raw=true' %
reverse('wiki.document',
args=[rev.document.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert response.status_code == 200
# Since the client is logged-in, the response should not be cached.
assert_no_cache_header(response)
assert normalize_html(expected) == normalize_html(response.content)
@pytest.mark.midair
def test_raw_section_edit_ajax(self):
self.client.login(username='admin', password='testpass')
rev = revision(is_approved=True, save=True, content="""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
replace = """
<h1 id="s2">s2</h1>
<p>replace</p>
"""
response = self.client.post('%s?section=s2&raw=true' %
reverse('wiki.edit',
args=[rev.document.slug]),
{"form-type": "rev",
"slug": rev.document.slug,
"content": replace},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert response.status_code == 200
assert response['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(response)
assert json.loads(response.content) == {
'error': False,
'new_revision_id': rev.id + 1
}
expected = """
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>replace</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
"""
response = self.client.get('%s?raw=true' %
reverse('wiki.document',
args=[rev.document.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert response.status_code == 200
# Since the client is logged-in, the response should not be cached.
assert_no_cache_header(response)
assert normalize_html(expected) == normalize_html(response.content)
@pytest.mark.midair
def test_midair_section_merge_ajax(self):
"""If a page was changed while someone was editing, but the changes
didn't affect the specific section being edited, then ignore the midair
warning"""
self.client.login(username='admin', password='testpass')
rev = revision(is_approved=True, save=True, content="""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
replace_1 = """
<h1 id="replace1">replace1</h1>
<p>replace</p>
"""
replace_2 = """
<h1 id="replace2">replace2</h1>
<p>replace</p>
"""
expected = """
<h1 id="replace1">replace1</h1>
<p>replace</p>
<h1 id="replace2">replace2</h1>
<p>replace</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
"""
data = {
'form-type': 'rev',
'content': rev.content,
'slug': ''
}
# Edit #1 starts...
resp = self.client.get('%s?section=s1' %
reverse('wiki.edit',
args=[rev.document.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert resp.status_code == 200
assert resp['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(resp)
page = pq(resp.content)
rev_id1 = page.find('input[name="current_rev"]').attr('value')
# Edit #2 starts...
resp = self.client.get('%s?section=s2' %
reverse('wiki.edit',
args=[rev.document.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
page = pq(resp.content)
rev_id2 = page.find('input[name="current_rev"]').attr('value')
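# rev_id1 and rev_id2 record which revision each editor started from;
# they are posted back as current_rev so the server can detect
# midair collisions.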
# Edit #2 submits successfully
data.update({
'form-type': 'rev',
'content': replace_2,
'current_rev': rev_id2,
'slug': rev.document.slug
})
resp = self.client.post('%s?section=s2&raw=true' %
reverse('wiki.edit',
args=[rev.document.slug]),
data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert resp.status_code == 200
assert resp['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(resp)
assert not json.loads(resp.content)['error']
# Edit #1 submits, but since it's a different section, there's no
# mid-air collision
data.update({
'form-type': 'rev',
'content': replace_1,
'current_rev': rev_id1
})
resp = self.client.post('%s?section=s1&raw=true' %
reverse('wiki.edit', args=[rev.document.slug]),
data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# No conflict, but we should get a 205 Reset as an indication that the
# page needs a refresh.
assert resp.status_code == 205
# Finally, make sure that all the edits landed
response = self.client.get('%s?raw=true' %
reverse('wiki.document',
args=[rev.document.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert response.status_code == 200
# Since the client is logged-in, the response should not be cached.
assert_no_cache_header(response)
assert normalize_html(expected) == normalize_html(response.content)
# Also, ensure that the revision is slipped into the headers
assert (unicode(Document.objects.get(slug=rev.document.slug,
locale=rev.document.locale)
.current_revision.id) ==
unicode(response['x-kuma-revision']))
@pytest.mark.midair
def test_midair_section_collision_ajax(self):
"""If both a revision and the edited section has changed, then a
section edit is a collision."""
self.client.login(username='admin', password='testpass')
rev = revision(is_approved=True, save=True, content="""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
replace_1 = """
<h1 id="s2">replace</h1>
<p>replace</p>
"""
replace_2 = """
<h1 id="s2">first replace</h1>
<p>first replace</p>
"""
data = {
'form-type': 'rev',
'content': rev.content
}
# Edit #1 starts...
resp = self.client.get('%s?section=s2' %
reverse('wiki.edit', args=[rev.document.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
page = pq(resp.content)
rev_id1 = page.find('input[name="current_rev"]').attr('value')
# Edit #2 starts...
resp = self.client.get('%s?section=s2' %
reverse('wiki.edit', args=[rev.document.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
page = pq(resp.content)
rev_id2 = page.find('input[name="current_rev"]').attr('value')
# Edit #2 submits successfully
data.update({
'form-type': 'rev',
'content': replace_2,
'slug': rev.document.slug,
'current_rev': rev_id2
})
resp = self.client.post('%s?section=s2&raw=true' %
reverse('wiki.edit', args=[rev.document.slug]),
data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert not json.loads(resp.content)['error']
# Edit #1 submits, but since it's the same section, there's a collision
data.update({
'form': 'rev',
'content': replace_1,
'current_rev': rev_id1
})
resp = self.client.post('%s?section=s2&raw=true' %
reverse('wiki.edit', args=[rev.document.slug]),
data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert 200 == resp.status_code
# We receive the midair collision message
history_url = reverse(
'wiki.document_revisions',
kwargs={'document_path': rev.document.slug})
midair_collision_error = (unicode(MIDAIR_COLLISION) % {'url': history_url}).encode('utf-8')
assert midair_collision_error in json.loads(resp.content)['error_message']
def test_raw_include_option(self):
doc_src = u"""
<div class="noinclude">{{ XULRefAttr() }}</div>
<dl>
<dt>{{ XULAttr("maxlength") }}</dt>
<dd>Type: <em>integer</em></dd>
<dd>Przykłady 例 예제 示例</dd>
</dl>
<p><iframe></iframe></p>
<div class="noinclude">
<p>{{ languages( { "ja": "ja/XUL/Attribute/maxlength" } ) }}</p>
</div>
"""
rev = revision(is_approved=True, save=True, content=doc_src)
expected = u"""
<dl>
<dt>{{ XULAttr("maxlength") }}</dt>
<dd>Type: <em>integer</em></dd>
<dd>Przykłady 例 예제 示例</dd>
</dl>
<p><iframe></iframe></p>
"""
resp = self.client.get('%s?raw&include' %
reverse('wiki.document',
args=[rev.document.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert resp.status_code == 200
assert_shared_cache_header(resp)
assert (normalize_html(expected) ==
normalize_html(resp.content.decode('utf-8')))
def test_section_edit_toc(self):
"""show_toc is preserved in section editing."""
self.client.login(username='admin', password='testpass')
rev = revision(is_approved=True, save=True, content="""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
rev.toc_depth = 1
rev.save()
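# Edit only section s2; the new revision should keep the toc_depth
# set on the previous revision.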
replace = """
<h1 id="s2">s2</h1>
<p>replace</p>
"""
self.client.post('%s?section=s2&raw=true' %
reverse('wiki.edit', args=[rev.document.slug]),
{"form-type": "rev", "slug": rev.document.slug, "content": replace},
follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
changed = Document.objects.get(pk=rev.document.id).current_revision
assert rev.id != changed.id
assert 1 == changed.toc_depth
def test_section_edit_review_tags(self):
"""review tags are preserved in section editing."""
self.client.login(username='admin', password='testpass')
rev = revision(is_approved=True, save=True, content="""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
tags_to_save = ['bar', 'foo']
rev.save()
rev.review_tags.set(*tags_to_save)
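# Edit only section s2; the review tags from the previous revision
# should be carried over to the new one.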
replace = """
<h1 id="s2">s2</h1>
<p>replace</p>
"""
self.client.post('%s?section=s2&raw=true' %
reverse('wiki.edit', args=[rev.document.slug]),
{"form-type": "rev", "slug": rev.document.slug, "content": replace},
follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
changed = Document.objects.get(pk=rev.document.id).current_revision
assert rev.id != changed.id
assert set(tags_to_save) == set(t.name for t in changed.review_tags.all())
class MindTouchRedirectTests(UserTestCase, WikiTestCase):
"""
Test that we appropriately redirect old-style MindTouch URLs to
new-style kuma URLs.
"""
# A note on these tests: we could try to use assertRedirects here,
# but for the most part we're just constructing URLs close enough to
# the wiki app's own built-in redirects that it will pick up the
# request and do what we want with it. The app may end up issuing its
# own redirects, which are tricky to separate from the ones the legacy
# MindTouch handling emits, so instead we just test that A) a redirect
# was issued and B) the URL we constructed is enough for the document
# views to go on.
server_prefix = '/%s/docs' % settings.WIKI_DEFAULT_LANGUAGE
namespace_urls = (
# One for each namespace.
{'mindtouch': '/Help:Foo',
'kuma': '%s/Help:Foo' % server_prefix},
{'mindtouch': '/Help_talk:Foo',
'kuma': '%s/Help_talk:Foo' % server_prefix},
{'mindtouch': '/Project:En/MDC_editor_guide',
'kuma': '%s/Project:MDC_editor_guide' % server_prefix},
{'mindtouch': '/Project_talk:En/MDC_style_guide',
'kuma': '%s/Project_talk:MDC_style_guide' % server_prefix},
{'mindtouch': '/Special:Foo',
'kuma': '%s/Special:Foo' % server_prefix},
{'mindtouch': '/Talk:en/Foo',
'kuma': '%s/Talk:Foo' % server_prefix},
{'mindtouch': '/Template:Foo',
'kuma': '%s/Template:Foo' % server_prefix},
{'mindtouch': '/User:Foo',
'kuma': '%s/User:Foo' % server_prefix},
)
def test_namespace_urls(self):
new_doc = document()
new_doc.title = 'User:Foo'
new_doc.slug = 'User:Foo'
new_doc.save()
for namespace_test in self.namespace_urls:
resp = self.client.get(namespace_test['mindtouch'], follow=False)
assert 301 == resp.status_code
assert resp['Location'] == namespace_test['kuma']
def test_document_urls(self):
"""Check the url redirect to proper document when the url like
/<locale>/<document_slug>"""
d = document(locale='zh-CN')
d.save()
mt_url = '/{locale}/{slug}'.format(locale=d.locale, slug=d.slug)
resp = self.client.get(mt_url, follow=True)
assert resp.status_code == 200
# Check that the last URL in the redirect chain is the document's URL
last_url = resp.redirect_chain[-1][0]
assert last_url == d.get_absolute_url()
def test_view_param(self):
d = document()
d.locale = settings.WIKI_DEFAULT_LANGUAGE
d.slug = 'HTML/HTML5'
d.title = 'HTML 5'
d.save()
mt_url = '/en-US/%s?view=edit' % (d.slug,)
resp = self.client.get(mt_url)
assert 301 == resp.status_code
expected_url = d.get_absolute_url('wiki.edit')
assert resp['Location'] == expected_url
@override_config(KUMASCRIPT_TIMEOUT=5.0, KUMASCRIPT_MAX_AGE=600)
class DeferredRenderingViewTests(UserTestCase, WikiTestCase):
"""Tests for the deferred rendering system and interaction with views"""
def setUp(self):
super(DeferredRenderingViewTests, self).setUp()
self.rendered_content = 'HELLO RENDERED CONTENT'
self.raw_content = 'THIS IS RAW CONTENT'
self.rev = revision(is_approved=True, save=True,
content=self.raw_content,
# Disable TOC, makes content inspection easier.
toc_depth=0)
self.doc = self.rev.document
self.doc.html = self.raw_content
self.doc.rendered_html = self.rendered_content
self.doc.save()
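# The document now has distinct raw and rendered HTML, so each test
# can tell which of the two a view served.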
self.url = self.doc.get_absolute_url()
@mock.patch('kuma.wiki.kumascript.get')
def test_rendered_content(self, mock_kumascript_get):
"""Document view should serve up rendered content when available"""
mock_kumascript_get.return_value = (self.rendered_content, None)
resp = self.client.get(self.url, follow=False)
p = pq(resp.content)
txt = p.find('#wikiArticle').text()
assert self.rendered_content in txt
assert self.raw_content not in txt
assert 0 == p.find('#doc-rendering-in-progress').length
assert 0 == p.find('#doc-render-raw-fallback').length
def test_rendering_in_progress_warning(self):
# Make the document look like there's a rendering in progress.
self.doc.render_started_at = datetime.datetime.now()
self.doc.save()
resp = self.client.get(self.url, follow=False)
p = pq(resp.content)
txt = p.find('#wikiArticle').text()
# Even though a rendering looks like it's in progress, ensure the
# last-known render is displayed.
assert self.rendered_content in txt
assert self.raw_content not in txt
assert 0 == p.find('#doc-rendering-in-progress').length
# Only for logged-in users, ensure the render-in-progress warning is
# displayed.
self.client.login(username='testuser', password='testpass')
resp = self.client.get(self.url, follow=False)
p = pq(resp.content)
assert 1 == p.find('#doc-rendering-in-progress').length
@mock.patch('kuma.wiki.kumascript.get')
def test_raw_content_during_initial_render(self, mock_kumascript_get):
"""Raw content should be displayed during a document's initial
deferred rendering"""
mock_kumascript_get.return_value = (self.rendered_content, None)
# Make the document look like there's no rendered content, but that a
# rendering is in progress.
self.doc.html = self.raw_content
self.doc.rendered_html = ''
self.doc.render_started_at = datetime.datetime.now()
self.doc.save()
# Now, ensure that raw content is shown in the view.
resp = self.client.get(self.url, follow=False)
p = pq(resp.content)
txt = p.find('#wikiArticle').text()
assert self.rendered_content not in txt
assert self.raw_content in txt
assert 0 == p.find('#doc-render-raw-fallback').length
# Only for logged-in users, ensure that a warning is displayed about
# the fallback
self.client.login(username='testuser', password='testpass')
resp = self.client.get(self.url, follow=False)
p = pq(resp.content)
assert 1 == p.find('#doc-render-raw-fallback').length
@mock.patch.object(Document, 'schedule_rendering')
@mock.patch('kuma.wiki.kumascript.get')
def test_schedule_rendering(self, mock_kumascript_get,
mock_document_schedule_rendering):
mock_kumascript_get.return_value = (self.rendered_content, None)
self.client.login(username='testuser', password='testpass')
data = new_document_data()
data.update({
'form-type': 'rev',
'content': 'This is an update',
})
edit_url = reverse('wiki.edit', args=[self.doc.slug])
resp = self.client.post(edit_url, data)
assert 302 == resp.status_code
assert mock_document_schedule_rendering.called
mock_document_schedule_rendering.reset_mock()
data.update({
'form-type': 'both',
'content': 'This is a translation',
})
translate_url = (reverse('wiki.translate', args=[data['slug']]) +
'?tolocale=fr')
response = self.client.post(translate_url, data)
assert response.status_code == 302
assert response['X-Robots-Tag'] == 'noindex'
assert_no_cache_header(response)
assert mock_document_schedule_rendering.called
class PageMoveTests(UserTestCase, WikiTestCase):
def test_move_conflict(self):
parent = revision(title='Test page move views',
slug='test-page-move-views',
is_approved=True,
save=True)
parent_doc = parent.document
child = revision(title='Child of page-move view test',
slug='page-move/test-views',
is_approved=True,
save=True)
child_doc = child.document
child_doc.parent_topic = parent.document
child_doc.save()
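# Create a document that already lives at the destination slug, so
# the move below should be reported as a conflict.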
revision(title='Conflict for page-move view',
slug='moved/test-page-move-views/test-views',
is_approved=True,
save=True)
data = {'slug': 'moved/test-page-move-views'}
self.client.login(username='admin', password='testpass')
with override_flag('page_move', True):
resp = self.client.post(reverse('wiki.move',
args=(parent_doc.slug,)),
data=data)
assert resp.status_code == 200
assert_no_cache_header(resp)
| jwhitlock/kuma | kuma/wiki/tests/test_views.py | Python | mpl-2.0 | 116,810 | ["VisIt"] | b984e474afa3dbcccb2b910c3779857c9c9ffae7dffeb965c40ac93ea20a67c2 |