| text (string, 12–1.05M chars) | repo_name (string, 5–86 chars) | path (string, 4–191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12–1.05M) | keyword (list, 1–23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
#!/usr/bin/python
# Copyright (C) 2020- The University of Notre Dame
# This software is distributed under the GNU General Public License.
# See the file COPYING for details.
import os
import sys
import json
from work_queue_server import WorkQueueServer
from time import time
from create_splits import create_splits

tasks = []
inputs = ["bwa", "ref.fastq", "ref.fastq.sa", "ref.fastq.pac", "ref.fastq.amb", "ref.fastq.ann", "ref.fastq.bwt"]

def define_tasks(nsplits):
    for i in range(nsplits):
        task = {}
        task["command_line"] = "./bwa mem ref.fastq query.fastq.%d.gz | gzip > query.fastq.%d.sam" % (i, i)
        task["output_files"] = []
        task["input_files"] = []
        # output files
        out = {}
        out["local_name"] = "query.fastq.%d.sam" % i
        out["remote_name"] = "query.fastq.%d.sam" % i
        flags = {}
        flags["cache"] = False
        flags["watch"] = False
        out["flags"] = flags
        task["output_files"].append(out)
        # input files
        for name in inputs:
            input_file = {}
            input_file["local_name"] = name
            input_file["remote_name"] = name
            flags = {}
            flags["cache"] = True
            flags["watch"] = False
            input_file["flags"] = flags
            task["input_files"].append(input_file)
        q = {}
        q["local_name"] = "query.fastq.%d.gz" % i
        q["remote_name"] = "query.fastq.%d.gz" % i
        flags = {}
        flags["cache"] = False
        flags["watch"] = False
        q["flags"] = flags
        task["input_files"].append(q)
        q = {}
        q["local_name"] = "/usr/bin/gzip"
        q["remote_name"] = "gzip"
        flags = {}
        flags["cache"] = True
        flags["watch"] = False
        q["flags"] = flags
        task["input_files"].append(q)
        # specify resources
        task["cores"] = 2
        task["memory"] = 1000
        task["disk"] = 1000
        tasks.append(task)

def main():
    if len(sys.argv) < 3:
        print("USAGE: ./wq_bwa_json.py <nsplits> <nworkers>")
        sys.exit(0)
    start = time()
    # generate tasks
    define_tasks(int(sys.argv[1]))
    q = WorkQueueServer()
    # connect to server
    q.connect('127.0.0.1', 2345, 1234, "wq_bwa_json")
    # submit tasks
    for t in tasks:
        t = json.dumps(t)
        response = q.submit(t)
        print(response)
    # wait for tasks to complete
    while not q.empty():
        response = q.wait(10)
        print(response)
    # disconnect
    q.disconnect()
    end = time()
    print("time: {}".format(end - start - 1))
    os.system("rm -f query.fastq.*.sam")

if __name__ == "__main__":
    main()
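# For reference, the JSON payload produced by json.dumps(task) for split 0,
# reconstructed from the dictionaries built above (abridged where marked):
#
#   {"command_line": "./bwa mem ref.fastq query.fastq.0.gz | gzip > query.fastq.0.sam",
#    "output_files": [{"local_name": "query.fastq.0.sam",
#                      "remote_name": "query.fastq.0.sam",
#                      "flags": {"cache": false, "watch": false}}],
#    "input_files": [...],
#    "cores": 2, "memory": 1000, "disk": 1000}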
| nkremerh/cctools | apps/wq_bwa_json/wq_bwa_json.py | Python | gpl-2.0 | 2,791 | ["BWA"] | a602e3708cf86b43bbf2061a9b272113587c9d701ca8b489bad5b225d904f4d8 |
# -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS.
"""
from datetime import datetime, timedelta
from flaky import flaky
from textwrap import dedent
from unittest import skip
from nose.plugins.attrib import attr
import pytz
import urllib
from bok_choy.promise import EmptyPromise
from common.test.acceptance.tests.helpers import (
UniqueCourseTest,
EventsTestMixin,
load_data_str,
generate_course_key,
select_option_by_value,
element_has_text,
select_option_by_text,
get_selected_option_text
)
from common.test.acceptance.pages.lms import BASE_URL
from common.test.acceptance.pages.lms.account_settings import AccountSettingsPage
from common.test.acceptance.pages.lms.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.create_mode import ModeCreationPage
from common.test.acceptance.pages.common.logout import LogoutPage
from common.test.acceptance.pages.lms.course_info import CourseInfoPage
from common.test.acceptance.pages.lms.tab_nav import TabNavPage
from common.test.acceptance.pages.lms.course_nav import CourseNavPage
from common.test.acceptance.pages.lms.progress import ProgressPage
from common.test.acceptance.pages.lms.dashboard import DashboardPage
from common.test.acceptance.pages.lms.problem import ProblemPage
from common.test.acceptance.pages.lms.video.video import VideoPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.studio.settings import SettingsPage
from common.test.acceptance.pages.lms.login_and_register import CombinedLoginAndRegisterPage, ResetPasswordPage
from common.test.acceptance.pages.lms.track_selection import TrackSelectionPage
from common.test.acceptance.pages.lms.pay_and_verify import PaymentAndVerificationFlow, FakePaymentPage
from common.test.acceptance.pages.lms.course_wiki import CourseWikiPage, CourseWikiEditPage
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc, CourseUpdateDesc
@attr(shard=8)
class ForgotPasswordPageTest(UniqueCourseTest):
"""
Test that the forgot-password form is rendered if the URL contains
'forgot-password-modal' in its hash.
"""
def setUp(self):
""" Initialize the page object """
super(ForgotPasswordPageTest, self).setUp()
self.user_info = self._create_user()
self.reset_password_page = ResetPasswordPage(self.browser)
def _create_user(self):
"""
Create a unique user
"""
auto_auth = AutoAuthPage(self.browser).visit()
user_info = auto_auth.user_info
LogoutPage(self.browser).visit()
return user_info
def test_reset_password_form_visibility(self):
# Navigate to the password reset page
self.reset_password_page.visit()
# Expect that reset password form is visible on the page
self.assertTrue(self.reset_password_page.is_form_visible())
def test_reset_password_confirmation_box_visibility(self):
# Navigate to the password reset page
self.reset_password_page.visit()
# Navigate to the password reset form and try to submit it
self.reset_password_page.fill_password_reset_form(self.user_info['email'])
self.reset_password_page.is_success_visible(".submission-success")
# Expect that we're shown a success message
self.assertIn("Password Reset Email Sent", self.reset_password_page.get_success_message())
@attr(shard=8)
class LoginFromCombinedPageTest(UniqueCourseTest):
"""Test that we can log in using the combined login/registration page.
Also test that we can request a password reset from the combined
login/registration page.
"""
def setUp(self):
"""Initialize the page objects and create a test course. """
super(LoginFromCombinedPageTest, self).setUp()
self.login_page = CombinedLoginAndRegisterPage(
self.browser,
start_page="login",
course_id=self.course_id
)
self.dashboard_page = DashboardPage(self.browser)
# Create a course to enroll in
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
def test_login_success(self):
# Create a user account
email, password = self._create_unique_user()
# Navigate to the login page and try to log in
self.login_page.visit().login(email=email, password=password)
# Expect that we reach the dashboard and we're auto-enrolled in the course
course_names = self.dashboard_page.wait_for_page().available_courses
self.assertIn(self.course_info["display_name"], course_names)
def test_login_failure(self):
# Navigate to the login page
self.login_page.visit()
# User account does not exist
self.login_page.login(email="nobody@nowhere.com", password="password")
# Verify that an error is displayed
self.assertIn("Email or password is incorrect.", self.login_page.wait_for_errors())
def test_toggle_to_register_form(self):
self.login_page.visit().toggle_form()
self.assertEqual(self.login_page.current_form, "register")
@flaky # ECOM-1165
def test_password_reset_success(self):
# Create a user account
email, password = self._create_unique_user() # pylint: disable=unused-variable
# Navigate to the password reset form and try to submit it
self.login_page.visit().password_reset(email=email)
# Expect that we're shown a success message
self.assertIn("Password Reset Email Sent", self.login_page.wait_for_success())
def test_password_reset_no_user(self):
# Navigate to the password reset form
self.login_page.visit()
# User account does not exist
self.login_page.password_reset(email="nobody@nowhere.com")
# Expect that we're shown a success message
self.assertIn("Password Reset Email Sent", self.login_page.wait_for_success())
def test_third_party_login(self):
"""
Test that we can login using third party credentials, and that the
third party account gets linked to the edX account.
"""
# Create a user account
email, password = self._create_unique_user()
# Navigate to the login page
self.login_page.visit()
# Baseline screen-shots are different for chrome and firefox.
#self.assertScreenshot('#login .login-providers', 'login-providers-{}'.format(self.browser.name), .25)
# The line above is commented out temporarily; see SOL-1937
# Try to log in using "Dummy" provider
self.login_page.click_third_party_dummy_provider()
# The user will be redirected somewhere and then back to the login page:
msg_text = self.login_page.wait_for_auth_status_message()
self.assertIn("You have successfully signed into Dummy", msg_text)
self.assertIn("To link your accounts, sign in now using your edX password", msg_text)
# Now login with username and password:
self.login_page.login(email=email, password=password)
# Expect that we reach the dashboard and we're auto-enrolled in the course
course_names = self.dashboard_page.wait_for_page().available_courses
self.assertIn(self.course_info["display_name"], course_names)
try:
# Now logout and check that we can log back in instantly (because the account is linked):
LogoutPage(self.browser).visit()
self.login_page.visit()
self.login_page.click_third_party_dummy_provider()
self.dashboard_page.wait_for_page()
finally:
self._unlink_dummy_account()
def test_hinted_login(self):
""" Test the login page when coming from course URL that specified which third party provider to use """
# Create a user account and link it to third party auth with the dummy provider:
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self._link_dummy_account()
try:
LogoutPage(self.browser).visit()
# When not logged in, try to load a course URL that includes the provider hint ?tpa_hint=...
course_page = CoursewarePage(self.browser, self.course_id)
self.browser.get(course_page.url + '?tpa_hint=oa2-dummy')
# We should now be redirected to the login page
self.login_page.wait_for_page()
self.assertIn(
"Would you like to sign in using your Dummy credentials?",
self.login_page.hinted_login_prompt
)
# Baseline screen-shots are different for chrome and firefox.
#self.assertScreenshot('#hinted-login-form', 'hinted-login-{}'.format(self.browser.name), .25)
# The line above is commented out temporarily; see SOL-1937
self.login_page.click_third_party_dummy_provider()
# We should now be redirected to the course page
course_page.wait_for_page()
finally:
self._unlink_dummy_account()
def _link_dummy_account(self):
""" Go to Account Settings page and link the user's account to the Dummy provider """
account_settings = AccountSettingsPage(self.browser).visit()
# switch to "Linked Accounts" tab
account_settings.switch_account_settings_tabs('accounts-tab')
field_id = "auth-oa2-dummy"
account_settings.wait_for_field(field_id)
self.assertEqual("Link Your Account", account_settings.link_title_for_link_field(field_id))
account_settings.click_on_link_in_link_field(field_id)
# make sure we are on "Linked Accounts" tab after the account settings
# page is reloaded
account_settings.switch_account_settings_tabs('accounts-tab')
account_settings.wait_for_link_title_for_link_field(field_id, "Unlink This Account")
def _unlink_dummy_account(self):
""" Verify that the 'Dummy' third party auth provider is linked, then unlink it """
# This must be done after linking the account, or we'll get cross-test side effects
account_settings = AccountSettingsPage(self.browser).visit()
# switch to "Linked Accounts" tab
account_settings.switch_account_settings_tabs('accounts-tab')
field_id = "auth-oa2-dummy"
account_settings.wait_for_field(field_id)
self.assertEqual("Unlink This Account", account_settings.link_title_for_link_field(field_id))
account_settings.click_on_link_in_link_field(field_id)
account_settings.wait_for_message(field_id, "Successfully unlinked")
def _create_unique_user(self):
"""
Create a new user with a unique name and email.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
email = "{user}@example.com".format(user=username)
password = "password"
# Create the user (automatically logs us in)
AutoAuthPage(
self.browser,
username=username,
email=email,
password=password
).visit()
# Log out
LogoutPage(self.browser).visit()
return (email, password)
@attr(shard=8)
class RegisterFromCombinedPageTest(UniqueCourseTest):
"""Test that we can register a new user from the combined login/registration page. """
def setUp(self):
"""Initialize the page objects and create a test course. """
super(RegisterFromCombinedPageTest, self).setUp()
self.register_page = CombinedLoginAndRegisterPage(
self.browser,
start_page="register",
course_id=self.course_id
)
self.dashboard_page = DashboardPage(self.browser)
# Create a course to enroll in
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
def test_register_success(self):
# Navigate to the registration page
self.register_page.visit()
# Fill in the form and submit it
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
email = "{user}@example.com".format(user=username)
self.register_page.register(
email=email,
password="password",
username=username,
full_name="Test User",
country="US",
favorite_movie="Mad Max: Fury Road",
terms_of_service=True
)
# Expect that we reach the dashboard and we're auto-enrolled in the course
course_names = self.dashboard_page.wait_for_page().available_courses
self.assertIn(self.course_info["display_name"], course_names)
def test_register_failure(self):
# Navigate to the registration page
self.register_page.visit()
# Enter a blank for the username field, which is required
# Don't agree to the terms of service / honor code.
# Don't specify a country code, which is required.
# Don't specify a favorite movie.
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
email = "{user}@example.com".format(user=username)
self.register_page.register(
email=email,
password="password",
username="",
full_name="Test User",
terms_of_service=False
)
# Verify that the expected errors are displayed.
errors = self.register_page.wait_for_errors()
self.assertIn(u'Please enter your Public username.', errors)
self.assertIn(u'You must agree to the edX Terms of Service and Honor Code.', errors)
self.assertIn(u'Please select your Country.', errors)
self.assertIn(u'Please tell us your favorite movie.', errors)
def test_toggle_to_login_form(self):
self.register_page.visit().toggle_form()
self.assertEqual(self.register_page.current_form, "login")
def test_third_party_register(self):
"""
Test that we can register using third party credentials, and that the
third party account gets linked to the edX account.
"""
# Navigate to the register page
self.register_page.visit()
# Baseline screen-shots are different for chrome and firefox.
#self.assertScreenshot('#register .login-providers', 'register-providers-{}'.format(self.browser.name), .25)
# The line above is commented out temporarily; see SOL-1937
# Try to authenticate using the "Dummy" provider
self.register_page.click_third_party_dummy_provider()
# The user will be redirected somewhere and then back to the register page:
msg_text = self.register_page.wait_for_auth_status_message()
self.assertEqual(self.register_page.current_form, "register")
self.assertIn("You've successfully signed into Dummy", msg_text)
self.assertIn("We just need a little more information", msg_text)
# Now the form should be pre-filled with the data from the Dummy provider:
self.assertEqual(self.register_page.email_value, "adama@fleet.colonies.gov")
self.assertEqual(self.register_page.full_name_value, "William Adama")
self.assertIn("Galactica1", self.register_page.username_value)
# Set country, accept the terms, and submit the form:
self.register_page.register(country="US", favorite_movie="Battlestar Galactica", terms_of_service=True)
# Expect that we reach the dashboard and we're auto-enrolled in the course
course_names = self.dashboard_page.wait_for_page().available_courses
self.assertIn(self.course_info["display_name"], course_names)
# Now logout and check that we can log back in instantly (because the account is linked):
LogoutPage(self.browser).visit()
login_page = CombinedLoginAndRegisterPage(self.browser, start_page="login")
login_page.visit()
login_page.click_third_party_dummy_provider()
self.dashboard_page.wait_for_page()
# Now unlink the account (To test the account settings view and also to prevent cross-test side effects)
account_settings = AccountSettingsPage(self.browser).visit()
# switch to "Linked Accounts" tab
account_settings.switch_account_settings_tabs('accounts-tab')
field_id = "auth-oa2-dummy"
account_settings.wait_for_field(field_id)
self.assertEqual("Unlink This Account", account_settings.link_title_for_link_field(field_id))
account_settings.click_on_link_in_link_field(field_id)
account_settings.wait_for_message(field_id, "Successfully unlinked")
@attr(shard=8)
class PayAndVerifyTest(EventsTestMixin, UniqueCourseTest):
"""Test that we can proceed through the payment and verification flow."""
def setUp(self):
"""Initialize the test.
Create the necessary page objects, create a test course and configure its modes,
create a user and log them in.
"""
super(PayAndVerifyTest, self).setUp()
self.track_selection_page = TrackSelectionPage(self.browser, self.course_id)
self.payment_and_verification_flow = PaymentAndVerificationFlow(self.browser, self.course_id)
self.immediate_verification_page = PaymentAndVerificationFlow(self.browser, self.course_id, entry_point='verify-now')
self.upgrade_page = PaymentAndVerificationFlow(self.browser, self.course_id, entry_point='upgrade')
self.fake_payment_page = FakePaymentPage(self.browser, self.course_id)
self.dashboard_page = DashboardPage(self.browser)
# Create a course
CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
).install()
# Add an honor mode to the course
ModeCreationPage(self.browser, self.course_id).visit()
# Add a verified mode to the course
ModeCreationPage(self.browser, self.course_id, mode_slug=u'verified', mode_display_name=u'Verified Certificate', min_price=10, suggested_prices='10,20').visit()
@skip("Flaky 02/02/2015")
def test_immediate_verification_enrollment(self):
# Create a user and log them in
student_id = AutoAuthPage(self.browser).visit().get_user_id()
# Navigate to the track selection page
self.track_selection_page.visit()
# Enter the payment and verification flow by choosing to enroll as verified
self.track_selection_page.enroll('verified')
# Proceed to the fake payment page
self.payment_and_verification_flow.proceed_to_payment()
# Submit payment
self.fake_payment_page.submit_payment()
# Proceed to verification
self.payment_and_verification_flow.immediate_verification()
# Take face photo and proceed to the ID photo step
self.payment_and_verification_flow.webcam_capture()
self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)
# Take ID photo and proceed to the review photos step
self.payment_and_verification_flow.webcam_capture()
self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)
# Submit photos and proceed to the enrollment confirmation step
self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)
# Navigate to the dashboard
self.dashboard_page.visit()
# Expect that we're enrolled as verified in the course
enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
self.assertEqual(enrollment_mode, 'verified')
def test_deferred_verification_enrollment(self):
# Create a user and log them in
student_id = AutoAuthPage(self.browser).visit().get_user_id()
# Navigate to the track selection page
self.track_selection_page.visit()
# Enter the payment and verification flow by choosing to enroll as verified
self.track_selection_page.enroll('verified')
# Proceed to the fake payment page
self.payment_and_verification_flow.proceed_to_payment()
# Submit payment
self.fake_payment_page.submit_payment()
# Navigate to the dashboard
self.dashboard_page.visit()
# Expect that we're enrolled as verified in the course
enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
self.assertEqual(enrollment_mode, 'verified')
def test_enrollment_upgrade(self):
# Create a user, log them in, and enroll them in the honor mode
student_id = AutoAuthPage(self.browser, course_id=self.course_id).visit().get_user_id()
# Navigate to the dashboard
self.dashboard_page.visit()
# Expect that we're enrolled as honor in the course
enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
self.assertEqual(enrollment_mode, 'honor')
# Click the upsell button on the dashboard
self.dashboard_page.upgrade_enrollment(self.course_info["display_name"], self.upgrade_page)
# Select the first contribution option appearing on the page
self.upgrade_page.indicate_contribution()
# Proceed to the fake payment page
self.upgrade_page.proceed_to_payment()
def only_enrollment_events(event):
"""Filter out all non-enrollment events."""
return event['event_type'].startswith('edx.course.enrollment.')
expected_events = [
{
'event_type': 'edx.course.enrollment.mode_changed',
'event': {
'user_id': int(student_id),
'mode': 'verified',
}
}
]
with self.assert_events_match_during(event_filter=only_enrollment_events, expected_events=expected_events):
# Submit payment
self.fake_payment_page.submit_payment()
# Navigate to the dashboard
self.dashboard_page.visit()
# Expect that we're enrolled as verified in the course
enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
self.assertEqual(enrollment_mode, 'verified')
@attr(shard=1)
class CourseWikiTest(UniqueCourseTest):
"""
Tests that verify the course wiki.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(CourseWikiTest, self).setUp()
# self.course_info['number'] must be shorter since we are accessing the wiki. See TNL-1751
self.course_info['number'] = self.unique_id[0:6]
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.course_wiki_page = CourseWikiPage(self.browser, self.course_id)
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.course_wiki_edit_page = CourseWikiEditPage(self.browser, self.course_id, self.course_info)
self.tab_nav = TabNavPage(self.browser)
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
# Access course wiki page
self.course_info_page.visit()
self.tab_nav.go_to_tab('Wiki')
def _open_editor(self):
self.course_wiki_page.open_editor()
self.course_wiki_edit_page.wait_for_page()
def test_edit_course_wiki(self):
"""
The wiki page is editable by students by default.
After accessing the course wiki,
replace the content of the default page
and confirm that the new content has been saved.
"""
content = "hello"
self._open_editor()
self.course_wiki_edit_page.replace_wiki_content(content)
self.course_wiki_edit_page.save_wiki_content()
actual_content = unicode(self.course_wiki_page.q(css='.wiki-article p').text[0])
self.assertEqual(content, actual_content)
@attr(shard=1)
class HighLevelTabTest(UniqueCourseTest):
"""
Tests that verify each of the high-level tabs available within a course.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(HighLevelTabTest, self).setUp()
# self.course_info['number'] must be shorter since we are accessing the wiki. See TNL-1751
self.course_info['number'] = self.unique_id[0:6]
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.progress_page = ProgressPage(self.browser, self.course_id)
self.course_nav = CourseNavPage(self.browser)
self.tab_nav = TabNavPage(self.browser)
self.video = VideoPage(self.browser)
# Install a course with sections/problems, tabs, updates, and handouts
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
course_fix.add_update(
CourseUpdateDesc(date='January 29, 2014', content='Test course update1')
)
course_fix.add_handout('demoPDF.pdf')
course_fix.add_children(
XBlockFixtureDesc('static_tab', 'Test Static Tab', data=r"static tab data with mathjax \(E=mc^2\)"),
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
XBlockFixtureDesc('problem', 'Test Problem 2', data=load_data_str('formula_problem.xml')),
XBlockFixtureDesc('html', 'Test HTML'),
)
),
XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 2'),
XBlockFixtureDesc('sequential', 'Test Subsection 3'),
)
).install()
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_course_info(self):
"""
Navigate to the course info page.
"""
# Navigate to the course info page from the progress page
self.progress_page.visit()
self.tab_nav.go_to_tab('Home')
# Expect just one update
self.assertEqual(self.course_info_page.num_updates, 1)
# Expect a link to the demo handout pdf
handout_links = self.course_info_page.handout_links
self.assertEqual(len(handout_links), 1)
self.assertIn('demoPDF.pdf', handout_links[0])
def test_progress(self):
"""
Navigate to the progress page.
"""
# Navigate to the progress page from the info page
self.course_info_page.visit()
self.tab_nav.go_to_tab('Progress')
# We haven't answered any problems yet, so assume scores are zero
# Only problems should have scores; so there should be 2 scores.
CHAPTER = 'Test Section'
SECTION = 'Test Subsection'
EXPECTED_SCORES = [(0, 3), (0, 1)]
actual_scores = self.progress_page.scores(CHAPTER, SECTION)
self.assertEqual(actual_scores, EXPECTED_SCORES)
def test_static_tab(self):
"""
Navigate to a static tab (course content)
"""
# From the course info page, navigate to the static tab
self.course_info_page.visit()
self.tab_nav.go_to_tab('Test Static Tab')
self.assertTrue(self.tab_nav.is_on_tab('Test Static Tab'))
def test_static_tab_with_mathjax(self):
"""
Navigate to a static tab (course content)
"""
# From the course info page, navigate to the static tab
self.course_info_page.visit()
self.tab_nav.go_to_tab('Test Static Tab')
self.assertTrue(self.tab_nav.is_on_tab('Test Static Tab'))
# Verify that Mathjax has rendered
self.tab_nav.mathjax_has_rendered()
def test_wiki_tab_first_time(self):
"""
Navigate to the course wiki tab. When the wiki is accessed for
the first time, it is created on the fly.
"""
course_wiki = CourseWikiPage(self.browser, self.course_id)
# From the course info page, navigate to the wiki tab
self.course_info_page.visit()
self.tab_nav.go_to_tab('Wiki')
self.assertTrue(self.tab_nav.is_on_tab('Wiki'))
# Assert that a default wiki is created
expected_article_name = "{org}.{course_number}.{course_run}".format(
org=self.course_info['org'],
course_number=self.course_info['number'],
course_run=self.course_info['run']
)
self.assertEqual(expected_article_name, course_wiki.article_name)
def test_courseware_nav(self):
"""
Navigate to a particular unit in the course.
"""
# Navigate to the course page from the info page
self.course_info_page.visit()
self.tab_nav.go_to_tab('Course')
# Check that the course navigation appears correctly
EXPECTED_SECTIONS = {
'Test Section': ['Test Subsection'],
'Test Section 2': ['Test Subsection 2', 'Test Subsection 3']
}
actual_sections = self.course_nav.sections
for section, subsections in EXPECTED_SECTIONS.iteritems():
self.assertIn(section, actual_sections)
self.assertEqual(actual_sections[section], EXPECTED_SECTIONS[section])
# Navigate to a particular section
self.course_nav.go_to_section('Test Section', 'Test Subsection')
# Check the sequence items
EXPECTED_ITEMS = ['Test Problem 1', 'Test Problem 2', 'Test HTML']
actual_items = self.course_nav.sequence_items
self.assertEqual(len(actual_items), len(EXPECTED_ITEMS))
for expected in EXPECTED_ITEMS:
self.assertIn(expected, actual_items)
@attr(shard=1)
class PDFTextBooksTabTest(UniqueCourseTest):
"""
Tests that verify each of the textbook tabs available within a course.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(PDFTextBooksTabTest, self).setUp()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.tab_nav = TabNavPage(self.browser)
# Install a course with TextBooks
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
# Add PDF textbooks to course fixture.
for i in range(1, 3):
course_fix.add_textbook("PDF Book {}".format(i), [{"title": "Chapter Of Book {}".format(i), "url": ""}])
course_fix.install()
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_verify_textbook_tabs(self):
"""
Test that multiple PDF textbooks load correctly in the LMS.
"""
self.course_info_page.visit()
# Verify each PDF textbook tab by visiting it; the test fails if the correct tab does not load.
for i in range(1, 3):
self.tab_nav.go_to_tab("PDF Book {}".format(i))
@attr(shard=1)
class VisibleToStaffOnlyTest(UniqueCourseTest):
"""
Tests that content with visible_to_staff_only set to True cannot be viewed by students.
"""
def setUp(self):
super(VisibleToStaffOnlyTest, self).setUp()
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Subsection With Locked Unit').add_children(
XBlockFixtureDesc('vertical', 'Locked Unit', metadata={'visible_to_staff_only': True}).add_children(
XBlockFixtureDesc('html', 'Html Child in locked unit', data="<html>Visible only to staff</html>"),
),
XBlockFixtureDesc('vertical', 'Unlocked Unit').add_children(
XBlockFixtureDesc('html', 'Html Child in unlocked unit', data="<html>Visible only to all</html>"),
)
),
XBlockFixtureDesc('sequential', 'Unlocked Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('html', 'Html Child in visible unit', data="<html>Visible to all</html>"),
)
),
XBlockFixtureDesc('sequential', 'Locked Subsection', metadata={'visible_to_staff_only': True}).add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc(
'html', 'Html Child in locked subsection', data="<html>Visible only to staff</html>"
)
)
)
)
).install()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.course_nav = CourseNavPage(self.browser)
def test_visible_to_staff(self):
"""
Scenario: All content is visible for a user marked is_staff (different from course staff)
Given some of the course content has been marked 'visible_to_staff_only'
And I am logged on with an account marked 'is_staff'
Then I can see all course content
"""
AutoAuthPage(self.browser, username="STAFF_TESTER", email="johndoe_staff@example.com",
course_id=self.course_id, staff=True).visit()
self.courseware_page.visit()
self.assertEqual(3, len(self.course_nav.sections['Test Section']))
self.course_nav.go_to_section("Test Section", "Subsection With Locked Unit")
self.assertEqual([u'Locked Unit', u'Unlocked Unit'], self.course_nav.sequence_items)
self.course_nav.go_to_section("Test Section", "Unlocked Subsection")
self.assertEqual([u'Test Unit'], self.course_nav.sequence_items)
self.course_nav.go_to_section("Test Section", "Locked Subsection")
self.assertEqual([u'Test Unit'], self.course_nav.sequence_items)
def test_visible_to_student(self):
"""
Scenario: Content marked 'visible_to_staff_only' is not visible for students in the course
Given some of the course content has been marked 'visible_to_staff_only'
And I am logged on with an authorized student account
Then I can only see content without 'visible_to_staff_only' set to True
"""
AutoAuthPage(self.browser, username="STUDENT_TESTER", email="johndoe_student@example.com",
course_id=self.course_id, staff=False).visit()
self.courseware_page.visit()
self.assertEqual(2, len(self.course_nav.sections['Test Section']))
self.course_nav.go_to_section("Test Section", "Subsection With Locked Unit")
self.assertEqual([u'Unlocked Unit'], self.course_nav.sequence_items)
self.course_nav.go_to_section("Test Section", "Unlocked Subsection")
self.assertEqual([u'Test Unit'], self.course_nav.sequence_items)
@attr(shard=1)
class TooltipTest(UniqueCourseTest):
"""
Tests that tooltips are displayed
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(TooltipTest, self).setUp()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.tab_nav = TabNavPage(self.browser)
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('static_tab', 'Test Static Tab'),
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
XBlockFixtureDesc('problem', 'Test Problem 2', data=load_data_str('formula_problem.xml')),
XBlockFixtureDesc('html', 'Test HTML'),
)
)
).install()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_tooltip(self):
"""
Verify that tooltips are displayed when you hover over the sequence nav bar.
"""
self.course_info_page.visit()
self.tab_nav.go_to_tab('Course')
self.courseware_page.verify_tooltips_displayed()
@attr(shard=1)
class PreRequisiteCourseTest(UniqueCourseTest):
"""
Tests that pre-requisite course messages are displayed
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(PreRequisiteCourseTest, self).setUp()
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
self.prc_info = {
'org': 'test_org',
'number': self.unique_id,
'run': 'prc_test_run',
'display_name': 'PR Test Course' + self.unique_id
}
CourseFixture(
self.prc_info['org'], self.prc_info['number'],
self.prc_info['run'], self.prc_info['display_name']
).install()
pre_requisite_course_key = generate_course_key(
self.prc_info['org'],
self.prc_info['number'],
self.prc_info['run']
)
self.pre_requisite_course_id = unicode(pre_requisite_course_key)
self.dashboard_page = DashboardPage(self.browser)
self.settings_page = SettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_dashboard_message(self):
"""
Scenario: Any course with a pre-requisite course set should show appropriate
messaging on the student dashboard.
Given that I am on the student dashboard
When I view a course with a pre-requisite course set
Then at the bottom of the course I should see the course requirements message.
"""
# Visit the dashboard page and make sure there is no pre-requisite course message
self.dashboard_page.visit()
self.assertFalse(self.dashboard_page.pre_requisite_message_displayed())
# Log out and log back in as staff.
LogoutPage(self.browser).visit()
AutoAuthPage(self.browser, course_id=self.course_id, staff=True).visit()
# Visit the course settings page and set the pre-requisite course
self.settings_page.visit()
self._set_pre_requisite_course()
# Log out and log back in as a student.
LogoutPage(self.browser).visit()
AutoAuthPage(self.browser, course_id=self.course_id, staff=False).visit()
# Visit the dashboard page again; it should now show the pre-requisite course message
self.dashboard_page.visit()
EmptyPromise(lambda: self.dashboard_page.available_courses > 0, 'Dashboard page loaded').fulfill()
self.assertTrue(self.dashboard_page.pre_requisite_message_displayed())
def _set_pre_requisite_course(self):
"""
Set the pre-requisite course.
"""
select_option_by_value(self.settings_page.pre_requisite_course_options, self.pre_requisite_course_id)
self.settings_page.save_changes()
@attr(shard=1)
class ProblemExecutionTest(UniqueCourseTest):
"""
Tests of problems.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(ProblemExecutionTest, self).setUp()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.course_nav = CourseNavPage(self.browser)
self.tab_nav = TabNavPage(self.browser)
# Install a course with sections and problems.
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
course_fix.add_asset(['python_lib.zip'])
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('problem', 'Python Problem', data=dedent(
"""\
<problem>
<script type="loncapa/python">
from number_helpers import seventeen, fortytwo
oneseven = seventeen()
def check_function(expect, ans):
if int(ans) == fortytwo(-22):
return True
else:
return False
</script>
<p>What is the sum of $oneseven and 3?</p>
<customresponse expect="20" cfn="check_function">
<textline/>
</customresponse>
</problem>
"""
))
)
)
).install()
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_python_execution_in_problem(self):
# Navigate to the problem page
self.course_info_page.visit()
self.tab_nav.go_to_tab('Course')
self.course_nav.go_to_section('Test Section', 'Test Subsection')
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name.upper(), 'PYTHON PROBLEM')
# Does the page have computation results?
self.assertIn("What is the sum of 17 and 3?", problem_page.problem_text)
# Fill in the answer correctly.
problem_page.fill_answer("20")
problem_page.click_check()
self.assertTrue(problem_page.is_correct())
# Fill in the answer incorrectly.
problem_page.fill_answer("4")
problem_page.click_check()
self.assertFalse(problem_page.is_correct())
@attr(shard=1)
class EntranceExamTest(UniqueCourseTest):
"""
Tests that course has an entrance exam.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(EntranceExamTest, self).setUp()
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.settings_page = SettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_entrance_exam_section(self):
"""
Scenario: Any course with an entrance exam enabled should show an entrance exam
chapter on the course page.
Given that I am on the course page
When I view the course that has an entrance exam
Then there should be an "Entrance Exam" chapter.
"""
entrance_exam_link_selector = '.accordion .course-navigation .chapter .group-heading'
# Visit the course page and make sure there is no entrance exam chapter.
self.courseware_page.visit()
self.courseware_page.wait_for_page()
self.assertFalse(element_has_text(
page=self.courseware_page,
css_selector=entrance_exam_link_selector,
text='Entrance Exam'
))
# Log out and log back in as staff.
LogoutPage(self.browser).visit()
AutoAuthPage(self.browser, course_id=self.course_id, staff=True).visit()
# Visit the course settings page and enable the entrance exam for the course.
self.settings_page.visit()
self.settings_page.wait_for_page()
self.assertTrue(self.settings_page.is_browser_on_page())
self.settings_page.entrance_exam_field.click()
self.settings_page.save_changes()
# Log out and log back in as a student.
LogoutPage(self.browser).visit()
AutoAuthPage(self.browser, course_id=self.course_id, staff=False).visit()
# Visit the courseware page and make sure there is an "Entrance Exam" section.
self.courseware_page.visit()
self.courseware_page.wait_for_page()
self.assertTrue(element_has_text(
page=self.courseware_page,
css_selector=entrance_exam_link_selector,
text='Entrance Exam'
))
@attr(shard=1)
class NotLiveRedirectTest(UniqueCourseTest):
"""
Test that a banner is shown when the user is redirected to
the dashboard from a non-live course.
"""
def setUp(self):
"""Create a course that isn't live yet and enroll for it."""
super(NotLiveRedirectTest, self).setUp()
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name'],
start_date=datetime(year=2099, month=1, day=1)
).install()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_redirect_banner(self):
"""
Navigate to the course info page, then check that we're on the
dashboard page with the appropriate message.
"""
url = BASE_URL + "/courses/" + self.course_id + "/" + 'info'
self.browser.get(url)
page = DashboardPage(self.browser)
page.wait_for_page()
self.assertIn(
'The course you are looking for does not start until',
page.banner_text
)
@attr(shard=1)
class EnrollmentClosedRedirectTest(UniqueCourseTest):
"""
Test that a banner is shown when the user is redirected to the
dashboard after trying to view the track selection page for a
course after enrollment has ended.
"""
def setUp(self):
"""Create a course that is closed for enrollment, and sign in as a user."""
super(EnrollmentClosedRedirectTest, self).setUp()
course = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
now = datetime.now(pytz.UTC)
course.add_course_details({
'enrollment_start': (now - timedelta(days=30)).isoformat(),
'enrollment_end': (now - timedelta(days=1)).isoformat()
})
course.install()
# Add an honor mode to the course
ModeCreationPage(self.browser, self.course_id).visit()
# Add a verified mode to the course
ModeCreationPage(
self.browser,
self.course_id,
mode_slug=u'verified',
mode_display_name=u'Verified Certificate',
min_price=10,
suggested_prices='10,20'
).visit()
def _assert_dashboard_message(self):
"""
Assert that the 'closed for enrollment' text is present on the
dashboard.
"""
page = DashboardPage(self.browser)
page.wait_for_page()
self.assertIn(
'The course you are looking for is closed for enrollment',
page.banner_text
)
def test_redirect_banner(self):
"""
Navigate to the course info page, then check that we're on the
dashboard page with the appropriate message.
"""
AutoAuthPage(self.browser).visit()
url = BASE_URL + "/course_modes/choose/" + self.course_id
self.browser.get(url)
self._assert_dashboard_message()
def test_login_redirect(self):
"""
Test that the user is correctly redirected after logistration when
attempting to enroll in a closed course.
"""
url = '{base_url}/register?{params}'.format(
base_url=BASE_URL,
params=urllib.urlencode({
'course_id': self.course_id,
'enrollment_action': 'enroll',
'email_opt_in': 'false'
})
)
self.browser.get(url)
register_page = CombinedLoginAndRegisterPage(
self.browser,
start_page="register",
course_id=self.course_id
)
register_page.wait_for_page()
register_page.register(
email="email@example.com",
password="password",
username="username",
full_name="Test User",
country="US",
favorite_movie="Mad Max: Fury Road",
terms_of_service=True
)
self._assert_dashboard_message()
@attr(shard=1)
class LMSLanguageTest(UniqueCourseTest):
""" Test suite for the LMS Language """
def setUp(self):
super(LMSLanguageTest, self).setUp()
self.dashboard_page = DashboardPage(self.browser)
self.account_settings = AccountSettingsPage(self.browser)
AutoAuthPage(self.browser).visit()
def test_lms_language_change(self):
"""
Scenario: Ensure that language selection works correctly.
First I go to the user dashboard page in the LMS and see that 'English' is selected by default.
Then I choose 'Dummy Language' from the drop-down (at the top of the page).
Then I visit the student account settings page and see that the language has been updated to
'Dummy Language' in both drop-downs.
After that I select 'English' again and visit the dashboard page.
Then I can see that the top-level language selector has kept its value of 'English'.
"""
self.dashboard_page.visit()
language_selector = self.dashboard_page.language_selector
self.assertEqual(
get_selected_option_text(language_selector),
u'English'
)
select_option_by_text(language_selector, 'Dummy Language (Esperanto)')
self.dashboard_page.wait_for_ajax()
self.account_settings.visit()
self.assertEqual(self.account_settings.value_for_dropdown_field('pref-lang'), u'Dummy Language (Esperanto)')
self.assertEqual(
get_selected_option_text(language_selector),
u'Dummy Language (Esperanto)'
)
# Change back to English.
select_option_by_text(language_selector, 'English')
self.account_settings.wait_for_ajax()
self.assertEqual(self.account_settings.value_for_dropdown_field('pref-lang'), u'English')
self.dashboard_page.visit()
self.assertEqual(
get_selected_option_text(language_selector),
u'English'
)
@attr('a11y')
class CourseInfoA11yTest(UniqueCourseTest):
"""Accessibility test for course home/info page."""
def setUp(self):
super(CourseInfoA11yTest, self).setUp()
self.course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
self.course_fixture.add_update(
CourseUpdateDesc(date='January 29, 2014', content='Test course update1')
)
self.course_fixture.add_update(
CourseUpdateDesc(date='February 5th, 2014', content='Test course update2')
)
self.course_fixture.add_update(
CourseUpdateDesc(date='March 31st, 2014', content='Test course update3')
)
self.course_fixture.install()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_course_home_a11y(self):
self.course_info_page.visit()
self.course_info_page.a11y_audit.check_for_accessibility_errors()
| louyihua/edx-platform | common/test/acceptance/tests/lms/test_lms.py | Python | agpl-3.0 | 53,429 | ["VisIt"] | 0f6f1387cf64ee82428aa61d04f3c9dcf1ddd17f3314f144d16d5200a144ec92 |
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
| marmyshev/bug_1117098 | openlp/plugins/custom/forms/__init__.py | Python | gpl-2.0 | 2,106 | ["Brian"] | 45003622b1fbe54a5e2b898389f4fcb7af8443d4f1697a178a78b93dd7920526 |
from fem import DofHandler, Basis, QuadFE
from gmrf import GaussianField, Covariance
from assembler import Form, Assembler
from mesh import Mesh1D, QuadMesh
from plot import Plot
from function import Nodal
import numpy as np
from scipy import sparse as sp
import matplotlib.pyplot as plt
"""
Goal:
Investigate optimal mesh density for reproducing the statistics of a given
quantity of interest. Let q be a Gaussian random field expressed at some
maximum resolution, and let
    Q = Var(J(q)),
where
    J(q) = \int_{0.75}^{1} q(x) dx.
The field is discretized using a piecewise constant basis over the fine mesh.
Experiments:
1. Greedy algorithm: Start off with coarse mesh. At every stage, choose r
cells to refine. Use brute-force method, enumerating over all
possibilities.
2. Optimization: Set up a mesh optimization problem.
Implementation:
a. Calculate variance for a given mesh.
i. Use the intermediate mesh (assembly every time),
ii. or the fine mesh with a lifting operator (we probably need the
projection in any case).
b. Calculate the gradient somehow.
"""
def projection_matrix(dofhandler, fine_flag, coarse_flag):
"""
Project a piecewise constant function, defined on a fine scale mesh onto
a coarse scale mesh.
Inputs:
dofhandler: DofHandler, for discontinuous piecewise constant elements
fine_flag: str/int, mesh-flag for fine mesh
coarse_flag: str/int, mesh-flag for coarse mesh
Outputs:
P: double, sparse (n_dofs_coarse, n_dofs_fine) matrix representation of
the projection.
"""
assert dofhandler.element.element_type()=='DQ0', \
'Only piecewise constant approximations supported.'
mesh = dofhandler.mesh
rows, cols, vals = [], [], []
for leaf in mesh.cells.get_leaves(subforest_flag=fine_flag):
# Iterate over fine mesh
# Add leaf dof to list of columns
cols.extend(dofhandler.get_cell_dofs(leaf))
# Search for nearest ancestor in coarse grid
ancestor = leaf
while not ancestor.is_marked(coarse_flag):
ancestor = ancestor.get_parent()
# Record coarse cell dof
rows.extend(dofhandler.get_cell_dofs(ancestor))
# Determine the ratio in areas
if mesh.dim()==1:
# One-dimensional interval
multiplier = leaf.length()/ancestor.length()
elif mesh.dim()==2:
# Two-dimensional cell
multiplier = leaf.area()/ancestor.area()
# Store the value
vals.append(multiplier)
#
# Re-index rows and columns
#
# Compute unique dofs
col_dofs = list(set(cols))
row_dofs = list(set(rows))
# Re-index using unique dofs
rows = [row_dofs.index(i) for i in rows]
cols = [col_dofs.index(i) for i in cols]
#
# Define sparse projection matrix
#
n_rows = len(row_dofs)
n_cols = len(col_dofs)
P = sp.coo_matrix((vals,(rows,cols)), shape=(n_rows,n_cols))
return P
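# A toy illustration (assumes one uniform bisection, so every coarse cell has
# two fine children of equal length): each projection entry is then the
# child/parent length ratio 0.5, giving
#
#     P = [[0.5, 0.5, 0.0, 0.0],
#          [0.0, 0.0, 0.5, 0.5]]
#
# so that P.dot([1, 3, 5, 7]) == [2, 6], i.e. P locally averages fine-cell
# values, while P.T (the lifting operator) extends coarse values constantly
# back onto the fine mesh.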
def error(dofhandler, q, L):
"""
Compute the error E(|J(q)-J(qhat)|^2) for a given sample of q's.
"""
def compute_variance(dofhandler, q, L, submesh_flag=None):
"""
Compute the variance of the quantity of interest
J(q) = \int_{0.75}^{1} q(x) dx
On a given submesh.
Inputs:
dofhandler: DofHandler, associated with the problem
L: double, vector representing the linear operator on the finest mesh
submesh_flag: str/int, representing the coarser mesh.
Output:
v: double, variance given by
v = L^T (P^T P) K (P^T P) L
"""
#
# Get covariance matrix
#
K = q.covariance().get_matrix()
n = q.size()
if submesh_flag is None:
# Identity Matrix
P = np.eye(n)
else:
#
# Get projection and relaxation matrix
#
P = projection_matrix(dofhandler, None, submesh_flag) # projection
Pt = P.transpose() # lifting operator
PtP = Pt.dot(P)
# Compute variance
return L.dot(PtP.dot(K.dot(PtP.dot(L))))
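# Worked special case: with submesh_flag=None, P is the identity, so PtP = I
# and the return value reduces to L^T K L, the exact fine-mesh variance of
# J(q); for a coarser submesh it is L^T (P^T P) K (P^T P) L, the variance of
# J applied to the projected-and-lifted field.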
def test01_projection():
"""
Test projection operator
"""
pass
def test02_variance():
"""
Compute the variance of J(q) for different mesh refinement levels
and compare with MC estimates.
"""
l_max = 8
for i_res in np.arange(2,l_max):
# Computational mesh
mesh = Mesh1D(resolution=(2**i_res,))
# Element
element = QuadFE(mesh.dim(), 'DQ0')
dofhandler = DofHandler(mesh, element)
dofhandler.distribute_dofs()
# Linear Functional
mesh.mark_region('integrate', lambda x: x>=0.75, entity_type='cell',
strict_containment=False)
phi = Basis(dofhandler)
assembler = Assembler(Form(4,test=phi, flag='integrate'))
assembler.assemble()
L = assembler.get_vector()
# Define Gaussian random field
C = Covariance(dofhandler, name='gaussian', parameters={'l':0.05})
C.compute_eig_decomp()
eta = GaussianField(dofhandler.n_dofs(), K=C)
eta.update_support()
n_samples = 100000
J_paths = L.dot(eta.sample(n_samples=n_samples))
var_mc = np.var(J_paths)
lmd, V = C.get_eig_decomp()
LV = L.dot(V)
var_an = LV.dot(np.diag(lmd).dot(LV.transpose()))
print(var_mc, var_an)
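# The analytic value var_an follows from the standard eigen identity: with
# K = V diag(lmd) V^T,
#
#     Var(L.eta) = L^T K L = (L V) diag(lmd) (L V)^T = sum_k lmd_k * (L v_k)^2,
#
# which is exactly what LV.dot(np.diag(lmd).dot(LV.transpose())) computes, so
# the Monte Carlo estimate var_mc should approach var_an as n_samples grows.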
def experiment01():
"""
Compute the quantity of interest, its expectation, and its variance
"""
#
# FE Discretization
#
# Computational mesh
mesh = Mesh1D(resolution=(64,))
# Element
element = QuadFE(mesh.dim(), 'DQ0')
dofhandler = DofHandler(mesh, element)
dofhandler.distribute_dofs()
# Linear Functional
mesh.mark_region('integrate', lambda x: x>0.75, entity_type='cell',
strict_containment=False)
phi = Basis(dofhandler)
assembler = Assembler(Form(1,test=phi, flag='integrate'))
assembler.assemble()
L = assembler.get_vector()
# Gaussian field
if __name__ == '__main__':
#%% Test 2: Variance
test02_variance()
#%% Simple projection Matrix
#
# Define and record coarse mesh
#
mesh = Mesh1D(resolution=(16,))
mesh.record(0)
#
# Refine mesh and record
#
"""
# One level of refinement
mesh.cells.find_node([0]).mark('r')
mesh.cells.refine(refinement_flag='r')
mesh.cells.find_node([0,0]).mark('r')
mesh.cells.refine(refinement_flag='r')
"""
l_max = 4
for i in range(l_max):
mesh.cells.refine()
mesh.record(i+1)
# plot meshes
plot = Plot(quickview=False)
fig, ax = plt.subplots(l_max,1)
for i in range(l_max):
ax[i] = plot.mesh(mesh, axis=ax[i], subforest_flag=i)
#ax[1] = plot.mesh(mesh, axis=ax[1], subforest_flag=None)
plt.show()
#
# Define piecewise constant elements
#
element = QuadFE(mesh.dim(), 'DQ0')
dofhandler = DofHandler(mesh, element)
dofhandler.distribute_dofs()
# Get projection matrix
P = projection_matrix(dofhandler, None, 0)
fig, ax = plt.subplots(1,1)
for i in range(l_max):
CC = Covariance(dofhandler, subforest_flag=i, name='gaussian',
parameters={'l':0.01})
CC.compute_eig_decomp()
d, V = CC.get_eig_decomp()
print(d)
lmd = np.arange(len(d))
ax.semilogy(lmd, d, '.-', label='level=%d'%i)
plt.legend()
plt.show()
#
# Define random field on the fine mesh
#
C = Covariance(dofhandler, name='gaussian', parameters={'l':0.05})
C.compute_eig_decomp()
eta = GaussianField(dofhandler.n_dofs(), K=C)
eta.update_support()
#eta_path = Nodal(data=eta.sample(), basis=phi)
eta0 = P.dot(eta.sample())
eg0 = eta.condition(P, eta0, n_samples=100)
eg0_paths = Nodal(data=eg0, basis=Basis(dofhandler))
e0_path = Nodal(data=eta0, basis=Basis(dofhandler, subforest_flag=0))
plot = Plot(quickview=False)
ax = plt.subplot(111)
for i in range(30):
ax = plot.line(eg0_paths, axis=ax, mesh=mesh, i_sample=i,
plot_kwargs={'color':'k', 'linewidth':0.5})
ax = plot.line(e0_path, axis=ax, mesh=mesh,
plot_kwargs={'color':'r', 'linewidth':2})
ax.set_ylim([-3,3])
plt.tight_layout()
plt.show()
#
# Define linear functional
#
mesh.mark_region('integrate', lambda x: x>0.75, entity_type='cell',
strict_containment=False)
phi = Basis(dofhandler)
assembler = Assembler(Form(1,test=phi, flag='integrate'))
assembler.assemble()
L = assembler.get_vector()
n_samples = 1000
J = {}
for l in range(l_max+1):
P = projection_matrix(dofhandler, None, l)
J[l] = L.dot(P.T.dot(P.dot(eta.sample(n_samples=n_samples))))
plt.hist(J[l], bins=40, density=False, alpha=0.5, label=l)
plt.legend()
plt.show()
# Compute variance
var = np.zeros(l_max+1)
h = np.zeros(l_max+1)
for i in range(l_max+1):
var[i] = compute_variance(dofhandler, eta, L, submesh_flag=i)
h[i] = mesh.cells.get_leaves(subforest_flag=i)[0].length()
print(var)
plt.loglog(h,var)
#%%
#
# Initial coarse mesh
#
l_max = 4
mesh = Mesh1D()
for i in range(l_max):
mesh.record(i)
mesh.cells.refine()
mesh.mark_region('integrate', lambda x: x>0.75, entity_type='cell',
strict_containment=False)
plot = Plot(time=0.1)
plot.mesh(mesh,regions=[('integrate','cell')])
# Finite Element Space
DQ0 = QuadFE(1,'DQ0')
dh_0 = DofHandler(mesh,DQ0)
dh_0.distribute_dofs()
n = dh_0.n_dofs()
leaves = mesh.cells.get_leaves()
print(len(leaves))
for cell in mesh.cells.get_leaves():
#print(cell.get_root().info())
#print(cell.get_parent().is_marked(l_max-1))
c_dof = dh_0.get_cell_dofs(cell)[0]
#print(c_dof)
phi_0 = Basis(dh_0)
psi_0 = Basis(dh_0, subforest_flag=l_max-1)
#plot.mesh(mesh, dofhandler=dh)
C = Covariance(dh_0, name='gaussian', parameters={'l':0.05})
eta = GaussianField(n,K=C)
eta_path = Nodal(data=eta.sample(), basis=phi_0)
assembler = Assembler(Form(1,test=phi_0, flag='integrate'))
assembler.assemble()
L = assembler.get_vector()
print(L)
#
# Coarsening
#
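# The loop below builds a local averaging matrix R that maps fine-mesh DQ0
# dofs to their parent cells: each parent value is the mean of its two
# children, hence the weight 0.5 per child (1D bisection refinement).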
rows = []
cols = []
vals = []
for leaf in mesh.cells.get_leaves():
rows.extend(dh_0.get_cell_dofs(leaf.get_parent()))
cols.extend(dh_0.get_cell_dofs(leaf))
vals.append(0.5)
#
# Map to index
#
# Rows
coarse_dofs = list(set(rows))
dof2idx = {dof: i for i, dof in enumerate(coarse_dofs)}
rows = [dof2idx[dof] for dof in rows]
# Columns
fine_dofs = list(set(cols))
dof2idx = {dof: i for i, dof in enumerate(fine_dofs)}
cols = [dof2idx[dof] for dof in cols]
# Local averaging matrix
R = sp.coo_matrix((vals,(rows,cols))).tocsc()
# Average data
ave_data = R.dot(eta_path.data())
eta_ave = Nodal(data=ave_data, basis=psi_0)
#
# Plots
#
plot = Plot(quickview=False)
ax = plt.subplot(111)
ax = plot.line(eta_path, axis=ax, mesh=mesh)
ax = plot.line(eta_ave, axis=ax, mesh=mesh)
ax.set_ylim([-3,3])
plt.tight_layout()
plt.show()
#%%
mesh = Mesh1D()
# Coarse mesh
mesh.cells.refine()
mesh.record(0)
"""
for i in range(3):
for leaf in mesh.cells.get_leaves():
if np.random.rand()>0.3:
leaf.mark('r')
mesh.cells.refine(refinement_flag='r')
mesh.record(0)
"""
plot = Plot(quickview=False)
fig, ax = plt.subplots(1,2)
ax[0] = plot.mesh(mesh, axis=ax[0], subforest_flag=0)
# Fine submesh
for i in range(3):
for leaf in mesh.cells.get_leaves():
if np.random.rand()>0.4:
leaf.mark('r')
mesh.cells.refine(refinement_flag='r')
mesh.record(1)
#mesh.balance(0)
#mesh.balance()
ax[1] = plot.mesh(mesh, axis=ax[1], subforest_flag=1)
mesh.cells.get_child(0).info()
DQ0 = QuadFE(mesh.dim(),'DQ0')
dh = DofHandler(mesh,DQ0)
dh.distribute_dofs()
#for leaf in mesh.cells.get_leaves(subforest_flag=1):
# print(dh.get_cell_dofs(leaf))
leaves = mesh.cells.get_leaves(subforest_flag=0)
while leaves:
    leaf = leaves.pop()
print(mesh.cells.depth())
d2i = [[] for _ in range(mesh.cells.depth()+1)]
print(len(d2i))
for cell in mesh.cells.traverse(mode='breadth-first', flag=1):
    # Add this cell's dofs to its depth level
    d2i[cell.get_depth()].extend(dh.get_cell_dofs(cell))
for level in d2i:
level.sort()
print(d2i)
|
hvanwyk/quadmesh
|
experiments/multiscale_gmrf/ex04/ex04.py
|
Python
|
mit
| 13,614
|
[
"Gaussian"
] |
f50515a4eec68ce14b0c58fce2d1e42a69745d7629792e47fe9d03a70d9a6a21
|
"""The Mayavi Envisage application.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
import sys
import os.path
import logging
# Enthought library imports.
from apptools.logger.api import LogFileHandler, FORMATTER
from traits.etsconfig.api import ETSConfig
from traits.api import (HasTraits, Instance, Int,
on_trait_change, Bool)
# Local imports.
from mayavi_workbench_application import MayaviWorkbenchApplication
from mayavi.preferences.api import preference_manager
from mayavi.core.customize import get_custom_plugins
# GLOBALS
logger = logging.getLogger()
######################################################################
# Useful functions.
######################################################################
def setup_logger(logger, fname, stream=True, mode=logging.ERROR):
"""Setup a log file and the logger. If the given file name is not
absolute, put the log file in `ETSConfig.application_home`, if not
it will create it where desired.
Parameters:
-----------
fname -- file name the logger should use. If this is an absolute
path it will create the log file as specified, if not it will put it
in `ETSConfig.application_home`.
stream -- Add a stream handler.
mode -- the logging mode of the stream handler.
"""
if not os.path.isabs(fname):
path = os.path.join(ETSConfig.application_home, fname)
else:
path = fname
    # Check if we have already added a logger (this can happen when the
    # app is started multiple times from, say, IPython).
handlers = logger.handlers
if len(handlers) > 1:
h = handlers[0]
if isinstance(h, LogFileHandler) and h.baseFilename == path:
logger.info('Logging handlers already set! Not duplicating.')
return
logger.setLevel(logging.DEBUG)
handler = LogFileHandler(path)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
if stream:
s = logging.StreamHandler()
s.setFormatter(FORMATTER)
s.setLevel(mode)
logger.addHandler(s)
logger.info("*"*80)
logger.info("logfile is: '%s'", os.path.abspath(path))
logger.info("*"*80)
def get_non_gui_plugin_classes():
"""Get list of basic mayavi plugin classes that do not add any views or
actions."""
from envisage.core_plugin import CorePlugin
from envisage.ui.workbench.workbench_plugin import WorkbenchPlugin
from tvtk.plugins.scene.scene_plugin import ScenePlugin
from mayavi.plugins.mayavi_plugin import MayaviPlugin
plugins = [CorePlugin,
WorkbenchPlugin,
MayaviPlugin,
ScenePlugin,
]
return plugins
def get_non_gui_plugins():
"""Get list of basic mayavi plugins that do not add any views or
actions."""
return [cls() for cls in get_non_gui_plugin_classes()]
def get_plugin_classes():
"""Get list of default plugin classes to use for Mayavi."""
# Force the selection of a toolkit:
from traitsui.api import toolkit
toolkit()
from traits.etsconfig.api import ETSConfig
try_use_ipython = preference_manager.root.use_ipython
use_ipython = False
if ETSConfig.toolkit == 'wx' and try_use_ipython:
try:
# If the right versions of IPython, EnvisagePlugins and
# Pyface are not installed, this import will fail.
from envisage.plugins.ipython_shell.view.ipython_shell_view \
import IPythonShellView
use_ipython = True
except: pass
if use_ipython:
from envisage.plugins.ipython_shell.ipython_shell_plugin import \
IPythonShellPlugin
PythonShellPlugin = IPythonShellPlugin
else:
from envisage.plugins.python_shell.python_shell_plugin import PythonShellPlugin
from envisage.plugins.text_editor.text_editor_plugin import TextEditorPlugin
from apptools.logger.plugin.logger_plugin import LoggerPlugin
from tvtk.plugins.scene.ui.scene_ui_plugin import SceneUIPlugin
from mayavi.plugins.mayavi_ui_plugin import MayaviUIPlugin
plugins = get_non_gui_plugin_classes()
plugins.extend([
LoggerPlugin,
MayaviUIPlugin,
SceneUIPlugin,
PythonShellPlugin,
TextEditorPlugin,
])
return plugins
def get_plugins():
"""Get list of default plugins to use for Mayavi."""
return [cls() for cls in get_plugin_classes()]
###########################################################################
# `Mayavi` class.
###########################################################################
class Mayavi(HasTraits):
"""The Mayavi application class.
This class may be easily subclassed to do something different.
    For example, one way to script MayaVi (as a standalone application
    and not interactively) is to subclass this and override the `run`
    method.
"""
# The main envisage application.
application = Instance('envisage.ui.workbench.api.WorkbenchApplication')
# Turn this off if you don't want the workbench to start the GUI
# event loop.
start_gui_event_loop = Bool(True, desc='start a GUI event loop')
# The MayaVi Script instance.
script = Instance('mayavi.plugins.script.Script')
# The logging mode.
log_mode = Int(logging.ERROR, desc='the logging mode to use')
def main(self, argv=None, plugins=None):
"""The main application is created and launched here.
Parameters
----------
argv : list of strings
The list of command line arguments. The default is `None`
where no command line arguments are parsed. To support
command line arguments you can pass `sys.argv[1:]`.
plugins : list of Plugin objects
            List of plugins to start. If none is provided it defaults
            to the plugins returned by `get_plugins()`.
        log_mode :
            The logging mode to use (set via the `log_mode` trait).
"""
# Parse any cmd line args.
if argv is None:
argv = []
self.parse_command_line(argv)
if plugins is None:
plugins = get_plugins()
plugins += get_custom_plugins()
# Create the application
prefs = preference_manager.preferences
app = MayaviWorkbenchApplication(plugins=plugins,
preferences=prefs,
start_gui_event_loop=self.start_gui_event_loop)
self.application = app
# Setup the logger.
self.setup_logger()
# Start the application.
app.run()
def setup_logger(self):
"""Setup logging for the application."""
setup_logger(logger, 'mayavi.log', mode=self.log_mode)
def parse_command_line(self, argv):
"""Parse command line options.
Parameters
----------
- argv : `list` of `strings`
The list of command line arguments.
"""
from optparse import OptionParser
usage = "usage: %prog [options]"
parser = OptionParser(usage)
(options, args) = parser.parse_args(argv)
def run(self):
"""This function is called after the GUI has started.
Override this to do whatever you want to do as a MayaVi
script. If this is not overridden then an empty MayaVi
application will be started.
*Make sure all other MayaVi specific imports are made here!*
If you import MayaVi related code earlier you will run into
difficulties. Use 'self.script' to script the mayavi engine.
"""
pass
######################################################################
# Non-public interface.
######################################################################
@on_trait_change('application.gui:started')
def _on_application_gui_started(self, obj, trait_name, old, new):
"""This is called as soon as the Envisage GUI starts up. The
method is responsible for setting our script instance.
"""
if trait_name != 'started' or not new:
return
app = self.application
from mayavi.plugins.script import Script
window = app.workbench.active_window
# Set our script instance.
self.script = window.get_service(Script)
# Call self.run from the GUI thread.
app.gui.invoke_later(self.run)
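# Example (sketch): following the `run` docstring above, one way to script
# Mayavi is to subclass and override `run`; `MyApp` is a hypothetical name.
#
#   class MyApp(Mayavi):
#       def run(self):
#           # Mayavi-specific imports must be made here, after the GUI starts.
#           from mayavi.sources.api import ParametricSurface
#           from mayavi.modules.api import Surface
#           e = self.script.engine
#           e.new_scene()
#           e.add_source(ParametricSurface())
#           e.add_module(Surface())
#
#   if __name__ == '__main__':
#       MyApp().main()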
def main(argv=None):
"""Simple helper to start up the mayavi application. This returns
the running application."""
m = Mayavi()
m.main(argv)
return m
if __name__ == '__main__':
main(sys.argv[1:])
|
liulion/mayavi
|
mayavi/plugins/app.py
|
Python
|
bsd-3-clause
| 8,773
|
[
"Mayavi"
] |
86f2e98412d6f51150a089df7d836302512bbcd82e2381a1411bff3166ceb794
|
""" Diffusion 2: jump diffusion, stochastic volatility, stochastic time
Created on Tue Dec 08 15:03:49 2009
Author: josef-pktd following Meucci
License: BSD
contains:
CIRSubordinatedBrownian
Heston
IG
JumpDiffusionKou
JumpDiffusionMerton
NIG
VG
References
----------
Attilio Meucci, Review of Discrete and Continuous Processes in Finance: Theory and Applications
Bloomberg Portfolio Research Paper No. 2009-02-CLASSROOM July 1, 2009
http://papers.ssrn.com/sol3/papers.cfm?abstract_id=1373102
this is currently mostly a translation from matlab of
http://www.mathworks.com/matlabcentral/fileexchange/23554-review-of-discrete-and-continuous-processes-in-finance
license BSD:
Copyright (c) 2008, Attilio Meucci
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
TODO:
* vectorize where possible
* which processes are exactly simulated by finite differences ?
* include or exclude (now) the initial observation ?
* convert to and merge with diffusion.py (part 1 of diffusions)
* which processes can be easily estimated ?
loglike or characteristic function ?
* tests ? check for possible index errors (random indices), graphs look ok
* adjust notation, variable names, more consistent, more pythonic
* delete a few unused lines, cleanup
* docstrings
random bug (showed up only once, need fuzz-testing to replicate)
File "../diffusion2.py", line 375, in <module>
x = jd.simulate(mu,sigma,lambd,a,D,ts,nrepl)
File "../diffusion2.py", line 129, in simulate
jumps_ts[n] = CumS[Events]
IndexError: index out of bounds
CumS is empty array, Events == -1
"""
import numpy as np
#from scipy import stats # currently only uses np.random
import matplotlib.pyplot as plt
class JumpDiffusionMerton(object):
'''
Example
-------
mu=.00 # deterministic drift
sig=.20 # Gaussian component
    lambd=3.45 # Poisson process arrival rate
a=0 # drift of log-jump
D=.2 # st.dev of log-jump
X = JumpDiffusionMerton().simulate(mu,sig,lambd,a,D,ts,nrepl)
plt.figure()
plt.plot(X.T)
plt.title('Merton jump-diffusion')
'''
def __init__(self):
pass
def simulate(self, m,s,lambd,a,D,ts,nrepl):
T = ts[-1] # time points
# simulate number of jumps
n_jumps = np.random.poisson(lambd*T, size=(nrepl, 1))
jumps=[]
nobs=len(ts)
jumps=np.zeros((nrepl,nobs))
for j in range(nrepl):
# simulate jump arrival time
t = T*np.random.rand(n_jumps[j])#,1) #uniform
t = np.sort(t,0)
# simulate jump size
S = a + D*np.random.randn(n_jumps[j],1)
# put things together
CumS = np.cumsum(S)
jumps_ts = np.zeros(nobs)
for n in range(nobs):
Events = np.sum(t<=ts[n])-1
#print n, Events, CumS.shape, jumps_ts.shape
jumps_ts[n]=0
                # Events is a 0-based index into CumS; -1 means no jump yet,
                # which also avoids the out-of-bounds error noted in the
                # module docstring (empty CumS with Events == -1)
                if Events >= 0:
                    jumps_ts[n] = CumS[Events]
#jumps = np.column_stack((jumps, jumps_ts)) #maybe wrong transl
jumps[j,:] = jumps_ts
D_Diff = np.zeros((nrepl,nobs))
for k in range(nobs):
            Dt = ts[k]            # first interval runs from t=0 to ts[0]
            if k > 0:
                Dt = ts[k]-ts[k-1]
D_Diff[:,k]=m*Dt + s*np.sqrt(Dt)*np.random.randn(nrepl)
x = np.hstack((np.zeros((nrepl,1)),np.cumsum(D_Diff,1)+jumps))
return x
class JumpDiffusionKou(object):
def __init__(self):
pass
def simulate(self, m,s,lambd,p,e1,e2,ts,nrepl):
T=ts[-1]
# simulate number of jumps
N = np.random.poisson(lambd*T,size =(nrepl,1))
jumps=[]
nobs=len(ts)
jumps=np.zeros((nrepl,nobs))
for j in range(nrepl):
# simulate jump arrival time
t=T*np.random.rand(N[j])
t=np.sort(t)
# simulate jump size
ww = np.random.binomial(1, p, size=(N[j]))
S = ww * np.random.exponential(e1, size=(N[j])) - \
(1-ww) * np.random.exponential(e2, N[j])
# put things together
CumS = np.cumsum(S)
jumps_ts = np.zeros(nobs)
for n in range(nobs):
Events = sum(t<=ts[n])-1
jumps_ts[n]=0
                # Events is a 0-based index into CumS; -1 means no jump yet
                if Events >= 0:
                    jumps_ts[n] = CumS[Events]
jumps[j,:] = jumps_ts
D_Diff = np.zeros((nrepl,nobs))
for k in range(nobs):
            Dt = ts[k]            # first interval runs from t=0 to ts[0]
            if k > 0:
                Dt = ts[k]-ts[k-1]
D_Diff[:,k]=m*Dt + s*np.sqrt(Dt)*np.random.normal(size=nrepl)
x = np.hstack((np.zeros((nrepl,1)),np.cumsum(D_Diff,1)+jumps))
return x
class VG(object):
'''variance gamma process
'''
def __init__(self):
pass
def simulate(self, m,s,kappa,ts,nrepl):
T=len(ts)
dXs = np.zeros((nrepl,T))
for t in range(T):
            dt = ts[0]            # first interval runs from t=0 to ts[0]
            if t > 0:
                dt = ts[t]-ts[t-1]
#print dt/kappa
#TODO: check parameterization of gamrnd, checked looks same as np
d_tau = kappa * np.random.gamma(dt/kappa,1.,size=(nrepl))
#print s*np.sqrt(d_tau)
# this raises exception:
#dX = stats.norm.rvs(m*d_tau,(s*np.sqrt(d_tau)))
# np.random.normal requires scale >0
dX = np.random.normal(loc=m*d_tau, scale=1e-6+s*np.sqrt(d_tau))
dXs[:,t] = dX
x = np.cumsum(dXs,1)
return x
class IG(object):
    '''inverse-Gaussian distribution sampler, used by NIG
'''
def __init__(self):
pass
def simulate(self, l,m,nrepl):
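        # Uses the chi-square transform of Michael, Schucany and Haas (1976):
        # draw Y ~ chi2(1), take the smaller root X of the defining quadratic,
        # then accept X with probability m/(m+X), else return m**2/X.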
N = np.random.randn(nrepl,1)
Y = N**2
X = m + (.5*m*m/l)*Y - (.5*m/l)*np.sqrt(4*m*l*Y+m*m*(Y**2))
U = np.random.rand(nrepl,1)
ind = U>m/(X+m)
X[ind] = m*m/X[ind]
return X.ravel()
class NIG(object):
'''normal-inverse-Gaussian
'''
def __init__(self):
pass
def simulate(self, th,k,s,ts,nrepl):
T = len(ts)
DXs = np.zeros((nrepl,T))
for t in range(T):
            Dt = ts[0]            # first interval runs from t=0 to ts[0]
            if t > 0:
                Dt = ts[t]-ts[t-1]
lfrac = 1/k*(Dt**2)
m = Dt
DS = IG().simulate(lfrac, m, nrepl)
N = np.random.randn(nrepl)
DX = s*N*np.sqrt(DS) + th*DS
#print DS.shape, DX.shape, DXs.shape
DXs[:,t] = DX
x = np.cumsum(DXs,1)
return x
class Heston(object):
'''Heston Stochastic Volatility
'''
def __init__(self):
pass
def simulate(self, m, kappa, eta,lambd,r, ts, nrepl,tratio=1.):
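        # Euler discretization of the Heston SDEs:
        #   dv = kappa*(eta - v)*dt + lambd*sqrt(v)*dB_2
        #   dX = m*dt + sqrt(v*dt)*dB_1,   with corr(dB_1, dB_2) = r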
T = ts[-1]
nobs = len(ts)
dt = np.zeros(nobs) #/tratio
dt[0] = ts[0]-0
dt[1:] = np.diff(ts)
DXs = np.zeros((nrepl,nobs))
dB_1 = np.sqrt(dt) * np.random.randn(nrepl,nobs)
dB_2u = np.sqrt(dt) * np.random.randn(nrepl,nobs)
dB_2 = r*dB_1 + np.sqrt(1-r**2)*dB_2u
vt = eta*np.ones(nrepl)
v=[]
dXs = np.zeros((nrepl,nobs))
vts = np.zeros((nrepl,nobs))
for t in range(nobs):
dv = kappa*(eta-vt)*dt[t]+ lambd*np.sqrt(vt)*dB_2[:,t]
dX = m*dt[t] + np.sqrt(vt*dt[t]) * dB_1[:,t]
vt = vt + dv
vts[:,t] = vt
dXs[:,t] = dX
x = np.cumsum(dXs,1)
return x, vts
class CIRSubordinatedBrownian(object):
'''CIR subordinated Brownian Motion
'''
def __init__(self):
pass
def simulate(self, m, kappa, T_dot,lambd,sigma, ts, nrepl):
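        # A CIR process y drives a stochastic clock, dtau = y*dt (floored at
        # 1e-6); the returned x is Brownian motion with drift sampled in the
        # operational time tau = cumsum(dtau).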
T = ts[-1]
nobs = len(ts)
dtarr = np.zeros(nobs) #/tratio
dtarr[0] = ts[0]-0
dtarr[1:] = np.diff(ts)
DXs = np.zeros((nrepl,nobs))
dB = np.sqrt(dtarr) * np.random.randn(nrepl,nobs)
yt = 1.
dXs = np.zeros((nrepl,nobs))
dtaus = np.zeros((nrepl,nobs))
y = np.zeros((nrepl,nobs))
for t in range(nobs):
dt = dtarr[t]
dy = kappa*(T_dot-yt)*dt + lambd*np.sqrt(yt)*dB[:,t]
yt = np.maximum(yt+dy,1e-10) # keep away from zero ?
dtau = np.maximum(yt*dt, 1e-6)
dX = np.random.normal(loc=m*dtau, scale=sigma*np.sqrt(dtau))
y[:,t] = yt
dtaus[:,t] = dtau
dXs[:,t] = dX
tau = np.cumsum(dtaus,1)
x = np.cumsum(dXs,1)
return x, tau, y
def schout2contank(a,b,d):
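    # Convert Schoutens' NIG parameters (alpha, beta, delta) to the
    # Cont-Tankov parameterization (theta, kappa, sigma).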
th = d*b/np.sqrt(a**2-b**2)
k = 1/(d*np.sqrt(a**2-b**2))
s = np.sqrt(d/np.sqrt(a**2-b**2))
return th,k,s
if __name__ == '__main__':
#Merton Jump Diffusion
#^^^^^^^^^^^^^^^^^^^^^
# grid of time values at which the process is evaluated
#("0" will be added, too)
nobs = 252.#1000 #252.
ts = np.linspace(1./nobs, 1., nobs)
nrepl=5 # number of simulations
mu=.010 # deterministic drift
sigma = .020 # Gaussian component
lambd = 3.45 *10 # Poisson process arrival rate
a=0 # drift of log-jump
D=.2 # st.dev of log-jump
jd = JumpDiffusionMerton()
x = jd.simulate(mu,sigma,lambd,a,D,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo
plt.title('Merton jump-diffusion')
sigma = 0.2
lambd = 3.45
x = jd.simulate(mu,sigma,lambd,a,D,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo
plt.title('Merton jump-diffusion')
#Kou jump diffusion
#^^^^^^^^^^^^^^^^^^
mu=.0 # deterministic drift
lambd=4.25 # Poisson process arrival rate
p=.5 # prob. of up-jump
e1=.2 # parameter of up-jump
e2=.3 # parameter of down-jump
sig=.2 # Gaussian component
x = JumpDiffusionKou().simulate(mu,sig,lambd,p,e1,e2,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo
plt.title('double exponential (Kou jump diffusion)')
#variance-gamma
#^^^^^^^^^^^^^^
mu = .1 # deterministic drift in subordinated Brownian motion
kappa = 1. #10. #1 # inverse for gamma shape parameter
sig = 0.5 #.2 # s.dev in subordinated Brownian motion
x = VG().simulate(mu,sig,kappa,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo
plt.title('variance gamma')
#normal-inverse-Gaussian
#^^^^^^^^^^^^^^^^^^^^^^^
# (Schoutens notation)
al = 2.1
be = 0
de = 1
# convert parameters to Cont-Tankov notation
th,k,s = schout2contank(al,be,de)
x = NIG().simulate(th,k,s,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo x-axis
plt.title('normal-inverse-Gaussian')
#Heston Stochastic Volatility
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^
m=.0
kappa = .6 # 2*Kappa*Eta>Lambda^2
eta = .3**2
lambd =.25
r = -.7
T = 20.
nobs = 252.*T#1000 #252.
tsh = np.linspace(T/nobs, T, nobs)
x, vts = Heston().simulate(m,kappa, eta,lambd,r, tsh, nrepl, tratio=20.)
plt.figure()
plt.plot(x.T)
plt.title('Heston Stochastic Volatility')
plt.figure()
plt.plot(np.sqrt(vts).T)
plt.title('Heston Stochastic Volatility - CIR Vol.')
plt.figure()
plt.subplot(2,1,1)
plt.plot(x[0])
plt.title('Heston Stochastic Volatility process')
plt.subplot(2,1,2)
plt.plot(np.sqrt(vts[0]))
plt.title('CIR Volatility')
#CIR subordinated Brownian
#^^^^^^^^^^^^^^^^^^^^^^^^^
m=.1
sigma=.4
kappa=.6 # 2*Kappa*T_dot>Lambda^2
T_dot=1
lambd=1
#T=252*10
#dt=1/252
#nrepl=2
T = 10.
nobs = 252.*T#1000 #252.
tsh = np.linspace(T/nobs, T, nobs)
x, tau, y = CIRSubordinatedBrownian().simulate(m, kappa, T_dot,lambd,sigma, tsh, nrepl)
plt.figure()
plt.plot(tsh, x.T)
plt.title('CIRSubordinatedBrownian process')
plt.figure()
plt.plot(tsh, y.T)
plt.title('CIRSubordinatedBrownian - CIR')
plt.figure()
plt.plot(tsh, tau.T)
plt.title('CIRSubordinatedBrownian - stochastic time ')
plt.figure()
plt.subplot(2,1,1)
plt.plot(tsh, x[0])
plt.title('CIRSubordinatedBrownian process')
plt.subplot(2,1,2)
plt.plot(tsh, y[0], label='CIR')
plt.plot(tsh, tau[0], label='stoch. time')
plt.legend(loc='upper left')
plt.title('CIRSubordinatedBrownian')
#plt.show()
|
bashtage/statsmodels
|
statsmodels/sandbox/tsa/diffusion2.py
|
Python
|
bsd-3-clause
| 13,371
|
[
"Gaussian"
] |
f1a9643fd7c409ad17ae3ccaff9019fdb69e1d70f75701bf316dfa08a996038d
|
#!/usr/bin/env python
########################################################################
# File : dirac-dms-pfn-accessURL
# Author : Stuart Paterson
########################################################################
"""
Retrieve an access URL for a PFN given a valid DIRAC SE
"""
import DIRAC
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
@Script()
def main():
# Registering arguments will automatically add their description to the help menu
Script.registerArgument("PFN: Physical File Name or file containing PFNs")
Script.registerArgument("SE: Valid DIRAC SE")
_, args = Script.parseCommandLine(ignoreErrors=True)
if len(args) > 2:
print("Only one PFN SE pair will be considered")
from DIRAC.Interfaces.API.Dirac import Dirac
dirac = Dirac()
exitCode = 0
pfn = args[0]
seName = args[1]
try:
with open(pfn, "r") as f:
pfns = f.read().splitlines()
except Exception:
pfns = [pfn]
for pfn in pfns:
result = dirac.getPhysicalFileAccessURL(pfn, seName, printOutput=True)
if not result["OK"]:
print("ERROR: ", result["Message"])
exitCode = 2
DIRAC.exit(exitCode)
if __name__ == "__main__":
main()
|
ic-hep/DIRAC
|
src/DIRAC/Interfaces/scripts/dirac_dms_pfn_accessURL.py
|
Python
|
gpl-3.0
| 1,287
|
[
"DIRAC"
] |
66d6ea27258fd8b06077e8416ab42f312e501729338136ed35f7c1aef066895f
|
""" Failover Transfer
The failover transfer client exposes the following methods:
- transferAndRegisterFile()
- transferAndRegisterFileFailover()
Initially these methods were developed inside workflow modules but
have evolved to a generic 'transfer file with failover' client.
The transferAndRegisterFile() method will correctly set registration
requests in case of failure.
The transferAndRegisterFileFailover() method will attempt to upload
a file to a list of alternative SEs and set appropriate replication
to the original target SE as well as the removal request for the
temporary replica.
"""
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.private.RequestValidator import RequestValidator
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
class FailoverTransfer(object):
""" .. class:: FailoverTransfer
"""
#############################################################################
def __init__(self, requestObject=None, log=None, defaultChecksumType='ADLER32'):
""" Constructor function, can specify request object to instantiate
FailoverTransfer or a new request object is created.
"""
self.log = log
if not self.log:
self.log = gLogger.getSubLogger("FailoverTransfer")
self.request = requestObject
if not self.request:
self.request = Request()
self.request.RequestName = 'noname_request'
self.request.SourceComponent = 'FailoverTransfer'
self.defaultChecksumType = defaultChecksumType
self.registrationProtocols = DMSHelpers().getRegistrationProtocols()
#############################################################################
def transferAndRegisterFile(self,
fileName,
localPath,
lfn,
destinationSEList,
fileMetaDict,
fileCatalog=None,
masterCatalogOnly=False):
"""Performs the transfer and register operation with failover.
"""
errorList = []
fileGUID = fileMetaDict.get("GUID", None)
fileChecksum = fileMetaDict.get("Checksum", None)
for se in destinationSEList:
self.log.info("Attempting dm.putAndRegister",
"('%s','%s','%s',guid='%s',catalog='%s', checksum = '%s')" %
(lfn, localPath, se, fileGUID, fileCatalog, fileChecksum))
result = DataManager(
catalogs=fileCatalog,
masterCatalogOnly=masterCatalogOnly).putAndRegister(lfn,
localPath,
se,
guid=fileGUID,
checksum=fileChecksum)
self.log.verbose(result)
if not result['OK']:
self.log.error('dm.putAndRegister failed with message', result['Message'])
errorList.append(result['Message'])
continue
if not result['Value']['Failed']:
self.log.info('dm.putAndRegister successfully uploaded and registered',
'%s to %s' % (fileName, se))
return S_OK({'uploadedSE': se, 'lfn': lfn})
# Now we know something went wrong
self.log.warn("Didn't manage to do everything, now adding requests for the missing operation")
errorDict = result['Value']['Failed'][lfn]
if 'register' not in errorDict:
self.log.error('dm.putAndRegister failed with unknown error', str(errorDict))
errorList.append('Unknown error while attempting upload to %s' % se)
continue
# fileDict = errorDict['register']
# Therefore the registration failed but the upload was successful
if not fileCatalog:
fileCatalog = ''
if masterCatalogOnly:
fileCatalog = FileCatalog().getMasterCatalogNames()['Value']
result = self._setRegistrationRequest(lfn, se, fileMetaDict, fileCatalog)
if not result['OK']:
self.log.error('Failed to set registration request', 'SE %s and metadata: \n%s' % (se, fileMetaDict))
errorList.append('Failed to set registration request for: SE %s and metadata: \n%s' % (se, fileMetaDict))
continue
else:
self.log.info('Successfully set registration request',
'for: SE %s and metadata: \n%s' % (se, fileMetaDict))
metadata = {}
metadata['filedict'] = fileMetaDict
metadata['uploadedSE'] = se
metadata['lfn'] = lfn
metadata['registration'] = 'request'
return S_OK(metadata)
self.log.error('Failed to upload output data file', 'Encountered %s errors' % len(errorList))
return S_ERROR('Failed to upload output data file')
#############################################################################
def transferAndRegisterFileFailover(self,
fileName,
localPath,
lfn,
targetSE,
failoverSEList,
fileMetaDict,
fileCatalog=None,
masterCatalogOnly=False):
"""Performs the transfer and register operation to failover storage and sets the
necessary replication and removal requests to recover.
"""
failover = self.transferAndRegisterFile(
fileName,
localPath,
lfn,
failoverSEList,
fileMetaDict,
fileCatalog,
masterCatalogOnly=masterCatalogOnly)
if not failover['OK']:
self.log.error('Could not upload file to failover SEs', failover['Message'])
return failover
# set removal requests and replication requests
result = self._setFileReplicationRequest(lfn, targetSE, fileMetaDict, sourceSE=failover['Value']['uploadedSE'])
if not result['OK']:
self.log.error('Could not set file replication request', result['Message'])
return result
lfn = failover['Value']['lfn']
failoverSE = failover['Value']['uploadedSE']
self.log.info('Attempting to set replica removal request',
'for LFN %s at failover SE %s' % (lfn, failoverSE))
result = self._setReplicaRemovalRequest(lfn, failoverSE)
if not result['OK']:
self.log.error('Could not set removal request', result['Message'])
return result
return S_OK({'uploadedSE': failoverSE, 'lfn': lfn})
def getRequest(self):
""" get the accumulated request object
"""
return self.request
def commitRequest(self):
""" Send request to the Request Management Service
"""
if self.request.isEmpty():
return S_OK()
isValid = RequestValidator().validate(self.request)
if not isValid["OK"]:
return S_ERROR("Failover request is not valid: %s" % isValid["Message"])
else:
requestClient = ReqClient()
result = requestClient.putRequest(self.request)
return result
#############################################################################
def _setFileReplicationRequest(self, lfn, targetSE, fileMetaDict, sourceSE=''):
""" Sets a registration request.
"""
self.log.info('Setting ReplicateAndRegister request',
'for %s to %s' % (lfn, targetSE))
transfer = Operation()
transfer.Type = "ReplicateAndRegister"
transfer.TargetSE = targetSE
if sourceSE:
transfer.SourceSE = sourceSE
trFile = File()
trFile.LFN = lfn
cksm = fileMetaDict.get("Checksum", None)
cksmType = fileMetaDict.get("ChecksumType", self.defaultChecksumType)
if cksm and cksmType:
trFile.Checksum = cksm
trFile.ChecksumType = cksmType
size = fileMetaDict.get("Size", 0)
if size:
trFile.Size = size
guid = fileMetaDict.get("GUID", "")
if guid:
trFile.GUID = guid
transfer.addFile(trFile)
self.request.addOperation(transfer)
return S_OK()
#############################################################################
def _setRegistrationRequest(self, lfn, targetSE, fileDict, catalog):
""" Sets a registration request
    :param str lfn: LFN
    :param str targetSE: target SE name
    :param dict fileDict: file metadata
    :param catalog: catalog(s) to use (list or string)
"""
self.log.info('Setting registration request',
'for %s at %s.' % (lfn, targetSE))
if not isinstance(catalog, list):
catalog = [catalog]
for cat in catalog:
register = Operation()
register.Type = "RegisterFile"
register.Catalog = cat
register.TargetSE = targetSE
regFile = File()
regFile.LFN = lfn
regFile.Checksum = fileDict.get("Checksum", "")
regFile.ChecksumType = fileDict.get("ChecksumType", self.defaultChecksumType)
regFile.Size = fileDict.get("Size", 0)
regFile.GUID = fileDict.get("GUID", "")
se = StorageElement(targetSE)
res = returnSingleResult(se.getURL(lfn, self.registrationProtocols))
if not res["OK"]:
self.log.error("Unable to get PFN for LFN", res['Message'])
return res
regFile.PFN = res["Value"]
register.addFile(regFile)
self.request.addOperation(register)
return S_OK()
#############################################################################
def _setReplicaRemovalRequest(self, lfn, se):
""" Sets a removal request for a replica.
:param str lfn: LFN
    :param se: SE name, or a comma-separated string of SE names
"""
if isinstance(se, str):
se = ",".join([se.strip() for se in se.split(",") if se.strip()])
removeReplica = Operation()
removeReplica.Type = "RemoveReplica"
removeReplica.TargetSE = se
replicaToRemove = File()
replicaToRemove.LFN = lfn
removeReplica.addFile(replicaToRemove)
self.request.addOperation(removeReplica)
return S_OK()
#############################################################################
def _setFileRemovalRequest(self, lfn, se='', pfn=''):
""" Sets a removal request for a file including all replicas.
"""
remove = Operation()
remove.Type = "RemoveFile"
if se:
remove.TargetSE = se
rmFile = File()
rmFile.LFN = lfn
if pfn:
rmFile.PFN = pfn
remove.addFile(rmFile)
self.request.addOperation(remove)
return S_OK()
|
fstagni/DIRAC
|
DataManagementSystem/Client/FailoverTransfer.py
|
Python
|
gpl-3.0
| 11,111
|
[
"DIRAC"
] |
dcd50d74113edc25d59505947aa8d1a8be28ef959e37b7f59e773ec0a581dba6
|
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
import pkg_resources
import mdtraj
import numpy as np
import pyemma.coordinates as coor
from pyemma.coordinates.data.fragmented_trajectory_reader import FragmentedTrajectoryReader
from six.moves import range
class TestFragmentedTrajectory(unittest.TestCase):
@classmethod
def setUpClass(cls):
        cls.d = np.array([[i] for i in range(0, 100)])
def test_full_trajectory(self):
reader = FragmentedTrajectoryReader([self.d, self.d])
reader.chunksize = 0
expected = np.vstack((self.d, self.d))
np.testing.assert_array_almost_equal(expected, reader.get_output(stride=1)[0])
def test_full_trajectory_random_access(self):
reader = FragmentedTrajectoryReader([self.d, self.d])
indices = np.asarray([[0, 1], [0, 3], [0, 3], [0, 99], [0, 100], [0, 199]])
out = reader.get_output(stride=indices, chunk=0)
np.testing.assert_array_equal(np.array(out).squeeze(), np.array([1, 3, 3, 99, 0, 99]))
def test_chunked_trajectory_random_access(self):
reader = FragmentedTrajectoryReader([self.d, self.d])
indices = np.asarray([[0, 1], [0, 3], [0, 3], [0, 99], [0, 100], [0, 199]])
out = reader.get_output(stride=indices, chunk=1)
np.testing.assert_array_equal(np.array(out).squeeze(), np.array([1,3,3,99,0,99]))
def test_full_trajectory_stridden(self):
for stride in [1, 3, 5, 7, 13, 20]:
reader = FragmentedTrajectoryReader([self.d, self.d])
reader.chunksize = 0
expected = np.vstack((self.d, self.d))[::stride]
out = reader.get_output(stride=stride)[0]
np.testing.assert_array_almost_equal(expected, out, err_msg="Failed for stride=%s" % stride)
def test_full_trajectory_stridden_with_lag(self):
reader = FragmentedTrajectoryReader([self.d, self.d])
data = np.vstack((self.d, self.d))
for lag in [1, 5, 7]:
for stride in [1, 3, 5, 7, 13, 20]:
reader.chunksize = 0
X, Y = None, None
# not chunked
for itraj, X, Y in reader.iterator(stride=stride, lag=lag):
pass
np.testing.assert_array_almost_equal(data[::stride][0:len(Y)], X)
np.testing.assert_array_almost_equal(data[lag::stride], Y)
def test_fragmented_xtc(self):
from pyemma.coordinates.tests.util import create_traj
top_file = pkg_resources.resource_filename(__name__, 'data/test.pdb')
trajfiles = []
for _ in range(3):
f, _, _ = create_traj(top_file)
trajfiles.append(f)
try:
# three trajectories: one consisting of all three, one consisting of the first,
# one consisting of the first and the last
source = coor.source([trajfiles, [trajfiles[0]], [trajfiles[0], trajfiles[2]]], top=top_file)
source.chunksize = 1000
out = source.get_output(stride=1)
trajs = [mdtraj.load(trajfiles[i], top=top_file).xyz.reshape(-1,9) for i in range(0,3)]
np.testing.assert_equal(out[0], np.vstack(trajs))
np.testing.assert_equal(out[1], trajs[0])
np.testing.assert_equal(out[2], np.vstack((trajs[0], trajs[2])))
finally:
for t in trajfiles:
try:
os.unlink(t)
except EnvironmentError:
pass
def test_multiple_input_trajectories_random_access(self):
indices = np.asarray([
[0, 1], [0, 3], [0, 3], [0, 99], [0, 100], [0, 199],
[1, 0], [1, 5], [1, 99],
[2, 5], [2, 7], [2, 23]
])
expected = [np.array([1, 3, 3, 99, 0, 99]), np.array([0, 5, 99]), np.array([5, 7, 23])]
for chunk_size in [0, 1, 3, 5, 13]:
reader = FragmentedTrajectoryReader([[self.d, self.d], self.d, [self.d, self.d]])
out_full_trajectory_mode = reader.get_output(chunk=chunk_size, stride=indices)
for i in range(3):
np.testing.assert_array_equal(expected[i], out_full_trajectory_mode[i].squeeze())
def test_multiple_input_trajectories(self):
reader = FragmentedTrajectoryReader([[self.d, self.d], self.d, [self.d, self.d]])
reader.chunksize = 37
out = reader.get_output()
reader.chunksize = 0
out2 = reader.get_output()
expected0_2 = np.vstack((self.d, self.d))
for itraj in range(0, 3):
np.testing.assert_array_almost_equal(out[itraj], out2[itraj])
np.testing.assert_array_almost_equal(out[0], expected0_2)
np.testing.assert_array_almost_equal(out[1], self.d)
np.testing.assert_array_almost_equal(out[2], expected0_2)
def test_chunked_trajectory_with_lag(self):
data = np.vstack((self.d, self.d))
reader = FragmentedTrajectoryReader([self.d, self.d])
for lag in [0, 1, 3]:
for stride in [1, 3, 5]:
for chunksize in [1, 34, 53, 72]:
reader.chunksize = chunksize
if lag > 0:
collected = None
collected_lagged = None
for itraj, X, Y in reader.iterator(stride=stride, lag=lag):
collected = X if collected is None else np.vstack((collected, X))
collected_lagged = Y if collected_lagged is None else np.vstack((collected_lagged, Y))
np.testing.assert_array_almost_equal(data[::stride][0:len(collected_lagged)], collected)
np.testing.assert_array_almost_equal(data[lag::stride], collected_lagged)
else:
collected = None
for itraj, X in reader.iterator(stride=stride):
collected = X if collected is None else np.vstack((collected, X))
np.testing.assert_array_almost_equal(data[::stride], collected)
def test_index_to_reader_index(self):
reader = FragmentedTrajectoryReader([self.d, self.d])
assert (0, 0) == reader._index_to_reader_index(0, 0), "first frame is first frame of first reader"
assert (0, 1) == reader._index_to_reader_index(1, 0), "second frame is second frame of first reader"
        assert (1, 0) == reader._index_to_reader_index(100, 0), "101st frame is first frame of second reader"
        assert (1, 1) == reader._index_to_reader_index(101, 0), "102nd frame is second frame of second reader"
with self.assertRaises(ValueError):
reader._index_to_reader_index(-1, 0)
with self.assertRaises(ValueError):
reader._index_to_reader_index(200, 0)
def test_cols(self):
dim = 5
arr = np.arange(60).reshape(-1, dim)
data = [(arr, arr), arr, (arr, arr, arr)]
reader = FragmentedTrajectoryReader(data)
cols = (0, 3)
for itraj, x in reader.iterator(chunk=0, return_trajindex=True, cols=cols):
if isinstance(data[itraj], tuple):
syn_traj = np.concatenate(data[itraj])
else:
syn_traj = data[itraj]
np.testing.assert_equal(x, syn_traj[:, cols])
def test_raise_different_dims(self):
data = [self.d, np.array([[1,2,3], [4,5,6]])]
with self.assertRaises(ValueError):
FragmentedTrajectoryReader(data)
|
gph82/PyEMMA
|
pyemma/coordinates/tests/test_fragmented_trajectory.py
|
Python
|
lgpl-3.0
| 8,281
|
[
"MDTraj"
] |
6ca4717746f958cff6106b9acd9c65bb61305b3603921320cc96d8c9f209e184
|
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from array import array
from functools import partial
from functools import total_ordering
from hashlib import md5
from itertools import chain
from itertools import groupby
import textwrap
from licensedcode import query
from licensedcode.spans import Span
from licensedcode import MAX_DIST
from licensedcode import tokenize
"""
LicenseMatch data structure and matches merging and filtering routines.
"""
TRACE = False
TRACE_FILTER_CONTAINS = False
TRACE_REFINE = False
TRACE_MERGE = False
TRACE_REFINE_SMALL = False
TRACE_REFINE_SINGLE = False
TRACE_REFINE_RULE_MIN_COVERAGE = False
TRACE_SPAN_DETAILS = False
def logger_debug(*args): pass
if (TRACE or TRACE_FILTER_CONTAINS or TRACE_MERGE
or TRACE_REFINE_RULE_MIN_COVERAGE or TRACE_REFINE_SINGLE
or TRACE_REFINE_SMALL):
import logging
import sys
logger = logging.getLogger(__name__)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, basestring) and a or repr(a) for a in args))
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
# FIXME: Implement each ordering functions. From the Python docs: Note: While
# this decorator makes it easy to create well behaved totally ordered types, it
# does come at the cost of slower execution and more complex stack traces for
# the derived comparison methods. If performance benchmarking indicates this is
# a bottleneck for a given application, implementing all six rich comparison
# methods instead is likely to provide an easy speed boost.
@total_ordering
class LicenseMatch(object):
"""
License detection match to a rule with matched query positions and lines and
matched index positions. Also computes a score for match. At a high level, a
match behaves a bit like a Span and has several similar methods taking into
account both the query and index Span.
"""
__slots__ = 'rule', 'qspan', 'ispan', 'hispan', 'query_run_start', 'matcher', 'start_line', 'end_line', 'query'
def __init__(self, rule, qspan, ispan, hispan=None, query_run_start=0, matcher='', start_line=0, end_line=0, query=None):
"""
Create a new match from:
- rule: matched Rule object
- qspan: query text matched Span, start at zero which is the absolute query start (not the query_run start).
- ispan: rule text matched Span, start at zero which is the rule start.
- hispan: rule text matched Span for high tokens, start at zero which is the rule start. Always a subset of ispan.
- matcher: a string indicating which matching procedure this match was created with. Used for debugging and testing only.
        Note that the relationship between the qspan and ispan is such that:
        - they always have the exact same number of items but when sorted each value at an index may be different
        - when both spans are sorted by position, the query and index token values at the nth position are equal
"""
self.rule = rule
self.qspan = qspan
self.ispan = ispan
if hispan is None:
hispan = Span()
self.hispan = hispan
self.query_run_start = query_run_start
self.matcher = matcher
self.start_line = start_line
self.end_line = end_line
self.query = query
def __repr__(self, trace=TRACE_SPAN_DETAILS):
spans = ''
if trace:
hispan = self.hispan
qspan = self.qspan
ispan = self.ispan
spans = ', qspan=%(qspan)r, ispan=%(ispan)r, hispan=%(hispan)r' % locals()
rep = dict(
matcher=self.matcher,
spans=spans,
rule_id=self.rule.identifier,
licenses=', '.join(self.rule.licenses),
choice=self.rule.license_choice,
score=self.score(),
coverage=self.coverage(),
qlen=self.qlen(),
ilen=self.ilen(),
hilen=self.hilen(),
qreg=(self.qstart, self.qend),
rlen=self.rule.length,
ireg=(self.istart, self.iend),
lines=self.lines(),
)
return (
'LicenseMatch<%(matcher)r, lines=%(lines)r, %(rule_id)r, '
'%(licenses)r, choice=%(choice)r, sc=%(score)r, cov=%(coverage)r, '
'qlen=%(qlen)r, ilen=%(ilen)r, hilen=%(hilen)r, rlen=%(rlen)r, '
'qreg=%(qreg)r, ireg=%(ireg)r %(spans)s>') % rep
def __eq__(self, other):
"""
Strict equality is based on licensing not matched rule.
"""
return (isinstance(other, LicenseMatch)
and self.same_licensing(other)
and self.qspan == other.qspan
and self.ispan == other.ispan
)
def same_licensing(self, other):
"""
Return True if other has the same licensing.
"""
return self.rule.same_licensing(other.rule)
def licensing_contains(self, other):
"""
        Return True if other licensing is contained in this match licensing.
"""
return self.rule.licensing_contains(other.rule)
def lines(self):
return self.start_line, self.end_line
@property
def qstart(self):
return self.qspan.start
def __lt__(self, other):
return self.qstart < other.qstart
@property
def qend(self):
return self.qspan.end
def qlen(self):
"""
Return the length of the match as the number of matched query tokens.
"""
return len(self.qspan)
@property
def istart(self):
return self.ispan.start
@property
def iend(self):
return self.ispan.end
def ilen(self):
"""
Return the length of the match as the number of matched index tokens.
"""
return len(self.ispan)
@property
def histart(self):
return self.hispan.start
def hilen(self):
"""
        Return the length of the match as the number of matched high tokens.
"""
return len(self.hispan)
def __contains__(self, other):
"""
Return True if qspan contains other.qspan and ispan contains other.ispan.
"""
return other.qspan in self.qspan and other.ispan in self.ispan
def qcontains(self, other):
"""
Return True if qspan contains other.qspan.
"""
return other.qspan in self.qspan
def qdistance_to(self, other):
"""
Return the absolute qspan distance to other match.
Touching and overlapping matches have a zero distance.
"""
return self.qspan.distance_to(other.qspan)
def idistance_to(self, other):
"""
Return the absolute ispan distance from self to other match.
Touching and overlapping matches have a zero distance.
"""
return self.ispan.distance_to(other.ispan)
def overlap(self, other):
"""
        Return the number of overlapping positions with other.
"""
return self.qspan.overlap(other.qspan)
def coverage(self):
"""
Return the coverage of this match to the matched rule as a
rounded float between 0 and 100.
"""
if not self.rule.length:
return 0
return round(self.ilen() / self.rule.length * 100, 2)
def score(self):
"""
Return the score for this match as a rounded float between 0 and
100.
The score is an indication of the confidence that a match is
good. It is computed from the number of matched tokens, the
number of query tokens in the matched range (including unknowns
and unmatched) and the matched rule relevance.
"""
# relevance is a number between 0 and 100. Divide by 100
relevance = self.rule.relevance / 100
if not relevance:
return 0
        # The query side of the match may not be contiguous and may
        # contain unmatched known tokens or unknown tokens.
# Therefore we need to compute the real portion query length
# including unknown tokens that is included in this match, for
# both matches and unmatched tokens
qspan = self.qspan
magnitude = qspan.magnitude()
query = self.query
# note: to avoid breaking many tests we check query presence
if query:
# Compute a count of unknowns tokens that are inside the
# matched range, ignoring end position of the query span:
# unknowns here do not matter as they are never in the match
unknowns_pos = qspan & query.unknowns_span
qspe = qspan.end
unknowns_pos = (pos for pos in unknowns_pos if pos != qspe)
unkxpos = query.unknowns_by_pos
unknowns_in_match = sum(unkxpos[pos] for pos in unknowns_pos)
# Fixup the magnitude by adding the count of
# unknowns in the match. This number represents the full
# extent of the matched query region including matched,
# unmatched and unknown tokens.
magnitude += unknowns_in_match
        # Compute the score as the ratio of the matched query length to
        # the magnitude, i.e. the length of the matched region
if not magnitude:
return 0
        # FIXME: this should be exposed as an icoverage() method instead
query_coverage = self.qlen() / magnitude
return round(query_coverage * relevance * 100, 2)
def surround(self, other):
"""
        Return True if this match query span surrounds the other match query span.
This is different from containment. A matched query region can surround
another matched query region and have no positions in common with the
surrounded match.
"""
return self.qstart <= other.qstart and self.qend >= other.qend
def is_after(self, other):
"""
Return True if this match spans are strictly after other match spans.
"""
return self.qspan.is_after(other.qspan) and self.ispan.is_after(other.ispan)
def combine(self, other):
"""
        Return a new match object combining self and another match.
"""
if self.rule != other.rule:
raise TypeError(
'Cannot combine matches with different rules: '
'from: %(self)r, to: %(other)r' % locals())
if other.matcher not in self.matcher:
newmatcher = ' '.join([self.matcher, other.matcher])
else:
newmatcher = self.matcher
combined = LicenseMatch(
rule=self.rule,
qspan=Span(self.qspan | other.qspan),
ispan=Span(self.ispan | other.ispan),
hispan=Span(self.hispan | other.hispan),
query_run_start=min(self.query_run_start, other.query_run_start),
matcher=newmatcher,
query=self.query)
return combined
def update(self, other):
"""
Update self with other match and return the updated self in place.
"""
combined = self.combine(other)
self.qspan = combined.qspan
self.ispan = combined.ispan
self.hispan = combined.hispan
self.matcher = combined.matcher
self.query_run_start = min(self.query_run_start, other.query_run_start)
return self
def small(self):
"""
Return True if this match is "small" based on its rule thresholds.
        Small matches are spurious matches that are discarded.
"""
thresholds = self.rule.thresholds()
min_ihigh = thresholds.min_high
min_ilen = thresholds.min_len
hilen = self.hilen()
ilen = self.ilen()
if TRACE_REFINE_SMALL:
coverage = self.coverage()
logger_debug('LicenseMatch.small(): hilen=%(hilen)r, ilen=%(ilen)r, thresholds=%(thresholds)r coverage=%(coverage)r' % locals(),)
if thresholds.small and (hilen < min_ihigh or ilen < min_ilen) and self.coverage() < 50:
if TRACE_REFINE_SMALL:
logger_debug('LicenseMatch.small(): CASE 1 thresholds.small and self.coverage() < 50 and (hilen < min_ihigh or ilen < min_ilen)')
return True
if hilen < min_ihigh or ilen < min_ilen:
if TRACE_REFINE_SMALL:
logger_debug('LicenseMatch.small(): CASE 2 hilen < min_ihigh or ilen < min_ilen')
return True
if TRACE_REFINE_SMALL:
logger_debug('LicenseMatch.small(): not small')
return False
def false_positive(self, idx):
"""
        Return a True-ish value (e.g. a false positive rule id) if this
        LicenseMatch is a false positive, or None otherwise (nb: not False).
        This is done by a
lookup of the matched tokens sequence against the `idx` index false positive
rules.
"""
ilen = self.ilen()
if ilen > idx.largest_false_positive_length:
return
rule_tokens = idx.tids_by_rid[self.rule.rid]
ispan = self.ispan
matched_itokens = array('h', (tid for ipos, tid in enumerate(rule_tokens) if ipos in ispan))
# note: hash computation is inlined here but MUST be the same code as in match_hash
matched_hash = md5(matched_itokens.tostring()).digest()
return idx.false_positive_rid_by_hash.get(matched_hash)
def matched_text(self, whole_lines=False,
highlight_matched=u'%s', highlight_not_matched=u'[%s]'):
"""
Return the matched text for this match or an empty string if no
query exists for this match.
"""
query = self.query
if not query:
# TODO: should we raise an exception instead???
            # this case should never exist except for tests!
return u''
return u''.join(get_full_matched_text(
self,
location=query.location,
query_string=query.query_string,
idx=query.idx,
whole_lines=whole_lines,
highlight_matched=highlight_matched,
highlight_not_matched=highlight_not_matched)
)
def set_lines(matches, line_by_pos):
"""
Update a matches sequence with start and end line given a line_by_pos pos->line mapping.
"""
    # if there is no line_by_pos, do not bother: the lines will stay at zero.
if line_by_pos:
for match in matches:
match.start_line = line_by_pos[match.qstart]
match.end_line = line_by_pos[match.qend]
def merge_matches(matches, max_dist=MAX_DIST):
"""
Merge matches to the same rule in a sequence of matches. Return a new list
of merged matches if they can be merged. Matches that cannot be merged are
returned as-is.
    To be merged, two matches must also be in increasing query and index positions.
"""
# shortcut for single matches
if len(matches) < 2:
return matches
# only merge matches with the same rule: sort then group by rule
# for the same rule, sort on start, longer high, longer match, matcher type
sorter = lambda m: (m.rule.identifier, m.qspan.start, -m.hilen(), -m.qlen(), m.matcher)
matches.sort(key=sorter)
matches_by_rule = [(rid, list(rule_matches)) for rid, rule_matches
in groupby(matches, key=lambda m: m.rule.identifier)]
if TRACE_MERGE: print('merge_matches: number of matches to process:', len(matches))
merged = []
for rid, rule_matches in matches_by_rule:
if TRACE_MERGE: logger_debug('merge_matches: processing rule:', rid)
# compare two matches in the sorted sequence: current and next
i = 0
while i < len(rule_matches) - 1:
j = i + 1
while j < len(rule_matches):
current_match = rule_matches[i]
next_match = rule_matches[j]
if TRACE_MERGE: logger_debug('---> merge_matches: current:', current_match)
if TRACE_MERGE: logger_debug('---> merge_matches: next: ', next_match)
# stop if we exceed max dist
                # honor the max_dist parameter (previously ignored in favor of
                # the MAX_DIST module constant)
                if (current_match.qdistance_to(next_match) > max_dist
                    or current_match.idistance_to(next_match) > max_dist):
break
# keep one of equal matches
if current_match.qspan == next_match.qspan and current_match.ispan == next_match.ispan:
if TRACE_MERGE: logger_debug(' ---> ###merge_matches: next EQUALS current, del next')
del rule_matches[j]
continue
# if we have two equal ispans and some overlap
# keep the shortest/densest match in qspan e.g. the smallest magnitude of the two
if current_match.ispan == next_match.ispan and current_match.overlap(next_match):
cqmag = current_match.qspan.magnitude()
nqmag = next_match.qspan.magnitude()
if cqmag <= nqmag:
if TRACE_MERGE: logger_debug(' ---> ###merge_matches: current ispan EQUALS next ispan, current qmagnitude smaller, del next')
del rule_matches[j]
continue
else:
if TRACE_MERGE: logger_debug(' ---> ###merge_matches: current ispan EQUALS next ispan, next qmagnitude smaller, del current')
del rule_matches[i]
i -= 1
break
# remove contained matches
if current_match.qcontains(next_match):
if TRACE_MERGE: logger_debug(' ---> ###merge_matches: next CONTAINED in current, del next')
del rule_matches[j]
continue
# remove contained matches the other way
if next_match.qcontains(current_match):
if TRACE_MERGE: logger_debug(' ---> ###merge_matches: current CONTAINED in next, del current')
del rule_matches[i]
i -= 1
break
# FIXME: qsurround is too weak. We want to check also isurround
# merge surrounded
if current_match.surround(next_match):
new_match = current_match.combine(next_match)
if len(new_match.qspan) == len(new_match.ispan):
# the merged matched is likely aligned
current_match.update(next_match)
if TRACE_MERGE: logger_debug(' ---> ###merge_matches: current SURROUNDS next, merged as new:', current_match)
del rule_matches[j]
continue
# FIXME: qsurround is too weak. We want to check also isurround
# merge surrounded the other way too: merge in current
if next_match.surround(current_match):
new_match = current_match.combine(next_match)
if len(new_match.qspan) == len(new_match.ispan):
# the merged matched is likely aligned
next_match.update(current_match)
if TRACE_MERGE: logger_debug(' ---> ###merge_matches: next SURROUNDS current, merged as new:', current_match)
del rule_matches[i]
i -= 1
break
# next_match is strictly in increasing sequence: merge in current
if next_match.is_after(current_match):
current_match.update(next_match)
if TRACE_MERGE: logger_debug(' ---> ###merge_matches: next follows current, merged as new:', current_match)
del rule_matches[j]
continue
# next_match overlaps
# Check increasing sequence and overlap importance to decide merge
if (current_match.qstart <= next_match.qstart
and current_match.qend <= next_match.qend
and current_match.istart <= next_match.istart
and current_match.iend <= next_match.iend):
qoverlap = current_match.qspan.overlap(next_match.qspan)
if qoverlap:
ioverlap = current_match.ispan.overlap(next_match.ispan)
# only merge if overlaps are equals (otherwise they are not aligned)
if qoverlap == ioverlap:
current_match.update(next_match)
if TRACE_MERGE: logger_debug(' ---> ###merge_matches: next overlaps in sequence current, merged as new:', current_match)
del rule_matches[j]
continue
j += 1
i += 1
merged.extend(rule_matches)
return merged
def filter_contained_matches(matches):
"""
Return a filtered list of LicenseMatch given a `matches` list of LicenseMatch by
removing duplicated or superfluous matches using containment relationships.
Works across all matches.
For instance a match entirely contained in another bigger match is removed. When
more than one matched position matches the same license(s), only one match of
this set is kept.
"""
discarded = []
# do not bother if there is only one match
if len(matches) < 2:
return matches, discarded
# containment relationships and thresholds between two matches
# based on this containment we may prefer one match over the other and discard a match
CONTAINMENT_SMALL = 0.10
CONTAINMENT_MEDIUM = 0.40
CONTAINMENT_LARGE = 0.60
CONTAINMENT_EXTRA_LARGE = 0.80
    # sort on start, then longer high-token length, longer match, then matcher type
sorter = lambda m: (m.qspan.start, -m.hilen(), -m.qlen(), m.matcher)
matches = sorted(matches, key=sorter)
if TRACE_FILTER_CONTAINS: print('filter_contained_matches: number of matches to process:', len(matches))
if TRACE_FILTER_CONTAINS:
print('filter_contained_matches: matches')
map(print, matches)
# compare two matches in the sorted sequence: current and next match
# we progressively compare a pair and remove next or current
i = 0
while i < len(matches) - 1:
j = i + 1
while j < len(matches):
current_match = matches[i]
next_match = matches[j]
# stop when no overlap: Touching and overlapping matches have a zero distance.
# if current_match.qdistance_to(next_match):
# break
if TRACE_FILTER_CONTAINS: logger_debug('---> filter_contained_matches: current: i=', i, current_match)
if TRACE_FILTER_CONTAINS: logger_debug('---> filter_contained_matches: next: j=', j, next_match)
# equals matches
if current_match.qspan == next_match.qspan:
if current_match.coverage() >= next_match.coverage():
if TRACE_FILTER_CONTAINS: logger_debug(' ---> ###filter_contained_matches: next EQUALS current, removed next with lower or equal coverage', matches[j], '\n')
discarded.append(next_match)
del matches[j]
continue
else:
if TRACE_FILTER_CONTAINS: logger_debug(' ---> ###filter_contained_matches: next EQUALS current, removed current with lower coverage', matches[i], '\n')
discarded.append(current_match)
del matches[i]
i -= 1
break
# remove contained matches
if current_match.qcontains(next_match):
if TRACE_FILTER_CONTAINS: logger_debug(' ---> ###filter_contained_matches: next CONTAINED in current, removed next', matches[j], '\n')
discarded.append(next_match)
del matches[j]
continue
# remove contained matches the other way
if next_match.qcontains(current_match):
if TRACE_FILTER_CONTAINS: logger_debug(' ---> ###filter_contained_matches: current CONTAINED in next, removed current', matches[i], '\n')
discarded.append(current_match)
del matches[i]
i -= 1
break
# handle overlapping matches: determine overlap and containment relationships
overlap = current_match.overlap(next_match)
# next match overlap to current
overlap_ratio_to_next = overlap / next_match.qlen()
extra_large_next = overlap_ratio_to_next >= CONTAINMENT_EXTRA_LARGE
large_next = overlap_ratio_to_next >= CONTAINMENT_LARGE
medium_next = overlap_ratio_to_next >= CONTAINMENT_MEDIUM
small_next = overlap_ratio_to_next >= CONTAINMENT_SMALL
# current match overlap to next
overlap_ratio_to_current = overlap / current_match.qlen()
extra_large_current = overlap_ratio_to_current >= CONTAINMENT_EXTRA_LARGE
large_current = overlap_ratio_to_current >= CONTAINMENT_LARGE
medium_current = overlap_ratio_to_current >= CONTAINMENT_MEDIUM
small_current = overlap_ratio_to_current >= CONTAINMENT_SMALL
if TRACE_FILTER_CONTAINS: logger_debug(
' ---> ###filter_contained_matches:',
'overlap:', overlap,
'containment of next to current is:',
'overlap_ratio_to_next:', overlap_ratio_to_next,
(extra_large_next and 'EXTRA_LARGE')
or (large_next and 'LARGE')
or (medium_next and 'MEDIUM')
or (small_next and 'SMALL')
or 'NOT CONTAINED',
'containment of current to next is:',
'overlap_ratio_to_current:', overlap_ratio_to_current,
(extra_large_current and 'EXTRA_LARGE')
or (large_current and 'LARGE')
or (medium_current and 'MEDIUM')
or (small_current and 'SMALL')
or 'NOT CONTAINED',
)
if extra_large_next and current_match.qlen() >= next_match.qlen():
if TRACE_FILTER_CONTAINS: logger_debug(' ---> ###filter_contained_matches: EXTRA_LARGE next included, removed shorter next', matches[j], '\n')
discarded.append(next_match)
del matches[j]
continue
if extra_large_current and current_match.qlen() <= next_match.qlen():
if TRACE_FILTER_CONTAINS: logger_debug(' ---> ###filter_contained_matches: EXTRA_LARGE next includes current, removed shorter current', matches[i], '\n')
discarded.append(current_match)
del matches[i]
i -= 1
break
if large_next and current_match.qlen() >= next_match.qlen() and current_match.hilen() >= next_match.hilen():
if TRACE_FILTER_CONTAINS: logger_debug(' ---> ###filter_contained_matches: LARGE next included, removed shorter next', matches[j], '\n')
discarded.append(next_match)
del matches[j]
continue
if large_current and current_match.qlen() <= next_match.qlen() and current_match.hilen() <= next_match.hilen():
if TRACE_FILTER_CONTAINS: logger_debug(' ---> ###filter_contained_matches: LARGE next includes current, removed shorter current', matches[i], '\n')
discarded.append(current_match)
del matches[i]
i -= 1
break
if medium_next:
if TRACE_FILTER_CONTAINS: logger_debug(' ---> ###filter_contained_matches: MEDIUM NEXT')
if current_match.licensing_contains(next_match) and current_match.qlen() >= next_match.qlen() and current_match.hilen() >= next_match.hilen():
if TRACE_FILTER_CONTAINS: logger_debug(
' ---> ###filter_contained_matches: MEDIUM next included with next licensing contained, removed next', matches[j], '\n',)
discarded.append(next_match)
del matches[j]
continue
if next_match.licensing_contains(current_match) and current_match.qlen() <= next_match.qlen() and current_match.hilen() <= next_match.hilen():
if TRACE_FILTER_CONTAINS: logger_debug(
' ---> ###filter_contained_matches: MEDIUM next includes current with current licensing contained, removed current', matches[i], '\n')
discarded.append(current_match)
del matches[i]
i -= 1
break
if medium_current:
if TRACE_FILTER_CONTAINS: logger_debug(' ---> ###filter_contained_matches: MEDIUM CURRENT')
if current_match.licensing_contains(next_match) and current_match.qlen() >= next_match.qlen() and current_match.hilen() >= next_match.hilen():
if TRACE_FILTER_CONTAINS: logger_debug(
' ---> ###filter_contained_matches: MEDIUM current, bigger current with next licensing contained, removed next', matches[j], '\n')
discarded.append(next_match)
del matches[j]
continue
if next_match.licensing_contains(current_match) and current_match.qlen() <= next_match.qlen() and current_match.hilen() <= next_match.hilen():
if TRACE_FILTER_CONTAINS: logger_debug(
' ---> ###filter_contained_matches: MEDIUM current, bigger next current with current licensing contained, removed current', matches[i], '\n')
discarded.append(current_match)
del matches[i]
i -= 1
break
if small_next and current_match.surround(next_match) and current_match.licensing_contains(next_match) and current_match.qlen() >= next_match.qlen() and current_match.hilen() >= next_match.hilen():
if TRACE_FILTER_CONTAINS: logger_debug(' ---> ###filter_contained_matches: SMALL next surrounded, removed next', matches[j], '\n')
discarded.append(next_match)
del matches[j]
continue
if small_current and next_match.surround(current_match) and next_match.licensing_contains(current_match) and current_match.qlen() <= next_match.qlen() and current_match.hilen() <= next_match.hilen():
if TRACE_FILTER_CONTAINS: logger_debug(' ---> ###filter_contained_matches: SMALL current surrounded, removed current', matches[i], '\n')
                discarded.append(current_match)
del matches[i]
i -= 1
break
# check the previous current and next match
# discard current if it is entirely contained in a combined previous and next
# and previous and next do not overlap
# ensure that we have a previous
if i:
previous_match = matches[i - 1]
# ensure previous and next do not overlap
if not previous_match.overlap(next_match):
# ensure most of current is contained in the previous and next overlap
cpo = current_match.overlap(previous_match)
cno = current_match.overlap(next_match)
if cpo and cno:
overlap_len = cno + cpo
cqlen = current_match.qlen()
# we want at least 90% of the current that is in the overlap
if overlap_len >= (cqlen * 0.9):
                            if TRACE_FILTER_CONTAINS: logger_debug('    ---> ###filter_contained_matches: current mostly contained in previous and next, removed current', matches[i], '\n')
                            discarded.append(current_match)
del matches[i]
i -= 1
break
j += 1
i += 1
    # return the kept matches and everything discarded along the way
    return matches, discarded
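# Illustrative sketch (hypothetical helper, not used by the filter): the
# containment decisions above bucket the ratio of an overlap to a match
# query length against the CONTAINMENT_* thresholds defined earlier.
def _example_containment_level(overlap, qlen):
    """Return a containment label for a toy overlap/qlen ratio, using
    the same thresholds as filter_contained_matches."""
    ratio = overlap / float(qlen)
    if ratio >= 0.80:
        return 'EXTRA_LARGE'
    if ratio >= 0.60:
        return 'LARGE'
    if ratio >= 0.40:
        return 'MEDIUM'
    if ratio >= 0.10:
        return 'SMALL'
    return 'NOT CONTAINED'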
def filter_rule_min_coverage(matches):
"""
Return a list of matches scoring at or above a rule-defined minimum coverage and
a list of matches with a coverage below a rule-defined minimum coverage.
"""
kept = []
discarded = []
for match in matches:
if match.coverage() < match.rule.minimum_coverage:
if TRACE_REFINE_RULE_MIN_COVERAGE: logger_debug(' ==> DISCARDING rule.minimum_coverage:', type(match.rule.minimum_coverage), ':', repr(match.rule.minimum_coverage), 'match:', match)
discarded.append(match)
else:
kept.append(match)
return kept, discarded
def filter_low_score(matches, min_score=100):
"""
Return a list of matches scoring above `min_score` and a list of matches scoring below.
"""
if not min_score:
return matches, []
kept = []
discarded = []
for match in matches:
if match.score() < min_score:
if TRACE_REFINE: logger_debug(' ==> DISCARDING small score:', match)
discarded.append(match)
else:
kept.append(match)
return kept, discarded
def filter_spurious_single_token(matches, query=None, unknown_count=5):
"""
Return a list of matches without "spurious" single token matches and a list of
"spurious" single token matches.
A "spurious" single token match is a match to a single token that is surrounded
on both sides by at least `unknown_count` tokens of either unknown tokens, short
tokens composed of a single character or tokens composed only of digits.
"""
kept = []
discarded = []
if not query:
return matches, discarded
unknowns_by_pos = query.unknowns_by_pos
shorts_and_digits = query.shorts_and_digits_pos
for match in matches:
if not match.qlen() == 1:
kept.append(match)
continue
qstart = match.qstart
qend = match.qend
# compute the number of unknown tokens before and after this single matched position
# note: unknowns_by_pos is a defaultdict(int), shorts_and_digits is a set of integers
before = unknowns_by_pos[qstart - 1]
for p in range(qstart - 1 - unknown_count, qstart):
if p in shorts_and_digits:
before += 1
if before < unknown_count:
            if TRACE_REFINE_SINGLE: logger_debug('    ==> !!! NOT DISCARDING spurious_single_token, not enough before:', match, before)
if TRACE_REFINE_SINGLE: _debug_print_matched_query_text(match, query, extras=unknown_count)
kept.append(match)
continue
after = unknowns_by_pos[qstart]
for p in range(qend, qend + 1 + unknown_count):
if p in shorts_and_digits:
after += 1
if after >= unknown_count:
            if TRACE_REFINE_SINGLE: logger_debug('    ==> DISCARDING spurious_single_token:', match)
if TRACE_REFINE_SINGLE: _debug_print_matched_query_text(match, query, extras=unknown_count)
discarded.append(match)
else:
            if TRACE_REFINE_SINGLE: logger_debug('    ==> !!! NOT DISCARDING spurious_single_token, not enough after:', match, before, after)
if TRACE_REFINE_SINGLE: _debug_print_matched_query_text(match, query, extras=unknown_count)
kept.append(match)
return kept, discarded
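# Illustrative sketch (hypothetical data structures mirroring Query): a
# single-token match is spurious when at least `unknown_count` noise
# positions (unknown tokens, single characters or digit-only tokens)
# surround it on each side. The counting on one side looks like this:
def _example_noise_before(unknowns_by_pos, shorts_and_digits, qstart, window=5):
    """Count noise positions in the `window` positions before `qstart`,
    given a mapping of position -> count of following unknown tokens and
    a set of positions holding short or digit-only tokens."""
    noise = unknowns_by_pos.get(qstart - 1, 0)
    for pos in range(qstart - 1 - window, qstart):
        if pos in shorts_and_digits:
            noise += 1
    return noise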
def filter_short_matches(matches):
"""
Return a list of matches that are not short and a list of short spurious matches.
"""
kept = []
discarded = []
for match in matches:
if match.small():
if TRACE_REFINE_SMALL: logger_debug(' ==> DISCARDING SHORT:', match)
discarded.append(match)
else:
if TRACE_REFINE_SMALL: logger_debug(' ===> NOT DISCARDING SHORT:', match)
kept.append(match)
return kept, discarded
def filter_spurious_matches(matches):
"""
Return a list of matches that are not spurious and a list of spurious matches.
    Spurious matches are small matches with a low density (i.e. where the
    matched tokens are separated by many unmatched tokens).
"""
kept = []
discarded = []
for match in matches:
qdens = match.qspan.density()
idens = match.ispan.density()
ilen = match.ilen()
hilen = match.hilen()
if (
(ilen < 30 and hilen < 8 and (qdens < 0.4 or idens < 0.4))
or (ilen < 20 and hilen < 5 and (qdens < 0.3 or idens < 0.3))
or (ilen < 15 and (qdens < 0.2 or idens < 0.2))
or (ilen < 10 and (qdens < 0.1 or idens < 0.1))
):
if TRACE_REFINE: logger_debug(' ==> DISCARDING Spurious:', match)
discarded.append(match)
else:
kept.append(match)
return kept, discarded
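# Worked example (hypothetical numbers): density is the fraction of positions
# inside a span that are actually matched. A 12-token match spread over a span
# of magnitude 60 has density 12/60 = 0.2, which for an ilen below 15 sits
# exactly at the discard boundary used above.
def _example_density(matched_len, span_magnitude):
    """Toy span density, e.g. _example_density(12, 60) == 0.2"""
    return matched_len / float(span_magnitude)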
def filter_false_positive_matches(matches, idx):
"""
Return a list of matches that are not false positives and a list of false
positive matches given an index `idx`.
"""
kept = []
discarded = []
for match in matches:
fp = match.false_positive(idx)
if fp is None:
# if TRACE_REFINE: logger_debug(' ==> NOT DISCARDING FALSE POSITIVE:', match)
kept.append(match)
else:
if TRACE_REFINE: logger_debug(' ==> DISCARDING FALSE POSITIVE:', match, 'fp rule:', idx.rules_by_rid[fp].identifier)
discarded.append(match)
return kept, discarded
def refine_matches(matches, idx, query=None, min_score=0, max_dist=MAX_DIST):
"""
Return two sequences of matches: one contains refined good matches, and the
other contains matches that were filtered out.
"""
if TRACE: logger_debug()
if TRACE: logger_debug(' #####refine_matches: STARTING matches#', len(matches))
if TRACE_REFINE: map(logger_debug, matches)
matches = merge_matches(matches, max_dist=max_dist)
if TRACE: logger_debug(' ##### refine_matches: STARTING MERGED_matches#:', len(matches))
all_discarded = []
matches, discarded = filter_rule_min_coverage(matches)
all_discarded.extend(discarded)
if TRACE: logger_debug(' #####refine_matches: NOT UNDER MIN COVERAGE #', len(matches))
if TRACE_REFINE: map(logger_debug, matches)
if TRACE: logger_debug(' #####refine_matches: UNDER MIN COVERAGE discarded#', len(discarded))
if TRACE_REFINE: map(logger_debug, discarded)
matches, discarded = filter_spurious_single_token(matches, query)
all_discarded.extend(discarded)
if TRACE: logger_debug(' #####refine_matches: NOT SINGLE TOKEN #', len(matches))
if TRACE_REFINE: map(logger_debug, matches)
if TRACE: logger_debug(' #####refine_matches: SINGLE TOKEN discarded#', len(discarded))
if TRACE_REFINE: map(logger_debug, discarded)
matches, discarded = filter_short_matches(matches)
all_discarded.extend(discarded)
if TRACE: logger_debug(' #####refine_matches: NOT SHORT #', len(matches))
if TRACE_REFINE: map(logger_debug, matches)
if TRACE: logger_debug(' #####refine_matches: SHORT discarded#', len(discarded))
if TRACE_REFINE: map(logger_debug, discarded)
matches, discarded = filter_false_positive_matches(matches, idx)
all_discarded.extend(discarded)
if TRACE: logger_debug(' #####refine_matches: NOT FALSE POS #', len(matches))
if TRACE_REFINE: map(logger_debug, matches)
if TRACE: logger_debug(' #####refine_matches: FALSE POS discarded#', len(discarded))
if TRACE_REFINE: map(logger_debug, discarded)
matches, discarded = filter_spurious_matches(matches)
all_discarded.extend(discarded)
if TRACE: logger_debug(' #####refine_matches: NOT SPURIOUS#', len(matches))
if TRACE_REFINE: map(logger_debug, matches)
if TRACE: logger_debug(' #####refine_matches: SPURIOUS discarded#', len(discarded))
if TRACE_REFINE: map(logger_debug, discarded)
matches = merge_matches(matches, max_dist=max_dist)
if TRACE: logger_debug(' #####refine_matches: before FILTER matches#', len(matches))
if TRACE_REFINE: map(logger_debug, matches)
matches, discarded = filter_contained_matches(matches)
all_discarded.extend(discarded)
    if TRACE: logger_debug('   ##### refine_matches: NOT FILTERED matches#:', len(matches))
if TRACE_REFINE: map(logger_debug, matches)
if TRACE: logger_debug(' #####refine_matches: FILTERED discarded#', len(discarded))
if TRACE_REFINE: map(logger_debug, discarded)
if min_score:
matches, discarded = filter_low_score(matches, min_score=min_score)
all_discarded.extend(discarded)
if TRACE: logger_debug(' #####refine_matches: NOT LOW SCORE #', len(matches))
if TRACE_REFINE: map(logger_debug, matches)
if TRACE: logger_debug(' ###refine_matches: LOW SCORE discarded #:', len(discarded))
if TRACE_REFINE: map(logger_debug, discarded)
matches = merge_matches(matches, max_dist=max_dist)
    if TRACE: logger_debug('   ##### refine_matches: FINAL MERGED_matches#:', len(matches))
if TRACE_REFINE: map(logger_debug, matches)
return matches, all_discarded
# TODO: move this as a method of LicenseMatch
def get_full_matched_text(
match, location=None, query_string=None, idx=None,
whole_lines=False, highlight_matched=u'%s', highlight_not_matched=u'[%s]'):
"""
    Yield unicode strings corresponding to the full matched query text
    given a query file at `location` or a
`query_string`, a `match` LicenseMatch and an `idx` LicenseIndex.
This contains the full text including punctuations and spaces that
are not participating in the match proper.
    If `whole_lines` is True, the unmatched parts at the start of the
    first matched line and at the end of the last matched line are also
    included in the returned text.
    Each token is interpolated for "highlighting" and emphasis with the
    `highlight_matched` format string for matched tokens or with the
    `highlight_not_matched` format string for tokens not matched. The
    default is to enclose an unmatched token sequence in [] square brackets.
Punctuation is not highlighted.
"""
assert location or query_string
assert idx
dictionary_get = idx.dictionary.get
import attr
@attr.s(slots=True)
class Token(object):
value = attr.ib()
line_num = attr.ib()
pos = attr.ib(default=-1)
# False if this is punctuation
is_text = attr.ib(default=False)
is_included = attr.ib(default=False)
is_matched = attr.ib(default=False)
is_known = attr.ib(default=False)
def _tokenize(location, query_string):
"""Yield Tokens with pos and line number."""
_pos = -1
for _line_num, _line in enumerate(query.query_lines(location, query_string, strip=False), 1):
for _is_text, _token in tokenize.matched_query_text_tokenizer(_line):
_known = _is_text and dictionary_get(_token.lower()) is not None
_tok = Token(value=_token, line_num=_line_num, is_text=_is_text, is_known=_known)
if _known:
_pos += 1
_tok.pos = _pos
yield _tok
def _in_matched_lines(tokens, _start_line, _end_line):
"""Yield tokens that are within matched start and end lines."""
for _tok in tokens:
if _tok.line_num < _start_line:
continue
if _tok.line_num > _end_line:
break
yield _tok
def _tag_tokens_as_matched(tokens, qspan):
"""Tag tokens within qspan as matched."""
for _tok in tokens:
if _tok.pos != -1 and _tok.is_known and _tok.pos in qspan:
_tok.is_matched = True
yield _tok
def _tag_tokens_as_included_in_whole_lines(tokens, _start_line, _end_line):
"""Tag tokens within start and end lines as as_included."""
for _tok in tokens:
if _start_line <= _tok.line_num <= _end_line:
_tok.is_included = True
yield _tok
def _tag_tokens_as_included_in_matched_range(tokens, _start, _end):
"""Tag tokens within start and end positions as as_included."""
started = False
finished = False
for _tok in tokens:
if not started and _tok.pos == _start:
started = True
if started and not finished:
_tok.is_included = True
yield _tok
if _tok.pos == _end:
finished = True
# Create and process a stream of Tokens
tokenized = _tokenize(location, query_string)
in_matched_lines = _in_matched_lines(tokenized, match.start_line, match.end_line)
matched = _tag_tokens_as_matched(in_matched_lines, match.qspan)
if whole_lines:
as_included = _tag_tokens_as_included_in_whole_lines(matched, match.start_line, match.end_line)
else:
as_included = _tag_tokens_as_included_in_matched_range(matched, match.qspan.start, match.qspan.end)
tokens = (t for t in as_included if t.is_included)
    # Finally yield strings with any highlighting applied
for token in tokens:
if token.is_text:
if token.is_matched:
yield highlight_matched % token.value
else:
yield highlight_not_matched % token.value
else:
# punctuation
yield token.value
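# Illustrative sketch (standalone, same interpolation idea as above): the
# highlight_matched/highlight_not_matched arguments are plain %-format
# templates applied token by token, so with the defaults a matched token is
# emitted as-is and an unmatched one is wrapped in square brackets.
def _example_highlight(tokens_with_flags):
    """tokens_with_flags: iterable of (token, is_matched) pairs.
    _example_highlight([(u'MIT', True), (u'banana', False)])
    returns u'MIT [banana]'."""
    return u' '.join(
        (u'%s' % tok) if matched else (u'[%s]' % tok)
        for tok, matched in tokens_with_flags)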
########################################################################
# TODO: move these to tests: this is used only for test reporting!
#
def get_texts(match, location=None, query_string=None, idx=None, width=120):
"""
Given a match and a query location of query string return a tuple of wrapped
texts at `width` for:
- the matched query text as a string.
- the matched rule text as a string.
    Unmatched positions of known tokens are represented between angle
    brackets <> and unknown tokens not part of the index between square
    brackets []. Punctuation is removed, spaces are normalized (a new line
    is replaced by a space), and case is preserved.
    If `width` is a number greater than zero, the texts are wrapped to width.
"""
return (get_matched_qtext(match, location, query_string, idx, width),
get_match_itext(match, width))
def get_matched_qtext(match, location=None, query_string=None, idx=None, width=120, margin=0):
"""
Return the matched query text as a wrapped string of `width` given a match, a
query location or string and an index.
    Unmatched positions are represented between angle brackets <> or square
    brackets [] for unknown tokens not part of the index. Punctuation is
    removed, spaces are normalized (a new line is replaced by a space), and
    case is preserved.
    If `width` is a number greater than zero, the texts are wrapped to width
    with an optional `margin`.
"""
return format_text(matched_query_tokens_str(match, location, query_string, idx), width=width, margin=margin)
def get_match_itext(match, width=120, margin=0):
"""
Return the matched rule text as a wrapped string of `width` given a match.
    Unmatched positions are represented between angle brackets <>.
    Punctuation is removed, spaces are normalized (a new line is replaced by
    a space), and case is preserved.
    If `width` is a number greater than zero, the texts are wrapped to width
    with an optional `margin`.
"""
return format_text(matched_rule_tokens_str(match), width=width, margin=margin)
def format_text(tokens, width=120, no_match='<no-match>', margin=4):
"""
Return a formatted text wrapped at `width` given an iterable of tokens.
None tokens for unmatched positions are replaced with `no_match`.
"""
nomatch = lambda s: s or no_match
tokens = map(nomatch, tokens)
noop = lambda x: [x]
initial_indent = subsequent_indent = u' ' * margin
wrapper = partial(textwrap.wrap, width=width, break_on_hyphens=False,
initial_indent=initial_indent,
subsequent_indent=subsequent_indent)
wrap = width and wrapper or noop
return u'\n'.join(wrap(u' '.join(tokens)))
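# Usage sketch for format_text (doctest-style, hypothetical call): with a
# zero width the wrapper is bypassed and tokens are simply space-joined
# after None positions are replaced by the no_match marker:
#
#     >>> format_text([u'Permission', None, u'granted'], width=0)
#     u'Permission <no-match> granted'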
def matched_query_tokens_str(match, location=None, query_string=None, idx=None):
"""
Return an iterable of matched query token strings given a query file at
`location` or a `query_string`, a match and an index.
    Unmatched known tokens are yielded wrapped in angle brackets <> and
    unknown tokens wrapped in square brackets []. Punctuation is removed,
    spaces are normalized (a new line is replaced by a space), case is
    preserved.
"""
assert idx
dictionary_get = idx.dictionary.get
tokens = (query.query_tokenizer(line, lower=False)
for line in query.query_lines(location, query_string))
tokens = chain.from_iterable(tokens)
match_qspan = match.qspan
match_qspan_start = match_qspan.start
match_qspan_end = match_qspan.end
known_pos = -1
started = False
finished = False
for token in tokens:
token_id = dictionary_get(token.lower())
if token_id is None:
if not started:
continue
if finished:
break
else:
known_pos += 1
if match_qspan_start <= known_pos <= match_qspan_end:
started = True
if known_pos == match_qspan_end:
finished = True
if known_pos in match_qspan and token_id is not None:
yield token
else:
if token_id is not None:
yield '<%s>' % token
else:
yield '[%s]' % token
def matched_rule_tokens_str(match):
"""
    Return an iterable of matched rule token strings given a match.
    Yield unmatched tokens wrapped in angle brackets <>.
Punctuation is removed, spaces are normalized (new line is replaced by a space),
case is preserved.
"""
ispan = match.ispan
ispan_start = ispan.start
ispan_end = ispan.end
for pos, token in enumerate(match.rule.tokens(lower=False)):
if ispan_start <= pos <= ispan_end:
if pos in ispan:
yield token
else:
yield '<%s>' % token
def _debug_print_matched_query_text(match, query, extras=5):
"""
Print a matched query text including `extras` tokens before and after the match.
Used for debugging license matches.
"""
# create a fake new match with extra unknown left and right
new_match = match.combine(match)
new_qstart = max([0, match.qstart - extras])
new_qend = min([match.qend + extras, len(query.tokens)])
new_qspan = Span(new_qstart, new_qend)
new_match.qspan = new_qspan
logger_debug(new_match)
logger_debug(' MATCHED QUERY TEXT with extras')
qt, _it = get_texts(
new_match,
location=query.location, query_string=query.query_string,
idx=query.idx)
print(qt)
|
yashdsaraf/scancode-toolkit
|
src/licensedcode/match.py
|
Python
|
apache-2.0
| 52,884
|
[
"VisIt"
] |
30848972af59b1764bfee9d8f79d9d5437f5779e15808078ef25dfbf61cdab51
|
# Copyright (C) 2010 CAMd
# Please see the accompanying LICENSE file for further information.
"""This module provides all the classes and functions associated with the
evaluation of exact exchange with k-point sampling."""
from math import pi, sqrt
import sys
import numpy as np
from ase import Atoms
from ase.units import Ha
from time import ctime
from gpaw.xc import XC
from gpaw.xc.kernel import XCNull
from gpaw.xc.functional import XCFunctional
from gpaw.utilities import pack, unpack2, packed_index, devnull
from gpaw.lfc import LFC
from gpaw.wavefunctions.pw import PWDescriptor
from gpaw.kpt_descriptor import KPointDescriptor
from gpaw.mpi import world, rank
class KPoint:
def __init__(self, kd, kpt=None):
"""Helper class for parallelizing over k-points.
Placeholder for wave functions, occupation numbers,
projections, and global k-point index."""
self.kd = kd
if kpt is not None:
self.psit_nG = kpt.psit_nG
self.f_n = kpt.f_n / kpt.weight / kd.nbzkpts * 2 / kd.nspins
self.weight = 1. / kd.nbzkpts * 2 / kd.nspins
self.eps_n = kpt.eps_n
self.P_ani = kpt.P_ani
self.k = kpt.k
self.s = kpt.s
self.requests = []
def next(self):
"""Create empty object.
        Data will be received from another processor."""
kpt = KPoint(self.kd)
        # initialize array for receiving:
kpt.psit_nG = np.empty_like(self.psit_nG)
kpt.f_n = np.empty_like(self.f_n)
# Total number of projector functions:
I = sum([P_ni.shape[1] for P_ni in self.P_ani.values()])
kpt.P_In = np.empty((I, len(kpt.f_n)), complex)
kpt.P_ani = {}
I1 = 0
for a, P_ni in self.P_ani.items():
I2 = I1 + P_ni.shape[1]
kpt.P_ani[a] = kpt.P_In[I1:I2].T
I1 = I2
kpt.k = (self.k + 1) % self.kd.nibzkpts
kpt.s = self.s
return kpt
def start_sending(self, rank):
P_In = np.concatenate([P_ni.T for P_ni in self.P_ani.values()])
self.requests += [
self.kd.comm.send(self.psit_nG, rank, block=False, tag=1),
self.kd.comm.send(self.f_n, rank, block=False, tag=2),
self.kd.comm.send(P_In, rank, block=False, tag=3)]
def start_receiving(self, rank):
self.requests += [
self.kd.comm.receive(self.psit_nG, rank, block=False, tag=1),
self.kd.comm.receive(self.f_n, rank, block=False, tag=2),
self.kd.comm.receive(self.P_In, rank, block=False, tag=3)]
def wait(self):
self.kd.comm.waitall(self.requests)
self.requests = []
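# Illustrative sketch (plain Python, no MPI): KPoint implements a ring
# exchange where each rank posts non-blocking sends of its k-point data to
# one neighbour and receives from the other, then waits. The index
# arithmetic matches kpt.k = (self.k + 1) % nibzkpts; on toy data the net
# effect after one exchange is a cyclic shift:
def _example_ring_shift(data, shift=1):
    """Return `data` cyclically shifted by `shift` positions."""
    n = len(data)
    return [data[(i + shift) % n] for i in range(n)]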
class HybridXC(XCFunctional):
orbital_dependent = True
def __init__(self, name, hybrid=None, xc=None, finegrid=False,
alpha=None, skip_gamma=False, acdf=True,
qsym=True, txt=None, ecut=None):
"""Mix standard functionals with exact exchange.
name: str
Name of hybrid functional.
hybrid: float
Fraction of exact exchange.
xc: str or XCFunctional object
Standard DFT functional with scaled down exchange.
finegrid: boolean
Use fine grid for energy functional evaluations?
"""
if name == 'EXX':
assert hybrid is None and xc is None
hybrid = 1.0
xc = XC(XCNull())
elif name == 'PBE0':
assert hybrid is None and xc is None
hybrid = 0.25
xc = XC('HYB_GGA_XC_PBEH')
elif name == 'B3LYP':
assert hybrid is None and xc is None
hybrid = 0.2
xc = XC('HYB_GGA_XC_B3LYP')
if isinstance(xc, str):
xc = XC(xc)
self.hybrid = hybrid
self.xc = xc
self.type = xc.type
self.alpha = alpha
self.qsym = qsym
self.skip_gamma = skip_gamma
self.acdf = acdf
self.exx = None
self.ecut = ecut
if txt is None:
if rank == 0:
#self.txt = devnull
self.txt = sys.stdout
else:
sys.stdout = devnull
self.txt = devnull
else:
assert type(txt) is str
from ase.parallel import paropen
self.txt = paropen(txt, 'w')
XCFunctional.__init__(self, name)
def get_setup_name(self):
return 'PBE'
def calculate_radial(self, rgd, n_sLg, Y_L, v_sg,
dndr_sLg=None, rnablaY_Lv=None,
tau_sg=None, dedtau_sg=None):
return self.xc.calculate_radial(rgd, n_sLg, Y_L, v_sg,
dndr_sLg, rnablaY_Lv)
def calculate_paw_correction(self, setup, D_sp, dEdD_sp=None,
addcoredensity=True, a=None):
return self.xc.calculate_paw_correction(setup, D_sp, dEdD_sp,
addcoredensity, a)
def initialize(self, density, hamiltonian, wfs, occupations):
self.xc.initialize(density, hamiltonian, wfs, occupations)
self.nspins = wfs.nspins
self.setups = wfs.setups
self.density = density
self.kpt_u = wfs.kpt_u
self.gd = density.gd
self.kd = wfs.kd
self.bd = wfs.bd
N_c = self.gd.N_c
N = self.gd.N_c.prod()
vol = self.gd.dv * N
if self.alpha is None:
# XXX ?
self.alpha = 6 * vol**(2 / 3.0) / pi**2
self.gamma = (vol / (2 * pi)**2 * sqrt(pi / self.alpha) *
self.kd.nbzkpts)
if self.ecut is None:
ecutmax = 0.5 * pi**2 / (self.gd.h_cv**2).sum(1).max()
self.ecut = 0.5 * ecutmax
assert self.kd.N_c is not None
n = self.kd.N_c * 2 - 1
bzk_kc = np.indices(n).transpose((1, 2, 3, 0))
bzk_kc.shape = (-1, 3)
bzk_kc -= self.kd.N_c - 1
self.bzk_kc = bzk_kc.astype(float) / self.kd.N_c
self.bzq_qc = self.kd.get_bz_q_points()
if self.qsym:
op_scc = self.kd.symmetry.op_scc
self.ibzq_qc = self.kd.get_ibz_q_points(self.bzq_qc,
op_scc)[0]
self.q_weights = self.kd.q_weights * len(self.bzq_qc)
else:
self.ibzq_qc = self.bzq_qc
self.q_weights = np.ones(len(self.bzq_qc))
print self.ibzq_qc
print self.q_weights
self.pwd = PWDescriptor(self.ecut, self.gd, complex)
self.G2_qG = self.pwd.g2(self.bzk_kc)
n = 0
for k_c, Gpk2_G in zip(self.bzk_kc[:], self.G2_qG):
if (k_c > -0.5).all() and (k_c <= 0.5).all(): #XXX???
if k_c.any():
self.gamma -= np.dot(np.exp(-self.alpha * Gpk2_G),
Gpk2_G**-1)
else:
self.gamma -= np.dot(np.exp(-self.alpha * Gpk2_G[1:]),
Gpk2_G[1:]**-1)
n += 1
assert n == self.kd.N_c.prod()
self.pwd = PWDescriptor(self.ecut, self.gd, complex)
self.G2_qG = self.pwd.g2(self.ibzq_qc)
self.ghat = LFC(self.gd,
[setup.ghat_l for setup in density.setups],
KPointDescriptor(self.bzq_qc), dtype=complex)
self.interpolator = density.interpolator
self.print_initialization(hamiltonian.xc.name)
def set_positions(self, spos_ac):
self.ghat.set_positions(spos_ac)
self.spos_ac = spos_ac
def calculate(self, gd, n_sg, v_sg=None, e_g=None):
# Normal XC contribution:
exc = self.xc.calculate(gd, n_sg, v_sg, e_g)
# Add EXX contribution:
return exc + self.exx
def calculate_exx(self):
"""Non-selfconsistent calculation."""
kd = self.kd
K = len(kd.bzk_kc)
W = world.size // self.nspins
parallel = (W > 1)
self.exx = 0.0
self.exx_kq = np.zeros((K, len(self.ibzq_qc)), float)
for s in range(self.nspins):
ibz_kpts = [KPoint(kd, kpt)
for kpt in self.kpt_u if kpt.s == s]
for ik, kpt in enumerate(kd.bzk_kc):
print >> self.txt, 'K %s %s ...' % (ik, kpt)
for iq, q in enumerate(self.ibzq_qc):
kpq = kd.find_k_plus_q(q, kpts_k=[ik])
self.apply(ibz_kpts[kd.bz2ibz_k[ik]],
ibz_kpts[kd.bz2ibz_k[kpq[0]]],
ik, kpq[0], iq)
self.exx = world.sum(self.exx)
self.exx += self.calculate_exx_paw_correction()
exx_q = np.sum(self.exx_kq, 0)
print >> self.txt
print >> self.txt, \
'------------------------------------------------------'
print >> self.txt
print >> self.txt, 'Contributions: q w E_q (eV)'
for q in range(len(exx_q)):
print >> self.txt, '[%1.3f %1.3f %1.3f] %1.3f %s' % \
(self.ibzq_qc[q][0], self.ibzq_qc[q][1], self.ibzq_qc[q][2],
self.q_weights[q]/len(self.bzq_qc),
exx_q[q]/self.q_weights[q]*len(self.bzq_qc)*Ha)
print >> self.txt, 'E_EXX = %s eV' % (self.exx*Ha)
print >> self.txt
print >> self.txt, 'Calculation completed at: ', ctime()
print >> self.txt
print >> self.txt, \
'------------------------------------------------------'
print >> self.txt
def apply(self, kpt1, kpt2, ik1, ik2, iq):
k1_c = self.kd.bzk_kc[ik1]
k2_c = self.kd.bzk_kc[ik2]
q = self.ibzq_qc[iq]
if self.qsym:
for i, q in enumerate(self.bzq_qc):
if abs(q - self.ibzq_qc[iq]).max() < 1e-9:
bzq_index = i
break
else:
bzq_index = iq
N_c = self.gd.N_c
eikr_R = np.exp(-2j * pi * np.dot(np.indices(N_c).T, q / N_c).T)
Gamma = abs(q).max() < 1e-9
if Gamma and self.skip_gamma:
return
Gpk2_G = self.G2_qG[iq]
if Gamma:
Gpk2_G = Gpk2_G.copy()
Gpk2_G[0] = 1.0 / self.gamma
N = N_c.prod()
vol = self.gd.dv * N
nspins = self.nspins
fcut = 1e-10
for n1, psit1_R in enumerate(kpt1.psit_nG):
f1 = kpt1.f_n[n1]
for n2, psit2_R in enumerate(kpt2.psit_nG):
if self.acdf:
f2 = (self.q_weights[iq] * kpt2.weight
* (1 - np.sign(kpt2.eps_n[n2] - kpt1.eps_n[n1])))
else:
f2 = kpt2.f_n[n2] * self.q_weights[iq]
if abs(f1) < fcut or abs(f2) < fcut:
continue
nt_R = self.calculate_pair_density(n1, n2, kpt1, kpt2,
ik1, ik2, bzq_index)
nt_G = self.pwd.fft(nt_R * eikr_R) / N
vt_G = nt_G.copy()
vt_G *= -pi * vol / Gpk2_G
e = np.vdot(nt_G, vt_G).real * nspins * self.hybrid
self.exx += f1 * f2 * e
self.exx_kq[ik1,iq] += f1*f2*e
def calculate_pair_density(self, n1, n2, kpt1, kpt2, ik1, ik2, bzq_index):
psit1_G = self.kd.transform_wave_function(kpt1.psit_nG[n1], ik1)
psit2_G = self.kd.transform_wave_function(kpt2.psit_nG[n2], ik2)
nt_G = psit1_G.conj() * psit2_G
s1 = self.kd.sym_k[ik1]
s2 = self.kd.sym_k[ik2]
t1 = self.kd.time_reversal_k[ik1]
t2 = self.kd.time_reversal_k[ik2]
k1_c = self.kd.ibzk_kc[kpt1.k]
k2_c = self.kd.ibzk_kc[kpt2.k]
Q_aL = {}
for a in kpt1.P_ani.keys():
b1 = self.kd.symmetry.a_sa[s1, a]
b2 = self.kd.symmetry.a_sa[s2, a]
S1_c = (np.dot(self.spos_ac[a], self.kd.symmetry.op_scc[s1]) -
self.spos_ac[b1])
S2_c = (np.dot(self.spos_ac[a], self.kd.symmetry.op_scc[s2]) -
self.spos_ac[b2])
assert abs(S1_c.round() - S1_c).max() < 1e-13
assert abs(S2_c.round() - S2_c).max() < 1e-13
x1 = np.exp(2j * pi * np.dot(k1_c, S1_c))
x2 = np.exp(2j * pi * np.dot(k2_c, S2_c))
P1_i = np.dot(self.setups[a].R_sii[s1], kpt1.P_ani[b1][n1]) * x1
P2_i = np.dot(self.setups[a].R_sii[s2], kpt2.P_ani[b2][n2]) * x2
if t1:
P1_i = P1_i.conj()
if t2:
P2_i = P2_i.conj()
D_ii = np.outer(P1_i.conj(), P2_i)
D_p = pack(D_ii)
Q_aL[a] = np.dot(D_p, self.setups[a].Delta_pL)
self.ghat.add(nt_G, Q_aL, bzq_index)
return nt_G
def calculate_exx_paw_correction(self):
exx = 0
deg = 2 // self.nspins # spin degeneracy
for a, D_sp in self.density.D_asp.items():
setup = self.setups[a]
for D_p in D_sp:
D_ii = unpack2(D_p)
ni = len(D_ii)
for i1 in range(ni):
for i2 in range(ni):
A = 0.0
for i3 in range(ni):
p13 = packed_index(i1, i3, ni)
for i4 in range(ni):
p24 = packed_index(i2, i4, ni)
A += setup.M_pp[p13, p24] * D_ii[i3, i4]
p12 = packed_index(i1, i2, ni)
exx -= self.hybrid / deg * D_ii[i1, i2] * A
if setup.X_p is not None:
exx -= self.hybrid * np.dot(D_p, setup.X_p)
exx += self.hybrid * setup.ExxC
return exx
def print_initialization(self, xc):
print >> self.txt, \
'------------------------------------------------------'
print >> self.txt, 'Non-self-consistent HF correlation energy'
print >> self.txt, \
'------------------------------------------------------'
print >> self.txt, 'Started at: ', ctime()
print >> self.txt
print >> self.txt, \
'Ground state XC functional : %s' % xc
print >> self.txt, \
'Valence electrons : %s' % self.setups.nvalence
print >> self.txt, \
'Number of Spins : %s' % self.nspins
print >> self.txt, \
'Plane wave cutoff energy : %4.1f eV' % (self.ecut*Ha)
print >> self.txt, \
'Gamma q-point excluded : %s' % self.skip_gamma
if not self.skip_gamma:
print >> self.txt, \
'Alpha parameter : %s' % self.alpha
print >> self.txt, \
'Gamma parameter : %3.3f' % self.gamma
print >> self.txt, \
'ACDF method : %s' % self.acdf
print >> self.txt, \
'Number of k-points : %s' % len(self.kd.bzk_kc)
print >> self.txt, \
'Number of Irreducible k-points : %s' % len(self.kd.ibzk_kc)
print >> self.txt, \
'Number of q-points : %s' % len(self.bzq_qc)
if not self.qsym:
print >> self.txt, \
'q-point symmetry : %s' % self.qsym
else:
print >> self.txt, \
'Number of Irreducible q-points : %s' % len(self.ibzq_qc)
print >> self.txt
for q, weight in zip(self.ibzq_qc, self.q_weights):
print >> self.txt, 'q: [%1.3f %1.3f %1.3f] - weight: %1.3f' % \
(q[0],q[1],q[2], weight/len(self.bzq_qc))
print >> self.txt
print >> self.txt, \
'------------------------------------------------------'
print >> self.txt, \
'------------------------------------------------------'
print >> self.txt
print >> self.txt, 'Looping over k-points in the full Brillouin zone'
print >> self.txt
|
ajylee/gpaw-rtxs
|
gpaw/xc/hybridq.py
|
Python
|
gpl-3.0
| 16,477
|
[
"ASE",
"GPAW"
] |
34fa95107507bc33c75c1e84b5128a57b6ee2209fbcbfa6ff1aea831a7022629
|
# -*- coding: utf-8
from yade import ymport, utils,pack,export,qt
import gts,os
from yade import geom
#import matplotlib
from yade import plot
#from pylab import *
#import os.path, locale
#################################
##### FUNCTIONS ####
#################################
def limitfinder():
for b in O.bodies:
if(b.state.pos[2]>=L-2*radius):
if isinstance(b.shape,GridNode):
top_boundary.append(b.id)
b.shape.color=(1,0,0)
b.state.blockedDOFs='z'
if(b.state.pos[2]<0.1*radius ):
if isinstance(b.shape,GridNode):
bottom_boundary.append(b.id)
b.state.blockedDOFs='z'
b.shape.color=(1,0,0)
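# Illustrative sketch (plain Python, no YADE runtime needed): limitfinder()
# selects grid nodes within one diameter of the top of the cylinder and
# just above its bottom, then colors them and blocks their z DOF. The
# selection test alone, on a toy list of z positions:
def example_boundary_ids(z_positions, length, radius):
    """Return (top_ids, bottom_ids) index lists for a list of z positions."""
    top = [i for i, z in enumerate(z_positions) if z >= length - 2 * radius]
    bottom = [i for i, z in enumerate(z_positions) if z < 0.1 * radius]
    return top, bottom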
##############################
##### SCRIPT ####
##############################
try:
os.mkdir('data')
except:
pass
try:
os.mkdir('paraview')
except:
pass
isBatch = runningInBatch()
####################
### ENGINES ###
####################
O.engines=[
ForceResetter(),
InsertionSortCollider([
Bo1_Sphere_Aabb(),
Bo1_Wall_Aabb(),
Bo1_PFacet_Aabb(),
Bo1_Facet_Aabb(),
]),
InteractionLoop([
Ig2_GridNode_GridNode_GridNodeGeom6D(),
Ig2_GridConnection_GridConnection_GridCoGridCoGeom(),
Ig2_Sphere_PFacet_ScGridCoGeom(),
Ig2_Sphere_Sphere_ScGeom(),
Ig2_Facet_Sphere_ScGeom(),
Ig2_Wall_Sphere_ScGeom()
],
[Ip2_CohFrictMat_CohFrictMat_CohFrictPhys(setCohesionNow=True,setCohesionOnNewContacts=True),
Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_ScGeom6D_CohFrictPhys_CohesionMoment(),
Law2_ScGeom_FrictPhys_CundallStrack(),
Law2_ScGridCoGeom_FrictPhys_CundallStrack(),
Law2_GridCoGridCoGeom_FrictPhys_CundallStrack()
]
),
]
######################
### PROPERTIES ###
######################
radius=.0008
sigma=-3e6
#### Parameters of a rectangular grid ###
L=0.205 #length [m]
l=0.101/2. #half width (radius) [m]
nbL=36 # number of nodes along the length [#]; must be even
nbl=44 # number of nodes around the perimeter [#]; MUST be a multiple of 4!
#nbL=1 # number of nodes along the length [#]; must be even
#nbl=4 # number of nodes around the perimeter [#]; MUST be a multiple of 4!
r=radius
color=[155./255.,155./255.,100./255.]
oriBody = Quaternion(Vector3(0,0,1),(pi/2))
nodesIds=[]
nodesIds1=[]
cylIds=[]
pfIds=[]
top_boundary=[]
bottom_boundary=[]
####################
### MATERIAL ###
####################
poisson=0.28
E=2*7.9e10*(1+poisson)
density=7.8e10
Et=0
frictionAngle=0.096
frictionAngleW=0.228
O.materials.append(CohFrictMat(young=E*0.1,poisson=poisson,density=density,frictionAngle=frictionAngle,normalCohesion=1e19,shearCohesion=1e19,momentRotationLaw=False,alphaKr=0,label='NodeMat'))
O.materials.append(FrictMat(young=E*0.1,poisson=poisson,density=density,frictionAngle=frictionAngle,label='Pmat'))
O.materials.append(FrictMat(young=E,poisson=poisson,density=density,frictionAngle=frictionAngle,label='Smat'))
##############################
### SAMPLE GENERATION ###
##############################
kw={'color':[1,1,1],'wire':False,'dynamic':True,'material':2}
pile=ymport.text('spheres.txt',**kw)
pile2=O.bodies.append(pile)
#sup()
print hMin(2), hMax(2)
zmin=hMin(2)
zmax=hMax(2)
#L=hMax(2)
#################################
#### MEMBRANE GENERATION ###
#################################
#mesh=2
#if(mesh==1):
##Create all nodes first :
#for i in range(0,nbL+1):
#for j in range(0,nbl):
#if (i%2==0):
#z=i*L/float(nbL)
#y=l*sin(2*pi*j/float(nbl))
#x=l*cos(2*pi*j/float(nbl))
#nodesIds.append( O.bodies.append(gridNode([x,y,z],r,wire=False,fixed=False,material='NodeMat',color=color)) )
#else:
#z=i*L/float(nbL)
#y=0.5*l*(sin(2*pi*j/float(nbl))+sin(2*pi*(j+1)/float(nbl)))
#x=0.5*l*(cos(2*pi*j/float(nbl))+cos(2*pi*(j+1)/float(nbl)))
#nodesIds1.append( O.bodies.append(gridNode([x,y,z],r,wire=False,fixed=False,material='NodeMat',color=color)) )
###Create connection between the nodes
#for i in range(0,(nbL+1)/2+1,1):
#for j in range(0,nbl-1):
#O.bodies.append( gridConnection(nodesIds[i*nbl+j],nodesIds[i*nbl+j+1],r,color=color,material='Pmat',Et=Et) )
#for i in range(0,(nbL+1)/2,1):
#for j in range(0,nbl):
#O.bodies.append( gridConnection(nodesIds[i*nbl+j],nodesIds[(i+1)*nbl+j],r,color=color,material='Pmat',Et=Et) )
#for i in range(-1,(nbL+1)/2):
#j=nbl
#O.bodies.append( gridConnection(nodesIds[i*nbl+j],nodesIds[(i+1)*nbl+j-1],r,color=color,material='Pmat',Et=Et) )
#for i in range(0,(nbL+1)/2,1):
#for j in range(0,nbl,1):
#cylIds.append(O.bodies.append( gridConnection(nodesIds[j+i*(nbl)],nodesIds1[j+i*(nbl)],r,color=color,material='Pmat',Et=Et) ))
#cylIds.append(O.bodies.append( gridConnection(nodesIds[j+(i+1)*(nbl)],nodesIds1[j+i*(nbl)],r,color=color,material='Pmat',Et=Et) ))
#for i in range(0,(nbL+1)/2,1):
#for j in range(0,nbl-1,1):
#print j,nbl
#cylIds.append(O.bodies.append( gridConnection(nodesIds[j+1+(i+1)*(nbl)],nodesIds1[j+i*(nbl)],r,color=color,material='Pmat',Et=Et) ))
#cylIds.append(O.bodies.append( gridConnection(nodesIds[j+1+i*(nbl)],nodesIds1[j+i*(nbl)],r,color=color,material='Pmat',Et=Et) ))
#for i in range(0,(nbL+1)/2,1):
#j=nbl-1
#cylIds.append(O.bodies.append( gridConnection(nodesIds[i*nbl+j+1],nodesIds1[j+i*(nbl)],r,color=color,material='Pmat',Et=Et) ))
#cylIds.append(O.bodies.append( gridConnection(nodesIds[(i-1)*nbl+j+1],nodesIds1[j+i*(nbl)],r,color=color,material='Pmat',Et=Et) ))
###Create PFacets
##wire=True
#for i in range(0,(nbL+1)/2,1):
#for j in range(0,nbl):
#pfIds.append(O.bodies.append(pfacet(nodesIds[i*nbl+j],nodesIds[(i+1)*nbl+j],nodesIds1[i*nbl+j],color=color,mask=5,material='Pmat')))
#for i in range(0,(nbL+1)/2,1):
#for j in range(0,nbl-1):
#pfIds.append(O.bodies.append(pfacet(nodesIds[i*nbl+j],nodesIds1[i*nbl+j],nodesIds[i*nbl+j+1],color=color,mask=5,material='Pmat')))
#pfIds.append(O.bodies.append(pfacet(nodesIds[(i+1)*nbl+j+1],nodesIds[i*nbl+j+1],nodesIds1[i*nbl+j],color=color,mask=5,material='Pmat')))
#pfIds.append(O.bodies.append(pfacet(nodesIds[(i+1)*nbl+j+1],nodesIds1[i*nbl+j],nodesIds[(i+1)*nbl+j],color=color,mask=5,material='Pmat')))
#for i in range(0,(nbL+1)/2,1):
#j=nbl-1
#pfIds.append(O.bodies.append(pfacet( nodesIds[(i-1)*nbl+j+1],nodesIds1[i*nbl+j],nodesIds[i*nbl+j+1],color=color,mask=5,material='Pmat' )))
#pfIds.append(O.bodies.append(pfacet(nodesIds[(i-1)*nbl+j+1],nodesIds[i*nbl+j],nodesIds1[i*nbl+j],color=color,mask=5,material='Pmat')))
#pfIds.append(O.bodies.append(pfacet(nodesIds[i*nbl+j+1],nodesIds1[i*nbl+j],nodesIds[(i+1)*nbl+j],color=color,mask=5,material='Pmat')))
#if(mesh==2):
##Create all nodes first :
#for i in range(0,nbL+1):
#for j in range(0,nbl):
#z=i*L/float(nbL)
#y=l*sin(2*pi*j/float(nbl))
#x=l*cos(2*pi*j/float(nbl))
#nodesIds.append( O.bodies.append(gridNode([x,y,z],r,wire=False,fixed=False,material='NodeMat',color=color)) )
###Create connection between the nodes
#for i in range(0,nbL+1):
#for j in range(0,nbl-1):
#O.bodies.append( gridConnection(nodesIds[i*nbl+j],nodesIds[i*nbl+j+1],r,color=color,mask=5,material='Pmat',Et=Et) )
#for i in range(0,nbL,1):
#for j in range(0,nbl):
#O.bodies.append( gridConnection(nodesIds[i*nbl+j],nodesIds[(i+1)*nbl+j],r,color=color,mask=5,material='Pmat',Et=Et) )
#for i in range(-1,nbL):
#j=nbl
#O.bodies.append( gridConnection(nodesIds[i*nbl+j],nodesIds[(i+1)*nbl+j-1],r,color=color,mask=5,material='Pmat',Et=Et) )
#for i in range(0,nbL):
#for j in range(0,nbl-1):
#if (j%2==0):
#O.bodies.append( gridConnection(nodesIds[i*nbl+j],nodesIds[(i+1)*nbl+j+1],r,color=color,mask=5,material='Pmat',Et=Et) )
#else:
#O.bodies.append( gridConnection(nodesIds[(i+1)*nbl+j],nodesIds[i*nbl+j+1],r,color=color,mask=5,material='Pmat',Et=Et) )
#for i in range(0,nbL):
#j=nbl
##O.bodies[nodesIds[(i-1)*nbl+j]].shape.color=Vector3(155./255.,155./255.,1.)
##O.bodies[nodesIds[(i)*nbl+j-1]].shape.color=Vector3(1,0,0)
#O.bodies.append( gridConnection(nodesIds[(i-1)*nbl+j],nodesIds[(i+1)*nbl+j-1],r,color=color,mask=5,material='Pmat',Et=Et) )
####Create PFacets
###wire=True
#for i in range(0,nbL):
#for j in range(0,nbl-1):
#if (j%2==0):
#pfIds.append(O.bodies.append(pfacet(nodesIds[i*nbl+j],nodesIds[(i+1)*nbl+j],nodesIds[(i+1)*nbl+j+1],color=color,mask=5,material='Pmat')))
#pfIds.append(O.bodies.append(pfacet(nodesIds[i*nbl+j],nodesIds[(i+1)*nbl+j+1],nodesIds[(i)*nbl+j+1],color=color,mask=5,material='Pmat')))
#else:
#pfIds.append(O.bodies.append(pfacet(nodesIds[i*nbl+j],nodesIds[(i+1)*nbl+j],nodesIds[(i)*nbl+j+1],color=color,mask=5,material='Pmat')))
#pfIds.append(O.bodies.append(pfacet(nodesIds[i*nbl+j+1],nodesIds[(i+1)*nbl+j],nodesIds[(i+1)*nbl+j+1],color=color,mask=5,material='Pmat')))
#for i in range(0,nbL,1):
#j=nbl
#pfIds.append(O.bodies.append(pfacet( nodesIds[i*nbl+j],nodesIds[(i-1)*nbl+j],nodesIds[(i+1)*nbl+j-1],color=color,material='Pmat' )))
#pfIds.append(O.bodies.append(pfacet( nodesIds[(i)*nbl+j-1],nodesIds[(i+1)*nbl+j-1],nodesIds[(i-1)*nbl+j],color=color,material='Pmat' )))
#limitfinder()
#########################
##### WALL GENERATION ##
#########################
#O.materials.append(FrictMat(young=E,poisson=poisson,density=density,frictionAngle=frictionAngleW,label='Wmat'))
#topPlate=utils.wall(position=hMax(2)+radius,sense=0, axis=2,color=Vector3(1,0,0),material='Wmat')
#O.bodies.append(topPlate)
#bottomPlate=utils.wall(position=-hMin(2)-radius,sense=0, axis=2,color=Vector3(1,0,0),material='Wmat')
#O.bodies.append(bottomPlate)
####################
##### APPLY LOAD ##
####################
##### APPLY CONFINING PRESSURE
#def Apply_confiningpressure():
##print 'Apply_confiningpressure'
#for i in pfIds:
#e0 =O.bodies[i].shape.node3.state.pos - O.bodies[i].shape.node1.state.pos
#e1 =O.bodies[i].shape.node2.state.pos - O.bodies[i].shape.node1.state.pos
#e2 =O.bodies[i].shape.node2.state.pos - O.bodies[i].shape.node3.state.pos
#P=(O.bodies[i].shape.node1.state.pos+O.bodies[i].shape.node2.state.pos+O.bodies[i].shape.node3.state.pos)/3
##print e0,e1,e2
##nodesIds.append( O.bodies.append(gridNode([P[0],P[1],P[2]],r,wire=False,fixed=True,material='NodeMat',color=color)) )
##print 'P=',P
#v0 = e0
#v1 = e1
#v2 = P - O.bodies[i].shape.node1.state.pos
###// Compute dot products
#dot00 = scalar(v0,v0)
#dot01 = scalar(v0,v1)
#dot02 = scalar(v0,v2)
#dot11 = scalar(v1,v1)
#dot12 = scalar(v1,v2)
###// Compute the barycentric coordinates of the projection P
#invDenom = 1 / (dot00 * dot11 - dot01 * dot01)
#p1 = (dot11 * dot02 - dot01 * dot12) * invDenom
#p2 = (dot00 * dot12 - dot01 * dot02) * invDenom
#p3 = 1-p1-p2
#a = sqrt(scalar(e0,e0))
#b = sqrt(scalar(e1,e1))
#c = sqrt(scalar(e2,e2))
#s=0.5*(a+b+c)
#area= sqrt(s*(s-a)*(s-b)*(s-c))
#Fapplied=area*sigma
#normal = cross(e0,e1)
#normal=normal/normal.norm()
#F=Fapplied
#p1normal=F*p1*normal
#p2normal=F*p2*normal
#p3normal=F*p3*normal
#O.forces.addF(O.bodies[i].shape.node1.id,p1normal,permanent=False)
#O.forces.addF(O.bodies[i].shape.node2.id,p2normal,permanent=False)
#O.forces.addF(O.bodies[i].shape.node3.id,p3normal,permanent=False)
##Apply_confiningpressure()
#sigma3=0
#def check_confiningpressure():
#global sigma3
#sigma3=0
#for i in pfIds:
#e0 =O.bodies[i].shape.node3.state.pos - O.bodies[i].shape.node1.state.pos
#e1 =O.bodies[i].shape.node2.state.pos - O.bodies[i].shape.node1.state.pos
#e2 =O.bodies[i].shape.node2.state.pos - O.bodies[i].shape.node3.state.pos
#a = sqrt(scalar(e0,e0))
#b = sqrt(scalar(e1,e1))
#c = sqrt(scalar(e2,e2))
#s=0.5*(a+b+c)
#area= sqrt(s*(s-a)*(s-b)*(s-c))
#F=(O.forces.f(O.bodies[i].shape.node1.id) + O.forces.f(O.bodies[i].shape.node2.id)+O.forces.f(O.bodies[i].shape.node3.id)).norm()
#sigma3=sigma3+F/area
##print sigma3
#return sigma3
#pos=topPlate.state.pos[2]
#def dataCollector():
#global pos
#if(pos<0.1845):
#O.wait()
#saveData()
#S=pi*l**2
#Fnt=O.forces.f(topPlate.id)[2]
#Fnb=O.forces.f(bottomPlate.id)[2]
#sigma1=Fnt/S
#sigma3=check_confiningpressure()
#pos=topPlate.state.pos[2]
#plot.addData(t=O.time,pos=pos,Fnt=Fnt,Fnb=Fnb,sigma1=sigma1,sigma3=sigma3,unbF=unbalancedForce())
#def saveData():
#plot.saveDataTxt('triaxial_res.dat',vars=('t','pos','Fnt','Fnb','sigma1','sigma3','unbF'))
#plot.plots={'t':('sigma1',Et,'sigma3')}
##### MOVE TOP AND BOTTOM WALL
#v=1.7e-03
##v=1
#def moveWall(v):
#topPlate.state.vel=(0,0,-v)
##bottomPlate.state.vel=(0,0,v)
##g=-9.81
#g=0
##moveWall(v)
##limitfinder()
############################
###### ENGINE DEFINITION ##
############################
#O.dt=0.5*PWaveTimeStep()
#O.engines=O.engines+[
#PyRunner(iterPeriod=1,dead=False,command='Apply_confiningpressure()'),
#NewtonIntegrator(damping=0.7,gravity=(0,0,g),label='Newton'),
#PyRunner(initRun=True,iterPeriod=1,command='dataCollector()'),
#VTKRecorder(iterPeriod=500,initRun=True,fileName='paraview/'+'triaxial_res-',recorders=['spheres','velocity']),
#]
#if not isBatch:
## VISUALIZATION
#from yade import qt
#qt.Controller()
##qtv = qt.View()
##qtr = qt.Renderer()
#plot.plot(noShow=False, subPlots=True)
#O.run(5000)
##moveWall(v)
#else:
#O.run(5000,True)
#moveWall(v)
#O.wait()
#saveData()
|
anna-effeindzourou/trunk
|
examples/anna_scripts/triax/triaxial_test.py
|
Python
|
gpl-2.0
| 13,308
|
[
"ParaView"
] |
826687be038b8a1d9aefd663623a65242fc2a61c8de30bf8417c01c51d865c71
|
""" Plotting Service generates graphs according to the client specifications
and data
"""
__RCSID__ = "$Id$"
import os
import hashlib
from types import DictType, ListType
from DIRAC import S_OK, S_ERROR, rootPath, gConfig, gLogger, gMonitor
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.FrameworkSystem.Service.PlotCache import gPlotCache
def initializePlottingHandler( serviceInfo ):
#Get data location
plottingSection = PathFinder.getServiceSection( "Framework/Plotting" )
dataPath = gConfig.getValue( "%s/DataLocation" % plottingSection, "data/graphs" )
dataPath = dataPath.strip()
if "/" != dataPath[0]:
dataPath = os.path.realpath( "%s/%s" % ( gConfig.getValue( '/LocalSite/InstancePath', rootPath ), dataPath ) )
gLogger.info( "Data will be written into %s" % dataPath )
try:
os.makedirs( dataPath )
except:
pass
try:
testFile = "%s/plot__.test" % dataPath
fd = file( testFile, "w" )
fd.close()
os.unlink( testFile )
except IOError:
gLogger.fatal( "Can't write to %s" % dataPath )
return S_ERROR( "Data location is not writable" )
gPlotCache.setPlotsLocation( dataPath )
gMonitor.registerActivity( "plotsDrawn", "Drawn plot images", "Plotting requests", "plots", gMonitor.OP_SUM )
return S_OK()
class PlottingHandler( RequestHandler ):
def __calculatePlotHash( self, data, metadata, subplotMetadata ):
m = hashlib.md5()
m.update( repr( {'Data':data, 'PlotMetadata':metadata, 'SubplotMetadata':subplotMetadata} ) )
return m.hexdigest()
types_generatePlot = [ [DictType, ListType], DictType ]
def export_generatePlot( self, data, plotMetadata, subplotMetadata = {} ):
""" Create a plot according to the client specification and return its name
"""
plotHash = self.__calculatePlotHash( data, plotMetadata, subplotMetadata )
result = gPlotCache.getPlot( plotHash, data, plotMetadata, subplotMetadata )
if not result['OK']:
return result
return S_OK( result['Value']['plot'] )
def transfer_toClient( self, fileId, token, fileHelper ):
"""
Get graphs data
"""
retVal = gPlotCache.getPlotData( fileId )
if not retVal[ 'OK' ]:
return retVal
retVal = fileHelper.sendData( retVal[ 'Value' ] )
if not retVal[ 'OK' ]:
return retVal
fileHelper.sendEOF()
return S_OK()
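# Illustrative sketch (standalone helper, not part of the DIRAC service API):
# the plot cache key computed above is just the md5 hex digest of the repr()
# of the whole request, so resubmitting identical data and metadata resolves
# to the same cached image instead of triggering a new draw.
def _examplePlotHash( data, plotMetadata, subplotMetadata = None ):
  m = hashlib.md5()
  m.update( repr( { 'Data' : data,
                    'PlotMetadata' : plotMetadata,
                    'SubplotMetadata' : subplotMetadata or {} } ) )
  return m.hexdigest()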
|
coberger/DIRAC
|
FrameworkSystem/Service/PlottingHandler.py
|
Python
|
gpl-3.0
| 2,415
|
[
"DIRAC"
] |
3faf0de094668bfcaf6df8bfde47a069c63e556b1e0ab9b05a585516e880eb06
|
"""Read and write notebooks in JSON format.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from base64 import encodestring
from .rwbase import NotebookReader, NotebookWriter
from .nbbase import from_dict
import json
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class JSONReader(NotebookReader):
def reads(self, s, **kwargs):
nb = json.loads(s, **kwargs)
return self.to_notebook(nb, **kwargs)
def to_notebook(self, d, **kwargs):
"""Convert from a raw JSON dict to a nested NotebookNode structure."""
return from_dict(d)
class JSONWriter(NotebookWriter):
def writes(self, nb, **kwargs):
kwargs['indent'] = 4
return json.dumps(nb, **kwargs)
_reader = JSONReader()
_writer = JSONWriter()
reads = _reader.reads
read = _reader.read
to_notebook = _reader.to_notebook
write = _writer.write
writes = _writer.writes
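# Usage sketch (hypothetical minimal v1 notebook dict): reading and writing
# through this module is a plain JSON decode/encode plus the from_dict()
# wrapping into NotebookNode objects:
#
#     >>> nb = to_notebook({'nbformat': 1, 'worksheets': []})
#     >>> isinstance(writes(nb), str)
#     True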
|
mattvonrocketstein/smash
|
smashlib/ipy3x/nbformat/v1/nbjson.py
|
Python
|
mit
| 1,503
|
[
"Brian"
] |
b300ab603aaba5e8087e1cd024f75b6aaee430c8258b77122180451da529993d
|
# -*- coding: utf-8 -*-
#
# DAX documentation build configuration file, created by
# sphinx-quickstart on Tue Jan 12 14:43:53 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import io
sys.path.insert(0, os.path.abspath('..'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.ifconfig',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DAX'
copyright = u'2016, Benjamin Yvernault, Brian Boyd, Stephen Damon, Andrew Plassard'
author = u'Benjamin Yvernault, Brian Boyd, Stephen Damon, Andrew Plassard'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.5.0'
# The full version, including alpha/beta/rc tags.
release = u'0.5.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'DAXdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DAX.tex', u'DAX Documentation',
u'Benjamin Yvernault, Brian Boyd, Stephen Damon, Andrew Plassard', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'dax', u'DAX Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DAX', u'DAX Documentation',
author, 'DAX', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
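# (added note, a sketch under assumptions) Newer Sphinx releases prefer named
# intersphinx targets; the mapping above could equivalently be written as
# shown below. The 'python' key name is our assumption, not part of the
# generated file.
#intersphinx_mapping = {'python': ('https://docs.python.org/', None)}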
|
VUIIS/dax_spiders
|
docs/conf.py
|
Python
|
mit
| 9,546
|
[
"Brian"
] |
3deb0d71736dc963349606d769281dadee69dfb3c950331f400a09b74a9b9ecd
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0128_auto_20160419_1708'),
]
operations = [
migrations.AlterField(
model_name='usicalvote',
name='entry',
field=models.PositiveIntegerField(default=0, choices=[(7, "\u10e6's \u262a\u2605\u266a"), (11, "\u03b2N's"), (13, 'FURious Alpaca'), (14, 'wAr-RICE'), (21, 'Crystal\u2756Lilies'), (22, 'Procrastinate \u2192 Tomorrow'), (23, 'Petit \u01b8\u04dc\u01b7 Papillon'), (38, '\u273f\u0187\u043d\u03c3c\u03c3\u2113\u03b1\u0442 \u0191\u03c3\u03b7\u2202\u03b1\u03b7\u0442\u273f'), (40, 'NYAvigators'), (54, '\u30dfk\u03bc'), (56, 'lilaq\u273f'), (57, 'Sock It 2 Me'), (59, '\u8336\u8336\u8336'), (61, 'AKB0033'), (62, 'Undefined Red'), (67, 'Midnight\u273fBlossoms')]),
preserve_default=True,
),
]
|
dburr/SchoolIdolAPI
|
api/migrations/0129_auto_20160511_1442.py
|
Python
|
apache-2.0
| 956
|
[
"CRYSTAL"
] |
c951415ab309cfa3bce7f896125b5817f7e384bf6b8fcffa9ae9d769ca594b1a
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a plugin for parsing Google Analytics cookies."""
import urllib
from plaso.lib import errors
from plaso.lib import event
from plaso.lib import eventdata
from plaso.parsers.cookie_plugins import interface
class GoogleAnalyticsEvent(event.PosixTimeEvent):
"""A simple placeholder for a Google Analytics event."""
def __init__(self, timestamp, timestamp_desc, data_type, **kwargs):
"""Initialize a Google Analytics event."""
super(GoogleAnalyticsEvent, self).__init__(
timestamp, timestamp_desc, data_type)
for key, value in kwargs.iteritems():
setattr(self, key, value)
class GoogleAnalyticsUtmzPlugin(interface.CookiePlugin):
"""A browser cookie plugin for Google Analytics cookies."""
NAME = 'cookie_ganalytics_utmz'
COOKIE_NAME = u'__utmz'
# Point to few sources for URL information.
URLS = [
(u'http://www.dfinews.com/articles/2012/02/'
u'google-analytics-cookies-and-forensic-implications')]
# Google Analytics __utmz variable translation.
# Taken from:
# http://www.dfinews.com/sites/dfinews.com/files/u739/Tab2Cookies020312.jpg
GA_UTMZ_TRANSLATION = {
'utmcsr': 'Last source used to access.',
'utmccn': 'Ad campaign information.',
'utmcmd': 'Last type of visit.',
'utmctr': 'Keywords used to find site.',
'utmcct': 'Path to the page of referring link.'}
  def GetEntries(self, cookie_data, **unused_kwargs):
    """Yield event objects extracted from the __utmz cookie value."""
# The structure of the field:
# <domain hash>.<last time>.<sessions>.<sources>.<variables>
fields = cookie_data.split('.')
if len(fields) > 5:
variables = '.'.join(fields[4:])
fields = fields[0:4]
fields.append(variables)
if len(fields) != 5:
raise errors.WrongPlugin(u'Wrong number of fields. [{} vs. 5]'.format(
len(fields)))
domain_hash, last, sessions, sources, variables = fields
extra_variables = variables.split('|')
extra_variables_translated = []
for variable in extra_variables:
key, _, value = variable.partition('=')
translation = self.GA_UTMZ_TRANSLATION.get(key, key)
try:
value_line = unicode(urllib.unquote(str(value)), 'utf-8')
except UnicodeDecodeError:
value_line = repr(value)
extra_variables_translated.append(u'{} = {}'.format(
translation, value_line))
yield GoogleAnalyticsEvent(
int(last, 10), eventdata.EventTimestamp.LAST_VISITED_TIME,
self._data_type, domain_hash=domain_hash, sessions=int(sessions, 10),
sources=int(sources, 10), extra=extra_variables_translated)
class GoogleAnalyticsUtmaPlugin(interface.CookiePlugin):
"""A browser cookie plugin for Google Analytics cookies."""
NAME = 'cookie_ganalytics_utma'
COOKIE_NAME = u'__utma'
# Point to few sources for URL information.
URLS = [
(u'http://www.dfinews.com/articles/2012/02/'
u'google-analytics-cookies-and-forensic-implications')]
def GetEntries(self, cookie_data, **unused_kwargs):
"""Yield event objects extracted from the cookie."""
    # Values have the structure of:
# <domain hash>.<visitor ID>.<first visit>.<previous>.<last>.<# of
# sessions>
fields = cookie_data.split('.')
# Check for a valid record.
if len(fields) != 6:
raise errors.WrongPlugin(u'Wrong number of fields. [{} vs. 6]'.format(
len(fields)))
domain_hash, visitor_id, first_visit, previous, last, sessions = fields
# TODO: Double check this time is stored in UTC and not local time.
first_epoch = int(first_visit, 10)
yield GoogleAnalyticsEvent(
first_epoch, 'Analytics Creation Time', self._data_type,
domain_hash=domain_hash, visitor_id=visitor_id,
sessions=int(sessions, 10))
yield GoogleAnalyticsEvent(
int(previous, 10), 'Analytics Previous Time', self._data_type,
domain_hash=domain_hash, visitor_id=visitor_id,
sessions=int(sessions, 10))
yield GoogleAnalyticsEvent(
int(last, 10), eventdata.EventTimestamp.LAST_VISITED_TIME,
self._data_type, domain_hash=domain_hash, visitor_id=visitor_id,
sessions=int(sessions, 10))
class GoogleAnalyticsUtmbPlugin(interface.CookiePlugin):
"""A browser cookie plugin for Google Analytics cookies."""
NAME = 'cookie_ganalytics_utmb'
COOKIE_NAME = u'__utmb'
# Point to few sources for URL information.
URLS = [
(u'http://www.dfinews.com/articles/2012/02/'
u'google-analytics-cookies-and-forensic-implications')]
def GetEntries(self, cookie_data, **unused_kwargs):
"""Yield event objects extracted from the cookie."""
    # Values have the structure of:
# <domain hash>.<pages viewed>.10.<last time>
fields = cookie_data.split('.')
# Check for a valid record.
if len(fields) != 4:
raise errors.WrongPlugin(u'Wrong number of fields. [{} vs. 4]'.format(
len(fields)))
domain_hash, pages_viewed, _, last = fields
yield GoogleAnalyticsEvent(
int(last, 10), eventdata.EventTimestamp.LAST_VISITED_TIME,
self._data_type, domain_hash=domain_hash,
pages_viewed=int(pages_viewed, 10))
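# --- Illustrative sketch (added; not part of the original plaso module) ---
# A minimal, standalone look at the six-field __utma layout parsed by
# GoogleAnalyticsUtmaPlugin.GetEntries. The sample cookie value below is
# hypothetical; the three middle fields are POSIX timestamps.
if __name__ == '__main__':
  sample = u'173272373.1123456789.1293840000.1293850000.1293860000.5'
  domain_hash, visitor_id, first, previous, last, sessions = sample.split('.')
  print 'hash=%s visitor=%s last_visit_epoch=%s sessions=%d' % (
      domain_hash, visitor_id, last, int(sessions, 10))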
|
iwm911/plaso
|
plaso/parsers/cookie_plugins/ganalytics.py
|
Python
|
apache-2.0
| 5,883
|
[
"VisIt"
] |
dbf23d6d150ca01a405f8a265c2cf69a7ec4ed7856406dd84e7fb8226a306b3c
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 10 16:26:15 2016
@author: Rachid & Chaima
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage, misc
import image
import utils
import pdb
#
#def div0( a, b ):
# """ ignore / 0, div0( [-1, 0, 1], 0 ) -> [0, 0, 0] """
# with np.errstate(divide='ignore', invalid='ignore'):
# c = np.true_divide( a, b )
# c[ ~ np.isfinite( c )] = 0
# return c
class LaplacianMap(object):
"""Class for weights attribution with Laplacian Fusion"""
def __init__(self, fmt, names, n=3):
"""names is a liste of names, fmt is the format of the images"""
self.images = []
for name in names:
self.images.append(image.Image(fmt, name, crop=True, n=n))
self.shape = self.images[0].shape
self.num_images = len(self.images)
self.height_pyr = n
def get_weights_map(self, w_c, w_s, w_e):
"""Return the normalized Weight map"""
self.weights = []
sums = np.zeros((self.shape[0], self.shape[1]))
for image_name in self.images:
contrast = image_name.contrast()
saturation = image_name.saturation()
exposedness = image_name.exposedness()
            weight = (contrast ** w_c) * (saturation ** w_s) * \
                (exposedness ** w_e) + 1e-12
self.weights.append(weight)
sums = sums + weight
for index in range(self.num_images):
self.weights[index] = self.weights[index] / sums
return self.weights
def get_gaussian_pyramid(self, image, n):
"""Return the Gaussian Pyramid of an image"""
gaussian_pyramid_floors = [image]
for floor in range(1, n):
gaussian_pyramid_floors.append(
utils.Reduce(gaussian_pyramid_floors[-1], 1))
return gaussian_pyramid_floors
def get_gaussian_pyramid_weights(self):
"""Return the Gaussian Pyramid of the Weight map of all images"""
self.weights_pyramid = []
for index in range(self.num_images):
self.weights_pyramid.append(
self.get_gaussian_pyramid(self.weights[index],
self.height_pyr))
return self.weights_pyramid
def get_laplacian_pyramid(self, image, n):
"""Return the Laplacian Pyramid of an image"""
gaussian_pyramid_floors = self.get_gaussian_pyramid(image, n)
laplacian_pyramid_floors = [gaussian_pyramid_floors[-1]]
for floor in range(n - 2, -1, -1):
new_floor = gaussian_pyramid_floors[floor] - utils.Expand(
gaussian_pyramid_floors[floor + 1], 1)
laplacian_pyramid_floors = [new_floor] + laplacian_pyramid_floors
return laplacian_pyramid_floors
def get_laplacian_pyramid_images(self):
"""Return all the Laplacian pyramid for all images"""
self.laplacian_pyramid = []
for index in range(self.num_images):
self.laplacian_pyramid.append(
self.get_laplacian_pyramid(self.images[index].array,
self.height_pyr))
return self.laplacian_pyramid
def result_exposure(self, w_c=1, w_s=1, w_e=1):
"Return the Exposure Fusion image with Laplacian/Gaussian Fusion method"
print "weights"
self.get_weights_map(w_c, w_s, w_e)
print "gaussian pyramid"
self.get_gaussian_pyramid_weights()
print "laplacian pyramid"
self.get_laplacian_pyramid_images()
result_pyramid = []
for floor in range(self.height_pyr):
print 'floor ', floor
result_floor = np.zeros(self.laplacian_pyramid[0][floor].shape)
for index in range(self.num_images):
print 'image ', index
for canal in range(3):
result_floor[:, :,
canal] += self.laplacian_pyramid[index][floor][:, :,
canal] * self.weights_pyramid[index][floor]
result_pyramid.append(result_floor)
# Get the image from the Laplacian pyramid
self.result_image = result_pyramid[-1]
for floor in range(self.height_pyr - 2, -1, -1):
print 'floor ', floor
self.result_image = result_pyramid[floor] + utils.Expand(
self.result_image, 1)
self.result_image[self.result_image < 0] = 0
self.result_image[self.result_image > 1] = 1
return self.result_image
if __name__ == "__main__":
names = [line.rstrip('\n') for line in open('list_images.txt')]
lap = LaplacianMap('arno', names, n=6)
res = lap.result_exposure(1, 1, 1)
image.show(res)
misc.imsave("res/arno_3.jpg", res)
|
Rachine/ExposureFusion
|
laplacianfusion.py
|
Python
|
mit
| 4,883
|
[
"Gaussian"
] |
05dc594411b1cdabd2399638caac38a57de0d11d6b55029bbe4bf8efb5867083
|
import os
import datetime
import numpy as np
import xarray as xr
import requests
import logging
log=logging.getLogger('noaa_coops')
from ... import utils
from .common import periods
all_products=dict(
water_level="water_level",
air_temperature="air_temperature",
water_temperature="water_temperature",
wind="wind",
air_pressure="air_pressure",
air_gap="air_gap",
conductivity="conductivity",
visibility="visibility",
humidity="humidity",
salinity="salinity",
hourly_height="hourly_height",
high_low="high_low",
daily_mean="daily_mean",
monthly_mean="monthly_mean",
one_minute_water_level="one_minute_water_level",
predictions="predictions",
datums="datums",
currents="currents")
all_datums=dict(
CRD="Columbia River Datum",
MHHW="Mean Higher High Water",
MHW="Mean High Water",
MTL="Mean Tide Level",
MSL="Mean Sea Level",
MLW="Mean Low Water",
MLLW="Mean Lower Low Water",
NAVD="North American Vertical Datum",
STND="Station Datum")
def coops_json_to_ds(json,params):
""" Mold the JSON response from COOPS into a dataset
"""
ds=xr.Dataset()
if 'metadata' in json:
meta=json['metadata']
ds['station']=( ('station',), [meta['id']])
for k in ['name','lat','lon']:
val=meta[k]
if k in ['lat','lon']:
val=float(val)
ds[k]= ( ('station',), [val])
else:
# predictions do not come back with metadata
ds['station']= ('station',),[params['station']]
times=[]
values=[]
qualities=[]
if 'data' in json:
data=json['data']
elif 'predictions' in json:
# Why do they present predictions data in such a different format?
data=json['predictions']
for row in data:
# {'f': '0,0,0,0', 'q': 'v', 's': '0.012', 't': '2010-12-01 00:00', 'v': '0.283'}
try:
values.append(float(row['v']))
except ValueError:
values.append(np.nan)
times.append( np.datetime64(row['t']) )
# for now, ignore flags, verified status.
ds['time']=( ('time',),times)
ds[params['product']]=( ('station','time'), [values] )
bad_count=np.sum( np.isnan(values) )
if bad_count:
log.warning("%d of %d data values were missing"%(bad_count,len(values)))
if params['product'] in ['water_level','predictions']:
ds[params['product']].attrs['datum'] = params['datum']
return ds
def coops_dataset(station,start_date,end_date,products,
days_per_request=None,cache_dir=None):
"""
bare bones retrieval script for NOAA Tides and Currents data.
In particular, no error handling yet, doesn't batch requests, no caching,
can't support multiple parameters, no metadata, etc.
days_per_request: break up the request into chunks no larger than this many
days. for hourly data, this should be less than 365. for six minute, I think
the limit is 32 days.
"""
ds_per_product=[]
for product in products:
ds=coops_dataset_product(station=station,
product=product,
start_date=start_date,
end_date=end_date,
days_per_request=days_per_request,
cache_dir=cache_dir)
if ds is not None:
ds_per_product.append(ds)
ds_merged=xr.merge(ds_per_product,join='outer')
return ds_merged
def coops_dataset_product(station,product,
start_date,end_date,days_per_request='M',
cache_dir=None,refetch_incomplete=True,
interval=None,datum=None,
clip=True):
"""
Retrieve a single data product from a single station.
station: string or numeric identifier for COOPS station
product: string identifying the variable to retrieve, such as "water_level".
See all_products at the top of this file.
start_date,end_date: period to retrieve, as python datetime, matplotlib datenum,
or numpy datetime64.
days_per_request: batch the requests to fetch smaller chunks at a time.
if this is an integer, then chunks will start with start_date, then start_date+days_per_request,
etc.
if this is a string, it is interpreted as the frequency argument to pandas.PeriodIndex.
so 'M' will request month-aligned chunks. this has the advantage that requests for different
start dates will still be aligned to integer periods, and can reuse cached data.
cache_dir: if specified, save each chunk as a netcdf file in this directory,
with filenames that include the gage, period and products. The directory must already
exist.
returns an xarray dataset, or None if no data could be fetched
    refetch_incomplete: if True, and a dataset pulled from cache appears incomplete
    with respect to start_date and end_date, attempt to fetch it again. Note that
    "incomplete" here refers to realtime data which had not yet been recorded, so the
    test only compares end_date against the last time stamp of the retrieved data.
clip: if true, return only data within the requested window, even if more data was fetched.
"""
start_date=utils.to_dt64(start_date)
end_date=utils.to_dt64(end_date)
fmt_date=lambda d: utils.to_datetime(d).strftime("%Y%m%d %H:%M")
base_url="https://tidesandcurrents.noaa.gov/api/datagetter"
if datum is not None:
datums=[datum]
else:
# not supported by this script: bin
# Some predictions are only in MLLW
datums=['NAVD','MSL','MLLW']
datasets=[]
for interval_start,interval_end in periods(start_date,end_date,days_per_request):
if cache_dir is not None:
begin_str=utils.to_datetime(interval_start).strftime('%Y-%m-%d')
end_str =utils.to_datetime(interval_end).strftime('%Y-%m-%d')
cache_fn=os.path.join(cache_dir,
"%s_%s_%s_%s.nc"%(station,
product,
begin_str,
end_str))
else:
cache_fn=None
ds=None
if (cache_fn is not None) and os.path.exists(cache_fn):
log.info("Cached %s -- %s"%(interval_start,interval_end))
ds=xr.open_dataset(cache_fn)
if refetch_incomplete:
# This will fetch a bit more than absolutely necessary
# In the case that this file is up to date, but the sensor was down,
# we might be able to discern that if this was originally fetched
# after another request which found valid data from a later time.
if ds.time.values[-1]<min(utils.to_dt64(interval_end),
end_date):
log.warning(" but that was incomplete -- will re-fetch")
ds=None
if ds is None:
log.info("Fetching %s -- %s"%(interval_start,interval_end))
params=dict(begin_date=fmt_date(interval_start),
end_date=fmt_date(interval_end),
station=str(station),
time_zone='gmt', # always!
application='stompy',
units='metric',
format='json',
product=product)
if interval is not None:
# Some predictions require interval='hilo'
params['interval']=interval
if product in ['water_level','hourly_height',"one_minute_water_level","predictions"]:
while 1:
# not all stations have NAVD, so fall back to MSL
params['datum']=datums[0]
try:
req=requests.get(base_url,params=params)
except requests.ConnectionError:
log.warning("Unable to connect to tidesandcurrents.noaa.gov -- possibly on HPC node")
data=dict(error=dict(message="Internet access error"))
break
try:
data=req.json()
except ValueError: # thrown by json parsing
log.warning("Likely server error retrieving JSON data from tidesandcurrents.noaa.gov")
data=dict(error=dict(message="Likely server error"))
break
if (('error' in data)
and (("datum" in data['error']['message'].lower())
or (product=='predictions'))):
# Actual message like 'The supported Datum values are: MHHW, MHW, MTL, MSL, MLW, MLLW, LWI, HWI'
# Predictions sometimes silently fail, as if there is no data, but really just need
# to try MSL.
log.warning(data['error']['message'])
datums.pop(0) # move on to next datum
continue # assume it's because the datum is missing
break
else:
req=requests.get(base_url,params=params)
data=req.json()
if 'error' in data:
msg=data['error']['message']
if "No data was found" in msg:
# station does not have this data for this time.
log.warning("No data found for this period")
else:
# Regardless, if there was an error we got no data.
log.warning("Unknown error - got no data back.")
log.warning("URL was %s"%(req.url))
log.debug(data)
log.debug("URL was %s"%(req.url))
continue
ds=coops_json_to_ds(data,params)
if cache_fn is not None:
if os.path.exists(cache_fn):
# simply overwriting often does not work, so try removing first
os.unlink(cache_fn)
ds.to_netcdf(cache_fn)
if len(datasets)>0:
# avoid duplicates in case they overlap
ds=ds.isel(time=ds.time.values>datasets[-1].time.values[-1])
datasets.append(ds)
if len(datasets)==0:
# could try to construct zero-length dataset, but that sounds like a pain
# at the moment.
return None
if len(datasets)>1:
# data_vars='minimal' is needed to keep lat/lon from being expanded
# along the time axis.
dataset=xr.concat( datasets, dim='time',data_vars='minimal')
else:
dataset=datasets[0].copy(deep=True)
# better not to leave these lying around open
for d in datasets:
d.close()
if clip:
time_sel=(dataset.time.values>=start_date) & (dataset.time.values<end_date)
dataset=dataset.isel(time=time_sel)
dataset['time'].attrs['timezone']='UTC'
return dataset
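# --- Illustrative usage sketch (added; not part of the original module) ---
# Fetch one month of water levels for a hypothetical station; the station
# number and date range are assumptions for demonstration only, and running
# this performs live requests against tidesandcurrents.noaa.gov.
if __name__ == '__main__':
    ds = coops_dataset_product(station=9414290,
                               product='water_level',
                               start_date=datetime.datetime(2018, 1, 1),
                               end_date=datetime.datetime(2018, 2, 1),
                               days_per_request='M')
    if ds is not None:
        print(ds['water_level'].shape)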
|
rustychris/stompy
|
stompy/io/local/noaa_coops.py
|
Python
|
mit
| 11,330
|
[
"NetCDF"
] |
e8f929cef0ba04738270749b2b1d10389c9d057ef3e6e7c2570a57d4732c761b
|
#!/usr/bin/python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import sys
import os
import glob
import re
DriverPath = ''
InsertPath = '/../../../'
if (len(sys.argv) == 2):
DriverPath = sys.argv[1] + '/'
sys.path.insert(0, os.path.abspath(os.getcwd()))
def pts(category, pyfile):
print('Auto-documenting %s module %s' % (category, pyfile))
# Available psi variables in psi4/driver/qcdb/cfour.py
fdriver = open('source/autodir_psivariables/module__cfour.rst', 'w')
fdriver.write('\n\n')
psivars = []
for pyfile in glob.glob(DriverPath + '../../psi4/driver/qcdb/cfour.py'):
filename = os.path.split(pyfile)[1]
basename = os.path.splitext(filename)[0]
div = '=' * len(basename)
if basename not in []:
pts('psi variables', basename)
fdriver.write('.. _`apdx:%s_psivar`:\n\n' % (basename.lower()))
fdriver.write('\n%s\n%s\n\n' % (basename.upper(), '"' * len(basename)))
fdriver.write('.. hlist::\n :columns: 1\n\n')
f = open(pyfile)
contents = f.readlines()
f.close()
for line in contents:
mobj = re.search(r"""^\s*psivar\[\'(.*)\'\]\s*=""", line)
if mobj:
if mobj.group(1) not in psivars:
psivars.append(mobj.group(1))
for pv in sorted(psivars):
pvsquashed = pv.replace(' ', '')
fdriver.write(f' * :psivar:`{pv}`\n\n')
#fdriver.write(' * :psivar:`%s <%s>`\n\n' % (pv, pvsquashed))
fdriver.write('\n')
fdriver.close()
for line in open('source/autodoc_psivariables_bymodule.rst'):
if 'module__cfour' in line:
break
else:
fdriver = open('source/autodoc_psivariables_bymodule.rst', 'a')
fdriver.write(' autodir_psivariables/module__cfour\n\n')
fdriver.close()
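# --- Illustrative sketch (added; not part of the original script) ---
# A standalone check of the psivar-harvesting regex used above, applied to a
# hypothetical source line shaped like those in psi4/driver/qcdb/cfour.py.
if __name__ == '__main__':
    demo_line = "    psivar['CURRENT ENERGY'] = mobj.group(1)"
    demo_mobj = re.search(r"""^\s*psivar\[\'(.*)\'\]\s*=""", demo_line)
    print(demo_mobj.group(1) if demo_mobj else 'no match')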
|
susilehtola/psi4
|
doc/sphinxman/document_cfour.py
|
Python
|
lgpl-3.0
| 2,632
|
[
"CFOUR",
"Psi4"
] |
07c4fb85a15fc8cb43ea62b6aab88b353af162ae95511a72ebe37a72021838c9
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# read data
#
reader = vtk.vtkGenericEnSightReader()
# Make sure all algorithms use the composite data pipeline
cdp = vtk.vtkCompositeDataPipeline()
reader.SetDefaultExecutivePrototype(cdp)
del cdp
reader.SetCaseFileName("" + str(VTK_DATA_ROOT) + "/Data/EnSight/office6_bin.case")
reader.Update()
# to add coverage for vtkOnePieceExtentTranslator
translator = vtk.vtkOnePieceExtentTranslator()
sddp = vtk.vtkStreamingDemandDrivenPipeline()
Outinfo = reader.GetOutputInformation(0)
sddp.SetExtentTranslator(Outinfo,translator)
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputConnection(reader.GetOutputPort())
mapOutline = vtk.vtkHierarchicalPolyDataMapper()
mapOutline.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(mapOutline)
outlineActor.GetProperty().SetColor(0,0,0)
# Create source for streamtubes
streamer = vtk.vtkStreamPoints()
streamer.SetInputConnection(reader.GetOutputPort())
streamer.SetStartPosition(0.1,2.1,0.5)
streamer.SetMaximumPropagationTime(500)
streamer.SetTimeIncrement(0.5)
streamer.SetIntegrationDirectionToForward()
cone = vtk.vtkConeSource()
cone.SetResolution(8)
cones = vtk.vtkGlyph3D()
cones.SetInputConnection(streamer.GetOutputPort())
cones.SetSourceConnection(cone.GetOutputPort())
cones.SetScaleFactor(0.9)
cones.SetScaleModeToScaleByVector()
mapCones = vtk.vtkHierarchicalPolyDataMapper()
mapCones.SetInputConnection(cones.GetOutputPort())
mapCones.SetScalarRange(reader.GetOutput().GetBlock(0).GetScalarRange())
conesActor = vtk.vtkActor()
conesActor.SetMapper(mapCones)
ren1.AddActor(outlineActor)
ren1.AddActor(conesActor)
ren1.SetBackground(0.4,0.4,0.5)
renWin.SetSize(300,300)
iren.Initialize()
# interact with data
reader.SetDefaultExecutivePrototype(None)
# --- end of script --
|
collects/VTK
|
IO/EnSight/Testing/Python/EnSight6OfficeBin.py
|
Python
|
bsd-3-clause
| 2,072
|
[
"VTK"
] |
81a0d2ce603bc3006bdb439847e1c094011f2f69910c5dfdb4941a4985c3dc21
|
import unittest, time, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o2 as h2o
import h2o_cmd, h2o_import as h2i, h2o_jobs, h2o_glm
from h2o_test import verboseprint, dump_json, OutputObj
from tabulate import tabulate
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM_basic_1(self):
importFolderPath = "logreg"
csvFilename = "benign.csv"
hex_key = "benign.hex"
csvPathname = importFolderPath + "/" + csvFilename
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=hex_key, check_header=1,
timeoutSecs=180, doSummary=False)
pA = h2o_cmd.ParseObj(parseResult)
iA = h2o_cmd.InspectObj(pA.parse_key)
parse_key = pA.parse_key
numRows = iA.numRows
numCols = iA.numCols
labelList = iA.labelList
expected = []
allowedDelta = 0
# loop, to see if we get same centers
labelListUsed = list(labelList)
labelListUsed.remove('STR')
labelListUsed.remove('FNDX') # response removed also
numColsUsed = numCols - 2
for trial in range(1):
# family [u'gaussian', u'binomial', u'poisson', u'gamma', u'tweedie']
# link [u'family_default', u'identity', u'logit', u'log', u'inverse', u'tweedie']
# can we do classification with probabilities?
# are only lambda and alpha grid searchable?
parameters = {
'validation_frame': parse_key,
'ignored_columns': '[STR]',
'response_column': 'FNDX',
# FIX! when is this needed? redundant for binomial?
'balance_classes': False,
'max_after_balance_size': None,
'standardize': False,
'family': 'binomial',
'link': None,
'tweedie_variance_power': None,
'tweedie_link_power': None,
'alpha': '[1e-4]',
'lambda': '[0.5]',
'prior1': None,
'lambda_search': None,
'nlambdas': None,
'lambda_min_ratio': None,
'use_all_factor_levels': False,
# NPE with n_folds 2?
'n_folds': 1,
}
model_key = 'benign_glm.hex'
bmResult = h2o.n0.build_model(
algo='glm',
destination_key=model_key,
training_frame=parse_key,
parameters=parameters,
timeoutSecs=10)
bm = OutputObj(bmResult, 'bm')
modelResult = h2o.n0.models(key=model_key)
model = OutputObj(modelResult['models'][0]['output'], 'model')
h2o_glm.simpleCheckGLM(self, model, parameters, labelList, labelListUsed)
cmmResult = h2o.n0.compute_model_metrics(model=model_key, frame=parse_key, timeoutSecs=60)
cmm = OutputObj(cmmResult, 'cmm')
mcms = OutputObj({'data': cmm.max_criteria_and_metric_scores.data}, 'mcms')
m1 = mcms.data[1:]
h0 = mcms.data[0]
print "\nmcms", tabulate(m1, headers=h0)
thms = OutputObj(cmm.thresholds_and_metric_scores, 'thms')
cmms = OutputObj({'cm': cmm.confusion_matrices}, 'cmms')
if 1==0:
print ""
for i,c in enumerate(cmms.cm):
print "\ncmms.cm[%s]" % i, tabulate(c)
print ""
mmResult = h2o.n0.model_metrics(model=model_key, frame=parse_key, timeoutSecs=60)
mm = OutputObj(mmResult['model_metrics'][0], 'mm')
prResult = h2o.n0.predict(model=model_key, frame=parse_key, timeoutSecs=60)
pr = OutputObj(prResult['model_metrics'][0]['predictions'], 'pr')
# h2o_cmd.runStoreView()
if __name__ == '__main__':
h2o.unit_main()
|
bikash/h2o-dev
|
py2/testdir_single_jvm/test_GLM_basic_1.py
|
Python
|
apache-2.0
| 4,180
|
[
"Gaussian"
] |
4a80d6d6e8dc3a0dd2de4030a1a5cecbd6d5f2076d0f3535882dd445ebaf1228
|
import logging
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.http import HttpResponseRedirect
from core.utils import get_context
from .utils import BLASTNew
from .forms import BLASTNewForm
log = logging.getLogger(__name__)
@login_required
def index(request):
form = BLASTNewForm()
context = get_context(request)
context["form"] = form
return render(request, 'blast_new/index.html', context=context)
@login_required
def results(request):
context = get_context(request)
if request.method == 'POST':
log.debug(request.POST)
form = BLASTNewForm(request.POST)
if form.is_valid():
cleaned_data = form.cleaned_data
blast = BLASTNew(
blast_type='new',
name=cleaned_data['name'],
sequence=cleaned_data['sequence'],
gene_codes=cleaned_data['gene_codes'],
)
blast.save_seqs_to_file()
if not blast.is_blast_db_up_to_date():
blast.create_blast_db()
blast.save_query_to_file()
blast.do_blast()
result = blast.parse_blast_output()
if not result:
result = None
blast.delete_query_output_files()
context["result"] = result
return render(request, 'blast_new/results.html', context)
else:
context["form"] = form
return render(request, 'blast_new/index.html', context)
return HttpResponseRedirect('/blast_new/')
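# --- Illustrative note (added; not part of the original views) ---
# Happy-path flow through BLASTNew as exercised by results() above:
# save_seqs_to_file -> create_blast_db (only when the local DB is stale) ->
# save_query_to_file -> do_blast -> parse_blast_output ->
# delete_query_output_files.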
|
carlosp420/VoSeq
|
blast_new/views.py
|
Python
|
bsd-3-clause
| 1,590
|
[
"BLAST"
] |
6868e3b58d08cec3b69841210dcc4d723377271a89ef3f0e187bdf611551d33c
|
from distutils.core import setup
setup(
name='ss2csv',
version='0.0.1',
author='Brian Downs',
author_email='brian.downs@gmail.com',
maintainer='Brian Downs',
maintainer_email='brian.downs@gmail.com',
scripts=['ss2csv.py'],
packages=['ss2csv'],
package_data={
'': ['*.txt'],
},
url='https://github.com/briandowns/ss2csv',
license='Apache',
description='Quickly save spreadsheet workbooks to csv files.',
long_description=open('README.md').read(),
)
|
briandowns/ss2csv
|
setup.py
|
Python
|
apache-2.0
| 513
|
[
"Brian"
] |
6d5ef635e04afced635a95b88ffa7c2742fe1691bc7eea681958e7a80a6ecda8
|
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
if __name__ == '__main__':
import nose
nose.runmodule()
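# --- Illustrative sketch (added; not part of the original test module) ---
# Minimal BallTree usage mirroring what these tests exercise: build a tree
# and query the 3 nearest neighbours of each training point.
def _demo_ball_tree_query():
    X = np.random.RandomState(0).random_sample((20, 3))
    bt = BallTree(X, leaf_size=2)
    dist, ind = bt.query(X, k=3)
    # each point is its own nearest neighbour, at distance zero
    assert_array_almost_equal(dist[:, 0], np.zeros(20))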
|
theoryno3/scikit-learn
|
sklearn/neighbors/tests/test_ball_tree.py
|
Python
|
bsd-3-clause
| 10,258
|
[
"Gaussian"
] |
f0573e79ab0489180937032f5ebdc21739f228a07e7f0c8f67a60cf39e88e23e
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Generate DFT grids and weights, based on the code provided by Gerald Knizia
Reference for Lebedev-Laikov grid:
V. I. Lebedev, and D. N. Laikov "A quadrature formula for the sphere of the
131st algebraic order of accuracy", Doklady Mathematics, 59, 477-481 (1999)
'''
import ctypes
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.dft import radi
from pyscf import gto
from pyscf.gto.eval_gto import BLKSIZE
from pyscf import __config__
libdft = lib.load_library('libdft')
# ~= (L+1)**2/3
LEBEDEV_ORDER = {
0 : 1 ,
3 : 6 ,
5 : 14 ,
7 : 26 ,
9 : 38 ,
11 : 50 ,
13 : 74 ,
15 : 86 ,
17 : 110 ,
19 : 146 ,
21 : 170 ,
23 : 194 ,
25 : 230 ,
27 : 266 ,
29 : 302 ,
31 : 350 ,
35 : 434 ,
41 : 590 ,
47 : 770 ,
53 : 974 ,
59 : 1202,
65 : 1454,
71 : 1730,
77 : 2030,
83 : 2354,
89 : 2702,
95 : 3074,
101: 3470,
107: 3890,
113: 4334,
119: 4802,
125: 5294,
131: 5810
}
LEBEDEV_NGRID = numpy.asarray((
1 , 6 , 14 , 26 , 38 , 50 , 74 , 86 , 110 , 146 ,
170 , 194 , 230 , 266 , 302 , 350 , 434 , 590 , 770 , 974 ,
1202, 1454, 1730, 2030, 2354, 2702, 3074, 3470, 3890, 4334,
4802, 5294, 5810))
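# (added note) Worked check of the "~= (L+1)**2/3" rule above: for order
# L = 29, (29 + 1)**2 / 3 = 300, close to the tabulated 302 points, and for
# L = 131, (131 + 1)**2 / 3 = 5808, close to 5810.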
# SG0
# S. Chien and P. Gill, J. Comput. Chem. 27 (2006) 730-739.
def sg1_prune(nuc, rads, n_ang, radii=radi.SG1RADII):
'''SG1, CPL, 209, 506
Args:
nuc : int
Nuclear charge.
rads : 1D array
Grid coordinates on radical axis.
n_ang : int
Max number of grids over angular part.
Kwargs:
radii : 1D array
radii (in Bohr) for atoms in periodic table
Returns:
A list has the same length as rads. The list element is the number of
grids over angular part for each radial grid.
'''
# In SG1 the ang grids for the five regions
# 6 38 86 194 86
leb_ngrid = numpy.array([6, 38, 86, 194, 86])
alphas = numpy.array((
(0.25 , 0.5, 1.0, 4.5),
(0.1667, 0.5, 0.9, 3.5),
(0.1 , 0.4, 0.8, 2.5)))
r_atom = radii[nuc] + 1e-200
if nuc <= 2: # H, He
place = ((rads/r_atom).reshape(-1,1) > alphas[0]).sum(axis=1)
elif nuc <= 10: # Li - Ne
place = ((rads/r_atom).reshape(-1,1) > alphas[1]).sum(axis=1)
else:
place = ((rads/r_atom).reshape(-1,1) > alphas[2]).sum(axis=1)
return leb_ngrid[place]
def nwchem_prune(nuc, rads, n_ang, radii=radi.BRAGG_RADII):
'''NWChem
Args:
nuc : int
Nuclear charge.
rads : 1D array
Grid coordinates on radical axis.
n_ang : int
Max number of grids over angular part.
Kwargs:
radii : 1D array
radii (in Bohr) for atoms in periodic table
Returns:
A list has the same length as rads. The list element is the number of
grids over angular part for each radial grid.
'''
alphas = numpy.array((
(0.25 , 0.5, 1.0, 4.5),
(0.1667, 0.5, 0.9, 3.5),
(0.1 , 0.4, 0.8, 2.5)))
leb_ngrid = LEBEDEV_NGRID[4:] # [38, 50, 74, 86, ...]
if n_ang < 50:
return numpy.repeat(n_ang, len(rads))
elif n_ang == 50:
leb_l = numpy.array([1, 2, 2, 2, 1])
else:
idx = numpy.where(leb_ngrid==n_ang)[0][0]
leb_l = numpy.array([1, 3, idx-1, idx, idx-1])
r_atom = radii[nuc] + 1e-200
if nuc <= 2: # H, He
place = ((rads/r_atom).reshape(-1,1) > alphas[0]).sum(axis=1)
elif nuc <= 10: # Li - Ne
place = ((rads/r_atom).reshape(-1,1) > alphas[1]).sum(axis=1)
else:
place = ((rads/r_atom).reshape(-1,1) > alphas[2]).sum(axis=1)
angs = leb_l[place]
angs = leb_ngrid[angs]
return angs
# Prune scheme JCP 102, 346 (1995); DOI:10.1063/1.469408
def treutler_prune(nuc, rads, n_ang, radii=None):
'''Treutler-Ahlrichs
Args:
nuc : int
Nuclear charge.
rads : 1D array
Grid coordinates on radical axis.
n_ang : int
Max number of grids over angular part.
Returns:
A list has the same length as rads. The list element is the number of
grids over angular part for each radial grid.
'''
nr = len(rads)
leb_ngrid = numpy.empty(nr, dtype=int)
leb_ngrid[:nr//3] = 14 # l=5
leb_ngrid[nr//3:nr//2] = 50 # l=11
leb_ngrid[nr//2:] = n_ang
return leb_ngrid
###########################################################
# Becke partitioning
# Stratmann, Scuseria, Frisch. CPL, 257, 213 (1996), eq.11
def stratmann(g):
'''Stratmann, Scuseria, Frisch. CPL, 257, 213 (1996); DOI:10.1016/0009-2614(96)00600-8'''
a = .64 # for eq. 14
g = numpy.asarray(g)
ma = g/a
ma2 = ma * ma
g1 = numpy.asarray((1/16.)*(ma*(35 + ma2*(-35 + ma2*(21 - 5 *ma2)))))
g1[g<=-a] = -1
g1[g>= a] = 1
return g1
def original_becke(g):
'''Becke, JCP 88, 2547 (1988); DOI:10.1063/1.454033'''
    # This function has been optimized in the C code VXCgen_grid
# g = (3 - g**2) * g * .5
# g = (3 - g**2) * g * .5
# g = (3 - g**2) * g * .5
# return g
pass
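# (added note) The commented-out body above is Becke's smoothing polynomial
# p(g) = (3*g - g**3) / 2 applied three times; the compiled VXCgen_grid
# routine is presumed to perform the equivalent iteration, which is why the
# Python body is left as a stub.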
def gen_atomic_grids(mol, atom_grid={}, radi_method=radi.gauss_chebyshev,
level=3, prune=nwchem_prune, **kwargs):
'''Generate number of radial grids and angular grids for the given molecule.
Returns:
A dict, with the atom symbol for the dict key. For each atom type,
the dict value has two items: one is the meshgrid coordinates wrt the
atom center; the second is the volume of that grid.
'''
if isinstance(atom_grid, (list, tuple)):
atom_grid = dict([(mol.atom_symbol(ia), atom_grid)
for ia in range(mol.natm)])
atom_grids_tab = {}
for ia in range(mol.natm):
symb = mol.atom_symbol(ia)
if symb not in atom_grids_tab:
chg = gto.charge(symb)
if symb in atom_grid:
n_rad, n_ang = atom_grid[symb]
if n_ang not in LEBEDEV_NGRID:
if n_ang in LEBEDEV_ORDER:
logger.warn(mol, 'n_ang %d for atom %d %s is not '
'the supported Lebedev angular grids. '
'Set n_ang to %d', n_ang, ia, symb,
LEBEDEV_ORDER[n_ang])
n_ang = LEBEDEV_ORDER[n_ang]
else:
raise ValueError('Unsupported angular grids %d' % n_ang)
else:
n_rad = _default_rad(chg, level)
n_ang = _default_ang(chg, level)
rad, dr = radi_method(n_rad, chg, ia, **kwargs)
rad_weight = 4*numpy.pi * rad**2 * dr
if callable(prune):
angs = prune(chg, rad, n_ang)
else:
angs = [n_ang] * n_rad
logger.debug(mol, 'atom %s rad-grids = %d, ang-grids = %s',
symb, n_rad, angs)
angs = numpy.array(angs)
coords = []
vol = []
for n in sorted(set(angs)):
grid = numpy.empty((n,4))
libdft.MakeAngularGrid(grid.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(n))
idx = numpy.where(angs==n)[0]
for i0, i1 in prange(0, len(idx), 12): # 12 radi-grids as a group
coords.append(numpy.einsum('i,jk->jik',rad[idx[i0:i1]],
grid[:,:3]).reshape(-1,3))
vol.append(numpy.einsum('i,j->ji', rad_weight[idx[i0:i1]],
grid[:,3]).ravel())
atom_grids_tab[symb] = (numpy.vstack(coords), numpy.hstack(vol))
return atom_grids_tab
def get_partition(mol, atom_grids_tab,
radii_adjust=None, atomic_radii=radi.BRAGG_RADII,
becke_scheme=original_becke, concat=True):
'''Generate the mesh grid coordinates and weights for DFT numerical integration.
We can change radii_adjust, becke_scheme functions to generate different meshgrid.
Kwargs:
concat: bool
Whether to concatenate grids and weights in return
Returns:
grid_coord and grid_weight arrays. grid_coord array has shape (N,3);
weight 1D array has N elements.
'''
if callable(radii_adjust) and atomic_radii is not None:
f_radii_adjust = radii_adjust(mol, atomic_radii)
else:
f_radii_adjust = None
atm_coords = numpy.asarray(mol.atom_coords() , order='C')
atm_dist = gto.inter_distance(mol)
if (becke_scheme is original_becke and
(radii_adjust is radi.treutler_atomic_radii_adjust or
radii_adjust is radi.becke_atomic_radii_adjust or
f_radii_adjust is None)):
if f_radii_adjust is None:
p_radii_table = lib.c_null_ptr()
else:
f_radii_table = numpy.asarray([f_radii_adjust(i, j, 0)
for i in range(mol.natm)
for j in range(mol.natm)])
p_radii_table = f_radii_table.ctypes.data_as(ctypes.c_void_p)
def gen_grid_partition(coords):
coords = numpy.asarray(coords, order='F')
ngrids = coords.shape[0]
pbecke = numpy.empty((mol.natm,ngrids))
libdft.VXCgen_grid(pbecke.ctypes.data_as(ctypes.c_void_p),
coords.ctypes.data_as(ctypes.c_void_p),
atm_coords.ctypes.data_as(ctypes.c_void_p),
p_radii_table,
ctypes.c_int(mol.natm), ctypes.c_int(ngrids))
return pbecke
else:
def gen_grid_partition(coords):
ngrids = coords.shape[0]
grid_dist = numpy.empty((mol.natm,ngrids))
for ia in range(mol.natm):
dc = coords - atm_coords[ia]
grid_dist[ia] = numpy.sqrt(numpy.einsum('ij,ij->i',dc,dc))
pbecke = numpy.ones((mol.natm,ngrids))
for i in range(mol.natm):
for j in range(i):
g = 1/atm_dist[i,j] * (grid_dist[i]-grid_dist[j])
if f_radii_adjust is not None:
g = f_radii_adjust(i, j, g)
g = becke_scheme(g)
pbecke[i] *= .5 * (1-g)
pbecke[j] *= .5 * (1+g)
return pbecke
coords_all = []
weights_all = []
for ia in range(mol.natm):
coords, vol = atom_grids_tab[mol.atom_symbol(ia)]
coords = coords + atm_coords[ia]
pbecke = gen_grid_partition(coords)
weights = vol * pbecke[ia] * (1./pbecke.sum(axis=0))
coords_all.append(coords)
weights_all.append(weights)
if concat:
coords_all = numpy.vstack(coords_all)
weights_all = numpy.hstack(weights_all)
return coords_all, weights_all
gen_partition = get_partition
def make_mask(mol, coords, relativity=0, shls_slice=None, verbose=None):
'''Mask to indicate whether a shell is zero on grid
Args:
mol : an instance of :class:`Mole`
coords : 2D array, shape (N,3)
The coordinates of grids.
Kwargs:
relativity : bool
No effects.
shls_slice : 2-element list
(shl_start, shl_end).
If given, only part of AOs (shl_start <= shell_id < shl_end) are
evaluated. By default, all shells defined in mol will be evaluated.
verbose : int or object of :class:`Logger`
No effects.
Returns:
2D mask array of shape (N,nbas), where N is the number of grids, nbas
is the number of shells.
'''
coords = numpy.asarray(coords, order='F')
ngrids = len(coords)
if shls_slice is None:
shls_slice = (0, mol.nbas)
nbas = shls_slice[1] - shls_slice[0]
non0tab = numpy.empty(((ngrids+BLKSIZE-1)//BLKSIZE, nbas),
dtype=numpy.uint8)
libdft.VXCnr_ao_screen(non0tab.ctypes.data_as(ctypes.c_void_p),
coords.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(ngrids),
mol._atm.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(mol.natm),
mol._bas[shls_slice[0]:].ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(nbas),
mol._env.ctypes.data_as(ctypes.c_void_p))
return non0tab
class Grids(lib.StreamObject):
'''DFT mesh grids
Attributes for Grids:
level : int
To control the number of radial and angular grids. Large number
leads to large mesh grids. The default level 3 corresponds to
(50,302) for H, He;
(75,302) for second row;
(80~105,434) for rest.
Grids settings at other levels can be found in
pyscf.dft.gen_grid.RAD_GRIDS and pyscf.dft.gen_grid.ANG_ORDER
atomic_radii : 1D array
| radi.BRAGG_RADII (default)
| radi.COVALENT_RADII
| None : to switch off atomic radii adjustment
radii_adjust : function(mol, atomic_radii) => (function(atom_id, atom_id, g) => array_like_g)
Function to adjust atomic radii, can be one of
| radi.treutler_atomic_radii_adjust
| radi.becke_atomic_radii_adjust
| None : to switch off atomic radii adjustment
radi_method : function(n) => (rad_grids, rad_weights)
scheme for radial grids, can be one of
| radi.treutler (default)
| radi.delley
| radi.mura_knowles
| radi.gauss_chebyshev
becke_scheme : function(v) => array_like_v
weight partition function, can be one of
| gen_grid.original_becke (default)
| gen_grid.stratmann
prune : function(nuc, rad_grids, n_ang) => list_n_ang_for_each_rad_grid
scheme to reduce number of grids, can be one of
| gen_grid.nwchem_prune (default)
| gen_grid.sg1_prune
| gen_grid.treutler_prune
| None : to switch off grid pruning
symmetry : bool
whether to symmetrize mesh grids (TODO)
atom_grid : dict
Set (radial, angular) grids for particular atoms.
Eg, grids.atom_grid = {'H': (20,110)} will generate 20 radial
grids and 110 angular grids for H atom.
Examples:
>>> mol = gto.M(atom='H 0 0 0; H 0 0 1.1')
>>> grids = dft.gen_grid.Grids(mol)
>>> grids.level = 4
>>> grids.build()
'''
def __init__(self, mol):
import sys
self.mol = mol
self.stdout = mol.stdout
self.verbose = mol.verbose
self.symmetry = mol.symmetry
self.atom_grid = {}
self.non0tab = None
cur_mod = sys.modules[__name__]
def _load_conf(mod, name, default):
var = getattr(__config__, name, None)
if var is None:
return default
elif callable(var):
return var
elif mod is None:
                return getattr(cur_mod, var)
else:
return getattr(mod, var)
self.atomic_radii = _load_conf(radi, 'dft_gen_grid_Grids_atomic_radii',
radi.BRAGG_RADII)
#self.atomic_radii = radi.COVALENT_RADII
self.radii_adjust = _load_conf(radi, 'dft_gen_grid_Grids_radii_adjust',
radi.treutler_atomic_radii_adjust)
#self.radii_adjust = radi.becke_atomic_radii_adjust
#self.radii_adjust = None # to switch off atomic radii adjustment
self.radi_method = _load_conf(radi, 'dft_gen_grid_Grids_radi_method',
radi.treutler)
#self.radi_method = radi.gauss_chebyshev
#self.radi_method = radi.mura_knowles
#self.radi_method = radi.delley
self.becke_scheme = _load_conf(None, 'dft_gen_grid_Grids_becke_scheme',
original_becke)
#self.becke_scheme = stratmann
self.prune = _load_conf(None, 'dft_gen_grid_Grids_prune', nwchem_prune)
self.level = getattr(__config__, 'dft_gen_grid_Grids_level', 3)
##################################################
# don't modify the following attributes, they are not input options
self.coords = None
self.weights = None
self._keys = set(self.__dict__.keys())
@property
def size(self):
return getattr(self.weights, 'size', 0)
def __setattr__(self, key, val):
if key in ('atom_grid', 'atomic_radii', 'radii_adjust', 'radi_method',
'becke_scheme', 'prune', 'level'):
self.reset()
super(Grids, self).__setattr__(key, val)
def dump_flags(self, verbose=None):
logger.info(self, 'radial grids: %s', self.radi_method.__doc__)
logger.info(self, 'becke partition: %s', self.becke_scheme.__doc__)
logger.info(self, 'pruning grids: %s', self.prune)
logger.info(self, 'grids dens level: %d', self.level)
logger.info(self, 'symmetrized grids: %s', self.symmetry)
if self.radii_adjust is not None:
logger.info(self, 'atomic radii adjust function: %s',
self.radii_adjust)
logger.debug2(self, 'atomic_radii : %s', self.atomic_radii)
if self.atom_grid:
logger.info(self, 'User specified grid scheme %s', str(self.atom_grid))
return self
def build(self, mol=None, with_non0tab=False, **kwargs):
if mol is None: mol = self.mol
if self.verbose >= logger.WARN:
self.check_sanity()
atom_grids_tab = self.gen_atomic_grids(mol, self.atom_grid,
self.radi_method,
self.level, self.prune, **kwargs)
self.coords, self.weights = \
self.get_partition(mol, atom_grids_tab,
self.radii_adjust, self.atomic_radii,
self.becke_scheme)
if with_non0tab:
self.non0tab = self.make_mask(mol, self.coords)
else:
self.non0tab = None
logger.info(self, 'tot grids = %d', len(self.weights))
return self
def kernel(self, mol=None, with_non0tab=False):
self.dump_flags()
return self.build(mol, with_non0tab)
def reset(self, mol=None):
'''Reset mol and clean up relevant attributes for scanner mode'''
if mol is not None:
self.mol = mol
self.coords = None
self.weights = None
self.non0tab = None
return self
@lib.with_doc(gen_atomic_grids.__doc__)
def gen_atomic_grids(self, mol, atom_grid=None, radi_method=None,
level=None, prune=None, **kwargs):
if atom_grid is None: atom_grid = self.atom_grid
if radi_method is None: radi_method = self.radi_method
if level is None: level = self.level
if prune is None: prune = self.prune
return gen_atomic_grids(mol, atom_grid, radi_method, level, prune, **kwargs)
@lib.with_doc(get_partition.__doc__)
def get_partition(self, mol, atom_grids_tab=None,
radii_adjust=None, atomic_radii=radi.BRAGG_RADII,
becke_scheme=original_becke, concat=True):
if atom_grids_tab is None:
atom_grids_tab = self.gen_atomic_grids(mol)
return get_partition(mol, atom_grids_tab, radii_adjust, atomic_radii,
becke_scheme, concat=concat)
gen_partition = get_partition
@lib.with_doc(make_mask.__doc__)
def make_mask(self, mol=None, coords=None, relativity=0, shls_slice=None,
verbose=None):
if mol is None: mol = self.mol
if coords is None: coords = self.coords
return make_mask(mol, coords, relativity, shls_slice, verbose)
def _default_rad(nuc, level=3):
'''Number of radial grids '''
tab = numpy.array( (2 , 10, 18, 36, 54, 86, 118))
period = (nuc > tab).sum()
return RAD_GRIDS[level,period]
# Period 1 2 3 4 5 6 7 # level
RAD_GRIDS = numpy.array((( 10, 15, 20, 30, 35, 40, 50), # 0
( 30, 40, 50, 60, 65, 70, 75), # 1
( 40, 60, 65, 75, 80, 85, 90), # 2
( 50, 75, 80, 90, 95,100,105), # 3
( 60, 90, 95,105,110,115,120), # 4
( 70,105,110,120,125,130,135), # 5
( 80,120,125,135,140,145,150), # 6
( 90,135,140,150,155,160,165), # 7
(100,150,155,165,170,175,180), # 8
(200,200,200,200,200,200,200),)) # 9
def _default_ang(nuc, level=3):
'''Order of angular grids. See LEBEDEV_ORDER for the mapping of
the order and the number of angular grids'''
tab = numpy.array( (2 , 10, 18, 36, 54, 86, 118))
period = (nuc > tab).sum()
return LEBEDEV_ORDER[ANG_ORDER[level,period]]
# Period 1 2 3 4 5 6 7 # level
ANG_ORDER = numpy.array(((11, 15, 17, 17, 17, 17, 17 ), # 0
(17, 23, 23, 23, 23, 23, 23 ), # 1
(23, 29, 29, 29, 29, 29, 29 ), # 2
(29, 29, 35, 35, 35, 35, 35 ), # 3
(35, 41, 41, 41, 41, 41, 41 ), # 4
(41, 47, 47, 47, 47, 47, 47 ), # 5
(47, 53, 53, 53, 53, 53, 53 ), # 6
(53, 59, 59, 59, 59, 59, 59 ), # 7
(59, 59, 59, 59, 59, 59, 59 ), # 8
(65, 65, 65, 65, 65, 65, 65 ),)) # 9
def prange(start, end, step):
for i in range(start, end, step):
yield i, min(i+step, end)
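# Illustrative note (not part of the original module): prange yields half-open
# index blocks, e.g. list(prange(0, 10, 4)) == [(0, 4), (4, 8), (8, 10)].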
if __name__ == '__main__':
h2o = gto.Mole()
h2o.verbose = 0
h2o.output = None#"out_h2o"
h2o.atom = [
['O' , (0. , 0. , 0.)],
['H' , (0. , -0.757 , 0.587)],
['H' , (0. , 0.757 , 0.587)] ]
h2o.build()
g = Grids(h2o)
g.build()
print(g.coords.shape)
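# Hypothetical continuation of the demo above (not in the original file),
# using attributes documented in the Grids docstring:
# g.atom_grid = {'O': (75, 302)}   # per-element (radial, angular) override
# g.prune = None                   # switch off grid pruning
# g.build()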
|
sunqm/pyscf
|
pyscf/dft/gen_grid.py
|
Python
|
apache-2.0
| 23,326
|
[
"NWChem",
"PySCF"
] |
5ebe08aa099445f23040f371db4cd7b76603d5565f8037ad674645623c656232
|
"""
String multi-replace for all appropriate files in directory
Requires manual use
"""
import os, sys
directory = "../src/org/mhag/model/data/mh3g/"
pretend = False # If True, don't actually write
# IMPORTANT - removes all temporary files created by bakaneko tools,
# to prevent cross contamination
directory = "../src/org/mhag/model/data/mh3g/"
for file in os.listdir(directory):
ext = os.path.splitext(file)[1]
if ext == ".dat.new":
try:
os.remove(directory + file)
except OSError:
pass
# Knockout King omitted
# "search": "replace",
replacements = {
"Solid Def": "Def Lock",
"Anti Defense Down": "Iron Skin",
"Germ Sci": "Biology",
"Bio Doctor": "Bio Master",
"Sheathe": "Sheathing",
"Speed Sheathe": "Quick Sheathe",
"Blow": "Tenderizer",
"Stinger": "Weakness Exploit",
"Bash": "Destroyer",
"Destructor": "Partbreaker",
"Exhaust": "Stam Drain",
"Stamina Taker": "Stamina Thief",
"High Windproof": "Windproaf (Hi)",
"Windproof": "Windproof (Lo)",
"Windproaf (Hi)": "Windproof (Hi)",
"Tremor-Proof": "Tremor Resistance",
"StaminaRec": "Stam Recov",
"Stamina Rec Down": "Stam Recov Down",
"Stamina Rec Up": "Stam Recov Up",
"Evasion Up": "Evade Extender",
"Elemental Atk Up": "Element Atk +1",
"Elemental Atk Up": "Element Atk Down",
"Elemental": "Elementatk",
"NormalS Up": "Normal Up",
"NormalS/RapidBow Boost": "Normal/Rapid Up",
"PierceS Up": "Pierce Up",
"PierceS/PrceBow Boost": "Pierce/Pierce Up",
"PelletS Up": "Pellet Up",
"PelletS/ScattBow Boost": "Pellet/Spread Up",
"NormalS All Up": "Use Any Normal S",
"PierceS Lv1 Up": "Use Lv1 Pierce S",
"PierceS All Up": "Use Any Pierce S",
"PelletS Lv1 Up": "Use Lv1 Pellet S",
"PelletS All Up": "Use Any Pellet S",
"Crag S Lv1 Up": "Use Lv1 Crag S",
"Crag S All Up": "Use Any Crag S",
"Clust S Lv1 Up": "Use Lv1 Clust S",
"Clust S All Up": "Use Any Clust S",
"Slice S+": "Slicing S+",
"Slice S Up": "Use Slicing S",
"Blast S+": "Slime S+",
"Blast S Up": "Use Slime S",
"Poison C Up": "Use Poison Coat",
"Para C Up": "Use Para Coat",
"Sleep C Up": "Use Sleep Coat",
"Power C Up": "Use Power Coat",
"CloseR C+": "C.Range C+",
"CloseR C Up": "Use C.Range Coat",
"Exhaust C Up": "Use Exhaust Coat",
"Blast C+": "Slime C+",
"Blast C Up": "Use Slime Coat",
"Flute Expert": "Horn Maestro",
"Flute": "Maestro",
"Gunnery Master": "Artillery Novice",
"Gunnery King": "Artillery Expert",
"Gunnery": "Artillery",
"Meat Lover": "Carnivore",
"Mat Lover": "Meat Lover",
"Charm": "Charmer",
"Charmer Hunter": "Charm Chaser",
"Hunter": "Ranger",
"Ranger Life": "Outdoorsman",
"Flaming Aura": "Flame Aura",
"Oxygen Down": "Short Breath",
"Oxygen Unlimited": "Endless Oxygen",
"CurrentRes (Lo)": "Current Res (Lo)",
"CurrentRes (Hi)": "Current Res (Hi)",
"Water God": "Sea Legs",
"Water Dog": "Ocean's Blessing",
"Earnest": "Gloves Off",
"Power Release": "Latent Power",
"Fighter": "Spirit",
"Unhurt": "Unscathed",
"Full Charge": "Peak Performance",
"Blightproof": "Blights Negated",
"Self-Defense": "Wellness",
"Emboldened": "Wrath Awoken",
"Stellar Ranger": "Honed Blade",
"Archer": "SteadyHand",
"Steel Shots": "Silver Bullet",
"Firm": "Unshakable",
"Strong Body": "Rock Steady",
}
for file in os.listdir(directory):
ext = os.path.splitext(file)[1]
if ext == ".dat":
# Read .dat file line by line
with open(directory + file) as f:
file_data = f.readlines()
# Iterate through lines
for line in file_data:
# Replace text within line
for search in replacements:
# Separate name from armor dat.s
if file == "armor.dat" or file == "armor_item.dat":
name = line.split(":")[0]
line = line.replace(name, "")
line = line.replace(search, replacements[search])
# Reapply name from armor dat.s
if file == "armor.dat" or file == "armor_item.dat":
line = name + line
# Write replaced line to temporary file
with open(directory + file + ".new", "a") as f:
f.write(line)
# Replace file with temporary file
if not pretend:
os.remove(directory + file)
os.rename(directory + file + ".new", directory + file)
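# A minimal order-safe alternative (a sketch, not part of the original tool):
# applying the longest search keys first avoids the two-pass "Windproaf"
# workaround and does not depend on dict ordering:
# for search in sorted(replacements, key=len, reverse=True):
#     line = line.replace(search, replacements[search])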
|
PrincessTeruko/MHAGr
|
tools_(bakaneko)/dir_replace.py
|
Python
|
gpl-3.0
| 4,136
|
[
"BLAST"
] |
00a473981eba7c502d3917556a61b5f2708ab4c4ed8d14809289ed71abcc5a51
|
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
"""
cry2cif\n\n
Read the last geometry corresponding to the CRYSTALLOGRAPHIC CELL on a
CRYSTAL09 output file and print it in a cif format. If geometry
optimization did not converge, input geometry is printed instead.
"""
# TODO:
# * returns coordinates instead of write a file
# * make functions for various formats
__author__ = "Germain Vallverdu"
__email__ = "germain.vallverdu@univ-pau.fr"
__licence__ = "GPL"
import os
import datetime
import argparse
import re
from math import pi, cos, sin, sqrt
def usage(code):
""" cry2cif usage """
print(__doc__)
exit(code)
def get_options():
""" get options from command lines """
parser = argparse.ArgumentParser(prog="cry2cif", description=__doc__)
# mandatory argument is CRYSTAL output filename
parser.add_argument("filename",
help="CRYSTAL output file",
metavar="FILENAME",
type=str)
# choose either cif or POSCAR format
parser.add_argument("-t", "--to",
help="output format: either cif or VASP (POSCAR)",
metavar="format",
default="cif",
choices=("cif", "POSCAR"),
type=str)
# center slab or nanotubes
parser.add_argument("-i", "--center",
help="move the slab or nanotubes in the center of the box",
action="store_true",
dest="center",
default=False)
# sort atom along z or x for slab or nanotubes
parser.add_argument("-z", "--sortz",
help="Sort atoms along z axis (for slabs)",
dest="sortz",
action="store_true",
default=False)
parser.add_argument("-x", "--sortx",
help="Sort atoms along x axis (for nanotubes)",
dest="sortx",
action="store_true",
default=False)
# in the case of slabs or nanotubes, you have to give a value for b or c
parser.add_argument("-b",
help="lattice parameter b",
metavar="b",
default=50,
type=float)
parser.add_argument("-c",
help="lattice parameter c",
metavar="c",
default=50,
type=float)
return parser.parse_args()
def cry2cif(filename, to="cif", center=False, sortx=False, sortz=False,
b=50, c=50):
"""
Read a CRYSTAL output file and return the structure in a cif or POSCAR format.
Args:
filename (str): crystal output filename
to (str): 'cif' or 'POSCAR', format of the output file (default is cif)
center (bool): if True, the slab or nanotube is translated to the center of
the box (default is False)
sortx (bool): Nanotube : if True, atoms are sorted along x axes (default is False).
sortz (bool): slab : if True, atoms are sorted along z axes (default is False).
b (float): dummy lattice parameter b in angstrom for nanotubes (default 50 A)
c (float): dummy lattice parameter c in angstrom for nanotubes and slabs (default 50 A)
"""
b_dum = b
c_dum = c
# ----------------------------------------------------------
# read the output file
# ----------------------------------------------------------
slab = False
nanotube = False
locGroupe = "SPACE GROUP"
primitive = True
locMaille_patt = re.compile(r"^ PRIMITIVE CELL")
with open(filename, "r") as f:
# read general data
line = f.readline()
end = True
while line != "":
line = f.readline()
if "SLAB CALCULATION" in line:
slab = True
locGroupe = "PLANE GROUP"
elif "SLABCUT" in line:
slab = True
elif "EEEEEEEEEE STARTING" in line:
phasename = f.readline().strip()
print("title : {0}".format(phasename))
elif locGroupe in line:
group = line.split(":")[1].strip()
print("group : {0}".format(group))
elif "TRANSFORMATION MATRIX PRIMITIVE-CRYSTALLOGRAPHIC CELL" in line:
primitive = False
# a crystallographic cell block follows the transformation matrix
locMaille_patt = re.compile(r"^ CRYSTALLOGRAPHIC CELL")
elif "FINAL OPTIMIZED GEOMETRY" in line:
end = False
break
elif "SLAB GENERATED" in line:
slab = True
group = ""
print("\nSLAB GENERATED")
elif "CONSTRUCTION OF A NANOTUBE FROM A SLAB" in line:
nanotube = True
slab = False
group = ""
print("\nNANOTUBE FROM SLAB")
if end:
print("\n!! WARNING !!")
print("Optimisation did not converge, final optimized geometry not found.")
print("I will try to export the last geometry instead.\n")
f.seek(0)
line = f.readline()
while " GEOMETRY FOR WAVE FUNCTION " not in line:
line = f.readline()
n_geom = 0
while line != "":
# read geometry located at locMaille
line = f.readline()
while not locMaille_patt.match(line) and line != "":
line = f.readline()
if line == "":
# end of file ?
break
n_geom += 1
if not slab and not nanotube:
volume = float(line.split("=")[1].split()[0].strip(")"))
f.readline()
# lattice parameters
a, b, c, alpha, beta, gamma = [float(val) for val in f.readline().split()]
if slab:
c = c_dum
if nanotube:
b = b_dum
c = c_dum
# if slab:
# c = float(input("\nSLAB : c value ? : "))
#
# if nanotube:
# b = float(input("\nNANOTUBE : b value ? : "))
# c = float(input("\nNANOTUBE : c value ? : "))
for i in range(4):
f.readline()
# read coordinates
nom = list() # atom names
red = list() # reduce coordinates
Z = list() # atomic number
uniq = list() # True if atom belong to the asymmetric unit
radius = list() # distance from the axes of the nanotube
line = f.readline()
while line != "\n":
if not re.match(r"^\s+\d+\s[TF]\s+\d+\s\S+\s+", line):
# avoid MPI messages
line = f.readline()
continue
if nanotube:
i, p, Zi, nomi, xi, yi, zi, ri = line.split()
else:
i, p, Zi, nomi, xi, yi, zi = line.split()
xi = float(xi)
yi = float(yi)
zi = float(zi)
Z.append(int(Zi))
if nanotube:
radius.append(float(ri))
if p == "F":
uniq.append(False)
else:
uniq.append(True)
if len(nomi) == 2:
nom.append(nomi[0] + nomi[1].lower())
else:
nom.append(nomi)
if slab:
if zi > c / 2.:
print("ERROR zi > c / 2")
print("zi = ", zi)
print("c = ", c / 2)
exit(1)
red.append([xi, yi, zi / c])
elif nanotube:
if zi > c / 2.:
print("ERROR zi > c / 2")
print("zi = ", zi)
print("c = ", c / 2)
exit(1)
if yi > b / 2.:
print("ERROR yi > b / 2")
print("yi = ", yi)
print("b = ", b / 2)
exit(1)
red.append([xi, yi / b, zi / c])
else:
red.append([xi, yi, zi])
line = f.readline()
if end:
print("Successfully read %d geometries\n" % n_geom)
print("cell : last geometry")
else:
if primitive:
print("cell : primitive")
else:
print("cell : crystallographic")
print("a : {0}".format(a))
print("b : {0}".format(b))
print("c : {0}".format(c))
print("alpha : {0}".format(alpha))
print("beta : {0}".format(beta))
print("gamma : {0}".format(gamma))
# ----------------------------------------------------------
# system composition
# ----------------------------------------------------------
composition = dict()
atomTypes = list()
for name in nom:
if name in composition:
composition[name] += 1
else:
composition[name] = 1
atomTypes.append(name)
compo = ""
for X, n in composition.items():
compo += "{0}_{1} ".format(X, n)
print("compos : {0}".format(compo.strip()))
print("nat : {0}".format(len(nom)))
# ----------------------------------------------------------
# move slab or nanotube to the center of the box
# ----------------------------------------------------------
if center:
if slab:
red = [[r[0], r[1], r[2] + .5] for r in red]
elif nanotube:
red = [[r[0] + .5, r[1] + .5, r[2] + .5] for r in red]
# ----------------------------------------------------------
# sort atom along x or z axis for slab
# ----------------------------------------------------------
if sortz:
isort = 2
elif sortx:
isort = 0
axes = {2: "z", 0: "x"}
if sortz or sortx:
    print("\nSort atoms along %s" % axes[isort])
    if not radius:
        # slabs carry no radius column; pad it so zip() keeps every atom
        radius = [0.0] * len(nom)
    data = zip(nom, uniq, radius, red)
dataSorted = sorted(data, key=lambda f: f[-1][isort], reverse=True)
red_final = [ired for iname, iuniq, iradius, ired in dataSorted]
uniq_final = [iuniq for iname, iuniq, iradius, ired in dataSorted]
name_final = [iname for iname, iuniq, iradius, ired in dataSorted]
radius_final = [iradius for iname, iuniq, iradius, ired in dataSorted]
else:
red_final = red
uniq_final = uniq
name_final = nom
radius_final = radius
if to == "cif":
# ----------------------------------------------------------
# write cif file
# ----------------------------------------------------------
outname = os.path.splitext(filename)[0] + ".cif"
lines = "#---------------------------------------------------------------------\n"
lines += "# Date : {0}\n".format(datetime.datetime.now().strftime("%A %d %B %Y, %H:%M:%S"))
lines += "# directory : {0}\n".format(os.getcwd())
lines += "# hostname : {0}\n".format(os.uname()[1])
lines += "#---------------------------------------------------------------------\n"
lines += "#\n"
lines += "# This file contains the last CRYSTALLOGRAPHIC CELL read on a CRYSTAL09\n"
lines += "# output file and may be readable by a visualization tool such as VESTA :\n"
lines += "# http://jp-minerals.org/vesta/en/\n"
lines += "#\n"
lines += "# Cell parameters\n"
lines += "_pd_phase_name '{0}'\n".format(phasename)
lines += "_cell_angle_alpha {0}\n".format(alpha)
lines += "_cell_angle_beta {0}\n".format(beta)
lines += "_cell_angle_gamma {0}\n".format(gamma)
lines += "_cell_length_a {0}\n".format(a)
lines += "_cell_length_b {0}\n".format(b)
lines += "_cell_length_c {0}\n".format(c)
if not slab and not nanotube:
lines += "_cell_volume {0}\n".format(volume)
lines += "_symmetry_space_group_name_H-M '{0}'\n".format(group)
lines += "_chemical_formula_sum '{0}'\n".format(compo)
lines += "\n"
lines += "loop_\n"
lines += "_atom_site_label\n"
lines += "_atom_site_fract_x\n"
lines += "_atom_site_fract_y\n"
lines += "_atom_site_fract_z\n"
lines += "_atom_site_occupancy\n"
for name, p, r in zip(name_final, uniq_final, red_final):
if p:
lines += "%4s" % name
lines += "%20.12f %20.12f %20.12f" % tuple(r)
lines += " 1.\n"
if not p and (slab or nanotube):
lines += "%4s" % name
lines += "%20.12f %20.12f %20.12f" % tuple(r)
lines += " 1.\n"
if nanotube:
lines += "\n"
lines += "# distance from the center of the nanotube (in A)\n"
lines += "loop_\n"
lines += "_atom_site_index\n"
lines += "_atom_site_label\n"
lines += "_atom_site_radius\n"
for i, (name, radius) in enumerate(zip(name_final, radius_final)):
lines += "%5d %4s %10.3f\n" % (i, name, radius)
elif to == "POSCAR":
outname = "POSCAR_" + os.path.splitext(filename)[0] + ".vasp"
lines = "Structure from %s\n" % filename
lines += " 1.0\n"
# compute lattice vectors
alphar = alpha * pi / 180.0
betar = beta * pi / 180.0
gammar = gamma * pi / 180.0
veca = [a, 0., 0.]
vecb = [b * cos(gammar), b * sin(gammar), 0.]
vecc = [0., 0., 0.]
vecc[0] = c * cos(betar)
cy = (cos(alphar) - cos(gammar) * cos(betar)) / sin(gammar)
vecc[1] = c * cy
cz = sqrt((sin(betar))**2 - cy**2)
vecc[2] = c * cz
lines += "%20.12f %20.12f %20.12f\n" % tuple(veca)
lines += "%20.12f %20.12f %20.12f\n" % tuple(vecb)
lines += "%20.12f %20.12f %20.12f\n" % tuple(vecc)
# atom names and atom number
lines += "".join(["%4s" % name for name in atomTypes]) + "\n"
lines += "".join(["%4d" % composition[name] for name in atomTypes]) + "\n"
# sort coordinates according to atom name
redSorted = list()
for name in atomTypes:
for iname, r in zip(name_final, red_final):
if iname == name:
redSorted.append(r)
radius_out = "# RAW DATA FROM FILE %s\n" % filename
radius_out += "# LATTICE PARAMETERS:\n"
radius_out += "# a = %20.12f\n" % a
radius_out += "# b = %20.12f\n" % b
radius_out += "# c = %20.12f\n" % c
radius_out += "# alpha = %20.12f\n" % alpha
radius_out += "# beta = %20.12f\n" % beta
radius_out += "# gamma = %20.12f\n" % gamma
radius_out += "# LATTICE VECTORS:\n"
radius_out += "# vector A = %20.12f %20.12f %20.12f\n" % tuple(veca)
radius_out += "# vector B = %20.12f %20.12f %20.12f\n" % tuple(vecb)
radius_out += "# vector C = %20.12f %20.12f %20.12f\n" % tuple(vecc)
radius_out += "#\n# column 1: atom number\n"
radius_out += "# column 2: atom name\n"
radius_out += "# column 3: fractional coordinate x\n"
radius_out += "# column 4: fractional coordinate y\n"
radius_out += "# column 5: fractional coordinate z\n"
radius_out += "# column 6: distance from the nanotube axes (angstrom)\n"
for iat, (name, red, radius) in enumerate(zip(name_final, red_final, radius_final)):
radius_out += "%4d %4s" % (iat, name)
radius_out += "%20.12f %20.12f %20.12f" % tuple(red)
radius_out += "%10.3f\n" % radius
with open(os.path.splitext(filename)[0] + "_radius.dat", "w") as f:
f.write(radius_out)
# fractional coordinates
lines += "Direct\n"
for r in redSorted:
lines += "%20.12f %20.12f %20.12f\n" % tuple(r)
with open(outname, "w") as f:
f.write(lines)
if __name__ == "__main__":
# get arguments
args = vars(get_options())
# call main program
cry2cif(**args)
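# Example invocations (hypothetical file names, a sketch only):
# python cry2cif_old.py bulk.out                       # -> bulk.cif
# python cry2cif_old.py slab.out -t POSCAR -i -z -c 40 # centered slab, sorted along z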
|
gVallverdu/myScripts
|
CRYSTAL/cry2cif_old.py
|
Python
|
gpl-2.0
| 16,643
|
[
"CRYSTAL",
"VASP"
] |
7cc338cb136a0317a5dec2f23dc03ed6650e04b16f4c255133ae2113d3a05175
|
#!/usr/bin/env python
from __future__ import print_function
'''
isotropy
http://stokes.byu.edu/isolinux.html
http://stokes.byu.edu/iso.tar.gz
You will need to create a directory to unzip the above tarfile in,
cd
mkdir iso
cd iso
wget http://stokes.byu.edu/iso.tar.gz
tar xvzf iso.tar.gz
#put this in your .cshrc
setenv ISODATA $HOME/iso/
set path=($HOME/iso $path)
'''
import math,os,re,string
from Scientific.Geometry import Vector
class FINDSYM:
def __init__(self,atoms,outfile=None):
unitcell = atoms.get_cell()
A = Vector(unitcell[0])
B = Vector(unitcell[1])
C = Vector(unitcell[2])
# lengths of the vectors
a = A.length()#*angstroms2bohr
b = B.length()#*angstroms2bohr
c = C.length()#*angstroms2bohr
# angles between the vectors
rad2deg = 360./(2.*math.pi)
alpha = B.angle(C)*rad2deg
beta = A.angle(C)*rad2deg
gamma = A.angle(B)*rad2deg
scaledpositions = atoms.get_scaled_positions()
chemicalsymbols = [atom.get_symbol() for atom in atoms]
input = ''
input += 'title \n'
input += '0 tolerance\n'
input += '2 lattice parameters in lengths and angles\n'
input += '%1.3f %1.3f %1.3f %1.3f %1.3f %1.3f\n' % (a,b,c,
alpha,beta,gamma)
input += '1 3 basis vectors for unit cell\n'
input += '1.00 0.00 0.00\n'
input += '0.00 1.00 0.00\n'
input += '0.00 0.00 1.00\n'
input += '%i number of atoms\n' % len(atoms)
types = ''
for atom in atoms:
types += str(atom.get_atomic_number()) + ' '
input += types + '\n'
for i,atom in enumerate(atoms):
input += '%1.3f %1.3f %1.3f\n' % tuple(scaledpositions[i])
pin,pout = os.popen2('findsym')
pin.writelines(input)
pin.close()
self.output = pout.readlines()
pout.close()
if outfile:
f = open(outfile,'w')
f.writelines(self.output)
f.close()
if os.path.exists('findsym.log'):
os.remove('findsym.log')
def __str__(self):
return string.join(self.output)
def get_space_group(self):
regexp = re.compile('^Space Group')
for line in self.output:
if regexp.search(line):
return line
if __name__ == '__main__':
from ase.calculators.jacapo import *
from optparse import OptionParser
parser = OptionParser(usage='findsym.py ncfile',
version='0.1')
parser.add_option('-f',
nargs=0,
help = 'print full output')
parser.add_option('-o',
nargs=1,
help = 'save output in filename')
options,args = parser.parse_args()
for ncfile in args:
sg = FINDSYM(Jacapo.read_atoms(ncfile),outfile=options.o)
print(sg.get_space_group())
if options.f is not None:
print(sg)
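# Example invocation (hypothetical file name, a sketch only):
# python findsym.py relaxed.nc -o spacegroup.txt
# prints the "Space Group ..." line reported by the external findsym program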
|
suttond/MODOI
|
ase/calculators/jacapo/utils/findsym.py
|
Python
|
lgpl-3.0
| 3,108
|
[
"ASE"
] |
296fb2a20d135476881ef543f11e48d3036a80c670761f5efb006874d482feca
|
#!python
# coding=utf-8
import os
from datetime import datetime
from .utils import all_subclasses
from .dataset import EnhancedDataset
from . import logger
class CFDataset(EnhancedDataset):
default_fill_value = -9999.9
default_time_unit = 'seconds since 1990-01-01 00:00:00'
@classmethod
def load(cls, path: str):
"""Attempt to load a netCDF file as a CF compatible dataset
Tries each registered CFDataset subclass in turn and returns the first
one whose ``is_mine`` check accepts the file.
Parameters
----------
path : str
Path to the netCDF file
Returns
-------
An instance of the matching CFDataset subclass for the netCDF file
Raises
------
ValueError
If no suitable class is found for the dataset
"""
fpath = os.path.realpath(path)
subs = list(all_subclasses(cls))
dsg = cls(fpath)
try:
for klass in subs:
logger.debug('Trying {}...'.format(klass.__name__))
if hasattr(klass, 'is_mine'):
if klass.is_mine(dsg):
dsg.close()
return klass(path)
finally:
dsg.close()
subnames = ', '.join([ s.__name__ for s in subs ])
raise ValueError(
'Could not open {} as any type of CF Dataset. Tried: {}.'.format(
fpath,
subnames
)
)
def axes(self, name):
return getattr(self, '{}_axes'.format(name.lower()))()
def t_axes(self):
tvars = list(set(
self.filter_by_attrs(
axis=lambda x: x and x.lower() == 't'
) +
self.filter_by_attrs(
standard_name=lambda x: x in ['time', 'forecast_reference_time']
)
))
return tvars
def x_axes(self):
"""
CF X axis will have one of the following:
* The `axis` property has the value ``'X'``
* Units of longitude (see `cf.Units.islongitude` for details)
* The `standard_name` property is one of ``'longitude'``,
``'projection_x_coordinate'`` or ``'grid_longitude'``
"""
xnames = ['longitude', 'grid_longitude', 'projection_x_coordinate']
xunits = [
'degrees_east',
'degree_east',
'degree_E',
'degrees_E',
'degreeE',
'degreesE'
]
xvars = list(set(
self.filter_by_attrs(
axis=lambda x: x and x.lower() == 'x'
) +
self.filter_by_attrs(
standard_name=lambda x: x and x.lower() in xnames
) +
self.filter_by_attrs(
units=lambda x: x and x.lower() in xunits
)
))
return xvars
def y_axes(self):
ynames = ['latitude', 'grid_latitude', 'projection_y_coordinate']
yunits = [
'degrees_north',
'degree_north',
'degree_N',
'degrees_N',
'degreeN',
'degreesN'
]
yvars = list(set(
self.filter_by_attrs(
axis=lambda x: x and x.lower() == 'y'
) +
self.filter_by_attrs(
standard_name=lambda x: x and x.lower() in ynames
) +
self.filter_by_attrs(
units=lambda x: x and x.lower() in yunits
)
))
return yvars
def z_axes(self):
znames = [
'atmosphere_ln_pressure_coordinate',
'atmosphere_sigma_coordinate',
'atmosphere_hybrid_sigma_pressure_coordinate',
'atmosphere_hybrid_height_coordinate',
'atmosphere_sleve_coordinate',
'ocean_sigma_coordinate',
'ocean_s_coordinate',
'ocean_s_coordinate_g1',
'ocean_s_coordinate_g2',
'ocean_sigma_z_coordinate',
'ocean_double_sigma_coordinate'
]
zvars = list(set(
self.filter_by_attrs(
axis=lambda x: x and x.lower() == 'z'
) +
self.filter_by_attrs(
positive=lambda x: x and x.lower() in ['up', 'down']
) +
self.filter_by_attrs(
standard_name=lambda x: x and x.lower() in znames
)
))
return zvars
def data_vars(self):
return self.filter_by_attrs(
coordinates=lambda x: x is not None,
units=lambda x: x is not None,
standard_name=lambda x: x is not None,
flag_values=lambda x: x is None,
flag_masks=lambda x: x is None,
flag_meanings=lambda x: x is None
)
def ancillary_vars(self):
ancillary_variables = []
for rv in self.filter_by_attrs(
ancillary_variables=lambda x: x is not None
):
# Space separated ancillary variables
for av in rv.ancillary_variables.split(' '):
if av in self.variables:
ancillary_variables.append(self.variables[av])
return list(set(ancillary_variables))
def nc_attributes(self):
return {
'global' : {
'Conventions': 'CF-1.6',
'date_created': datetime.utcnow().strftime("%Y-%m-%dT%H:%M:00Z"),
}
}
def cf_safe_name(name):
import re
if isinstance(name, str):
if re.match('^[0-9_]', name):
# Add a letter to the front
name = "v_{}".format(name)
return re.sub(r'[^_a-zA-Z0-9]', "_", name)
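# Illustrative examples (not part of the original module):
# cf_safe_name('9m temp') -> 'v_9m_temp' (leading digit gets a 'v_' prefix)
# cf_safe_name('sea-surface temp') -> 'sea_surface_temp'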
|
joefutrelle/pocean-core
|
pocean/cf.py
|
Python
|
mit
| 5,636
|
[
"NetCDF"
] |
bf1a443a758764001a70bbea31356b66db3ded5179fc91355e3755416159ee82
|
# Set up a basic scene for rendering.
from paraview.simple import *
import sys
script = """
import numpy
import paraview.numpy_support
def setup_data(view):
# Don't actually need any data
pass
def render(view, width, height):
cb = numpy.zeros((height, width, 4), dtype=numpy.uint8)
for i in xrange(width):
cb[:,i,0] = i%255
for i in xrange(height):
cb[i,:,1] = i%255
from paraview.python_view import numpy_to_image
return numpy_to_image(cb)
"""
view = CreateView("PythonView")
view.Script = script
Render()
try:
baselineIndex = sys.argv.index('-B')+1
baselinePath = sys.argv[baselineIndex]
except:
print "Could not get baseline directory. Test failed."
import os
baseline_file = os.path.join(baselinePath, "TestPythonViewNumpyScript.png")
import vtk.test.Testing
vtk.test.Testing.VTK_TEMP_DIR = vtk.util.misc.vtkGetTempDir()
vtk.test.Testing.compareImage(view.GetRenderWindow(), baseline_file, threshold=25)
vtk.test.Testing.interact()
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/Applications/ParaView/Testing/Python/TestPythonViewNumpyScript.py
|
Python
|
gpl-3.0
| 974
|
[
"ParaView",
"VTK"
] |
80a99973b3115222caf69ea3d2697b548d0fad6415ad5c3016a7b7edf3bae883
|
# coding=utf-8
"""**Utilities for storage module**
"""
import os
import re
import copy
import numpy
import math
from ast import literal_eval
from osgeo import ogr
from collections import OrderedDict
from geometry import Polygon
from safe.gis.numerics import ensure_numeric
from safe.common.utilities import verify
from safe.common.exceptions import (
BoundingBoxError, InaSAFEError, ReadMetadataError)
from safe.utilities.i18n import tr
from safe.utilities.unicode import get_string, get_unicode
from safe.storage.metadata_utilities import (
write_keyword_in_iso_metadata, read_iso_metadata)
# Default attribute to assign to vector layers
DEFAULT_ATTRIBUTE = 'inapolygon'
# Spatial layer file extensions that are recognised in Risiko
# FIXME: Perhaps add '.gml', '.zip', ...
LAYER_TYPES = ['.shp', '.asc', '.tif', '.tiff', '.geotif', '.geotiff']
# Map between extensions and OGR drivers
DRIVER_MAP = {'.sqlite': 'SQLITE',
'.shp': 'ESRI Shapefile',
'.gml': 'GML',
'.tif': 'GTiff',
'.asc': 'AAIGrid'}
# Map between Python types and OGR field types
# FIXME (Ole): I can't find a double precision type for OGR
TYPE_MAP = {type(None): ogr.OFTString, # What else should this be?
type(''): ogr.OFTString,
type(True): ogr.OFTInteger,
type(0): ogr.OFTInteger,
type(0.0): ogr.OFTReal,
type(numpy.array([0.0])[0]): ogr.OFTReal, # numpy.float64
type(numpy.array([[0.0]])[0]): ogr.OFTReal} # numpy.ndarray
# Map between verbose types and OGR geometry types
INVERSE_GEOMETRY_TYPE_MAP = {'point': ogr.wkbPoint,
'line': ogr.wkbLineString,
'polygon': ogr.wkbPolygon}
# Miscellaneous auxiliary functions
def _keywords_to_string(keywords, sublayer=None):
"""Create a string from a keywords dict.
Args:
* keywords: A required dictionary containing the keywords to stringify.
* sublayer: str optional group marker for a sub layer.
Returns:
str: a String containing the rendered keywords list
Raises:
Any exceptions are propagated.
.. note: Only simple keyword dicts should be passed here, not multilayer
dicts.
For example you pass a dict like this::
{'datatype': 'osm',
'category': 'exposure',
'title': 'buildings_osm_4326',
'subcategory': 'building',
'purpose': 'dki'}
and the following string would be returned:
datatype: osm
category: exposure
title: buildings_osm_4326
subcategory: building
purpose: dki
If sublayer is provided e.g. _keywords_to_string(keywords, sublayer='foo'),
the following:
[foo]
datatype: osm
category: exposure
title: buildings_osm_4326
subcategory: building
purpose: dki
"""
# Write
result = get_unicode('')
if sublayer is not None:
result = '[%s]\n' % sublayer
for k, value in keywords.items():
# Create key
msg = ('Key in keywords dictionary must be a string. '
'I got %s with type %s' % (k, str(type(k))[1:-1]))
verify(isinstance(k, basestring), msg)
key = k
msg = ('Key in keywords dictionary must not contain the ":" '
'character. I got "%s"' % key)
verify(':' not in key, msg)
# Store
result += '%s: %s\n' % (key, value)
return result
def write_keywords(keywords, filename, sublayer=None):
"""Write keywords dictonary to file
:param keywords: Dictionary of keyword, value pairs
:type keywords: dict
:param filename: Name of keywords file. Extension expected to be .keywords
:type filename: str
:param sublayer: Optional sublayer applicable only to multilayer formats
such as sqlite or netcdf which can potentially hold more than
one layer. The string should map to the layer group as per the
example below. **If the keywords file contains sublayer
definitions but no sublayer was defined, keywords file content
will be removed and replaced with only the keywords provided
here.**
:type sublayer: str
A keyword file with sublayers may look like this:
[osm_buildings]
datatype: osm
category: exposure
subcategory: building
purpose: dki
title: buildings_osm_4326
[osm_flood]
datatype: flood
category: hazard
subcategory: building
title: flood_osm_4326
Keys must be strings not containing the ":" character
Values can be anything that can be converted to a string (using
Python's str function)
Surrounding whitespace is removed from values, but keys are unmodified
The reason being that keys must always be valid for the dictionary they
came from. For values we have decided to be flexible and treat entries like
'unit:m' the same as 'unit: m', or indeed 'unit: m '.
Otherwise, unintentional whitespace in values would lead to surprising
errors in the application.
"""
# Input checks
basename, ext = os.path.splitext(filename)
msg = ('Unknown extension for file %s. Expected %s.keywords' % (
filename, basename))
verify(ext == '.keywords', msg)
# First read any keywords out of the file so that we can retain
# keywords for other sublayers
existing_keywords = read_keywords(filename, all_blocks=True)
first_value = None
if len(existing_keywords) > 0:
first_value = existing_keywords[existing_keywords.keys()[0]]
multilayer_flag = isinstance(first_value, dict)
handle = file(filename, 'w')
if multilayer_flag:
if sublayer is not None and sublayer != '':
# replace existing keywords / add new for this layer
existing_keywords[sublayer] = keywords
for key, value in existing_keywords.iteritems():
handle.write(_keywords_to_string(value, sublayer=key))
handle.write('\n')
else:
# It is currently a multilayer but we will replace it with
# a single keyword block since the user passed no sublayer
handle.write(_keywords_to_string(keywords))
else:
# currently a simple layer so replace it with our content
keywords = get_string(_keywords_to_string(keywords, sublayer=sublayer))
handle.write(keywords)
handle.close()
write_keyword_in_iso_metadata(filename)
def read_keywords(keyword_filename, sublayer=None, all_blocks=False):
"""Read keywords dictionary from file
:param keyword_filename: Name of keywords file. Extension expected to be
.keywords or .xml metadata.
The format of one line is expected to be either
string: string or string
:type keyword_filename: str
:param sublayer: Optional sublayer applicable only to multilayer formats
such as sqlite or netcdf which can potentially hold more than
one layer. The string should map to the layer group as per the
example below. If the keywords file contains sublayer definitions
but no sublayer was defined, the first layer group will be
returned.
:type sublayer: str
:param all_blocks: Optional, defaults to False. If True will return
a dict of dicts, where the top level dict entries each represent
a sublayer, and the values of that dict will be dicts of keyword
entries.
:type all_blocks: bool
:returns: keywords: Dictionary of keyword, value pairs
A keyword layer with sublayers may look like this:
[osm_buildings]
datatype: osm
category: exposure
subcategory: building
purpose: dki
title: buildings_osm_4326
[osm_flood]
datatype: flood
category: hazard
subcategory: building
title: flood_osm_4326
Whereas a simple keywords file would look like this
datatype: flood
category: hazard
subcategory: building
title: flood_osm_4326
If filename does not exist, an empty dictionary is returned
Blank lines are ignored
Surrounding whitespace is removed from values, but keys are unmodified
If there are no ':', then the keyword is treated as a key with no value
"""
# Input checks
basename, ext = os.path.splitext(keyword_filename)
msg = ('Unknown extension for file %s. '
'Expected %s.keywords or %s.xml' %
(keyword_filename, basename, basename))
verify(ext == '.keywords' or ext == '.xml', msg)
metadata = False
# check .keywords file exist
keywords_file = os.path.isfile(keyword_filename) \
and ext == '.keywords'
try:
# read the xml metadata first
metadata = read_iso_metadata(keyword_filename)
except (IOError, ReadMetadataError):
# error reading xml metadata or file not exist
if keywords_file:
# if there is a keyword file generate an xml file also
write_keyword_in_iso_metadata(keyword_filename)
metadata = read_iso_metadata(keyword_filename)
# we have no valid xml metadata nor a keyword file
if not metadata and not keywords_file:
return {}
if metadata:
lines = metadata['keywords']
else:
# Read all entries
with open(keyword_filename, 'r') as fid:
lines = fid.readlines()
blocks = {}
keywords = {}
current_block = None
first_keywords = None
for line in lines:
# Remove trailing (but not preceding!) whitespace
# FIXME: Can be removed altogether
text = line.rstrip()
# Ignore blank lines
if text == '':
continue
# Check if it is an ini style group header
block_flag = re.search(r'^\[.*]$', text, re.M | re.I)
if block_flag:
# Write the old block if it exists - must have a current
# block to prevent orphans
if len(keywords) > 0 and current_block is not None:
blocks[current_block] = keywords
if first_keywords is None and len(keywords) > 0:
first_keywords = keywords
# Now set up for a new block
current_block = text[1:-1]
# Reset the keywords each time we encounter a new block
# until we know we are on the desired one
keywords = {}
continue
if ':' not in text:
key = text.strip()
val = None
else:
# Get splitting point
idx = text.find(':')
# Take key as everything up to the first ':'
key = text[:idx]
# Take value as everything after the first ':'
textval = text[idx + 1:].strip()
try:
# Take care of python structures like
# booleans, None, lists, dicts etc
val = literal_eval(textval)
except (ValueError, SyntaxError):
if 'OrderedDict(' == textval[:12]:
try:
val = OrderedDict(
literal_eval(textval[12:-1]))
except (ValueError, SyntaxError, TypeError):
val = textval
else:
val = textval
# Add entry to dictionary
keywords[key] = val
# Write our any unfinalised block data
if len(keywords) > 0 and current_block is not None:
blocks[current_block] = keywords
if first_keywords is None:
first_keywords = keywords
# Ok we have generated a structure that looks like this:
# blocks = {'foo': {'a': 'b', 'c': 'd'},
#           'bar': {'d': 'e', 'f': 'g'}}
# where foo and bar are sublayers and their dicts are the sublayer keywords
if all_blocks:
return blocks
if sublayer is not None:
if sublayer in blocks:
return blocks[sublayer]
else:
return first_keywords
# noinspection PyExceptionInherit
def check_geotransform(geotransform):
"""Check that geotransform is valid
:param geotransform: GDAL geotransform (6-tuple).
(top left x, w-e pixel resolution, rotation,
top left y, rotation, n-s pixel resolution).
See e.g. http://www.gdal.org/gdal_tutorial.html
:type geotransform: tuple
.. note::
This assumes that the spatial reference uses geographic coordinates,
so will not work for projected coordinate systems.
"""
msg = ('Supplied geotransform must be a tuple with '
'6 numbers. I got %s' % str(geotransform))
verify(len(geotransform) == 6, msg)
for x in geotransform:
try:
float(x)
except TypeError:
raise InaSAFEError(msg)
# Check longitude
msg = ('Element in 0 (first) geotransform must be a valid '
'longitude. I got %s' % geotransform[0])
verify(-180 <= geotransform[0] <= 180, msg)
# Check latitude
msg = ('Element 3 (fourth) in geotransform must be a valid '
'latitude. I got %s' % geotransform[3])
verify(-90 <= geotransform[3] <= 90, msg)
# Check cell size
msg = ('Element 1 (second) in geotransform must be a positive '
'number. I got %s' % geotransform[1])
verify(geotransform[1] > 0, msg)
msg = ('Element 5 (sixth) in geotransform must be a negative '
       'number. I got %s' % geotransform[5])
verify(geotransform[5] < 0, msg)
def geotransform_to_bbox(geotransform, columns, rows):
"""Convert geotransform to bounding box
:param geotransform: GDAL geotransform (6-tuple).
(top left x, w-e pixel resolution, rotation,
top left y, rotation, n-s pixel resolution).
See e.g. http://www.gdal.org/gdal_tutorial.html
:type geotransform: tuple
:param columns: Number of columns in grid
:type columns: int
:param rows: Number of rows in grid
:type rows: int
:returns: bbox: Bounding box as a list of geographic coordinates
[west, south, east, north]
.. note::
Rows and columns are needed to determine eastern and northern bounds.
FIXME: Not sure if the pixel vs gridline registration issue is observed
correctly here. Need to check against gdal > v1.7
"""
x_origin = geotransform[0] # top left x
y_origin = geotransform[3] # top left y
x_res = geotransform[1] # w-e pixel resolution
y_res = geotransform[5] # n-s pixel resolution
x_pix = columns
y_pix = rows
min_x = x_origin
max_x = x_origin + (x_pix * x_res)
min_y = y_origin + (y_pix * y_res)
max_y = y_origin
return [min_x, min_y, max_x, max_y]
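# Worked example (illustrative, not in the original module): a 3 x 3 grid of
# 1-degree cells anchored at (100E, 3N):
# geotransform_to_bbox((100.0, 1.0, 0, 3.0, 0, -1.0), 3, 3)
# -> [100.0, 0.0, 103.0, 3.0], i.e. [W, S, E, N]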
def geotransform_to_resolution(geotransform, isotropic=False):
"""Convert geotransform to resolution
:param geotransform: GDAL geotransform (6-tuple).
(top left x, w-e pixel resolution, rotation,
top left y, rotation, n-s pixel resolution).
See e.g. http://www.gdal.org/gdal_tutorial.html
:type geotransform: tuple
:param isotropic: If True, return the average (dx + dy) / 2
:type isotropic: bool
:returns: resolution: grid spacing (res_x, res_y) in (positive) decimal
degrees ordered as longitude first, then latitude.
or (res_x + res_y) / 2 (if isotropic is True)
"""
res_x = geotransform[1] # w-e pixel resolution
res_y = -geotransform[5] # n-s pixel resolution (always negative)
if isotropic:
return (res_x + res_y) / 2
else:
return res_x, res_y
def raster_geometry_to_geotransform(longitudes, latitudes):
"""Convert vectors of longitudes and latitudes to geotransform
Note:
This is the inverse operation of Raster.get_geometry().
:param longitudes: Vectors of geographic coordinates
:type longitudes:
:param latitudes: Vectors of geographic coordinates
:type latitudes:
:returns: geotransform: 6-tuple (top left x, w-e pixel resolution,
rotation, top left y, rotation, n-s pixel resolution)
"""
nx = len(longitudes)
ny = len(latitudes)
msg = ('You must specify more than 1 longitude to make geotransform: '
'I got %s' % str(longitudes))
verify(nx > 1, msg)
msg = ('You must specify more than 1 latitude to make geotransform: '
'I got %s' % str(latitudes))
verify(ny > 1, msg)
dx = float(longitudes[1] - longitudes[0]) # Longitudinal resolution
dy = float(latitudes[0] - latitudes[1]) # Latitudinal resolution (neg)
# Define pixel centers along each directions
# This is to achieve pixel registration rather
# than gridline registration
dx2 = dx / 2
dy2 = dy / 2
geotransform = (longitudes[0] - dx2, # Longitude of upper left corner
dx, # w-e pixel resolution
0, # rotation
latitudes[-1] - dy2, # Latitude of upper left corner
0, # rotation
dy) # n-s pixel resolution
return geotransform
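# Worked example (illustrative, not in the original module): pixel centers on
# a 1-degree grid, the inverse of the bbox example above:
# raster_geometry_to_geotransform([100.5, 101.5, 102.5], [0.5, 1.5, 2.5])
# -> (100.0, 1.0, 0, 3.0, 0, -1.0)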
# noinspection PyExceptionInherit
def bbox_intersection(*args):
"""Compute intersection between two or more bounding boxes.
:param args: two or more bounding boxes.
Each is assumed to be a list or a tuple with
four coordinates (W, S, E, N)
:returns: The minimal common bounding box
"""
msg = 'Function bbox_intersection must take at least 2 arguments.'
verify(len(args) > 1, msg)
result = [-180, -90, 180, 90]
for a in args:
if a is None:
continue
msg = ('Bounding box expected to be a list of the '
'form [W, S, E, N]. '
'Instead I got "%s"' % str(a))
try:
box = list(a)
except:
raise Exception(msg)
if not len(box) == 4:
raise BoundingBoxError(msg)
msg = ('Western boundary must be less than or equal to eastern. '
'I got %s' % box)
if not box[0] <= box[2]:
raise BoundingBoxError(msg)
msg = ('Southern boundary must be less than or equal to northern. '
'I got %s' % box)
if not box[1] <= box[3]:
raise BoundingBoxError(msg)
# Compute intersection
# West and South
for i in [0, 1]:
result[i] = max(result[i], box[i])
# East and North
for i in [2, 3]:
result[i] = min(result[i], box[i])
# Check validity and return
if result[0] <= result[2] and result[1] <= result[3]:
return result
else:
return None
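# Illustrative (not in the original module):
# bbox_intersection([0, 0, 10, 10], [5, 5, 20, 20]) -> [5, 5, 10, 10]
# bbox_intersection([0, 0, 1, 1], [2, 2, 3, 3]) -> None (disjoint boxes)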
def minimal_bounding_box(bbox, min_res, eps=1.0e-6):
"""Grow bounding box to exceed specified resolution if needed
:param bbox: Bounding box with format [W, S, E, N]
:type bbox: list
:param min_res: Minimal acceptable resolution to exceed
:type min_res: float
:param eps: Optional tolerance that will be applied to 'buffer' result
:type eps: float
:returns: Adjusted bounding box guaranteed to exceed specified resolution
"""
# FIXME (Ole): Probably obsolete now
bbox = copy.copy(list(bbox))
delta_x = bbox[2] - bbox[0]
delta_y = bbox[3] - bbox[1]
if delta_x < min_res:
dx = (min_res - delta_x) / 2 + eps
bbox[0] -= dx
bbox[2] += dx
if delta_y < min_res:
dy = (min_res - delta_y) / 2 + eps
bbox[1] -= dy
bbox[3] += dy
return bbox
def buffered_bounding_box(bbox, resolution):
"""Grow bounding box with one unit of resolution in each direction
Note:
This will ensure there are enough pixels to robustly provide
interpolated values without having to painstakingly deal with
all corner cases such as 1 x 1, 1 x 2 and 2 x 1 arrays.
The border will also make sure that points that would otherwise fall
outside the domain (as defined by a tight bounding box) get assigned
values.
:param bbox: Bounding box with format [W, S, E, N]
:type bbox: list
:param resolution: (resx, resy) - Raster resolution in each direction.
res - Raster resolution in either direction
If resolution is None bbox is returned unchanged.
:type resolution: tuple
:returns: Adjusted bounding box
Note:
Case in point: Interpolation point O would fall outside this domain
even though there are enough grid points to support it
::
--------------
| |
| * * | * *
| O|
| |
| * * | * *
--------------
"""
bbox = copy.copy(list(bbox))
if resolution is None:
return bbox
try:
resx, resy = resolution
except TypeError:
resx = resy = resolution
bbox[0] -= resx
bbox[1] -= resy
bbox[2] += resx
bbox[3] += resy
return bbox
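# Illustrative (not in the original module):
# buffered_bounding_box([0, 0, 1, 1], (0.5, 0.25)) -> [-0.5, -0.25, 1.5, 1.25]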
def get_geometry_type(geometry, geometry_type):
"""Determine geometry type based on data
:param geometry: A list of either point coordinates [lon, lat] or polygons
which are assumed to be numpy arrays of coordinates
:type geometry: list
:param geometry_type: Optional type - 'point', 'line', 'polygon' or None
:type geometry_type: str, None
:returns: geometry_type: Either ogr.wkbPoint, ogr.wkbLineString or
ogr.wkbPolygon
Note:
If geometry type cannot be determined an Exception is raised.
There is no consistency check across all entries of the
geometry list, only the first element is used in this determination.
"""
# FIXME (Ole): Perhaps use OGR's own symbols
msg = ('Argument geometry_type must be either "point", "line", '
'"polygon" or None')
verify(geometry_type is None or
geometry_type in [1, 2, 3] or
geometry_type.lower() in ['point', 'line', 'polygon'], msg)
if geometry_type is not None:
if isinstance(geometry_type, basestring):
return INVERSE_GEOMETRY_TYPE_MAP[geometry_type.lower()]
else:
return geometry_type
# FIXME (Ole): Should add some additional checks to see if choice
# makes sense
msg = 'Argument geometry must be a sequence. I got %s ' % type(geometry)
verify(is_sequence(geometry), msg)
if len(geometry) == 0:
# Default to point if there is no data
return ogr.wkbPoint
msg = ('The first element in geometry must be a sequence of length > 2. '
'I got %s ' % str(geometry[0]))
verify(is_sequence(geometry[0]), msg)
verify(len(geometry[0]) >= 2, msg)
if len(geometry[0]) == 2:
try:
float(geometry[0][0])
float(geometry[0][1])
except (ValueError, TypeError, IndexError):
pass
else:
# This geometry appears to be point data
geometry_type = ogr.wkbPoint
elif len(geometry[0]) > 2:
try:
x = numpy.array(geometry[0])
except ValueError:
pass
else:
# This geometry appears to be polygon data
if x.shape[0] > 2 and x.shape[1] == 2:
geometry_type = ogr.wkbPolygon
if geometry_type is None:
msg = 'Could not determine geometry type'
raise Exception(msg)
return geometry_type
def is_sequence(x):
"""Determine if x behaves like a true sequence but not a string
:param x: Sequence like object
:type x: object
:returns: Test result
:rtype: bool
Note:
This will for example return True for lists, tuples and numpy arrays
but False for strings and dictionaries.
"""
if isinstance(x, basestring):
return False
try:
list(x)
except TypeError:
return False
else:
return True
def array_to_line(A, geometry_type=ogr.wkbLinearRing):
"""Convert coordinates to linear_ring
:param A: Nx2 Array of coordinates representing either a polygon or a line.
A can be either a numpy array or a list of coordinates.
:type A: numpy.ndarray, list
:param geometry_type: A valid OGR geometry type.
Default type ogr.wkbLinearRing
:type geometry_type: ogr.wkbLinearRing or ogr.wkbLineString
Returns:
* ring: OGR line geometry
Note:
Based on http://www.packtpub.com/article/working-geospatial-data-python
"""
try:
A = ensure_numeric(A, numpy.float)
except Exception, e:
msg = ('Array (%s) could not be converted to numeric array. '
'I got type %s. Error message: %s'
% (A, str(type(A)), e))
raise Exception(msg)
msg = 'Array must be a 2d array of vertices. I got %s' % (str(A.shape))
verify(len(A.shape) == 2, msg)
msg = 'Array must have two columns. I got %s' % (str(A.shape[1]))
verify(A.shape[1] == 2, msg)
N = A.shape[0] # Number of vertices
line = ogr.Geometry(geometry_type)
for i in range(N):
line.AddPoint(A[i, 0], A[i, 1])
return line
def rings_equal(x, y, rtol=1.0e-6, atol=1.0e-8):
"""Compares to linear rings as numpy arrays
:param x: A 2d array of the first ring
:type x: numpy.ndarray
:param y: A 2d array of the second ring
:type y: numpy.ndarray
:param rtol: The relative tolerance parameter
:type rtol: float
:param atol: The absolute tolerance parameter
:type atol: float
Returns:
* True if x == y or x' == y (up to the specified tolerance)
where x' is x reversed in the first dimension. This corresponds to
linear rings being seen as equal irrespective of whether they are
organised in clock wise or counter clock wise order
"""
x = ensure_numeric(x, numpy.float)
y = ensure_numeric(y, numpy.float)
msg = 'Arrays must be 2d arrays of vertices. I got %s and %s' % (x, y)
verify(len(x.shape) == 2 and len(y.shape) == 2, msg)
msg = 'Arrays must have two columns. I got %s and %s' % (x, y)
verify(x.shape[1] == 2 and y.shape[1] == 2, msg)
if (numpy.allclose(x, y, rtol=rtol, atol=atol) or
numpy.allclose(x, y[::-1], rtol=rtol, atol=atol)):
return True
else:
return False
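# Illustrative (not in the original module): a ring and its reverse compare
# equal, e.g. rings_equal([[0, 0], [1, 0], [1, 1]], [[1, 1], [1, 0], [0, 0]])
# returns True.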
# FIXME (Ole): We can retire this messy function now
# Positive: Delete it :-)
def array_to_wkt(A, geom_type='POLYGON'):
"""Convert coordinates to wkt format
:param A: Nx2 Array of coordinates representing either a polygon or a line.
A can be either a numpy array or a list of coordinates.
:type A: numpy.array
:param geom_type: Determines output keyword 'POLYGON' or 'LINESTRING'
:type geom_type: str
:returns: wkt: geometry in the format known to ogr (see examples in Note)
Note:
POLYGON((1020 1030,1020 1045,1050 1045,1050 1030,1020 1030))
LINESTRING(1000 1000, 1100 1050)
"""
try:
A = ensure_numeric(A, numpy.float)
except Exception, e:
msg = ('Array (%s) could not be converted to numeric array. '
'I got type %s. Error message: %s'
% (geom_type, str(type(A)), e))
raise Exception(msg)
msg = 'Array must be a 2d array of vertices. I got %s' % (str(A.shape))
verify(len(A.shape) == 2, msg)
msg = 'Array must have two columns. I got %s' % (str(A.shape[1]))
verify(A.shape[1] == 2, msg)
if geom_type == 'LINESTRING':
# One bracket
n = 1
elif geom_type == 'POLYGON':
# Two brackets (tsk tsk)
n = 2
else:
msg = 'Unknown geom_type: %s' % geom_type
raise Exception(msg)
wkt_string = geom_type + '(' * n
N = len(A)
for i in range(N):
# Works for both lists and arrays
wkt_string += '%f %f, ' % tuple(A[i])
return wkt_string[:-2] + ')' * n
# Map of ogr numerical geometry types to their textual representation
# FIXME (Ole): Some of them don't exist, even though they show up
# when doing dir(ogr) - Why?:
geometry_type_map = {ogr.wkbPoint: 'Point',
ogr.wkbPoint25D: 'Point25D',
ogr.wkbPolygon: 'Polygon',
ogr.wkbPolygon25D: 'Polygon25D',
# ogr.wkbLinePoint: 'LinePoint', # ??
ogr.wkbGeometryCollection: 'GeometryCollection',
ogr.wkbGeometryCollection25D: 'GeometryCollection25D',
ogr.wkbLineString: 'LineString',
ogr.wkbLineString25D: 'LineString25D',
ogr.wkbLinearRing: 'LinearRing',
ogr.wkbMultiLineString: 'MultiLineString',
ogr.wkbMultiLineString25D: 'MultiLineString25D',
ogr.wkbMultiPoint: 'MultiPoint',
ogr.wkbMultiPoint25D: 'MultiPoint25D',
ogr.wkbMultiPolygon: 'MultiPolygon',
ogr.wkbMultiPolygon25D: 'MultiPolygon25D',
ogr.wkbNDR: 'NDR',
ogr.wkbNone: 'None',
ogr.wkbUnknown: 'Unknown'}
def geometry_type_to_string(g_type):
"""Provides string representation of numeric geometry types
:param g_type: geometry type:
:type g_type: ogr.wkb*, None
FIXME (Ole): I can't find anything like this in OGR. Why?
"""
if g_type in geometry_type_map:
return geometry_type_map[g_type]
elif g_type is None:
return 'No geometry type assigned'
else:
return 'Unknown geometry type: %s' % str(g_type)
# FIXME: Move to common numerics area along with polygon.py
def calculate_polygon_area(polygon, signed=False):
"""Calculate the signed area of non-self-intersecting polygon
:param polygon: Numeric array of points (longitude, latitude). It is
assumed to be closed, i.e. first and last points are identical
:type polygon: numpy.ndarray
:param signed: Optional flag deciding whether returned area retains its
sign:
If points are ordered counter clockwise, the signed area
will be positive.
If points are ordered clockwise, it will be negative
Default is False which means that the area is always
positive.
:type signed: bool
:returns: area: Area of polygon (subject to the value of argument signed)
:rtype: numpy.ndarray
Note:
Sources
http://paulbourke.net/geometry/polyarea/
http://en.wikipedia.org/wiki/Centroid
"""
# Make sure it is numeric
P = numpy.array(polygon)
msg = ('Polygon is assumed to consist of coordinate pairs. '
'I got second dimension %i instead of 2' % P.shape[1])
verify(P.shape[1] == 2, msg)
x = P[:, 0]
y = P[:, 1]
# Calculate 0.5 sum_{i=0}^{N-1} (x_i y_{i+1} - x_{i+1} y_i)
a = x[:-1] * y[1:]
b = y[:-1] * x[1:]
A = numpy.sum(a - b) / 2.
if signed:
return A
else:
return abs(A)
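# Illustrative (not in the original module): a closed unit square traversed
# counter clockwise,
# calculate_polygon_area([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]], signed=True)
# -> 1.0; the clockwise traversal gives -1.0.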
def calculate_polygon_centroid(polygon):
"""Calculate the centroid of non-self-intersecting polygon
:param polygon: Numeric array of points (longitude, latitude). It is
assumed to be closed, i.e. first and last points are identical
:type polygon: numpy.ndarray
:returns: calculated centroid
:rtype: numpy.ndarray
.. note::
Sources
http://paulbourke.net/geometry/polyarea/
http://en.wikipedia.org/wiki/Centroid
"""
# Make sure it is numeric
P = numpy.array(polygon)
# Normalise to ensure numerical accuracy.
# This requirement is backed by tests in test_io.py and without it
# centroids at building footprint level may get shifted outside the
# polygon!
P_origin = numpy.amin(P, axis=0)
P = P - P_origin
# Get area. This calculation could be incorporated to save time
# if necessary as the two formulas are very similar.
A = calculate_polygon_area(polygon, signed=True)
x = P[:, 0]
y = P[:, 1]
# Calculate
# Cx = sum_{i=0}^{N-1} (x_i + x_{i+1})(x_i y_{i+1} - x_{i+1} y_i)/(6A)
# Cy = sum_{i=0}^{N-1} (y_i + y_{i+1})(x_i y_{i+1} - x_{i+1} y_i)/(6A)
a = x[:-1] * y[1:]
b = y[:-1] * x[1:]
cx = x[:-1] + x[1:]
cy = y[:-1] + y[1:]
Cx = numpy.sum(cx * (a - b)) / (6. * A)
Cy = numpy.sum(cy * (a - b)) / (6. * A)
# Translate back to real location
C = numpy.array([Cx, Cy]) + P_origin
return C
def points_between_points(point1, point2, delta):
"""Creates an array of points between two points given a delta
:param point1: The first point
:type point1: numpy.ndarray
:param point2: The second point
:type point2: numpy.ndarray
:param delta: The increment between inserted points
:type delta: float
:returns: Array of points.
:rtype: numpy.ndarray
    Note:
        u = (x1 - x0, y1 - y0) / L, where
        L = sqrt((x1 - x0)^2 + (y1 - y0)^2).
        If r is the resolution, the points are given by
        (x0, y0) + u * n * r for n = 1, 2, ...,
        as long as ||n * u * r|| < L.
"""
x0, y0 = point1
x1, y1 = point2
L = math.sqrt(math.pow((x1 - x0), 2) + math.pow((y1 - y0), 2))
pieces = int(L / delta)
uu = numpy.array([x1 - x0, y1 - y0]) / L
points = [point1]
for nn in range(pieces):
point = point1 + uu * (nn + 1) * delta
points.append(point)
return numpy.array(points)
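# Illustrative sketch (not part of the original module): stepping from
# (0, 0) to (1, 0) with delta 0.25 yields five evenly spaced points,
# including both endpoints.
# >>> points_between_points(numpy.array([0., 0.]),
# ...                       numpy.array([1., 0.]), 0.25)
# array([[ 0.  ,  0.  ],
#        [ 0.25,  0.  ],
#        [ 0.5 ,  0.  ],
#        [ 0.75,  0.  ],
#        [ 1.  ,  0.  ]])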
def points_along_line(line, delta):
"""Calculate a list of points along a line with a given delta
:param line: Numeric array of points (longitude, latitude).
:type line: numpy.ndarray
:param delta: Decimal number to be used as step
:type delta: float
:returns: Numeric array of points (longitude, latitude).
:rtype: numpy.ndarray
Note:
Sources
http://paulbourke.net/geometry/polyarea/
http://en.wikipedia.org/wiki/Centroid
"""
# Make sure it is numeric
P = numpy.array(line)
points = []
for i in range(len(P) - 1):
pts = points_between_points(P[i], P[i + 1], delta)
# If the first point of this list is the same
# as the last one recorded, do not use it
if len(points) > 0:
if numpy.allclose(points[-1], pts[0]):
pts = pts[1:]
points.extend(pts)
C = numpy.array(points)
return C
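# Illustrative sketch (not part of the original module): densifying an
# L-shaped polyline; the duplicate vertex shared by the two segments is
# dropped, leaving five points in total.
# >>> line = numpy.array([[0., 0.], [1., 0.], [1., 1.]])
# >>> len(points_along_line(line, 0.5))
# 5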
def combine_polygon_and_point_layers(layers):
"""Combine polygon and point layers
:param layers: List of vector layers of type polygon or point
:type layers: list
:returns: One point layer with all input point layers and centroids from
all input polygon layers.
:rtype: numpy.ndarray
:raises: InaSAFEError (in case attribute names are not the same.)
"""
    # This is to implement issue #276
    # Placeholder: the actual combination logic has not been written yet.
    print(layers)
def get_ring_data(ring):
"""Extract coordinates from OGR ring object
:param ring: OGR ring object
    :type ring: ogr.Geometry
:returns: Nx2 numpy array of vertex coordinates (lon, lat)
    :rtype: numpy.ndarray
"""
N = ring.GetPointCount()
# noinspection PyTypeChecker
A = numpy.zeros((N, 2), dtype='d')
# FIXME (Ole): Is there any way to get the entire data vectors?
for j in range(N):
A[j, :] = ring.GetX(j), ring.GetY(j)
# Return ring as an Nx2 numpy array
return A
def get_polygon_data(G):
"""Extract polygon data from OGR geometry
:param G: OGR polygon geometry
    :return: An InaSAFE Polygon instance
"""
# Get outer ring, then inner rings
# http://osgeo-org.1560.n6.nabble.com/
# gdal-dev-Polygon-topology-td3745761.html
number_of_rings = G.GetGeometryCount()
# Get outer ring
outer_ring = get_ring_data(G.GetGeometryRef(0))
# Get inner rings if any
inner_rings = []
if number_of_rings > 1:
for i in range(1, number_of_rings):
inner_ring = get_ring_data(G.GetGeometryRef(i))
inner_rings.append(inner_ring)
# Return Polygon instance
return Polygon(outer_ring=outer_ring,
inner_rings=inner_rings)
def safe_to_qgis_layer(layer):
"""Helper function to make a QgsMapLayer from a safe read_layer layer.
:param layer: Layer object as provided by InaSAFE engine.
:type layer: read_layer
:returns: A validated QGIS layer or None. Returns None when QGIS is not
available.
:rtype: QgsMapLayer, QgsVectorLayer, QgsRasterLayer, None
:raises: Exception if layer is not valid.
"""
try:
from qgis.core import QgsVectorLayer, QgsRasterLayer
except ImportError:
return None
# noinspection PyUnresolvedReferences
message = tr(
        'Input layer must be an InaSAFE spatial object. I got %s'
) % (str(type(layer)))
if not hasattr(layer, 'is_inasafe_spatial_object'):
raise Exception(message)
if not layer.is_inasafe_spatial_object:
raise Exception(message)
# Get associated filename and symbolic name
filename = layer.get_filename()
name = layer.get_name()
qgis_layer = None
# Read layer
if layer.is_vector:
qgis_layer = QgsVectorLayer(filename, name, 'ogr')
elif layer.is_raster:
qgis_layer = QgsRasterLayer(filename, name)
# Verify that new qgis layer is valid
if qgis_layer.isValid():
return qgis_layer
else:
# noinspection PyUnresolvedReferences
message = tr('Loaded impact layer "%s" is not valid') % filename
raise Exception(message)
|
wonder-sk/inasafe
|
safe/storage/utilities.py
|
Python
|
gpl-3.0
| 37,736
|
[
"NetCDF"
] |
219aa9f2cf2881972a39e44850fba949ac5edfde5cf6648dd0439fb2318c7a6d
|
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
"""
Module to assist testing in AmCAT.
class AmCATTestCase tests whether code is "AmCAT compliant"
Intended usage is as part of normal django unit testing: make sure that
the test class subclasses AmCATTestCase, and run testing as normal.
functions create_test_* create test objects for use in unit tests
"""
import datetime
import logging
import os
import unittest
from collections import OrderedDict
from contextlib import contextmanager
from functools import wraps
from urllib.parse import urljoin
from uuid import uuid4
import dateparser
import shutil
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.core.cache import cache
from django.test import TestCase
from iso8601 import iso8601
from splinter import Browser
from amcat.models import ArticleSet
from amcat.tools.amcates import ES, get_property_primitive_type
log = logging.getLogger(__name__)
# use unique ids for different model objects to avoid false negatives
ID = 1000000000
def _get_next_id():
global ID
ID += 1
return ID
def skip_slow_tests():
"""Should we skip the slow tests, e.g. Solr, Alpino etc"""
return os.environ.get('AMCAT_SKIP_SLOW_TESTS') in ("1","Y", "ON")
def create_test_query(**kargs):
from amcat.models import Query
if "name" not in kargs:
kargs["name"] = "Test query"
if "parameters" not in kargs:
kargs["parameters"] = [1,2,3]
if "project" not in kargs:
kargs["project"] = create_test_project()
if "user" not in kargs:
kargs["user"] = create_test_user()
return Query.objects.create(**kargs)
def create_test_user(**kargs):
"""Create a user to be used in unit testing"""
from amcat.models.user import User, create_user
if 'username' not in kargs:
kargs['username'] = "testuser_%i" % User.objects.count()
if 'email' not in kargs:
kargs['email'] = "testuser_%i@example.com" % User.objects.count()
if 'first_name' not in kargs:
kargs['first_name'] = kargs['username']
if 'last_name' not in kargs:
kargs['last_name'] = kargs['username']
if 'password' not in kargs:
kargs['password'] = 'test'
return create_user(**kargs)
def create_test_project(**kargs):
"""Create a project to be used in unit testing"""
from amcat.models.project import Project
from amcat.models.authorisation import ProjectRole, ROLE_PROJECT_ADMIN
if "owner" not in kargs: kargs["owner"] = create_test_user()
if "insert_user" not in kargs: kargs["insert_user"] = kargs["owner"]
if "id" not in kargs: kargs["id"] = _get_next_id()
p = Project.objects.create(**kargs) # type: Project
ProjectRole.objects.create(project=p, user=p.owner, role_id=ROLE_PROJECT_ADMIN)
return p
def create_test_schema(**kargs):
"""Create a test schema to be used in unit testing"""
from amcat.models.coding.codingschema import CodingSchema
if "project" not in kargs: kargs["project"] = create_test_project()
if "id" not in kargs: kargs["id"] = _get_next_id()
if 'name' not in kargs: kargs['name'] = "testschema_%i" % CodingSchema.objects.count()
return CodingSchema.objects.create(**kargs)
def create_test_schema_with_fields(codebook=None, **kargs):
"""Set up a simple codingschema with fields to use for testing
Returns codebook, schema, textfield, numberfield, codefield
"""
from amcat.models import CodingSchemaFieldType, CodingSchemaField
if codebook is None:
codebook, _ = create_test_codebook_with_codes()
schema = create_test_schema(**kargs)
fields = []
for i, (label, type_id, cb) in enumerate([
("text", 1, None),
("number", 2, None),
("code", 5, codebook),
("boolean", 7, None),
("quality", 9, None)]):
fieldtype = CodingSchemaFieldType.objects.get(pk=type_id)
f = CodingSchemaField.objects.create(codingschema=schema, fieldnr=i, label=label,
fieldtype=fieldtype, codebook=cb)
fields.append(f)
return (schema, codebook) + tuple(fields)
def get_test_language(**kargs):
from amcat.models.language import Language
from amcat.tools import djangotoolkit
return djangotoolkit.get_or_create(Language, label='en')
def _parse_date(s: str):
date = dateparser.parse(s, ['%Y-%m-%d', '%d-%m-%Y'], settings={"STRICT_PARSING": True})
if date is None:
return iso8601.parse_date(s)
return date
def create_test_article(create=True, articleset=None, deduplicate=True, properties=None, project=None, **kargs):
"""Create a test article"""
from amcat.models.article import Article
# Get static properties
title = kargs.pop("title", "test title {}: {}".format(_get_next_id(), uuid4()))
date = kargs.pop("date", datetime.datetime.now())
url = kargs.pop("url", "http://example.com")
text = kargs.pop("text", "Lorum Ipsum: {}".format(_get_next_id()))
if project is None:
project = articleset.project if articleset is not None else create_test_project()
parent_hash = kargs.pop("parent_hash", None)
hash = kargs.pop("hash", None)
    # Caller is allowed to pass the date as a string
if isinstance(date, str):
date = _parse_date(date)
a = Article(title=title, date=date, url=url, text=text, project=project, parent_hash=parent_hash, hash=hash)
if properties:
for propname, value in properties.items():
if get_property_primitive_type(propname) == datetime.datetime and isinstance(value, str):
properties[propname] = _parse_date(value)
a.properties.update(properties)
if create:
Article.create_articles([a], articleset, deduplicate=deduplicate)
return a
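# Illustrative usage (not part of the original module): every argument has
# a sensible default, so a single call is enough inside a unit test.
# >>> a = create_test_article(title="hello", date="2001-01-01")
# >>> a.title
# 'hello'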
def create_test_sentence(**kargs):
"""Create a test sentence"""
from amcat.models.sentence import Sentence
if "article" not in kargs: kargs["article"] = create_test_article()
if "sentence" not in kargs:
kargs["sentence"] = "Test sentence %i." % _get_next_id()
if "parnr" not in kargs: kargs["parnr"] = 1
if "sentnr" not in kargs: kargs["sentnr"] = _get_next_id()
if "id" not in kargs: kargs["id"] = _get_next_id()
return Sentence.objects.create(**kargs)
def create_test_set(articles=0, **kargs) -> ArticleSet:
"""Create a test (Article) set"""
from amcat.models.articleset import ArticleSet, Article
if "name" not in kargs: kargs["name"] = "testset_%i" % len(ArticleSet.objects.all())
if "project" not in kargs: kargs["project"] = create_test_project()
if "id" not in kargs: kargs["id"] = _get_next_id()
s = ArticleSet.objects.create(**kargs)
    if isinstance(articles, int):
if articles > 0:
arts = [create_test_article(create=False) for _x in range(articles)]
Article.create_articles(arts, articleset=s)
elif articles:
s.add_articles(articles)
return s
def create_test_coded_article():
# coded_article gets created automatically when a new job is created
codingjob = create_test_job()
return list(codingjob.coded_articles.all())[0]
def create_test_job(narticles=1, **kargs):
"""Create a test Coding Job"""
from amcat.models.coding.codingjob import CodingJob
if "insertuser" not in kargs: kargs["insertuser"] = create_test_user()
if "project" not in kargs: kargs["project"] = create_test_project()
if "unitschema" not in kargs: kargs["unitschema"] = create_test_schema()
if "articleschema" not in kargs: kargs["articleschema"] = create_test_schema(isarticleschema=True)
if "coder" not in kargs: kargs["coder"] = create_test_user()
if "articleset" not in kargs: kargs["articleset"] = create_test_set(articles=narticles)
if "id" not in kargs: kargs["id"] = _get_next_id()
if "name" not in kargs: kargs["name"] = "Test job {id}".format(**kargs)
return CodingJob.objects.create(**kargs)
def create_test_coding(**kargs):
"""Create a test coding object"""
from amcat.models.coding.coding import create_coding
if "codingjob" not in kargs:
kargs["codingjob"] = create_test_job()
if "article" not in kargs: kargs["article"] = kargs["codingjob"].articleset.articles.all()[0]
if "id" not in kargs: kargs["id"] = _get_next_id()
return create_coding(**kargs)
def create_test_code(label=None, extra_label=None, extra_language=None, codebook=None, parent=None, **kargs):
"""Create a test code with a label"""
from amcat.models.coding.code import Code
from amcat.models.language import Language
if label is None: label = "testcode_%i" % len(Code.objects.all())
if "id" not in kargs: kargs["id"] = _get_next_id()
o = Code.objects.create(label=label, **kargs)
if extra_label is not None:
if extra_language is None: extra_language = Language.objects.get(pk=1)
o.add_label(extra_language, extra_label)
if codebook is not None:
codebook.add_code(o, parent=parent)
return o
def create_test_codebook(**kargs):
"""Create a test codebook"""
from amcat.models.coding.codebook import Codebook
if "project" not in kargs: kargs["project"] = create_test_project()
if "name" not in kargs: kargs["name"] = "testcodebook_%i" % Codebook.objects.count()
if "id" not in kargs: kargs["id"] = _get_next_id()
return Codebook.objects.create(**kargs)
def create_test_codebook_with_codes():
"""
Create a test codebook with codes like this
A
A1
A1a
A1b
A2
B
B1
@return: A pair of the codebook and the {label : code} dict
"""
parents = OrderedDict((
("A1a", "A1"),
("A1b", "A1"),
("A1", "A"),
("A2", "A"),
("B1", "B"),
("A", None),
("B", None)
))
codes = {l: create_test_code(label=l) for l in parents}
codebook = create_test_codebook()
for code, parent in reversed(list(parents.items())):
codebook.add_code(codes[code], codes.get(parent))
return codebook, codes
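# Illustrative usage (not part of the original module):
# >>> codebook, codes = create_test_codebook_with_codes()
# >>> sorted(codes)
# ['A', 'A1', 'A1a', 'A1b', 'A2', 'B', 'B1']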
class AmCATTestCase(TestCase):
fixtures = ['_initial_data.json',]
@classmethod
def setUpClass(cls):
ES().check_index()
ES().refresh()
cache.clear()
super(AmCATTestCase, cls).setUpClass()
def setUp(self):
super().setUp()
ES().check_index()
ES().refresh()
cache.clear()
@contextmanager
def checkMaxQueries(self, n=0, action="Query", **outputargs):
"""Check that the action took at most n queries (which should be collected in seq)"""
# lazy import to prevent cycles
from amcat.tools.djangotoolkit import list_queries
with list_queries(**outputargs) as l:
yield
m = len(l)
if m > n:
msg = """{} should take at most {} queries, but used {}""".format(action, n, m)
for i, q in enumerate(l):
msg += "\n({}) {}".format(i+1, q["sql"])
self.fail(msg)
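    # Illustrative usage (not part of the original module), inside a test
    # method of an AmCATTestCase subclass; `project` is a hypothetical
    # object created earlier in the test:
    #
    #     with self.checkMaxQueries(n=1, action="Fetch project"):
    #         Project.objects.get(pk=project.id)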
class AmCATLiveServerTestCase(StaticLiveServerTestCase):
fixtures = ['_initial_data.json',]
def get_url(self, relative_url):
return urljoin(self.live_server_url, relative_url)
def logout(self):
self.browser.visit(self.get_url("/accounts/logout/"))
def login(self, username, password):
self.logout()
self.browser.visit(self.get_url("/accounts/login/"))
self.browser.fill_form({"username": username, "password": password})
self.browser.find_by_css("[type=submit]")[0].click()
@classmethod
def setUpClass(cls):
super(AmCATLiveServerTestCase, cls).setUpClass()
from django.core.cache import cache
if not shutil.which("geckodriver"): # try/except gives warning from selenium destructor
raise unittest.SkipTest("geckodriver needs to be in PATH for LiveServerTestCase")
cls.browser = Browser(driver_name=os.environ.get("AMCAT_WEBDRIVER", "firefox"))
def setUp(self):
self.browser.visit(self.live_server_url)
super(AmCATLiveServerTestCase, self).setUp()
@classmethod
def tearDownClass(cls):
cls.browser.quit()
super(AmCATLiveServerTestCase, cls).tearDownClass()
def require_postgres(func):
def run_or_skip(self, *args, **kargs):
from django.db import connection
if connection.vendor != 'postgresql':
raise unittest.SkipTest("Test function {func.__name__} requires postgres".format(**locals()))
return func(self, *args, **kargs)
return run_or_skip
def skip_TODO(reason):
def inner(func):
def skip(self, *args, **kargs):
raise unittest.SkipTest("TODO: {}. Skipping test {}".format(reason, func.__name__))
return skip
return inner
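# Illustrative usage (not part of the original module):
#
# @skip_TODO("waiting for upstream fix")
# def test_something(self):
#     ...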
def use_java(func):
from subprocess import Popen
try:
has_java = Popen(["java", "-version"]).wait() == 0
except FileNotFoundError:
has_java = False
@wraps(func)
def inner(*args, **kwargs):
if not has_java:
raise unittest.SkipTest("Java executable not found")
return func(*args, **kwargs)
return inner
def use_elastic(func):
"""
Decorate a test function to make sure that:
- The ElasticSearch server can be reached (skips otherwise)
"""
@wraps(func)
def inner(*args, **kargs):
from amcat.tools import amcates
amcates._KNOWN_PROPERTIES = None
es = amcates.ES()
if not es.es.ping():
raise unittest.SkipTest("ES not enabled")
es.delete_index()
es.refresh()
es.check_index()
es.refresh()
return func(*args, **kargs)
return inner
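# Illustrative usage (not part of the original module): the decorated test
# runs against a freshly wiped index, or is skipped when ES is unreachable.
#
# @use_elastic
# def test_query(self):
#     ...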
def clear_cache(func):
@wraps(func)
def inner(*args, **kargs):
cache.clear()
return func(*args, **kargs)
return inner
|
amcat/amcat
|
amcat/tools/amcattest.py
|
Python
|
agpl-3.0
| 15,168
|
[
"VisIt"
] |
acf02bfd812025fac44cebdce04a8a265e7c4b407b6537469693ce746737a9ff
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import tempfile
import numpy
from pyscf import gto
from pyscf import scf
from pyscf.scf import diis
class KnownValues(unittest.TestCase):
    def test_adiis_minimize(self):
numpy.random.seed(1)
ds = numpy.random.random((4,2,2))
fs = numpy.random.random((4,2,2))
es = numpy.random.random(4)
v, x = diis.adiis_minimize(ds, fs, -1)
self.assertAlmostEqual(v, -0.44797757916272785, 9)
    def test_ediis_minimize(self):
numpy.random.seed(1)
ds = numpy.random.random((4,2,2))
fs = numpy.random.random((4,2,2))
es = numpy.random.random(4)
v, x = diis.ediis_minimize(es, ds, fs)
self.assertAlmostEqual(v, 0.31551563100606295, 9)
def test_input_diis(self):
mol = gto.M(
verbose = 7,
output = '/dev/null',
atom = '''
O 0 0 0
H 0 -0.757 0.587
H 0 0.757 0.587''',
basis = '631g',
)
mf1 = scf.RHF(mol)
mf1.DIIS = diis.EDIIS
mf1.max_cycle = 4
e = mf1.kernel()
self.assertAlmostEqual(e, -75.983875341696987, 9)
mol.stdout.close()
def test_roll_back(self):
mol = gto.M(
verbose = 7,
output = '/dev/null',
atom = '''
O 0 0 0
H 0 -1.757 1.587
H 0 1.757 1.587''',
basis = '631g',
)
mf1 = scf.RHF(mol)
mf1.diis_space = 4
mf1.diis_space_rollback = True
mf1.max_cycle = 10
e = mf1.kernel()
self.assertAlmostEqual(e, -75.446749864901321, 9)
mol.stdout.close()
def test_diis_restart(self):
mol = gto.M(
verbose = 7,
output = '/dev/null',
atom = '''
O 0 0 0
H 0 -1.757 1.587
H 0 1.757 1.587''',
basis = '631g',
)
tmpf = tempfile.NamedTemporaryFile()
mf = scf.RHF(mol)
mf.diis_file = tmpf.name
eref = mf.kernel()
self.assertAlmostEqual(eref, -75.44606939063496, 9)
mf = scf.RHF(mol)
mf.diis = scf.diis.DIIS().restore(tmpf.name)
mf.max_cycle = 3
e = mf.kernel()
self.assertAlmostEqual(e, eref, 9)
if __name__ == "__main__":
print("Full Tests for DIIS")
unittest.main()
|
gkc1000/pyscf
|
pyscf/scf/test/test_diis.py
|
Python
|
apache-2.0
| 3,064
|
[
"PySCF"
] |
cbb99000ca02f759127576fb396115d8a4cd9f625c1a82f050ccac521c49e4e0
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send Port channel commands to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_interface
author: "Dave Kasberg (@dkasberg)"
short_description: Manage interface configuration on devices running Lenovo CNOS
description:
- This module allows you to work with interface related configurations. The operators used are
overloaded to ensure control over switch interface configurations. Apart from the regular device
connection related attributes, there are seven interface arguments that will perform further
configurations. They are interfaceArg1, interfaceArg2, interfaceArg3, interfaceArg4, interfaceArg5,
interfaceArg6, and interfaceArg7. For more details on how to use these arguments, see
      [Overloaded Variables]. Interface configurations are handled in six contexts in a regular CLI.
They are
      1. Interface Name - Configurations
      2. Ethernet Interface - Configurations
      3. Loopback Interface - Configurations
      4. Management Interface - Configurations
      5. Port Aggregation - Configurations
      6. VLAN - Configurations
This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
      that must be created by the user in the local directory where the playbook is run.
      For more information about this module from Lenovo and customizing its usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_interface.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
interfaceRange:
description:
- This specifies the interface range in which the port aggregation is envisaged
required: Yes
default: Null
interfaceOption:
description:
            - This specifies the attribute entered after the interface command
required: Yes
default: Null
choices: [None, ethernet, loopback, mgmt, port-aggregation, vlan]
interfaceArg1:
description:
            - This is an overloaded interface first argument. Usage of this argument can be found in the User Guide referenced above.
required: Yes
default: Null
choices: [aggregation-group, bfd, bridgeport, description, duplex, flowcontrol, ip, ipv6, lacp, lldp,
load-interval, mac, mac-address, mac-learn, microburst-detection, mtu, service, service-policy,
shutdown, snmp, spanning-tree, speed, storm-control, vlan, vrrp, port-aggregation]
interfaceArg2:
description:
            - This is an overloaded interface second argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [aggregation-group number, access or mode or trunk, description, auto or full or half,
            receive or send, port-priority, suspend-individual, timeout, receive or transmit or trap-notification,
tlv-select, Load interval delay in seconds, counter, Name for the MAC Access List, mac-address in HHHH.HHHH.HHHH format,
THRESHOLD Value in unit of buffer cell, <64-9216> MTU in bytes-<64-9216> for L2 packet,<576-9216> for L3 IPv4 packet,
<1280-9216> for L3 IPv6 packet, enter the instance id, input or output, copp-system-policy,
type, 1000 or 10000 or 40000 or auto, broadcast or multicast or unicast, disable or enable or egress-only,
Virtual router identifier, destination-ip or destination-mac or destination-port or source-dest-ip or
source-dest-mac or source-dest-port or source-interface or source-ip or source-mac or source-port]
interfaceArg3:
description:
            - This is an overloaded interface third argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [active or on or passive, on or off, LACP port priority, long or short, link-aggregation or
mac-phy-status or management-address or max-frame-size or port-description or port-protocol-vlan or
port-vlan or power-mdi or protocol-identity or system-capabilities or system-description or system-name
or vid-management or vlan-name, counter for load interval, policy input name, all or Copp class name to attach,
            qos, queuing, Enter the allowed traffic level, ipv6]
interfaceArg4:
description:
            - This is an overloaded interface fourth argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [key-chain, key-id, keyed-md5 or keyed-sha1 or meticulous-keyed-md5 or meticulous-keyed-sha1 or simple, Interval value in milliseconds,
Destination IP (Both IPV4 and IPV6),in or out, MAC address, Time-out value in seconds, class-id, request, Specify the IPv4 address,
OSPF area ID as a decimal value, OSPF area ID in IP address format, anycast or secondary, ethernet, vlan,
MAC (hardware) address in HHHH.HHHH.HHHH format,
Load interval delay in seconds, Specify policy input name, input or output, cost, port-priority, BFD minimum receive interval,source-interface]
interfaceArg5:
description:
            - This is an overloaded interface fifth argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [name of key-chain, key-Id Value, key-chain , key-id, BFD minimum receive interval, Value of Hello Multiplier,
admin-down or multihop or non-persistent, Vendor class-identifier name, bootfile-name or host-name or log-server or ntp-server or tftp-server-name,
Slot/chassis number, Vlan interface, Specify policy input name, Port path cost or auto, Port priority increments of 32]
interfaceArg6:
description:
            - This is an overloaded interface sixth argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [Authentication key string, name of key-chain, key-Id Value, Value of Hello Multiplier, admin-down or non-persistent]
interfaceArg7:
description:
            - This is an overloaded interface seventh argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [Authentication key string, admin-down]
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_interface. These are written in the main.yml file of the tasks directory.
---
- name: Test Interface Ethernet - aggregation-group
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 1
interfaceArg1: "aggregation-group"
interfaceArg2: 33
interfaceArg3: "on"
- name: Test Interface Ethernet - bridge-port
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 33
interfaceArg1: "bridge-port"
interfaceArg2: "access"
interfaceArg3: 33
- name: Test Interface Ethernet - bridgeport mode
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 33
interfaceArg1: "bridge-port"
interfaceArg2: "mode"
interfaceArg3: "access"
- name: Test Interface Ethernet - Description
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 33
interfaceArg1: "description"
interfaceArg2: "Hentammoo "
- name: Test Interface Ethernet - Duplex
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 1
interfaceArg1: "duplex"
interfaceArg2: "auto"
- name: Test Interface Ethernet - flowcontrol
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 33
interfaceArg1: "flowcontrol"
interfaceArg2: "send"
interfaceArg3: "off"
- name: Test Interface Ethernet - lacp
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 33
interfaceArg1: "lacp"
interfaceArg2: "port-priority"
interfaceArg3: 33
- name: Test Interface Ethernet - lldp
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 33
interfaceArg1: "lldp"
interfaceArg2: "tlv-select"
interfaceArg3: "max-frame-size"
- name: Test Interface Ethernet - load-interval
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 33
interfaceArg1: "load-interval"
interfaceArg2: "counter"
interfaceArg3: 2
interfaceArg4: 33
- name: Test Interface Ethernet - mac
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 33
interfaceArg1: "mac"
interfaceArg2: "copp-system-acl-vlag-hc"
- name: Test Interface Ethernet - microburst-detection
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 33
interfaceArg1: "microburst-detection"
interfaceArg2: 25
- name: Test Interface Ethernet - mtu
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 33
interfaceArg1: "mtu"
interfaceArg2: 66
- name: Test Interface Ethernet - service-policy
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 33
interfaceArg1: "service-policy"
interfaceArg2: "input"
interfaceArg3: "Anil"
- name: Test Interface Ethernet - speed
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 1
interfaceArg1: "speed"
interfaceArg2: "auto"
- name: Test Interface Ethernet - storm
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 33
interfaceArg1: "storm-control"
interfaceArg2: "broadcast"
interfaceArg3: 12.5
- name: Test Interface Ethernet - vlan
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 33
interfaceArg1: "vlan"
interfaceArg2: "disable"
- name: Test Interface Ethernet - vrrp
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 33
interfaceArg1: "vrrp"
interfaceArg2: 33
- name: Test Interface Ethernet - spanning tree1
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 33
interfaceArg1: "spanning-tree"
interfaceArg2: "bpduguard"
interfaceArg3: "enable"
- name: Test Interface Ethernet - spanning tree 2
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 33
interfaceArg1: "spanning-tree"
interfaceArg2: "mst"
interfaceArg3: "33-35"
interfaceArg4: "cost"
interfaceArg5: 33
- name: Test Interface Ethernet - ip1
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 33
interfaceArg1: "ip"
interfaceArg2: "access-group"
interfaceArg3: "anil"
interfaceArg4: "in"
- name: Test Interface Ethernet - ip2
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 33
interfaceArg1: "ip"
interfaceArg2: "port"
interfaceArg3: "anil"
- name: Test Interface Ethernet - bfd
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 33
interfaceArg1: "bfd"
interfaceArg2: "interval"
interfaceArg3: 55
interfaceArg4: 55
interfaceArg5: 33
- name: Test Interface Ethernet - bfd
cnos_interface:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_interface_{{ inventory_hostname }}_output.txt"
interfaceOption: 'ethernet'
interfaceRange: 33
interfaceArg1: "bfd"
interfaceArg2: "ipv4"
interfaceArg3: "authentication"
interfaceArg4: "meticulous-keyed-md5"
interfaceArg5: "key-chain"
interfaceArg6: "mychain"
'''
RETURN = '''
return value: |
On successful execution, the method returns a message in JSON format
[Interface configurations accomplished.]
Upon any failure, the method returns an error display string.
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),
interfaceRange=dict(required=False),
interfaceOption=dict(required=False),
interfaceArg1=dict(required=True),
interfaceArg2=dict(required=False),
interfaceArg3=dict(required=False),
interfaceArg4=dict(required=False),
interfaceArg5=dict(required=False),
interfaceArg6=dict(required=False),
interfaceArg7=dict(required=False),),
supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
interfaceRange = module.params['interfaceRange']
interfaceOption = module.params['interfaceOption']
interfaceArg1 = module.params['interfaceArg1']
interfaceArg2 = module.params['interfaceArg2']
interfaceArg3 = module.params['interfaceArg3']
interfaceArg4 = module.params['interfaceArg4']
interfaceArg5 = module.params['interfaceArg5']
interfaceArg6 = module.params['interfaceArg6']
interfaceArg7 = module.params['interfaceArg7']
outputfile = module.params['outputfile']
hostIP = module.params['host']
deviceType = module.params['deviceType']
output = ""
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
    # Automatically add untrusted host keys (make sure this is acceptable for the security policy in your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(hostIP, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
# Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# Go to config mode
output = output + cnos.waitForDeviceResponse("configure d\n", "(config)#", 2, remote_conn)
    # Send the CLI command
    if interfaceOption in (None, "", "ethernet", "loopback", "mgmt",
                           "port-aggregation", "vlan"):
        interface_type = interfaceOption or None
        output = output + cnos.interfaceConfig(
            remote_conn, deviceType, "(config)#", 2, interface_type,
            interfaceRange, interfaceArg1, interfaceArg2, interfaceArg3,
            interfaceArg4, interfaceArg5, interfaceArg6, interfaceArg7)
    else:
        output = "Invalid interface option \n"
# Save it into the file
file = open(outputfile, "a")
file.write(output)
file.close()
# Logic to check when changes occur or not
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="Interface Configuration is done")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
|
dmitry-sobolev/ansible
|
lib/ansible/modules/network/lenovo/cnos_interface.py
|
Python
|
gpl-3.0
| 27,026
|
[
"VisIt"
] |
c0456ef3e1a944c51ae96133c08b62cc64e30eb5d324bfa7058782241e2054cd
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_cloud
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of Cloud Avi RESTful Object
description:
- This module is used to configure Cloud object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
apic_configuration:
description:
- Apicconfiguration settings for cloud.
apic_mode:
description:
- Boolean flag to set apic_mode.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
aws_configuration:
description:
- Awsconfiguration settings for cloud.
cloudstack_configuration:
description:
- Cloudstackconfiguration settings for cloud.
dhcp_enabled:
description:
- Select the ip address management scheme.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
dns_provider_ref:
description:
- Dns profile for the cloud.
- It is a reference to an object of type ipamdnsproviderprofile.
docker_configuration:
description:
- Dockerconfiguration settings for cloud.
east_west_dns_provider_ref:
description:
- Dns profile for east-west services.
- It is a reference to an object of type ipamdnsproviderprofile.
east_west_ipam_provider_ref:
description:
- Ipam profile for east-west services.
- Warning - please use virtual subnets in this ipam profile that do not conflict with the underlay networks or any overlay networks in the cluster.
- For example in aws and gcp, 169.254.0.0/16 is used for storing instance metadata.
- Hence, it should not be used in this profile.
- It is a reference to an object of type ipamdnsproviderprofile.
enable_vip_static_routes:
description:
- Use static routes for vip side network resolution during virtualservice placement.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
ipam_provider_ref:
description:
- Ipam profile for the cloud.
- It is a reference to an object of type ipamdnsproviderprofile.
license_type:
description:
- If no license type is specified then default license enforcement for the cloud type is chosen.
            - The default mappings are container cloud is max ses, openstack and vmware is cores, and for linux it is sockets.
- Enum options - LIC_BACKEND_SERVERS, LIC_SOCKETS, LIC_CORES, LIC_HOSTS.
linuxserver_configuration:
description:
- Linuxserverconfiguration settings for cloud.
mesos_configuration:
description:
- Mesosconfiguration settings for cloud.
mtu:
description:
- Mtu setting for the cloud.
- Default value when not specified in API or module is interpreted by Avi Controller as 1500.
name:
description:
- Name of the object.
required: true
nsx_configuration:
description:
- Configuration parameters for nsx manager.
- Field introduced in 17.1.1.
obj_name_prefix:
description:
- Default prefix for all automatically created objects in this cloud.
- This prefix can be overridden by the se-group template.
openstack_configuration:
description:
- Openstackconfiguration settings for cloud.
oshiftk8s_configuration:
description:
- Oshiftk8sconfiguration settings for cloud.
prefer_static_routes:
description:
- Prefer static routes over interface routes during virtualservice placement.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
proxy_configuration:
description:
- Proxyconfiguration settings for cloud.
rancher_configuration:
description:
- Rancherconfiguration settings for cloud.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
vca_configuration:
description:
- Vcloudairconfiguration settings for cloud.
vcenter_configuration:
description:
- Vcenterconfiguration settings for cloud.
vtype:
description:
- Cloud type.
- Enum options - CLOUD_NONE, CLOUD_VCENTER, CLOUD_OPENSTACK, CLOUD_AWS, CLOUD_VCA, CLOUD_APIC, CLOUD_MESOS, CLOUD_LINUXSERVER, CLOUD_DOCKER_UCP,
- CLOUD_RANCHER, CLOUD_OSHIFT_K8S.
- Default value when not specified in API or module is interpreted by Avi Controller as CLOUD_NONE.
required: true
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a VMware cloud with write access mode
avi_cloud:
username: ''
controller: ''
password: ''
apic_mode: false
dhcp_enabled: true
enable_vip_static_routes: false
license_type: LIC_CORES
mtu: 1500
name: VCenter Cloud
prefer_static_routes: false
tenant_ref: admin
vcenter_configuration:
datacenter_ref: /api/vimgrdcruntime/datacenter-2-10.10.20.100
management_network: /api/vimgrnwruntime/dvportgroup-103-10.10.20.100
password: password
privilege: WRITE_ACCESS
username: user
vcenter_url: 10.10.20.100
vtype: CLOUD_VCENTER
'''
RETURN = '''
obj:
description: Cloud (api/cloud) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
apic_configuration=dict(type='dict',),
apic_mode=dict(type='bool',),
aws_configuration=dict(type='dict',),
cloudstack_configuration=dict(type='dict',),
dhcp_enabled=dict(type='bool',),
dns_provider_ref=dict(type='str',),
docker_configuration=dict(type='dict',),
east_west_dns_provider_ref=dict(type='str',),
east_west_ipam_provider_ref=dict(type='str',),
enable_vip_static_routes=dict(type='bool',),
ipam_provider_ref=dict(type='str',),
license_type=dict(type='str',),
linuxserver_configuration=dict(type='dict',),
mesos_configuration=dict(type='dict',),
mtu=dict(type='int',),
name=dict(type='str', required=True),
nsx_configuration=dict(type='dict',),
obj_name_prefix=dict(type='str',),
openstack_configuration=dict(type='dict',),
oshiftk8s_configuration=dict(type='dict',),
prefer_static_routes=dict(type='bool',),
proxy_configuration=dict(type='dict',),
rancher_configuration=dict(type='dict',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
vca_configuration=dict(type='dict',),
vcenter_configuration=dict(type='dict',),
vtype=dict(type='str', required=True),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'cloud',
set([]))
if __name__ == '__main__':
main()
|
andreaso/ansible
|
lib/ansible/modules/network/avi/avi_cloud.py
|
Python
|
gpl-3.0
| 9,205
|
[
"VisIt"
] |
184def41458fd123e358112153a741e0ab9f23cf972ad6f3b4b0060f53ea6200
|
# Authors: Nicolas Goix <nicolas.goix@telecom-paristech.fr>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import numbers
import numpy as np
from scipy.sparse import issparse
from warnings import warn
from ..tree import ExtraTreeRegressor
from ..utils import (
check_random_state,
check_array,
gen_batches,
get_chunk_n_rows,
)
from ..utils.fixes import _joblib_parallel_args
from ..utils.validation import check_is_fitted, _num_samples
from ..utils.validation import _deprecate_positional_args
from ..base import OutlierMixin
from ._bagging import BaseBagging
__all__ = ["IsolationForest"]
class IsolationForest(OutlierMixin, BaseBagging):
"""
Isolation Forest Algorithm.
    Return the anomaly score of each sample using the IsolationForest algorithm.
The IsolationForest 'isolates' observations by randomly selecting a feature
and then randomly selecting a split value between the maximum and minimum
values of the selected feature.
Since recursive partitioning can be represented by a tree structure, the
number of splittings required to isolate a sample is equivalent to the path
length from the root node to the terminating node.
This path length, averaged over a forest of such random trees, is a
measure of normality and our decision function.
Random partitioning produces noticeably shorter paths for anomalies.
Hence, when a forest of random trees collectively produce shorter path
lengths for particular samples, they are highly likely to be anomalies.
Read more in the :ref:`User Guide <isolation_forest>`.
.. versionadded:: 0.18
Parameters
----------
n_estimators : int, default=100
The number of base estimators in the ensemble.
max_samples : "auto", int or float, default="auto"
The number of samples to draw from X to train each base estimator.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
- If "auto", then `max_samples=min(256, n_samples)`.
If max_samples is larger than the number of samples provided,
all samples will be used for all trees (no sampling).
contamination : 'auto' or float, default='auto'
The amount of contamination of the data set, i.e. the proportion
of outliers in the data set. Used when fitting to define the threshold
on the scores of the samples.
- If 'auto', the threshold is determined as in the
original paper.
- If float, the contamination should be in the range [0, 0.5].
.. versionchanged:: 0.22
The default value of ``contamination`` changed from 0.1
to ``'auto'``.
max_features : int or float, default=1.0
The number of features to draw from X to train each base estimator.
- If int, then draw `max_features` features.
- If float, then draw `max_features * X.shape[1]` features.
bootstrap : bool, default=False
If True, individual trees are fit on random subsets of the training
data sampled with replacement. If False, sampling without replacement
is performed.
n_jobs : int, default=None
The number of jobs to run in parallel for both :meth:`fit` and
:meth:`predict`. ``None`` means 1 unless in a
:obj:`joblib.parallel_backend` context. ``-1`` means using all
processors. See :term:`Glossary <n_jobs>` for more details.
random_state : int, RandomState instance or None, default=None
Controls the pseudo-randomness of the selection of the feature
and split values for each branching step and each tree in the forest.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
verbose : int, default=0
Controls the verbosity of the tree building process.
warm_start : bool, default=False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest. See :term:`the Glossary <warm_start>`.
.. versionadded:: 0.21
Attributes
----------
base_estimator_ : ExtraTreeRegressor instance
The child estimator template used to create the collection of
fitted sub-estimators.
estimators_ : list of ExtraTreeRegressor instances
The collection of fitted sub-estimators.
estimators_features_ : list of ndarray
The subset of drawn features for each base estimator.
estimators_samples_ : list of ndarray
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator.
max_samples_ : int
The actual number of samples.
offset_ : float
Offset used to define the decision function from the raw scores. We
have the relation: ``decision_function = score_samples - offset_``.
``offset_`` is defined as follows. When the contamination parameter is
set to "auto", the offset is equal to -0.5 as the scores of inliers are
close to 0 and the scores of outliers are close to -1. When a
contamination parameter different than "auto" is provided, the offset
is defined in such a way we obtain the expected number of outliers
(samples with decision function < 0) in training.
.. versionadded:: 0.20
n_features_ : int
The number of features when ``fit`` is performed.
Notes
-----
The implementation is based on an ensemble of ExtraTreeRegressor. The
maximum depth of each tree is set to ``ceil(log_2(n))`` where
:math:`n` is the number of samples used to build the tree
(see (Liu et al., 2008) for more details).
References
----------
.. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation forest."
Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on.
.. [2] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation-based
anomaly detection." ACM Transactions on Knowledge Discovery from
Data (TKDD) 6.1 (2012): 3.
    See Also
    --------
sklearn.covariance.EllipticEnvelope : An object for detecting outliers in a
Gaussian distributed dataset.
sklearn.svm.OneClassSVM : Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
sklearn.neighbors.LocalOutlierFactor : Unsupervised Outlier Detection
using Local Outlier Factor (LOF).
Examples
--------
>>> from sklearn.ensemble import IsolationForest
>>> X = [[-1.1], [0.3], [0.5], [100]]
>>> clf = IsolationForest(random_state=0).fit(X)
>>> clf.predict([[0.1], [0], [90]])
array([ 1, 1, -1])
"""
@_deprecate_positional_args
def __init__(self, *,
n_estimators=100,
max_samples="auto",
contamination="auto",
max_features=1.,
bootstrap=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False):
super().__init__(
base_estimator=ExtraTreeRegressor(
max_features=1,
splitter='random',
random_state=random_state),
# here above max_features has no links with self.max_features
bootstrap=bootstrap,
bootstrap_features=False,
n_estimators=n_estimators,
max_samples=max_samples,
max_features=max_features,
warm_start=warm_start,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.contamination = contamination
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by iforest")
def _parallel_args(self):
# ExtraTreeRegressor releases the GIL, so it's more efficient to use
# a thread-based backend rather than a process-based backend so as
# to avoid suffering from communication overhead and extra memory
# copies.
return _joblib_parallel_args(prefer='threads')
def fit(self, X, y=None, sample_weight=None):
"""
Fit estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Fitted estimator.
"""
X = check_array(X, accept_sparse=['csc'])
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
# ensure that max_sample is in [1, n_samples]:
n_samples = X.shape[0]
if isinstance(self.max_samples, str):
if self.max_samples == 'auto':
max_samples = min(256, n_samples)
else:
                raise ValueError('max_samples (%s) is not supported. '
                                 'Valid choices are: "auto", int or '
                                 'float' % self.max_samples)
elif isinstance(self.max_samples, numbers.Integral):
if self.max_samples > n_samples:
warn("max_samples (%s) is greater than the "
"total number of samples (%s). max_samples "
"will be set to n_samples for estimation."
% (self.max_samples, n_samples))
max_samples = n_samples
else:
max_samples = self.max_samples
else: # float
if not 0. < self.max_samples <= 1.:
raise ValueError("max_samples must be in (0, 1], got %r"
% self.max_samples)
max_samples = int(self.max_samples * X.shape[0])
self.max_samples_ = max_samples
max_depth = int(np.ceil(np.log2(max(max_samples, 2))))
super()._fit(X, y, max_samples,
max_depth=max_depth,
sample_weight=sample_weight)
if self.contamination == "auto":
# 0.5 plays a special role as described in the original paper.
# we take the opposite as we consider the opposite of their score.
self.offset_ = -0.5
return self
# else, define offset_ wrt contamination parameter
self.offset_ = np.percentile(self.score_samples(X),
100. * self.contamination)
return self
def predict(self, X):
"""
Predict if a particular sample is an outlier or not.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
is_inlier : ndarray of shape (n_samples,)
For each observation, tells whether or not (+1 or -1) it should
be considered as an inlier according to the fitted model.
"""
check_is_fitted(self)
X = check_array(X, accept_sparse='csr')
is_inlier = np.ones(X.shape[0], dtype=int)
is_inlier[self.decision_function(X) < 0] = -1
return is_inlier
def decision_function(self, X):
"""
Average anomaly score of X of the base classifiers.
The anomaly score of an input sample is computed as
the mean anomaly score of the trees in the forest.
The measure of normality of an observation given a tree is the depth
of the leaf containing this observation, which is equivalent to
        the number of splittings required to isolate this point. If several
        observations end up in the same leaf (n_left of them), the average
        path length of an isolation tree built on n_left samples is added.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
scores : ndarray of shape (n_samples,)
The anomaly score of the input samples.
The lower, the more abnormal. Negative scores represent outliers,
positive scores represent inliers.
"""
# We subtract self.offset_ to make 0 be the threshold value for being
# an outlier:
return self.score_samples(X) - self.offset_
def score_samples(self, X):
"""
Opposite of the anomaly score defined in the original paper.
The anomaly score of an input sample is computed as
the mean anomaly score of the trees in the forest.
The measure of normality of an observation given a tree is the depth
of the leaf containing this observation, which is equivalent to
        the number of splittings required to isolate this point. If several
        observations end up in the same leaf (n_left of them), the average
        path length of an isolation tree built on n_left samples is added.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
scores : ndarray of shape (n_samples,)
The anomaly score of the input samples.
The lower, the more abnormal.
"""
# code structure from ForestClassifier/predict_proba
check_is_fitted(self)
# Check data
X = check_array(X, accept_sparse='csr')
if self.n_features_ != X.shape[1]:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is {0} and "
"input n_features is {1}."
"".format(self.n_features_, X.shape[1]))
# Take the opposite of the scores as bigger is better (here less
# abnormal)
return -self._compute_chunked_score_samples(X)
def _compute_chunked_score_samples(self, X):
n_samples = _num_samples(X)
if self._max_features == X.shape[1]:
subsample_features = False
else:
subsample_features = True
# We get as many rows as possible within our working_memory budget
# (defined by sklearn.get_config()['working_memory']) to store
# self._max_features in each row during computation.
#
# Note:
# - this will get at least 1 row, even if 1 row of score will
# exceed working_memory.
# - this does only account for temporary memory usage while loading
# the data needed to compute the scores -- the returned scores
# themselves are 1D.
chunk_n_rows = get_chunk_n_rows(row_bytes=16 * self._max_features,
max_n_rows=n_samples)
slices = gen_batches(n_samples, chunk_n_rows)
scores = np.zeros(n_samples, order="f")
for sl in slices:
# compute score on the slices of test samples:
scores[sl] = self._compute_score_samples(X[sl], subsample_features)
return scores
def _compute_score_samples(self, X, subsample_features):
"""
        Compute the score of each sample in X going through the extra trees.
Parameters
----------
X : array-like or sparse matrix
Data matrix.
subsample_features : bool
Whether features should be subsampled.
"""
n_samples = X.shape[0]
depths = np.zeros(n_samples, order="f")
for tree, features in zip(self.estimators_, self.estimators_features_):
X_subset = X[:, features] if subsample_features else X
leaves_index = tree.apply(X_subset)
node_indicator = tree.decision_path(X_subset)
n_samples_leaf = tree.tree_.n_node_samples[leaves_index]
depths += (
np.ravel(node_indicator.sum(axis=1))
+ _average_path_length(n_samples_leaf)
- 1.0
)
scores = 2 ** (
-depths
/ (len(self.estimators_)
* _average_path_length([self.max_samples_]))
)
return scores
def _more_tags(self):
return {
'_xfail_checks': {
'check_sample_weights_invariance':
'zero sample_weight is not equivalent to removing samples',
}
}
def _average_path_length(n_samples_leaf):
"""
The average path length in a n_samples iTree, which is equal to
the average path length of an unsuccessful BST search since the
latter has the same structure as an isolation tree.
Parameters
----------
n_samples_leaf : array-like of shape (n_samples,)
        The number of training samples in each test sample leaf, for
        each estimator.
Returns
-------
average_path_length : ndarray of shape (n_samples,)
"""
n_samples_leaf = check_array(n_samples_leaf, ensure_2d=False)
n_samples_leaf_shape = n_samples_leaf.shape
n_samples_leaf = n_samples_leaf.reshape((1, -1))
average_path_length = np.zeros(n_samples_leaf.shape)
mask_1 = n_samples_leaf <= 1
mask_2 = n_samples_leaf == 2
not_mask = ~np.logical_or(mask_1, mask_2)
average_path_length[mask_1] = 0.
average_path_length[mask_2] = 1.
average_path_length[not_mask] = (
2.0 * (np.log(n_samples_leaf[not_mask] - 1.0) + np.euler_gamma)
- 2.0 * (n_samples_leaf[not_mask] - 1.0) / n_samples_leaf[not_mask]
)
return average_path_length.reshape(n_samples_leaf_shape)
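# --- Editor's note: a minimal, hedged usage sketch (not part of the original
# module). The relation ``decision_function = score_samples - offset_`` and the
# closed-form average path length c(n) = 2*(ln(n-1) + euler_gamma) - 2*(n-1)/n
# are both taken from the code above; the toy data is invented for illustration.
if __name__ == "__main__":
    rng = np.random.RandomState(42)
    X_train = rng.randn(200, 2)                  # inlier cloud around the origin
    clf = IsolationForest(n_estimators=50, random_state=0).fit(X_train)
    X_test = np.array([[0.0, 0.0], [8.0, 8.0]])  # one inlier, one outlier
    raw = clf.score_samples(X_test)              # opposite of the paper's score
    dec = clf.decision_function(X_test)          # raw scores shifted by offset_
    assert np.allclose(dec, raw - clf.offset_)
    # the normalizing constant applied to tree depths, for n > 2:
    n = 256
    c_n = 2.0 * (np.log(n - 1.0) + np.euler_gamma) - 2.0 * (n - 1.0) / n
    assert np.allclose(_average_path_length([n]), c_n)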
|
xuewei4d/scikit-learn
|
sklearn/ensemble/_iforest.py
|
Python
|
bsd-3-clause
| 18,579
|
[
"Gaussian"
] |
35f9a977d0bb84873c32ac480413f0de89f209e279736706be91e5cf67625394
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('visit', '0018_auto_20150505_0322'),
]
operations = [
migrations.AlterField(
model_name='staff',
name='email',
field=models.EmailField(unique=True, max_length=254),
),
]
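# --- Editor's note: a hedged sketch (not part of the migration, kept as a
# comment so the migration module stays importable) of the model field this
# AlterField implies in visit/models.py. Everything except the EmailField
# arguments is a guess; the real Staff model is not shown in this record.
#
#   class Staff(models.Model):
#       email = models.EmailField(unique=True, max_length=254)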
|
koebbe/homeworks
|
visit/migrations/0019_auto_20150505_0324.py
|
Python
|
mit
| 413
|
[
"VisIt"
] |
693dfdf15cddc102ae1d9b9648a36c718d4eb56feb8efb2e1169adcee8cfb496
|
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import unittest
import os
import tempfile
import numpy as np
from logging import getLogger
import pyemma.coordinates as coor
import pyemma.util.types as types
from six.moves import range
logger = getLogger('pyemma.'+'TestReaderUtils')
class TestCluster(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TestCluster, cls).setUpClass()
cls.dtraj_dir = tempfile.mkdtemp()
# generate Gaussian mixture
means = [np.array([-3,0]),
np.array([-1,1]),
np.array([0,0]),
np.array([1,-1]),
np.array([4,2])]
widths = [np.array([0.3,2]),
np.array([0.3,2]),
np.array([0.3,2]),
np.array([0.3,2]),
np.array([0.3,2])]
# continuous trajectory
nsample = 1000
cls.T = len(means)*nsample
cls.X = np.zeros((cls.T, 2))
for i in range(len(means)):
            cls.X[i*nsample:(i+1)*nsample,0] = widths[i][0] * np.random.randn(nsample) + means[i][0]
            cls.X[i*nsample:(i+1)*nsample,1] = widths[i][1] * np.random.randn(nsample) + means[i][1]
# cluster in different ways
cls.km = coor.cluster_kmeans(data = cls.X, k = 100)
cls.rs = coor.cluster_regspace(data = cls.X, dmin=0.5)
cls.rt = coor.cluster_uniform_time(data = cls.X, k = 100)
cls.cl = [cls.km, cls.rs, cls.rt]
def setUp(self):
pass
def test_chunksize(self):
for c in self.cl:
assert types.is_int(c.chunksize)
def test_clustercenters(self):
for c in self.cl:
assert c.clustercenters.shape[0] == c.n_clusters
assert c.clustercenters.shape[1] == 2
def test_data_producer(self):
for c in self.cl:
assert c.data_producer is not None
def test_describe(self):
for c in self.cl:
desc = c.describe()
assert types.is_string(desc) or types.is_list_of_string(desc)
def test_dimension(self):
for c in self.cl:
assert types.is_int(c.dimension())
assert c.dimension() == 1
def test_dtrajs(self):
for c in self.cl:
assert len(c.dtrajs) == 1
assert c.dtrajs[0].dtype == c.output_type()
assert len(c.dtrajs[0]) == self.T
def test_get_output(self):
for c in self.cl:
O = c.get_output()
assert types.is_list(O)
assert len(O) == 1
assert types.is_int_matrix(O[0])
assert O[0].shape[0] == self.T
assert O[0].shape[1] == 1
def test_in_memory(self):
for c in self.cl:
assert isinstance(c.in_memory, bool)
def test_iterator(self):
for c in self.cl:
for itraj, chunk in c:
assert types.is_int(itraj)
assert types.is_int_matrix(chunk)
assert chunk.shape[0] <= c.chunksize or c.chunksize == 0
assert chunk.shape[1] == c.dimension()
def test_map(self):
for c in self.cl:
Y = c.transform(self.X)
assert Y.shape[0] == self.T
assert Y.shape[1] == 1
# test if consistent with get_output
assert np.allclose(Y, c.get_output()[0])
def test_n_frames_total(self):
for c in self.cl:
            assert c.n_frames_total() == self.T
def test_number_of_trajectories(self):
for c in self.cl:
            assert c.number_of_trajectories() == 1
def test_output_type(self):
for c in self.cl:
assert c.output_type() == np.int32
def test_parametrize(self):
for c in self.cl:
# nothing should happen
c.parametrize()
def test_save_dtrajs(self):
extension = ".dtraj"
outdir = self.dtraj_dir
for c in self.cl:
prefix = "test_save_dtrajs_%s" % type(c).__name__
c.save_dtrajs(trajfiles=None, prefix=prefix, output_dir=outdir, extension=extension)
names = ["%s_%i%s" % (prefix, i, extension)
for i in range(c.data_producer.number_of_trajectories())]
names = [os.path.join(outdir, n) for n in names]
# check files with given patterns are there
for f in names:
os.stat(f)
def test_trajectory_length(self):
for c in self.cl:
assert c.trajectory_length(0) == self.T
with self.assertRaises(IndexError):
c.trajectory_length(1)
def test_trajectory_lengths(self):
for c in self.cl:
assert len(c.trajectory_lengths()) == 1
assert c.trajectory_lengths()[0] == c.trajectory_length(0)
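# --- Editor's note: a minimal, hedged sketch (not part of the original test
# module) showing the three clustering front-ends exercised above used
# directly. The API calls are taken from setUpClass; the data is invented.
def _example_clustering():
    X = np.random.randn(5000, 2)
    km = coor.cluster_kmeans(data=X, k=100)        # k-means centers
    rs = coor.cluster_regspace(data=X, dmin=0.5)   # regular-space clustering
    rt = coor.cluster_uniform_time(data=X, k=100)  # uniform-time clustering
    # each clustering object exposes one discrete trajectory per input traj
    return km.dtrajs[0], rs.dtrajs[0], rt.dtrajs[0]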
if __name__ == "__main__":
unittest.main()
|
gph82/PyEMMA
|
pyemma/coordinates/tests/test_cluster.py
|
Python
|
lgpl-3.0
| 5,596
|
[
"Gaussian"
] |
dab7011098c9cbce75143b4ec43bbb2cc515a25d7de9cd5888d10eae56b855f0
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements a core class LammpsData for generating/parsing
LAMMPS data file, and other bridging classes to build LammpsData from
molecules. This module also implements a subclass CombinedData for
merging LammpsData objects.
Only point particle styles are supported for now (atom_style in angle,
atomic, bond, charge, full and molecular only). See the pages below for
more info.
http://lammps.sandia.gov/doc/atom_style.html
http://lammps.sandia.gov/doc/read_data.html
"""
import itertools
import re
import warnings
from io import StringIO
from pathlib import Path
import numpy as np
import pandas as pd
from monty.json import MSONable
from monty.serialization import loadfn
from ruamel.yaml import YAML
from pymatgen.core.lattice import Lattice
from pymatgen.core.operations import SymmOp
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Molecule, Structure
from pymatgen.util.io_utils import clean_lines
__author__ = "Kiran Mathew, Zhi Deng, Tingzheng Hou"
__copyright__ = "Copyright 2018, The Materials Virtual Lab"
__version__ = "2.0"
__maintainer__ = "Tingzheng Hou"
__email__ = "tingzheng_hou@berkeley.edu"
__date__ = "May 29, 2021"
MODULE_DIR = Path(__file__).resolve().parent
SECTION_KEYWORDS = {
"atom": [
"Atoms",
"Velocities",
"Masses",
"Ellipsoids",
"Lines",
"Triangles",
"Bodies",
],
"topology": ["Bonds", "Angles", "Dihedrals", "Impropers"],
"ff": [
"Pair Coeffs",
"PairIJ Coeffs",
"Bond Coeffs",
"Angle Coeffs",
"Dihedral Coeffs",
"Improper Coeffs",
],
"class2": [
"BondBond Coeffs",
"BondAngle Coeffs",
"MiddleBondTorsion Coeffs",
"EndBondTorsion Coeffs",
"AngleTorsion Coeffs",
"AngleAngleTorsion Coeffs",
"BondBond13 Coeffs",
"AngleAngle Coeffs",
],
}
CLASS2_KEYWORDS = {
"Angle Coeffs": ["BondBond Coeffs", "BondAngle Coeffs"],
"Dihedral Coeffs": [
"MiddleBondTorsion Coeffs",
"EndBondTorsion Coeffs",
"AngleTorsion Coeffs",
"AngleAngleTorsion Coeffs",
"BondBond13 Coeffs",
],
"Improper Coeffs": ["AngleAngle Coeffs"],
}
SECTION_HEADERS = {
"Masses": ["mass"],
"Velocities": ["vx", "vy", "vz"],
"Bonds": ["type", "atom1", "atom2"],
"Angles": ["type", "atom1", "atom2", "atom3"],
"Dihedrals": ["type", "atom1", "atom2", "atom3", "atom4"],
"Impropers": ["type", "atom1", "atom2", "atom3", "atom4"],
}
ATOMS_HEADERS = {
"angle": ["molecule-ID", "type", "x", "y", "z"],
"atomic": ["type", "x", "y", "z"],
"bond": ["molecule-ID", "type", "x", "y", "z"],
"charge": ["type", "q", "x", "y", "z"],
"full": ["molecule-ID", "type", "q", "x", "y", "z"],
"molecular": ["molecule-ID", "type", "x", "y", "z"],
}
class LammpsBox(MSONable):
"""
Object for representing a simulation box in LAMMPS settings.
"""
def __init__(self, bounds, tilt=None):
"""
Args:
bounds: A (3, 2) array/list of floats setting the
boundaries of simulation box.
tilt: A (3,) array/list of floats setting the tilt of
simulation box. Default to None, i.e., use an
orthogonal box.
"""
bounds_arr = np.array(bounds)
assert bounds_arr.shape == (
3,
2,
), f"Expecting a (3, 2) array for bounds, got {bounds_arr.shape}"
self.bounds = bounds_arr.tolist()
matrix = np.diag(bounds_arr[:, 1] - bounds_arr[:, 0])
self.tilt = None
if tilt is not None:
tilt_arr = np.array(tilt)
assert tilt_arr.shape == (3,), f"Expecting a (3,) array for box_tilt, got {tilt_arr.shape}"
self.tilt = tilt_arr.tolist()
matrix[1, 0] = tilt_arr[0]
matrix[2, 0] = tilt_arr[1]
matrix[2, 1] = tilt_arr[2]
self._matrix = matrix
def __str__(self):
return self.get_string()
def __repr__(self):
return self.get_string()
@property
def volume(self):
"""
Volume of simulation box.
"""
m = self._matrix
return np.dot(np.cross(m[0], m[1]), m[2])
def get_string(self, significant_figures=6):
"""
Returns the string representation of simulation box in LAMMPS
data file format.
Args:
significant_figures (int): No. of significant figures to
output for box settings. Default to 6.
Returns:
String representation
"""
ph = "{:.%df}" % significant_figures
lines = []
for bound, d in zip(self.bounds, "xyz"):
fillers = bound + [d] * 2
bound_format = " ".join([ph] * 2 + [" {}lo {}hi"])
lines.append(bound_format.format(*fillers))
if self.tilt:
tilt_format = " ".join([ph] * 3 + [" xy xz yz"])
lines.append(tilt_format.format(*self.tilt))
return "\n".join(lines)
def get_box_shift(self, i):
"""
Calculates the coordinate shift due to PBC.
Args:
i: A (n, 3) integer array containing the labels for box
images of n entries.
Returns:
            Coordinate shift array with the same shape as i
"""
return np.inner(i, self._matrix.T)
def to_lattice(self):
"""
Converts the simulation box to a more powerful Lattice backend.
Note that Lattice is always periodic in 3D space while a
simulation box is not necessarily periodic in all dimensions.
Returns:
Lattice
"""
return Lattice(self._matrix)
def lattice_2_lmpbox(lattice, origin=(0, 0, 0)):
"""
Converts a lattice object to LammpsBox, and calculates the symmetry
operation used.
Args:
lattice (Lattice): Input lattice.
origin: A (3,) array/list of floats setting lower bounds of
simulation box. Default to (0, 0, 0).
Returns:
LammpsBox, SymmOp
"""
a, b, c = lattice.abc
xlo, ylo, zlo = origin
xhi = a + xlo
m = lattice.matrix
xy = np.dot(m[1], m[0] / a)
yhi = np.sqrt(b**2 - xy**2) + ylo
xz = np.dot(m[2], m[0] / a)
yz = (np.dot(m[1], m[2]) - xy * xz) / (yhi - ylo)
zhi = np.sqrt(c**2 - xz**2 - yz**2) + zlo
tilt = None if lattice.is_orthogonal else [xy, xz, yz]
rot_matrix = np.linalg.solve([[xhi - xlo, 0, 0], [xy, yhi - ylo, 0], [xz, yz, zhi - zlo]], m)
bounds = [[xlo, xhi], [ylo, yhi], [zlo, zhi]]
symmop = SymmOp.from_rotation_and_translation(rot_matrix, origin)
return LammpsBox(bounds, tilt), symmop
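# --- Editor's note: a minimal, hedged sketch (not part of the original module)
# of the LammpsBox / lattice_2_lmpbox machinery defined above. The lattice
# parameters are invented for illustration.
def _example_lattice_roundtrip():
    latt = Lattice.from_parameters(5.0, 6.0, 7.0, 80.0, 90.0, 100.0)
    box, symmop = lattice_2_lmpbox(latt)  # triclinic -> bounds + (xy, xz, yz) tilt
    print(box.get_string(significant_figures=6))
    # to_lattice() recovers a rotated but equivalent lattice: lengths and
    # angles survive the rotation into LAMMPS lower-triangular form
    assert np.allclose(box.to_lattice().parameters, latt.parameters)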
class LammpsData(MSONable):
"""
Object for representing the data in a LAMMPS data file.
"""
def __init__(
self,
box,
masses,
atoms,
velocities=None,
force_field=None,
topology=None,
atom_style="full",
):
"""
This is a low level constructor designed to work with parsed
data or other bridging objects (ForceField and Topology). Not
recommended to use directly.
Args:
box (LammpsBox): Simulation box.
masses (pandas.DataFrame): DataFrame with one column
["mass"] for Masses section.
atoms (pandas.DataFrame): DataFrame with multiple columns
for Atoms section. Column names vary with atom_style.
velocities (pandas.DataFrame): DataFrame with three columns
["vx", "vy", "vz"] for Velocities section. Optional
with default to None. If not None, its index should be
consistent with atoms.
force_field (dict): Data for force field sections. Optional
with default to None. Only keywords in force field and
class 2 force field are valid keys, and each value is a
DataFrame.
topology (dict): Data for topology sections. Optional with
default to None. Only keywords in topology are valid
keys, and each value is a DataFrame.
atom_style (str): Output atom_style. Default to "full".
"""
if velocities is not None:
assert len(velocities) == len(atoms), "Inconsistency found between atoms and velocities"
if force_field:
all_ff_kws = SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]
force_field = {k: v for k, v in force_field.items() if k in all_ff_kws}
if topology:
topology = {k: v for k, v in topology.items() if k in SECTION_KEYWORDS["topology"]}
self.box = box
self.masses = masses
self.atoms = atoms
self.velocities = velocities
self.force_field = force_field
self.topology = topology
self.atom_style = atom_style
def __str__(self):
return self.get_string()
def __repr__(self):
return self.get_string()
@property
def structure(self):
"""
Exports a periodic structure object representing the simulation
box.
Return:
Structure
"""
masses = self.masses
atoms = self.atoms.copy()
if "nx" in atoms.columns:
atoms.drop(["nx", "ny", "nz"], axis=1, inplace=True)
atoms["molecule-ID"] = 1
ld_copy = self.__class__(self.box, masses, atoms)
topologies = ld_copy.disassemble()[-1]
molecule = topologies[0].sites
coords = molecule.cart_coords - np.array(self.box.bounds)[:, 0]
species = molecule.species
latt = self.box.to_lattice()
site_properties = {}
if "q" in atoms:
site_properties["charge"] = atoms["q"].values
if self.velocities is not None:
site_properties["velocities"] = self.velocities.values
return Structure(
latt,
species,
coords,
coords_are_cartesian=True,
site_properties=site_properties,
)
def get_string(self, distance=6, velocity=8, charge=4, hybrid=True):
"""
Returns the string representation of LammpsData, essentially
the string to be written to a file. Support hybrid style
coeffs read and write.
Args:
distance (int): No. of significant figures to output for
box settings (bounds and tilt) and atomic coordinates.
Default to 6.
velocity (int): No. of significant figures to output for
velocities. Default to 8.
charge (int): No. of significant figures to output for
charges. Default to 4.
hybrid (bool): Whether to write hybrid coeffs types.
Default to True. If the data object has no hybrid
coeffs types and has large coeffs section, one may
use False to speedup the process. Otherwise the
default is recommended.
Returns:
String representation
"""
        file_template = """Generated by pymatgen.io.lammps.data.LammpsData

{stats}

{box}

{body}
"""
box = self.box.get_string(distance)
body_dict = {}
body_dict["Masses"] = self.masses
types = {}
types["atom"] = len(self.masses)
if self.force_field:
all_ff_kws = SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]
ff_kws = [k for k in all_ff_kws if k in self.force_field]
for kw in ff_kws:
body_dict[kw] = self.force_field[kw]
if kw in SECTION_KEYWORDS["ff"][2:]:
types[kw.lower()[:-7]] = len(self.force_field[kw])
body_dict["Atoms"] = self.atoms
counts = {}
counts["atoms"] = len(self.atoms)
if self.velocities is not None:
body_dict["Velocities"] = self.velocities
if self.topology:
for kw in SECTION_KEYWORDS["topology"]:
if kw in self.topology:
body_dict[kw] = self.topology[kw]
counts[kw.lower()] = len(self.topology[kw])
all_stats = list(counts.values()) + list(types.values())
stats_template = "{:>%d} {}" % len(str(max(all_stats)))
count_lines = [stats_template.format(v, k) for k, v in counts.items()]
type_lines = [stats_template.format(v, k + " types") for k, v in types.items()]
stats = "\n".join(count_lines + [""] + type_lines)
def map_coords(q):
return ("{:.%df}" % distance).format(q)
def map_velos(q):
return ("{:.%df}" % velocity).format(q)
def map_charges(q):
return ("{:.%df}" % charge).format(q)
float_format = "{:.9f}".format
float_format_2 = "{:.1f}".format
int_format = "{:.0f}".format
default_formatters = {
"x": map_coords,
"y": map_coords,
"z": map_coords,
"vx": map_velos,
"vy": map_velos,
"vz": map_velos,
"q": map_charges,
}
coeffsdatatype = loadfn(str(MODULE_DIR / "CoeffsDataType.yaml"))
coeffs = {}
for style, types in coeffsdatatype.items():
coeffs[style] = {}
for type, formatter in types.items():
coeffs[style][type] = {}
for coeff, datatype in formatter.items():
if datatype == "int_format":
coeffs[style][type][coeff] = int_format
elif datatype == "float_format_2":
coeffs[style][type][coeff] = float_format_2
else:
coeffs[style][type][coeff] = float_format
section_template = "{kw}\n\n{df}\n"
parts = []
for k, v in body_dict.items():
index = k != "PairIJ Coeffs"
if (
k
in [
"Bond Coeffs",
"Angle Coeffs",
"Dihedral Coeffs",
"Improper Coeffs",
]
and hybrid
):
listofdf = np.array_split(v, len(v.index))
df_string = ""
for i, df in enumerate(listofdf):
if isinstance(df.iloc[0]["coeff1"], str):
try:
formatters = {
**default_formatters,
**coeffs[k][df.iloc[0]["coeff1"]],
}
except KeyError:
formatters = default_formatters
line_string = df.to_string(
header=False,
formatters=formatters,
index_names=False,
index=index,
na_rep="",
)
else:
line_string = v.to_string(
header=False,
formatters=default_formatters,
index_names=False,
index=index,
na_rep="",
).splitlines()[i]
df_string += line_string.replace("nan", "").rstrip() + "\n"
else:
df_string = v.to_string(
header=False,
formatters=default_formatters,
index_names=False,
index=index,
na_rep="",
)
parts.append(section_template.format(kw=k, df=df_string))
body = "\n".join(parts)
return file_template.format(stats=stats, box=box, body=body)
def write_file(self, filename, distance=6, velocity=8, charge=4):
"""
Writes LammpsData to file.
Args:
filename (str): Filename.
distance (int): No. of significant figures to output for
box settings (bounds and tilt) and atomic coordinates.
Default to 6.
velocity (int): No. of significant figures to output for
velocities. Default to 8.
charge (int): No. of significant figures to output for
charges. Default to 4.
"""
with open(filename, "w") as f:
f.write(self.get_string(distance=distance, velocity=velocity, charge=charge))
def disassemble(self, atom_labels=None, guess_element=True, ff_label="ff_map"):
"""
Breaks down LammpsData to building blocks
(LammpsBox, ForceField and a series of Topology).
RESTRICTIONS APPLIED:
        1. No complex force field where coefficients are defined on more
        than just atom types, i.e., where the same type or equivalent
        types of topology may have more than one set of coefficients.
2. No intermolecular topologies (with atoms from different
molecule-ID) since a Topology object includes data for ONE
molecule or structure only.
Args:
atom_labels ([str]): List of strings (must be different
from one another) for labelling each atom type found in
Masses section. Default to None, where the labels are
automatically added based on either element guess or
dummy specie assignment.
guess_element (bool): Whether to guess the element based on
its atomic mass. Default to True, otherwise dummy
species "Qa", "Qb", ... will be assigned to various
atom types. The guessed or assigned elements will be
reflected on atom labels if atom_labels is None, as
well as on the species of molecule in each Topology.
ff_label (str): Site property key for labeling atoms of
different types. Default to "ff_map".
Returns:
LammpsBox, ForceField, [Topology]
"""
atoms_df = self.atoms.copy()
if "nx" in atoms_df.columns:
atoms_df[["x", "y", "z"]] += self.box.get_box_shift(atoms_df[["nx", "ny", "nz"]].values)
atoms_df = pd.concat([atoms_df, self.velocities], axis=1)
mids = atoms_df.get("molecule-ID")
if mids is None:
unique_mids = [1]
data_by_mols = {1: {"Atoms": atoms_df}}
else:
unique_mids = np.unique(mids)
data_by_mols = {}
for k in unique_mids:
df = atoms_df[atoms_df["molecule-ID"] == k]
data_by_mols[k] = {"Atoms": df}
masses = self.masses.copy()
masses["label"] = atom_labels
unique_masses = np.unique(masses["mass"])
if guess_element:
ref_masses = [el.atomic_mass.real for el in Element]
diff = np.abs(np.array(ref_masses) - unique_masses[:, None])
atomic_numbers = np.argmin(diff, axis=1) + 1
symbols = [Element.from_Z(an).symbol for an in atomic_numbers]
else:
symbols = [f"Q{a}" for a in map(chr, range(97, 97 + len(unique_masses)))]
for um, s in zip(unique_masses, symbols):
masses.loc[masses["mass"] == um, "element"] = s
if atom_labels is None: # add unique labels based on elements
for el, vc in masses["element"].value_counts().iteritems():
masses.loc[masses["element"] == el, "label"] = [f"{el}{c}" for c in range(1, vc + 1)]
assert masses["label"].nunique(dropna=False) == len(masses), "Expecting unique atom label for each type"
mass_info = [(row.label, row.mass) for row in masses.itertuples()]
nonbond_coeffs, topo_coeffs = None, None
if self.force_field:
if "PairIJ Coeffs" in self.force_field:
nbc = self.force_field["PairIJ Coeffs"]
nbc = nbc.sort_values(["id1", "id2"]).drop(["id1", "id2"], axis=1)
nonbond_coeffs = [list(t) for t in nbc.itertuples(False, None)]
elif "Pair Coeffs" in self.force_field:
nbc = self.force_field["Pair Coeffs"].sort_index()
nonbond_coeffs = [list(t) for t in nbc.itertuples(False, None)]
topo_coeffs = {k: [] for k in SECTION_KEYWORDS["ff"][2:] if k in self.force_field}
for kw in topo_coeffs.keys():
class2_coeffs = {
k: list(v.itertuples(False, None))
for k, v in self.force_field.items()
if k in CLASS2_KEYWORDS.get(kw, [])
}
ff_df = self.force_field[kw]
for t in ff_df.itertuples(True, None):
d = {"coeffs": list(t[1:]), "types": []}
if class2_coeffs:
d.update({k: list(v[t[0] - 1]) for k, v in class2_coeffs.items()})
topo_coeffs[kw].append(d)
if self.topology:
def label_topo(t):
return tuple(masses.loc[atoms_df.loc[t, "type"], "label"])
for k, v in self.topology.items():
ff_kw = k[:-1] + " Coeffs"
for topo in v.itertuples(False, None):
topo_idx = topo[0] - 1
indices = list(topo[1:])
mids = atoms_df.loc[indices]["molecule-ID"].unique()
assert (
len(mids) == 1
), "Do not support intermolecular topology formed by atoms with different molecule-IDs"
label = label_topo(indices)
topo_coeffs[ff_kw][topo_idx]["types"].append(label)
if data_by_mols[mids[0]].get(k):
data_by_mols[mids[0]][k].append(indices)
else:
data_by_mols[mids[0]][k] = [indices]
if topo_coeffs:
for v in topo_coeffs.values():
for d in v:
d["types"] = list(set(d["types"]))
ff = ForceField(mass_info=mass_info, nonbond_coeffs=nonbond_coeffs, topo_coeffs=topo_coeffs)
topo_list = []
for mid in unique_mids:
data = data_by_mols[mid]
atoms = data["Atoms"]
shift = min(atoms.index)
type_ids = atoms["type"]
species = masses.loc[type_ids, "element"]
labels = masses.loc[type_ids, "label"]
coords = atoms[["x", "y", "z"]]
m = Molecule(species.values, coords.values, site_properties={ff_label: labels.values})
charges = atoms.get("q")
velocities = atoms[["vx", "vy", "vz"]] if "vx" in atoms.columns else None
topologies = {}
for kw in SECTION_KEYWORDS["topology"]:
if data.get(kw):
topologies[kw] = (np.array(data[kw]) - shift).tolist()
topologies = None if not topologies else topologies
topo_list.append(
Topology(
sites=m,
ff_label=ff_label,
charges=charges,
velocities=velocities,
topologies=topologies,
)
)
return self.box, ff, topo_list
@classmethod
def from_file(cls, filename, atom_style="full", sort_id=False):
"""
Constructor that parses a file.
Args:
filename (str): Filename to read.
atom_style (str): Associated atom_style. Default to "full".
            sort_id (bool): Whether to sort each section by id. Default to
                False.
"""
with open(filename) as f:
lines = f.readlines()
kw_pattern = r"|".join(itertools.chain(*SECTION_KEYWORDS.values()))
section_marks = [i for i, l in enumerate(lines) if re.search(kw_pattern, l)]
parts = np.split(lines, section_marks)
float_group = r"([0-9eE.+-]+)"
header_pattern = {}
header_pattern["counts"] = r"^\s*(\d+)\s+([a-zA-Z]+)$"
header_pattern["types"] = r"^\s*(\d+)\s+([a-zA-Z]+)\s+types$"
header_pattern["bounds"] = r"^\s*{}$".format(r"\s+".join([float_group] * 2 + [r"([xyz])lo \3hi"]))
header_pattern["tilt"] = r"^\s*{}$".format(r"\s+".join([float_group] * 3 + ["xy xz yz"]))
header = {"counts": {}, "types": {}}
bounds = {}
for l in clean_lines(parts[0][1:]): # skip the 1st line
match = None
for k, v in header_pattern.items():
match = re.match(v, l)
if match:
break
if match and k in ["counts", "types"]:
header[k][match.group(2)] = int(match.group(1))
elif match and k == "bounds":
g = match.groups()
bounds[g[2]] = [float(i) for i in g[:2]]
elif match and k == "tilt":
header["tilt"] = [float(i) for i in match.groups()]
header["bounds"] = [bounds.get(i, [-0.5, 0.5]) for i in "xyz"]
box = LammpsBox(header["bounds"], header.get("tilt"))
def parse_section(sec_lines):
title_info = sec_lines[0].split("#", 1)
kw = title_info[0].strip()
            sio = StringIO("".join(sec_lines[2:]))  # skip the title line and the blank line after it
if kw.endswith("Coeffs") and not kw.startswith("PairIJ"):
df_list = [
pd.read_csv(StringIO(line), header=None, comment="#", delim_whitespace=True)
for line in sec_lines[2:]
if line.strip()
]
df = pd.concat(df_list, ignore_index=True)
names = ["id"] + [f"coeff{i}" for i in range(1, df.shape[1])]
else:
df = pd.read_csv(sio, header=None, comment="#", delim_whitespace=True)
if kw == "PairIJ Coeffs":
names = ["id1", "id2"] + [f"coeff{i}" for i in range(1, df.shape[1] - 1)]
df.index.name = None # pylint: disable=E1101
elif kw in SECTION_HEADERS:
names = ["id"] + SECTION_HEADERS[kw]
elif kw == "Atoms":
names = ["id"] + ATOMS_HEADERS[atom_style]
if df.shape[1] == len(names): # pylint: disable=E1101
pass
elif df.shape[1] == len(names) + 3: # pylint: disable=E1101
names += ["nx", "ny", "nz"]
else:
raise ValueError(f"Format in Atoms section inconsistent with atom_style {atom_style}")
else:
raise NotImplementedError(f"Parser for {kw} section not implemented")
df.columns = names
if sort_id:
sort_by = "id" if kw != "PairIJ Coeffs" else ["id1", "id2"]
df.sort_values(sort_by, inplace=True)
if "id" in df.columns:
df.set_index("id", drop=True, inplace=True)
df.index.name = None
return kw, df
err_msg = "Bad LAMMPS data format where "
body = {}
seen_atoms = False
for part in parts[1:]:
name, section = parse_section(part)
if name == "Atoms":
seen_atoms = True
if (
name in ["Velocities"] + SECTION_KEYWORDS["topology"] and not seen_atoms
): # Atoms must appear earlier than these
raise RuntimeError(err_msg + f"{name} section appears before Atoms section")
body.update({name: section})
err_msg += "Nos. of {} do not match between header and {} section"
assert len(body["Masses"]) == header["types"]["atom"], err_msg.format("atom types", "Masses")
atom_sections = ["Atoms", "Velocities"] if "Velocities" in body else ["Atoms"]
for s in atom_sections:
assert len(body[s]) == header["counts"]["atoms"], err_msg.format("atoms", s)
for s in SECTION_KEYWORDS["topology"]:
if header["counts"].get(s.lower(), 0) > 0:
assert len(body[s]) == header["counts"][s.lower()], err_msg.format(s.lower(), s)
items = {k.lower(): body[k] for k in ["Masses", "Atoms"]}
items["velocities"] = body.get("Velocities")
ff_kws = [k for k in body if k in SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]]
items["force_field"] = {k: body[k] for k in ff_kws} if ff_kws else None
topo_kws = [k for k in body if k in SECTION_KEYWORDS["topology"]]
items["topology"] = {k: body[k] for k in topo_kws} if topo_kws else None
items["atom_style"] = atom_style
items["box"] = box
return cls(**items)
@classmethod
def from_ff_and_topologies(cls, box, ff, topologies, atom_style="full"):
"""
Constructor building LammpsData from a ForceField object and a
list of Topology objects. Do not support intermolecular
topologies since a Topology object includes data for ONE
molecule or structure only.
Args:
box (LammpsBox): Simulation box.
ff (ForceField): ForceField object with data for Masses and
force field sections.
topologies ([Topology]): List of Topology objects with data
for Atoms, Velocities and topology sections.
atom_style (str): Output atom_style. Default to "full".
"""
atom_types = set.union(*(t.species for t in topologies))
assert atom_types.issubset(ff.maps["Atoms"].keys()), "Unknown atom type found in topologies"
items = dict(box=box, atom_style=atom_style, masses=ff.masses, force_field=ff.force_field)
mol_ids, charges, coords, labels = [], [], [], []
v_collector = [] if topologies[0].velocities else None
topo_collector = {"Bonds": [], "Angles": [], "Dihedrals": [], "Impropers": []}
topo_labels = {"Bonds": [], "Angles": [], "Dihedrals": [], "Impropers": []}
for i, topo in enumerate(topologies):
if topo.topologies:
shift = len(labels)
for k, v in topo.topologies.items():
topo_collector[k].append(np.array(v) + shift + 1)
topo_labels[k].extend([tuple(topo.type_by_sites[j] for j in t) for t in v])
if isinstance(v_collector, list):
v_collector.append(topo.velocities)
mol_ids.extend([i + 1] * len(topo.sites))
labels.extend(topo.type_by_sites)
coords.append(topo.sites.cart_coords)
q = [0.0] * len(topo.sites) if not topo.charges else topo.charges
charges.extend(q)
atoms = pd.DataFrame(np.concatenate(coords), columns=["x", "y", "z"])
atoms["molecule-ID"] = mol_ids
atoms["q"] = charges
atoms["type"] = list(map(ff.maps["Atoms"].get, labels))
atoms.index += 1
atoms = atoms[ATOMS_HEADERS[atom_style]]
velocities = None
if v_collector:
velocities = pd.DataFrame(np.concatenate(v_collector), columns=SECTION_HEADERS["Velocities"])
velocities.index += 1
topology = {k: None for k, v in topo_labels.items() if len(v) > 0}
for k in topology:
df = pd.DataFrame(np.concatenate(topo_collector[k]), columns=SECTION_HEADERS[k][1:])
df["type"] = list(map(ff.maps[k].get, topo_labels[k]))
if any(pd.isnull(df["type"])): # Throw away undefined topologies
warnings.warn(f"Undefined {k.lower()} detected and removed")
df.dropna(subset=["type"], inplace=True)
df.reset_index(drop=True, inplace=True)
df.index += 1
topology[k] = df[SECTION_HEADERS[k]]
topology = {k: v for k, v in topology.items() if not v.empty}
items.update({"atoms": atoms, "velocities": velocities, "topology": topology})
return cls(**items)
@classmethod
def from_structure(cls, structure, ff_elements=None, atom_style="charge", is_sort=False):
"""
Simple constructor building LammpsData from a structure without
force field parameters and topologies.
Args:
structure (Structure): Input structure.
ff_elements ([str]): List of strings of elements that must
be present due to force field settings but not
necessarily in the structure. Default to None.
atom_style (str): Choose between "atomic" (neutral) and
"charge" (charged). Default to "charge".
is_sort (bool): whether to sort sites
"""
if is_sort:
s = structure.get_sorted_structure()
else:
s = structure.copy()
box, symmop = lattice_2_lmpbox(s.lattice)
coords = symmop.operate_multi(s.cart_coords)
site_properties = s.site_properties
if "velocities" in site_properties:
velos = np.array(s.site_properties["velocities"])
rot = SymmOp.from_rotation_and_translation(symmop.rotation_matrix)
rot_velos = rot.operate_multi(velos)
site_properties.update({"velocities": rot_velos})
boxed_s = Structure(
box.to_lattice(),
s.species,
coords,
site_properties=site_properties,
coords_are_cartesian=True,
)
symbols = list(s.symbol_set)
if ff_elements:
symbols.extend(ff_elements)
elements = sorted(Element(el) for el in set(symbols))
mass_info = [tuple([i.symbol] * 2) for i in elements]
ff = ForceField(mass_info)
topo = Topology(boxed_s)
return cls.from_ff_and_topologies(box=box, ff=ff, topologies=[topo], atom_style=atom_style)
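# --- Editor's note: a minimal, hedged sketch (not part of the original module)
# using the ``from_structure`` constructor defined above. The CsCl structure
# and the output filename are invented for illustration.
def _example_from_structure():
    structure = Structure(
        Lattice.cubic(4.1),
        ["Cs", "Cl"],
        [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]],
    )
    ld = LammpsData.from_structure(structure, atom_style="charge")
    ld.write_file("cscl.data")  # hypothetical output path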
class Topology(MSONable):
"""
Class carrying most data in Atoms, Velocities and molecular
topology sections for ONE SINGLE Molecule or Structure
object, or a plain list of Sites.
"""
def __init__(self, sites, ff_label=None, charges=None, velocities=None, topologies=None):
"""
Args:
sites ([Site] or SiteCollection): A group of sites in a
list or as a Molecule/Structure.
ff_label (str): Site property key for labeling atoms of
different types. Default to None, i.e., use
site.species_string.
charges ([q, ...]): Charge of each site in a (n,)
array/list, where n is the No. of sites. Default to
None, i.e., search site property for charges.
velocities ([[vx, vy, vz], ...]): Velocity of each site in
a (n, 3) array/list, where n is the No. of sites.
Default to None, i.e., search site property for
velocities.
topologies (dict): Bonds, angles, dihedrals and improper
dihedrals defined by site indices. Default to None,
i.e., no additional topology. All four valid keys
listed below are optional.
{
"Bonds": [[i, j], ...],
"Angles": [[i, j, k], ...],
"Dihedrals": [[i, j, k, l], ...],
"Impropers": [[i, j, k, l], ...]
}
"""
if not isinstance(sites, (Molecule, Structure)):
sites = Molecule.from_sites(sites)
if ff_label:
type_by_sites = sites.site_properties.get(ff_label)
else:
type_by_sites = [site.specie.symbol for site in sites]
# search for site property if not override
if charges is None:
charges = sites.site_properties.get("charge")
if velocities is None:
velocities = sites.site_properties.get("velocities")
# validate shape
if charges is not None:
charge_arr = np.array(charges)
assert charge_arr.shape == (len(sites),), "Wrong format for charges"
charges = charge_arr.tolist()
if velocities is not None:
velocities_arr = np.array(velocities)
assert velocities_arr.shape == (
len(sites),
3,
), "Wrong format for velocities"
velocities = velocities_arr.tolist()
if topologies:
topologies = {k: v for k, v in topologies.items() if k in SECTION_KEYWORDS["topology"]}
self.sites = sites
self.ff_label = ff_label
self.charges = charges
self.velocities = velocities
self.topologies = topologies
self.type_by_sites = type_by_sites
self.species = set(type_by_sites)
@classmethod
def from_bonding(cls, molecule, bond=True, angle=True, dihedral=True, tol=0.1, **kwargs):
"""
Another constructor that creates an instance from a molecule.
Covalent bonds and other bond-based topologies (angles and
dihedrals) can be automatically determined. Cannot be used for
non bond-based topologies, e.g., improper dihedrals.
Args:
molecule (Molecule): Input molecule.
bond (bool): Whether find bonds. If set to False, angle and
dihedral searching will be skipped. Default to True.
angle (bool): Whether find angles. Default to True.
dihedral (bool): Whether find dihedrals. Default to True.
tol (float): Bond distance tolerance. Default to 0.1.
Not recommended to alter.
**kwargs: Other kwargs supported by Topology.
"""
real_bonds = molecule.get_covalent_bonds(tol=tol)
bond_list = [list(map(molecule.index, [b.site1, b.site2])) for b in real_bonds]
if not all((bond, bond_list)):
# do not search for others if not searching for bonds or no bonds
return cls(sites=molecule, **kwargs)
angle_list, dihedral_list = [], []
dests, freq = np.unique(bond_list, return_counts=True)
hubs = dests[np.where(freq > 1)].tolist()
bond_arr = np.array(bond_list)
if len(hubs) > 0:
hub_spokes = {}
for hub in hubs:
ix = np.any(np.isin(bond_arr, hub), axis=1)
bonds = np.unique(bond_arr[ix]).tolist()
bonds.remove(hub)
hub_spokes[hub] = bonds
# skip angle or dihedral searching if too few bonds or hubs
dihedral = False if len(bond_list) < 3 or len(hubs) < 2 else dihedral
angle = False if len(bond_list) < 2 or len(hubs) < 1 else angle
if angle:
for k, v in hub_spokes.items():
angle_list.extend([[i, k, j] for i, j in itertools.combinations(v, 2)])
if dihedral:
hub_cons = bond_arr[np.all(np.isin(bond_arr, hubs), axis=1)]
for i, j in hub_cons.tolist():
ks = [k for k in hub_spokes[i] if k != j]
ls = [l for l in hub_spokes[j] if l != i]
dihedral_list.extend([[k, i, j, l] for k, l in itertools.product(ks, ls) if k != l])
topologies = {
k: v for k, v in zip(SECTION_KEYWORDS["topology"][:3], [bond_list, angle_list, dihedral_list]) if len(v) > 0
}
topologies = None if len(topologies) == 0 else topologies
return cls(sites=molecule, topologies=topologies, **kwargs)
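# --- Editor's note: a minimal, hedged sketch (not part of the original module)
# building a Topology from a molecule via ``from_bonding`` above. The water
# geometry and charges are invented; bond and angle detection follows the
# covalent-bond search implemented in the method.
def _example_topology_from_bonding():
    water = Molecule(
        ["O", "H", "H"],
        [[0.0, 0.0, 0.0], [0.0, 0.757, 0.587], [0.0, -0.757, 0.587]],
    )
    topo = Topology.from_bonding(molecule=water, charges=[-0.83, 0.415, 0.415])
    # expected (roughly): {"Bonds": [[0, 1], [0, 2]], "Angles": [[1, 0, 2]]}
    return topo.topologies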
class ForceField(MSONable):
"""
Class carrying most data in Masses and force field sections.
Attributes:
masses (pandas.DataFrame): DataFrame for Masses section.
force_field (dict): Force field section keywords (keys) and
data (values) as DataFrames.
maps (dict): Dict for labeling atoms and topologies.
"""
@staticmethod
def _is_valid(df):
return not pd.isnull(df).values.any()
def __init__(self, mass_info, nonbond_coeffs=None, topo_coeffs=None):
"""
Args:
mass_info (list): List of atomic mass info. Elements,
strings (symbols) and floats are all acceptable for the
values, with the first two converted to the atomic mass
of an element. It is recommended to use
dict.items() to prevent key duplications.
[("C", 12.01), ("H", Element("H")), ("O", "O"), ...]
nonbond_coeffs [coeffs]: List of pair or pairij
coefficients, of which the sequence must be sorted
according to the species in mass_dict. Pair or PairIJ
determined by the length of list. Optional with default
to None.
topo_coeffs (dict): Dict with force field coefficients for
molecular topologies. Optional with default
to None. All four valid keys listed below are optional.
Each value is a list of dicts with non optional keys
"coeffs" and "types", and related class2 force field
keywords as optional keys.
{
"Bond Coeffs":
[{"coeffs": [coeff],
"types": [("C", "C"), ...]}, ...],
"Angle Coeffs":
[{"coeffs": [coeff],
"BondBond Coeffs": [coeff],
"types": [("H", "C", "H"), ...]}, ...],
"Dihedral Coeffs":
[{"coeffs": [coeff],
"BondBond13 Coeffs": [coeff],
"types": [("H", "C", "C", "H"), ...]}, ...],
"Improper Coeffs":
[{"coeffs": [coeff],
"AngleAngle Coeffs": [coeff],
"types": [("H", "C", "C", "H"), ...]}, ...],
}
Topology of same type or equivalent types (e.g.,
("C", "H") and ("H", "C") bonds) are NOT ALLOWED to
be defined MORE THAN ONCE with DIFFERENT coefficients.
"""
def map_mass(v):
return (
v.atomic_mass.real
if isinstance(v, Element)
else Element(v).atomic_mass.real
if isinstance(v, str)
else v
)
index, masses, self.mass_info, atoms_map = [], [], [], {}
for i, m in enumerate(mass_info):
index.append(i + 1)
mass = map_mass(m[1])
masses.append(mass)
self.mass_info.append((m[0], mass))
atoms_map[m[0]] = i + 1
self.masses = pd.DataFrame({"mass": masses}, index=index)
self.maps = {"Atoms": atoms_map}
ff_dfs = {}
self.nonbond_coeffs = nonbond_coeffs
if self.nonbond_coeffs:
ff_dfs.update(self._process_nonbond())
self.topo_coeffs = topo_coeffs
if self.topo_coeffs:
self.topo_coeffs = {k: v for k, v in self.topo_coeffs.items() if k in SECTION_KEYWORDS["ff"][2:]}
for k in self.topo_coeffs.keys():
coeffs, mapper = self._process_topo(k)
ff_dfs.update(coeffs)
self.maps.update(mapper)
self.force_field = None if len(ff_dfs) == 0 else ff_dfs
def _process_nonbond(self):
pair_df = pd.DataFrame(self.nonbond_coeffs)
assert self._is_valid(pair_df), "Invalid nonbond coefficients with rows varying in length"
npair, ncoeff = pair_df.shape
pair_df.columns = [f"coeff{i}" for i in range(1, ncoeff + 1)]
nm = len(self.mass_info)
ncomb = int(nm * (nm + 1) / 2)
if npair == nm:
kw = "Pair Coeffs"
pair_df.index = range(1, nm + 1)
elif npair == ncomb:
kw = "PairIJ Coeffs"
ids = list(itertools.combinations_with_replacement(range(1, nm + 1), 2))
id_df = pd.DataFrame(ids, columns=["id1", "id2"])
pair_df = pd.concat([id_df, pair_df], axis=1)
else:
raise ValueError(
"Expecting {} Pair Coeffs or "
"{} PairIJ Coeffs for {} atom types,"
" got {}".format(nm, ncomb, nm, npair)
)
return {kw: pair_df}
def _process_topo(self, kw):
def find_eq_types(label, section):
if section.startswith("Improper"):
label_arr = np.array(label)
seqs = [[0, 1, 2, 3], [0, 2, 1, 3], [3, 1, 2, 0], [3, 2, 1, 0]]
return [tuple(label_arr[s]) for s in seqs]
return [label] + [label[::-1]]
main_data, distinct_types = [], []
class2_data = {k: [] for k in self.topo_coeffs[kw][0].keys() if k in CLASS2_KEYWORDS.get(kw, [])}
for i, d in enumerate(self.topo_coeffs[kw]):
main_data.append(d["coeffs"])
distinct_types.append(d["types"])
for k in class2_data.keys():
class2_data[k].append(d[k])
distinct_types = [set(itertools.chain(*(find_eq_types(t, kw) for t in dt))) for dt in distinct_types]
type_counts = sum(len(dt) for dt in distinct_types)
type_union = set.union(*distinct_types)
assert len(type_union) == type_counts, f"Duplicated items found under different coefficients in {kw}"
atoms = set(np.ravel(list(itertools.chain(*distinct_types))))
assert atoms.issubset(self.maps["Atoms"].keys()), f"Undefined atom type found in {kw}"
mapper = {}
for i, dt in enumerate(distinct_types):
for t in dt:
mapper[t] = i + 1
def process_data(data):
df = pd.DataFrame(data)
assert self._is_valid(df), "Invalid coefficients with rows varying in length"
n, c = df.shape
df.columns = [f"coeff{i}" for i in range(1, c + 1)]
df.index = range(1, n + 1)
return df
all_data = {kw: process_data(main_data)}
if class2_data:
all_data.update({k: process_data(v) for k, v in class2_data.items()})
return all_data, {kw[:-7] + "s": mapper}
def to_file(self, filename):
"""
Saves object to a file in YAML format.
Args:
filename (str): Filename.
"""
d = {
"mass_info": self.mass_info,
"nonbond_coeffs": self.nonbond_coeffs,
"topo_coeffs": self.topo_coeffs,
}
with open(filename, "w") as f:
yaml = YAML()
yaml.dump(d, f)
@classmethod
def from_file(cls, filename):
"""
Constructor that reads in a file in YAML format.
Args:
filename (str): Filename.
"""
with open(filename) as f:
yaml = YAML()
d = yaml.load(f)
return cls.from_dict(d)
@classmethod
def from_dict(cls, d):
"""
Constructor that reads in a dictionary.
Args:
d (dict): Dictionary to read.
"""
d["mass_info"] = [tuple(m) for m in d["mass_info"]]
if d.get("topo_coeffs"):
for v in d["topo_coeffs"].values():
for c in v:
c["types"] = [tuple(t) for t in c["types"]]
return cls(d["mass_info"], d["nonbond_coeffs"], d["topo_coeffs"])
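# --- Editor's note: a minimal, hedged sketch (not part of the original module)
# constructing a ForceField as documented above. With one row of nonbond
# coefficients per atom type, _process_nonbond emits a "Pair Coeffs" section;
# the SPC-like numbers are invented for illustration.
def _example_force_field():
    ff = ForceField(
        mass_info=[("O", "O"), ("H", 1.008)],  # element symbol or explicit mass
        nonbond_coeffs=[[0.1553, 3.166], [0.0, 0.0]],  # len == n types -> Pair Coeffs
        topo_coeffs={
            "Bond Coeffs": [{"coeffs": [553.0, 1.0], "types": [("O", "H")]}],
        },
    )
    # ("O", "H") and its reverse map to the same bond type id
    return ff.force_field["Pair Coeffs"], ff.maps["Bonds"]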
class CombinedData(LammpsData):
"""
    Object for a collective set of data for a series of LAMMPS data files.
    Velocities are not yet implemented.
"""
def __init__(
self,
list_of_molecules,
list_of_names,
list_of_numbers,
coordinates,
atom_style="full",
):
"""
Args:
            list_of_molecules: A list of LammpsData objects, one per chemical cluster.
                Each LammpsData object (cluster) may contain one or more molecule IDs.
            list_of_names: A list of names (strings), one for each cluster. The characters in each name
                are restricted to word characters ([a-zA-Z0-9_]). If a name contains any non-word
                characters, those characters will be substituted by '_'.
            list_of_numbers: A list of integers giving the count of each molecule.
coordinates (pandas.DataFrame): DataFrame at least containing
columns of ["x", "y", "z"] for coordinates of atoms.
atom_style (str): Output atom_style. Default to "full".
"""
self._list_of_molecules = list_of_molecules
self._list_of_names = list_of_names
self._list_of_numbers = list_of_numbers
self._coordinates = coordinates
self._coordinates.index = self._coordinates.index.map(int)
max_xyz = self._coordinates[["x", "y", "z"]].max().max()
min_xyz = self._coordinates[["x", "y", "z"]].min().min()
self.box = LammpsBox(np.array(3 * [[min_xyz - 0.5, max_xyz + 0.5]]))
self.atom_style = atom_style
self.n = sum(self._list_of_numbers)
self.names = []
for name in self._list_of_names:
self.names.append("_".join(re.findall(r"\w+", name)))
self.mols = self._list_of_molecules
self.nums = self._list_of_numbers
self.masses = pd.concat([mol.masses.copy() for mol in self.mols], ignore_index=True)
self.masses.index += 1
all_ff_kws = SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]
appeared_kws = {k for mol in self.mols if mol.force_field is not None for k in mol.force_field}
ff_kws = [k for k in all_ff_kws if k in appeared_kws]
self.force_field = {}
for kw in ff_kws:
self.force_field[kw] = pd.concat(
[mol.force_field[kw].copy() for mol in self.mols if kw in mol.force_field],
ignore_index=True,
)
self.force_field[kw].index += 1
if not bool(self.force_field):
self.force_field = None
self.atoms = pd.DataFrame()
mol_count = 0
type_count = 0
self.mols_per_data = []
for i, mol in enumerate(self.mols):
atoms_df = mol.atoms.copy()
atoms_df["molecule-ID"] += mol_count
atoms_df["type"] += type_count
mols_in_data = len(atoms_df["molecule-ID"].unique())
self.mols_per_data.append(mols_in_data)
for j in range(self.nums[i]):
self.atoms = self.atoms.append(atoms_df, ignore_index=True)
atoms_df["molecule-ID"] += mols_in_data
type_count += len(mol.masses)
mol_count += self.nums[i] * mols_in_data
self.atoms.index += 1
assert len(self.atoms) == len(self._coordinates), "Wrong number of coordinates."
self.atoms.update(self._coordinates)
self.velocities = None
assert self.mols[0].velocities is None, "Velocities not supported"
self.topology = {}
atom_count = 0
count = {"Bonds": 0, "Angles": 0, "Dihedrals": 0, "Impropers": 0}
for i, mol in enumerate(self.mols):
for kw in SECTION_KEYWORDS["topology"]:
if bool(mol.topology) and kw in mol.topology:
if kw not in self.topology:
self.topology[kw] = pd.DataFrame()
topo_df = mol.topology[kw].copy()
topo_df["type"] += count[kw]
for col in topo_df.columns[1:]:
topo_df[col] += atom_count
for j in range(self.nums[i]):
self.topology[kw] = self.topology[kw].append(topo_df, ignore_index=True)
for col in topo_df.columns[1:]:
topo_df[col] += len(mol.atoms)
count[kw] += len(mol.force_field[kw[:-1] + " Coeffs"])
atom_count += len(mol.atoms) * self.nums[i]
for kw in SECTION_KEYWORDS["topology"]:
if kw in self.topology:
self.topology[kw].index += 1
if not bool(self.topology):
self.topology = None
@property
def structure(self):
"""
Exports a periodic structure object representing the simulation
box.
Return:
Structure
"""
ld_cp = self.as_lammpsdata()
return ld_cp.structure
def disassemble(self, atom_labels=None, guess_element=True, ff_label="ff_map"):
"""
Breaks down each LammpsData in CombinedData to building blocks
(LammpsBox, ForceField and a series of Topology).
RESTRICTIONS APPLIED:
        1. No complex force field where coefficients are defined on more
        than just atom types, i.e., where the same type or equivalent
        types of topology may have more than one set of coefficients.
2. No intermolecular topologies (with atoms from different
molecule-ID) since a Topology object includes data for ONE
molecule or structure only.
Args:
atom_labels ([str]): List of strings (must be different
from one another) for labelling each atom type found in
Masses section. Default to None, where the labels are
automatically added based on either element guess or
dummy specie assignment.
guess_element (bool): Whether to guess the element based on
its atomic mass. Default to True, otherwise dummy
species "Qa", "Qb", ... will be assigned to various
atom types. The guessed or assigned elements will be
reflected on atom labels if atom_labels is None, as
well as on the species of molecule in each Topology.
ff_label (str): Site property key for labeling atoms of
different types. Default to "ff_map".
Returns:
[(LammpsBox, ForceField, [Topology]), ...]
"""
disassembles = []
for mol in self.mols:
disassembles.append(
mol.disassemble(atom_labels=atom_labels, guess_element=guess_element, ff_label=ff_label)
)
return disassembles
@classmethod
def from_ff_and_topologies(cls):
"""
Unsupported constructor for CombinedData objects.
"""
raise AttributeError("Unsupported constructor for CombinedData objects.")
@classmethod
def from_structure(cls):
"""
Unsupported constructor for CombinedData objects.
"""
raise AttributeError("Unsupported constructor for CombinedData objects.")
@classmethod
def parse_xyz(cls, filename):
"""
        Load an xyz file generated by packmol (for those who find it hard to install openbabel).
Returns:
pandas.DataFrame
"""
with open(filename) as f:
lines = f.readlines()
sio = StringIO("".join(lines[2:])) # skip the first two lines (atom count and comment)
df = pd.read_csv(
sio,
header=None,
comment="#",
delim_whitespace=True,
names=["atom", "x", "y", "z"],
)
df.index += 1
return df
@classmethod
def from_files(cls, coordinate_file, list_of_numbers, *filenames):
"""
Constructor that parses a series of LAMMPS data files.
Args:
coordinate_file (str): The filename of xyz coordinates.
list_of_numbers (list): A list of numbers specifying counts for each
cluster parsed from the files.
filenames (str): A series of LAMMPS data filenames in string format.
"""
names = []
mols = []
styles = []
coordinates = cls.parse_xyz(filename=coordinate_file)
for i, fname in enumerate(filenames):
cluster = LammpsData.from_file(fname)
names.append(f"cluster{i + 1}")
mols.append(cluster)
styles.append(cluster.atom_style)
style = set(styles)
assert len(style) == 1, "Files have different atom styles."
return cls.from_lammpsdata(mols, names, list_of_numbers, coordinates, style.pop())
@classmethod
def from_lammpsdata(cls, mols, names, list_of_numbers, coordinates, atom_style=None):
"""
Constructor that can infer atom_style.
The input LammpsData objects are used non-destructively.
Args:
mols: a list of LammpsData objects, one per chemical cluster. Each
LammpsData object (cluster) may contain one or more molecule IDs.
names: a list of names, one for each cluster.
list_of_numbers: a list of integers giving the count of each cluster
coordinates (pandas.DataFrame): DataFrame at least containing
columns of ["x", "y", "z"] for coordinates of atoms.
atom_style (str): Output atom_style. Default to "full".
"""
styles = []
for mol in mols:
styles.append(mol.atom_style)
style = set(styles)
assert len(style) == 1, "Data have different atom_style."
style_return = style.pop()
if atom_style:
assert atom_style == style_return, "Inferred atom_style does not match the specified atom_style."
return cls(mols, names, list_of_numbers, coordinates, style_return)
def get_string(self, distance=6, velocity=8, charge=4, hybrid=True):
"""
Returns the string representation of CombinedData, essentially
the string to be written to a file. Combination info is included
as a comment. For single molecule ID data, the info format is:
num name
For data with multiple molecule ID, the format is:
num(mols_per_data) name
Args:
distance (int): No. of significant figures to output for
box settings (bounds and tilt) and atomic coordinates.
Default to 6.
velocity (int): No. of significant figures to output for
velocities. Default to 8.
charge (int): No. of significant figures to output for
charges. Default to 4.
hybrid (bool): Whether to write hybrid coeffs types.
Default to True. If the data object has no hybrid
coeffs types and has large coeffs section, one may
use False to speedup the process. Otherwise the
default is recommended.
Returns:
String representation
"""
lines = LammpsData.get_string(self, distance, velocity, charge, hybrid).splitlines()
info = "# " + " + ".join(
(str(a) + " " + b) if c == 1 else (str(a) + "(" + str(c) + ") " + b)
for a, b, c in zip(self.nums, self.names, self.mols_per_data)
)
lines.insert(1, info)
return "\n".join(lines)
def as_lammpsdata(self):
"""
Convert a CombinedData object to a LammpsData object. Attributes are deep-copied.
box (LammpsBox): Simulation box.
force_field (dict): Data for force field sections. Optional
with default to None. Only keywords in force field and
class 2 force field are valid keys, and each value is a
DataFrame.
topology (dict): Data for topology sections. Optional with
default to None. Only keywords in topology are valid
keys, and each value is a DataFrame.
"""
items = {}
items["box"] = LammpsBox(self.box.bounds, self.box.tilt)
items["masses"] = self.masses.copy()
items["atoms"] = self.atoms.copy()
items["atom_style"] = self.atom_style
items["velocities"] = None # Velocities not supported
if self.force_field:
all_ff_kws = SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]
items["force_field"] = {k: v.copy() for k, v in self.force_field.items() if k in all_ff_kws}
if self.topology:
items["topology"] = {k: v.copy() for k, v in self.topology.items() if k in SECTION_KEYWORDS["topology"]}
return LammpsData(**items)
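# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of assembling a CombinedData object from two molecule
# data files plus packmol coordinates. The filenames ("box.xyz", "ec.lmp",
# "fec.lmp") and the molecule counts below are hypothetical placeholders;
# write_file is assumed to be inherited from LammpsData.
if __name__ == "__main__":
    # parse the packmol output and combine 600 EC with 400 FEC molecules
    combined = CombinedData.from_files("box.xyz", [600, 400], "ec.lmp", "fec.lmp")
    combined.write_file("mix.data")
    # convert back to a plain LammpsData object for downstream use
    plain = combined.as_lammpsdata()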
|
materialsproject/pymatgen
|
pymatgen/io/lammps/data.py
|
Python
|
mit
| 60,452
|
[
"LAMMPS",
"pymatgen"
] |
968cb54512883f047a11560a867ee8de46e255a74c8a40e928428c2b3850795d
|
# Copyright 2015-2016 The James Hutton Institute
# Author: Leighton Pritchard
#
# blast.py
#
# This code is part of the pyrbbh package, and is governed by its licence.
# Please see the LICENSE file that should have been included as part of this
# package.
"""Module to produce BLAST command-line jobs for RBH analysis.
"""
import os
import time
from .config import BLASTP_DEFAULT, BLASTDB_DEFAULT
from . import jobs
# Make a dependency graph of BLAST database and query jobs
def make_blast_jobs(infiles, outdir,
blastp_exe=BLASTP_DEFAULT, blastdb_exe=BLASTDB_DEFAULT,
jobprefix="PYRBBH_%s" % str(int(time.time()))):
"""Returns a list of Job objects that represent BLAST commands required
to conduct RBBH on the list of passed sequence files.
The returned list is essentially a job dependency graph. Individual jobs
record their upstream dependencies on other jobs. In all cases, this
should take the form of each query against a database constructed from
one of the input files being dependent on the database construction job.
The database construction jobs are initially stored in a dictionary, keyed
by the input filestem. Query job dependencies are then determined by
reference to this dictionary.
- infiles - a list of paths to input FASTA files
- outdir - path to directory for BLAST databases/output
- blastp_exe - path to BLASTP executable
- blastdb_exe - path to BLAST database formatting executable
- jobprefix - a string to prefix job IDs if run on SGE scheduler
"""
# Create dictionary of database jobs, keyed by filestem
dbjobs = make_blastdb_jobs(infiles, outdir, blastdb_exe, jobprefix)
# Create list of BLAST query jobs
queryjobs = make_blastp_jobs(infiles, outdir, blastp_exe, jobprefix, dbjobs)
return list(dbjobs.values()) + queryjobs
# Make a dictionary of makeblastdb jobs
def make_blastdb_jobs(infiles, outdir, blastdb_exe, jobprefix):
"""Returns a dictionary of BLAST database construction command-lines,
keyed by the input filestem.
- infiles - a list of paths to input FASTA files
- outdir - path to directory for BLAST databases/output
- blastdb_exe - path to BLAST database formatting executable
- jobprefix - a string to prefix job IDs if run on SGE scheduler
>>> sorted(make_blastdb_jobs(['../tests/seqdata/infile1.fasta', \
'../tests/seqdata/infile2.fasta'], '../tests/output/', 'makeblastdb', \
'RBH_BLAST').items()) #doctest: +ELLIPSIS
[('infile1', <jobs.Job instance at 0x...>), ('infile2', \
<jobs.Job instance at 0x...>)]
"""
# Create dictionary of database jobs
dbjobdict = {}
for idx, fname in enumerate(infiles):
dbcmd, dbname = construct_makeblastdb_cmd(fname, outdir, blastdb_exe)
job = jobs.Job("%s_db_%06d" % (jobprefix, idx), dbcmd)
dbjobdict[dbname] = job
return dbjobdict
# Build a makeblastdb command line
def construct_makeblastdb_cmd(infile, outdir, blastdb_exe):
"""Returns a tuple of (cmd_line, filestem) where cmd_line is the BLAST
database formatting command for the passed filename, placing the result
in outdir, with the same filestem as the input filename.
The formatting assumes that the executable is makeblastdb from BLAST+
- infile - input filename
- outdir - location to write the database
- blastdb_exe - path to BLAST database construction executable
>>> construct_makeblastdb_cmd('../tests/seqdata/infile1.fasta', \
'../tests/output/', 'makeblastdb')
('makeblastdb -dbtype prot -in ../tests/seqdata/infile1.fasta -title \
infile1 -out ../tests/output/infile1.fasta', 'infile1')
"""
filename = os.path.split(infile)[-1] # strip directory
filestem = os.path.splitext(filename)[0] # strip extension
outfname = os.path.join(outdir, filename) # location to write db
cmd = "{0} -dbtype prot -in {1} -title {2} -out {3}".format(blastdb_exe,
infile,
filestem,
outfname)
return (cmd, filestem)
# Make list of BLAST query jobs
def make_blastp_jobs(infiles, outdir, blastp_exe, jobprefix, dbjobs):
"""Returns a list of BLASTP query jobs for RBH analysis.
This requires nested loops over the input files, pairing each file with
every later file so that each pair gets a forward and a reverse query job.
- infiles - a list of paths to input FASTA files
- outdir - path to directory for BLAST output
- blastp_exe - path to BLASTP
- jobprefix - a string to prefix job IDs if run on SGE scheduler
- dbjobs - dictionary of database construction jobs, keyed by filestem
>>> joblist = make_blastp_jobs(['../tests/seqdata/infile1.fasta', \
'../tests/seqdata/infile2.fasta', '../tests/seqdata/infile3.fasta'], \
'../tests/output/', 'makeblastdb', 'RBH_BLAST', {'infile1': 'dbjob1', \
'infile2': 'dbjob2', 'infile3': 'dbjob3'})
>>> [j.name for j in joblist]
['RBH_BLAST_query_000001_fwd', 'RBH_BLAST_query_000001_rev', \
'RBH_BLAST_query_000002_fwd', 'RBH_BLAST_query_000002_rev', \
'RBH_BLAST_query_000003_fwd', 'RBH_BLAST_query_000003_rev']
>>> joblist[0].dependencies
['dbjob2']
"""
# Create list of BLASTP jobs
joblist = []
jobnum = 0
for idx, infile1 in enumerate(infiles):
fname1 = os.path.split(infile1)[-1] # strip directory
fstem1 = os.path.splitext(fname1)[0] # strip extension
dbname1 = os.path.join(outdir, fname1)
for infile2 in infiles[idx+1:]:
jobnum += 1
fname2 = os.path.split(infile2)[-1] # strip directory
fstem2 = os.path.splitext(fname2)[0] # strip extension
dbname2 = os.path.join(outdir, fname2)
cmd1 = construct_blastp_cmd(infile1, dbname2, outdir, blastp_exe)
cmd2 = construct_blastp_cmd(infile2, dbname1, outdir, blastp_exe)
job1 = jobs.Job("%s_query_%06d_fwd" % (jobprefix, jobnum), cmd1)
job2 = jobs.Job("%s_query_%06d_rev" % (jobprefix, jobnum), cmd2)
job1.add_dependency(dbjobs[fstem2]) # add dependency on db job
job2.add_dependency(dbjobs[fstem1])
joblist.extend([job1, job2])
return joblist
# Make a BLASTP query command line
def construct_blastp_cmd(qfile, dbname, outdir, blastp_exe):
"""Returns a single BLASTP command, using the input qfile against the
database dbname, writing results to outdir, using the executable in
blastp_exe.
Output filename is formatted 'qstem_vs_dbstem.out'
The BLASTP command writes a tabular format output file. The formatting
string returns the following information in columns:
qseqid - query sequence ID
sseqid - subject sequence ID
qlen - query sequence length
slen - subject sequence length
bitscore - bitscore of HSP match
length - length of HSP match
nident - number of identical matches in HSP
pident - percentage identity of HSP match (should = nident/length)
qcovhsp - query coverage per HSP
qcovs - query coverage per subject
qstart - HSP start in query
qend - HSP end in query
sstart - HSP start in subject
send - HSP end in subject
>>> construct_blastp_cmd('../tests/seqdata/infile1.fasta', \
'../tests/output/infile2.fasta', '../tests/output', 'blastp')
'blastp -out ../tests/output/infile1_vs_infile2.tab -query \
../tests/seqdata/infile1.fasta -db ../tests/output/infile2.fasta -outfmt 6'
"""
qstem = os.path.splitext(os.path.split(qfile)[-1])[0]
dbstem = os.path.splitext(os.path.split(dbname)[-1])[0]
prefix = os.path.join(outdir, '%s_vs_%s' % (qstem, dbstem))
formatstr = "'6 qseqid sseqid qlen slen bitscore length nident pident \
qcovhsp qcovs qstart qend sstart send'"
cmd = "{0} -out {1}.tab -query {2} -db {3} -outfmt {4}"
return cmd.format(blastp_exe, prefix, qfile, dbname, formatstr)
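# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of building the full RBBH job dependency graph for a
# set of proteomes. The input paths are hypothetical, the executable names
# assume BLAST+ is on the PATH, and jobs.Job is assumed to expose .name and
# .dependencies as the doctests above suggest.
if __name__ == "__main__":
    proteomes = ["seqdata/genome1.fasta", "seqdata/genome2.fasta",
                 "seqdata/genome3.fasta"]
    joblist = make_blast_jobs(proteomes, "output/",
                              blastp_exe="blastp", blastdb_exe="makeblastdb")
    # three database jobs plus one forward and one reverse query per pair
    for job in joblist:
        print(job.name, "depends on", [d.name for d in job.dependencies])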
|
widdowquinn/pyrbbh
|
pyrbbh/blast.py
|
Python
|
mit
| 7,943
|
[
"BLAST"
] |
4d40b5e552073283053af7038e15b94f658de80d44677e9fcd05e763d9670333
|
# -*- test-case-name: buildbot.test.test_web -*-
from zope.interface import implements
from twisted.python import log, components
import urllib
import time, locale
import operator
from buildbot import interfaces, util
from buildbot.status import builder
from buildbot.status.web.base import Box, HtmlResource, IBox, ICurrentBox, \
ITopBox, build_get_class, path_to_build, path_to_step, path_to_root, \
map_branches
def earlier(old, new):
# minimum of two things, but "None" counts as +infinity
if old:
if new < old:
return new
return old
return new
def later(old, new):
# maximum of two things, but "None" counts as -infinity
if old:
if new > old:
return new
return old
return new
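# Illustrative semantics (not original code): None acts as +infinity for
# earlier() and as -infinity for later(), e.g.
#   earlier(None, 10) -> 10    earlier(5, 10) -> 5
#   later(None, 10)   -> 10    later(5, 10)   -> 10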
class CurrentBox(components.Adapter):
# this provides the "current activity" box, just above the builder name
implements(ICurrentBox)
def formatETA(self, prefix, eta):
if eta is None:
return []
if eta < 60:
return ["< 1 min"]
eta_parts = ["~"]
eta_secs = eta
if eta_secs > 3600:
eta_parts.append("%d hrs" % (eta_secs / 3600))
eta_secs %= 3600
if eta_secs > 60:
eta_parts.append("%d mins" % (eta_secs / 60))
eta_secs %= 60
abstime = time.strftime("%H:%M", time.localtime(util.now()+eta))
return [prefix, " ".join(eta_parts), "at %s" % abstime]
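# A worked example of the format above (illustrative, not original code):
# with eta = 3725 seconds and a local time of 13:30,
#   formatETA("ETA in", 3725) -> ["ETA in", "~ 1 hrs 2 mins", "at 14:32"]
#   formatETA("ETA in", 45)   -> ["< 1 min"]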
def getBox(self, status):
# getState() returns offline, idle, or building
state, builds = self.original.getState()
# look for upcoming builds. We say the state is "waiting" if the
# builder is otherwise idle and there is a scheduler which tells us a
# build will be performed some time in the near future. TODO: this
# functionality used to be in BuilderStatus.. maybe this code should
# be merged back into it.
upcoming = []
builderName = self.original.getName()
for s in status.getSchedulers():
if builderName in s.listBuilderNames():
upcoming.extend(s.getPendingBuildTimes())
if state == "idle" and upcoming:
state = "waiting"
if state == "building":
text = ["building"]
if builds:
for b in builds:
eta = b.getETA()
text.extend(self.formatETA("ETA in", eta))
elif state == "offline":
text = ["offline"]
elif state == "idle":
text = ["idle"]
elif state == "waiting":
text = ["waiting"]
else:
# just in case I add a state and forget to update this
text = [state]
# TODO: for now, this pending/upcoming stuff is in the "current
# activity" box, but really it should go into a "next activity" row
# instead. The only times it should show up in "current activity" is
# when the builder is otherwise idle.
# are any builds pending? (waiting for a slave to be free)
pbs = self.original.getPendingBuilds()
if pbs:
text.append("%d pending" % len(pbs))
for t in upcoming:
eta = t - util.now()
text.extend(self.formatETA("next in", eta))
return Box(text, class_="Activity " + state)
components.registerAdapter(CurrentBox, builder.BuilderStatus, ICurrentBox)
class BuildTopBox(components.Adapter):
# this provides a per-builder box at the very top of the display,
# showing the results of the most recent build
implements(IBox)
def getBox(self, req):
assert interfaces.IBuilderStatus(self.original)
branches = [b for b in req.args.get("branch", []) if b]
builder = self.original
builds = list(builder.generateFinishedBuilds(map_branches(branches),
num_builds=1))
if not builds:
return Box(["none"], class_="LastBuild")
b = builds[0]
text = b.getText()
tests_failed = b.getSummaryStatistic('tests-failed', operator.add, 0)
if tests_failed: text.extend(["Failed tests: %d" % tests_failed])
# TODO: maybe add logs?
# TODO: add link to the per-build page at 'url'
class_ = build_get_class(b)
return Box(text, class_="LastBuild %s" % class_)
components.registerAdapter(BuildTopBox, builder.BuilderStatus, ITopBox)
class BuildBox(components.Adapter):
# this provides the yellow "starting line" box for each build
implements(IBox)
def getBox(self, req):
b = self.original
number = b.getNumber()
url = path_to_build(req, b)
reason = b.getReason()
template = req.site.buildbot_service.templates.get_template("box_macros.html")
text = template.module.build_box(reason=reason,url=url,number=number)
class_ = "start"
if b.isFinished() and not b.getSteps():
# the steps have been pruned, so there won't be any indication
# of whether it succeeded or failed.
class_ = build_get_class(b)
return Box([text], class_="BuildStep " + class_)
components.registerAdapter(BuildBox, builder.BuildStatus, IBox)
class StepBox(components.Adapter):
implements(IBox)
def getBox(self, req):
urlbase = path_to_step(req, self.original)
text = self.original.getText()
if text is None:
log.msg("getText() gave None", urlbase)
text = []
text = text[:]
logs = self.original.getLogs()
cxt = dict(text=text, logs=[], urls=[])
for num in range(len(logs)):
name = logs[num].getName()
if logs[num].hasContents():
url = urlbase + "/logs/%s" % urllib.quote(name)
else:
url = None
cxt['logs'].append(dict(name=name, url=url))
for name, target in self.original.getURLs().items():
cxt['urls'].append(dict(link=target,name=name))
template = req.site.buildbot_service.templates.get_template("box_macros.html")
text = template.module.step_box(**cxt)
class_ = "BuildStep " + build_get_class(self.original)
return Box(text, class_=class_)
components.registerAdapter(StepBox, builder.BuildStepStatus, IBox)
class EventBox(components.Adapter):
implements(IBox)
def getBox(self, req):
text = self.original.getText()
class_ = "Event"
return Box(text, class_=class_)
components.registerAdapter(EventBox, builder.Event, IBox)
class Spacer:
implements(interfaces.IStatusEvent)
def __init__(self, start, finish):
self.started = start
self.finished = finish
def getTimes(self):
return (self.started, self.finished)
def getText(self):
return []
class SpacerBox(components.Adapter):
implements(IBox)
def getBox(self, req):
#b = Box(["spacer"], "white")
b = Box([])
b.spacer = True
return b
components.registerAdapter(SpacerBox, Spacer, IBox)
def insertGaps(g, showEvents, lastEventTime, idleGap=2):
debug = False
e = g.next()
starts, finishes = e.getTimes()
if debug: log.msg("E0", starts, finishes)
if finishes == 0:
finishes = starts
if debug: log.msg("E1 finishes=%s, gap=%s, lET=%s" % \
(finishes, idleGap, lastEventTime))
if finishes is not None and finishes + idleGap < lastEventTime:
if debug: log.msg(" spacer0")
yield Spacer(finishes, lastEventTime)
followingEventStarts = starts
if debug: log.msg(" fES0", starts)
yield e
while 1:
e = g.next()
if not showEvents and isinstance(e, builder.Event):
continue
starts, finishes = e.getTimes()
if debug: log.msg("E2", starts, finishes)
if finishes == 0:
finishes = starts
if finishes is not None and finishes + idleGap < followingEventStarts:
# there is a gap between the end of this event and the beginning
# of the next one. Insert an idle event so the waterfall display
# shows a gap here.
if debug:
log.msg(" finishes=%s, gap=%s, fES=%s" % \
(finishes, idleGap, followingEventStarts))
yield Spacer(finishes, followingEventStarts)
yield e
followingEventStarts = starts
if debug: log.msg(" fES1", starts)
class WaterfallHelp(HtmlResource):
title = "Waterfall Help"
def __init__(self, categories=None):
HtmlResource.__init__(self)
self.categories = categories
def content(self, request, cxt):
status = self.getStatus(request)
cxt['show_events_checked'] = request.args.get("show_events", ["false"])[0].lower() == "true"
cxt['branches'] = [b for b in request.args.get("branch", []) if b]
cxt['failures_only'] = request.args.get("failures_only", ["false"])[0].lower() == "true"
cxt['committers'] = [c for c in request.args.get("committer", []) if c]
# this has a set of toggle-buttons to let the user choose the
# builders
show_builders = request.args.get("show", [])
show_builders.extend(request.args.get("builder", []))
cxt['show_builders'] = show_builders
cxt['all_builders'] = status.getBuilderNames(categories=self.categories)
# a couple of radio-button selectors for refresh time will appear
# just after that text
times = [("none", "None"),
("60", "60 seconds"),
("300", "5 minutes"),
("600", "10 minutes"),
]
current_reload_time = request.args.get("reload", ["none"])
if current_reload_time:
current_reload_time = current_reload_time[0]
if current_reload_time not in [t[0] for t in times]:
times.insert(0, (current_reload_time, current_reload_time) )
cxt['times'] = times
cxt['current_reload_time'] = current_reload_time
template = request.site.buildbot_service.templates.get_template("waterfallhelp.html")
return template.render(**cxt)
class WaterfallStatusResource(HtmlResource):
"""This builds the main status page, with the waterfall display, and
all child pages."""
def __init__(self, categories=None, num_events=200, num_events_max=None):
HtmlResource.__init__(self)
self.categories = categories
self.num_events=num_events
self.num_events_max=num_events_max
self.putChild("help", WaterfallHelp(categories))
def getTitle(self, request):
status = self.getStatus(request)
p = status.getProjectName()
if p:
return "BuildBot: %s" % p
else:
return "BuildBot"
def getChangeManager(self, request):
# TODO: this wants to go away, access it through IStatus
return request.site.buildbot_service.getChangeSvc()
def get_reload_time(self, request):
if "reload" in request.args:
try:
reload_time = int(request.args["reload"][0])
return max(reload_time, 15)
except ValueError:
pass
return None
def isSuccess(self, builderStatus):
# Helper function to return True if the builder is not failing.
# The function will return false if the current state is "offline",
# the last build was not successful, or if a step from the current
# build(s) failed.
# Make sure the builder is online.
if builderStatus.getState()[0] == 'offline':
return False
# Look at the last finished build to see if it was success or not.
lastBuild = builderStatus.getLastFinishedBuild()
if lastBuild and lastBuild.getResults() != builder.SUCCESS:
return False
# Check all the current builds to see if one step is already
# failing.
currentBuilds = builderStatus.getCurrentBuilds()
if currentBuilds:
for build in currentBuilds:
for step in build.getSteps():
if step.getResults()[0] == builder.FAILURE:
return False
# The last finished build was successful, and all the current builds
# don't have any failed steps.
return True
def content(self, request, ctx):
status = self.getStatus(request)
ctx['refresh'] = self.get_reload_time(request)
# we start with all Builders available to this Waterfall: this is
# limited by the config-file -time categories= argument, and defaults
# to all defined Builders.
allBuilderNames = status.getBuilderNames(categories=self.categories)
builders = [status.getBuilder(name) for name in allBuilderNames]
# but if the URL has one or more builder= arguments (or the old show=
# argument, which is still accepted for backwards compatibility), we
# use that set of builders instead. We still don't show anything
# outside the config-file time set limited by categories=.
showBuilders = request.args.get("show", [])
showBuilders.extend(request.args.get("builder", []))
if showBuilders:
builders = [b for b in builders if b.name in showBuilders]
# now, if the URL has one or more category= arguments, use them as a
# filter: only show those builders which belong to one of the given
# categories.
showCategories = request.args.get("category", [])
if showCategories:
builders = [b for b in builders if b.category in showCategories]
# If the URL has the failures_only=true argument, we remove all the
# builders that are not currently red or won't be turning red at the end
# of their current run.
failuresOnly = request.args.get("failures_only", ["false"])[0]
if failuresOnly.lower() == "true":
builders = [b for b in builders if not self.isSuccess(b)]
(changeNames, builderNames, timestamps, eventGrid, sourceEvents) = \
self.buildGrid(request, builders)
# start the table: top-header material
locale_enc = locale.getdefaultlocale()[1]
if locale_enc is not None:
locale_tz = unicode(time.tzname[time.localtime()[-1]], locale_enc)
else:
locale_tz = unicode(time.tzname[time.localtime()[-1]])
ctx['tz'] = locale_tz
ctx['changes_url'] = request.childLink("../changes")
bn = ctx['builders'] = []
for name in builderNames:
builder = status.getBuilder(name)
top_box = ITopBox(builder).getBox(request)
current_box = ICurrentBox(builder).getBox(status)
bn.append({'name': name,
'url': request.childLink("../builders/%s" % urllib.quote(name, safe='')),
'top': top_box.text,
'top_class': top_box.class_,
'status': current_box.text,
'status_class': current_box.class_,
})
ctx.update(self.phase2(request, changeNames + builderNames, timestamps, eventGrid,
sourceEvents))
def with_args(req, remove_args=[], new_args=[], new_path=None):
# sigh, nevow makes this sort of manipulation easier
newargs = req.args.copy()
for argname in remove_args:
newargs[argname] = []
if "branch" in newargs:
newargs["branch"] = [b for b in newargs["branch"] if b]
for k,v in new_args:
if k in newargs:
newargs[k].append(v)
else:
newargs[k] = [v]
newquery = "&".join(["%s=%s" % (urllib.quote(k), urllib.quote(v))
for k in newargs
for v in newargs[k]
])
if new_path:
new_url = new_path
elif req.prepath:
new_url = req.prepath[-1]
else:
new_url = ''
if newquery:
new_url += "?" + newquery
return new_url
if timestamps:
bottom = timestamps[-1]
ctx['nextpage'] = with_args(request, ["last_time"],
[("last_time", str(int(bottom)))])
helpurl = path_to_root(request) + "waterfall/help"
ctx['help_url'] = with_args(request, new_path=helpurl)
if self.get_reload_time(request) is not None:
ctx['no_reload_page'] = with_args(request, remove_args=["reload"])
template = request.site.buildbot_service.templates.get_template("waterfall.html")
data = template.render(**ctx)
return data
def buildGrid(self, request, builders):
debug = False
# TODO: see if we can use a cached copy
showEvents = False
if request.args.get("show_events", ["false"])[0].lower() == "true":
showEvents = True
filterCategories = request.args.get('category', [])
filterBranches = [b for b in request.args.get("branch", []) if b]
filterBranches = map_branches(filterBranches)
filterCommitters = [c for c in request.args.get("committer", []) if c]
maxTime = int(request.args.get("last_time", [util.now()])[0])
if "show_time" in request.args:
minTime = maxTime - int(request.args["show_time"][0])
elif "first_time" in request.args:
minTime = int(request.args["first_time"][0])
elif filterBranches or filterCommitters:
minTime = util.now() - 24 * 60 * 60
else:
minTime = 0
spanLength = 10 # ten-second chunks
req_events=int(request.args.get("num_events", [self.num_events])[0])
if self.num_events_max and req_events > self.num_events_max:
maxPageLen = self.num_events_max
else:
maxPageLen = req_events
# first step is to walk backwards in time, asking each column
# (commit, all builders) if they have any events there. Build up the
# array of events, and stop when we have a reasonable number.
commit_source = self.getChangeManager(request)
lastEventTime = util.now()
sources = [commit_source] + builders
changeNames = ["changes"]
builderNames = [b.getName() for b in builders]
sourceNames = changeNames + builderNames
sourceEvents = []
sourceGenerators = []
def get_event_from(g):
try:
while True:
e = g.next()
# e might be builder.BuildStepStatus,
# builder.BuildStatus, builder.Event,
# waterfall.Spacer(builder.Event), or changes.Change .
# The showEvents=False flag means we should hide
# builder.Event .
if not showEvents and isinstance(e, builder.Event):
continue
break
event = interfaces.IStatusEvent(e)
if debug:
log.msg("gen %s gave1 %s" % (g, event.getText()))
except StopIteration:
event = None
return event
for s in sources:
gen = insertGaps(s.eventGenerator(filterBranches,
filterCategories,
filterCommitters,
minTime),
showEvents,
lastEventTime)
sourceGenerators.append(gen)
# get the first event
sourceEvents.append(get_event_from(gen))
eventGrid = []
timestamps = []
lastEventTime = 0
for e in sourceEvents:
if e and e.getTimes()[0] > lastEventTime:
lastEventTime = e.getTimes()[0]
if lastEventTime == 0:
lastEventTime = util.now()
spanStart = lastEventTime - spanLength
debugGather = 0
while 1:
if debugGather: log.msg("checking (%s,]" % spanStart)
# the tableau of potential events is in sourceEvents[]. The
# window crawls backwards, and we examine one source at a time.
# If the source's top-most event is in the window, it is pushed
# onto the events[] array and the tableau is refilled. This
# continues until the tableau event is not in the window (or is
# missing).
spanEvents = [] # for all sources, in this span. row of eventGrid
firstTimestamp = None # timestamp of first event in the span
lastTimestamp = None # last pre-span event, for next span
for c in range(len(sourceGenerators)):
events = [] # for this source, in this span. cell of eventGrid
event = sourceEvents[c]
while event and spanStart < event.getTimes()[0]:
# to look at windows that don't end with the present,
# condition the .append on event.time <= spanFinish
if not IBox(event, None):
log.msg("BAD EVENT", event, event.getText())
assert 0
if debug:
log.msg("pushing", event.getText(), event)
events.append(event)
starts, finishes = event.getTimes()
firstTimestamp = earlier(firstTimestamp, starts)
event = get_event_from(sourceGenerators[c])
if debug:
log.msg("finished span")
if event:
# this is the last pre-span event for this source
lastTimestamp = later(lastTimestamp,
event.getTimes()[0])
if debugGather:
log.msg(" got %s from %s" % (events, sourceNames[c]))
sourceEvents[c] = event # refill the tableau
spanEvents.append(events)
# only show events older than maxTime. This makes it possible to
# visit a page that shows what it would be like to scroll off the
# bottom of this one.
if firstTimestamp is not None and firstTimestamp <= maxTime:
eventGrid.append(spanEvents)
timestamps.append(firstTimestamp)
if lastTimestamp:
spanStart = lastTimestamp - spanLength
else:
# no more events
break
if minTime is not None and lastTimestamp < minTime:
break
if len(timestamps) > maxPageLen:
break
# now loop
# loop is finished. now we have eventGrid[] and timestamps[]
if debugGather: log.msg("finished loop")
assert(len(timestamps) == len(eventGrid))
return (changeNames, builderNames, timestamps, eventGrid, sourceEvents)
def phase2(self, request, sourceNames, timestamps, eventGrid,
sourceEvents):
if not timestamps:
return dict(grid=[], gridlen=0)
# first pass: figure out the height of the chunks, populate grid
grid = []
for i in range(1+len(sourceNames)):
grid.append([])
# grid is a list of columns, one for the timestamps, and one per
# event source. Each column is exactly the same height. Each element
# of the list is a single <td> box.
lastDate = time.strftime("%d %b %Y",
time.localtime(util.now()))
for r in range(0, len(timestamps)):
chunkstrip = eventGrid[r]
# chunkstrip is a horizontal strip of event blocks. Each block
# is a vertical list of events, all for the same source.
assert(len(chunkstrip) == len(sourceNames))
maxRows = max(len(x) for x in chunkstrip)
for i in range(maxRows):
if i != maxRows-1:
grid[0].append(None)
else:
# timestamp goes at the bottom of the chunk
stuff = []
# add the date at the beginning (if it is not the same as
# today's date), and each time it changes
todayday = time.strftime("%a",
time.localtime(timestamps[r]))
today = time.strftime("%d %b %Y",
time.localtime(timestamps[r]))
if today != lastDate:
stuff.append(todayday)
stuff.append(today)
lastDate = today
stuff.append(
time.strftime("%H:%M:%S",
time.localtime(timestamps[r])))
grid[0].append(Box(text=stuff, class_="Time",
valign="bottom", align="center"))
# at this point the timestamp column has been populated with
# maxRows boxes, most None but the last one has the time string
for c in range(0, len(chunkstrip)):
block = chunkstrip[c]
assert(block != None) # should be [] instead
for i in range(maxRows - len(block)):
# fill top of chunk with blank space
grid[c+1].append(None)
for i in range(len(block)):
# so the events are bottom-justified
b = IBox(block[i]).getBox(request)
b.parms['valign'] = "top"
b.parms['align'] = "center"
grid[c+1].append(b)
# now all the other columns have maxRows new boxes too
# populate the last row, if empty
gridlen = len(grid[0])
for i in range(len(grid)):
strip = grid[i]
assert(len(strip) == gridlen)
if strip[-1] == None:
if sourceEvents[i-1]:
filler = IBox(sourceEvents[i-1]).getBox(request)
else:
# this can happen if you delete part of the build history
filler = Box(text=["?"], align="center")
strip[-1] = filler
strip[-1].parms['rowspan'] = 1
# second pass: bubble the events upwards to un-occupied locations
# Every square of the grid that has a None in it needs to have
# something else take its place.
noBubble = request.args.get("nobubble",['0'])
noBubble = int(noBubble[0])
if not noBubble:
for col in range(len(grid)):
strip = grid[col]
if col == 1: # changes are handled differently
for i in range(2, len(strip)+1):
# only merge empty boxes. Don't bubble commit boxes.
if strip[-i] == None:
next = strip[-i+1]
assert(next)
if next:
#if not next.event:
if next.spacer:
# bubble the empty box up
strip[-i] = next
strip[-i].parms['rowspan'] += 1
strip[-i+1] = None
else:
# we are above a commit box. Leave it
# be, and turn the current box into an
# empty one
strip[-i] = Box([], rowspan=1,
comment="commit bubble")
strip[-i].spacer = True
else:
# we are above another empty box, which
# somehow wasn't already converted.
# Shouldn't happen
pass
else:
for i in range(2, len(strip)+1):
# strip[-i] will go from next-to-last back to first
if strip[-i] == None:
# bubble previous item up
assert(strip[-i+1] != None)
strip[-i] = strip[-i+1]
strip[-i].parms['rowspan'] += 1
strip[-i+1] = None
else:
strip[-i].parms['rowspan'] = 1
# convert to dicts
for i in range(gridlen):
for strip in grid:
if strip[i]:
strip[i] = strip[i].td()
return dict(grid=grid, gridlen=gridlen, no_bubble=noBubble, time=lastDate)
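# --- Illustrative sketch (not part of the original module) ---
# The "bubble upwards" pass in phase2 can be hard to follow; this
# self-contained toy reproduces its non-changes branch: walking a column
# bottom-up, each None cell is filled by pulling the box below it up and
# growing that box's rowspan. Box here is a stand-in dict, not the real
# buildbot Box class.
def bubble_column(strip):
    for i in range(2, len(strip) + 1):
        if strip[-i] is None:
            strip[-i] = strip[-i + 1]       # pull the lower box up
            strip[-i]["rowspan"] += 1
            strip[-i + 1] = None
        else:
            strip[-i]["rowspan"] = 1
    return strip

if __name__ == "__main__":
    col = [None, None, {"text": "build 12", "rowspan": 1}]
    print(bubble_column(col))
    # -> [{'text': 'build 12', 'rowspan': 3}, None, None]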
|
centrumholdings/buildbot
|
buildbot/status/web/waterfall.py
|
Python
|
gpl-2.0
| 29,662
|
[
"VisIt"
] |
6581e2c147db1e55f1e4954a1b0421b98cbaf3522756ca703ee7e9451213b407
|
from cloudbio.galaxy.tools import _install_application
def install_tool(options):
version = options.get("galaxy_tool_version")
name = options.get("galaxy_tool_name")
install_dir = options.get("galaxy_tool_dir", None)
_install_application(name, version, tool_install_dir=install_dir)
configure_actions = {
"install_galaxy_tool": install_tool,
}
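# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of invoking the plugin action by hand; the tool name,
# version, and install directory below are hypothetical placeholders, and
# options.get() works on a plain dict.
if __name__ == "__main__":
    configure_actions["install_galaxy_tool"]({
        "galaxy_tool_name": "bwa",
        "galaxy_tool_version": "0.7.17",
        "galaxy_tool_dir": "/opt/galaxy/tools",
    })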
|
heuermh/cloudbiolinux
|
cloudbio/deploy/plugins/galaxy.py
|
Python
|
mit
| 368
|
[
"Galaxy"
] |
ae9a0ac69a2f6a7f84838c342d40e7453ad6812f92708b8e7bf70a0c284a5a71
|
"""
Records who we trust to sign feeds.
Trust is divided up into domains, so that it is possible to trust a key
in some cases and not others.
@var trust_db: Singleton trust database instance.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _, SafeException, logger
import os
from zeroinstall import support
from zeroinstall.support import basedir, tasks
from .namespaces import config_site, config_prog, XMLNS_TRUST
KEY_INFO_TIMEOUT = 10 # Maximum time to wait for response from key-info-server
class TrustDB(object):
"""A database of trusted keys.
@ivar keys: maps trusted key fingerprints to the set of domains in which each key is trusted
@type keys: {str: set(str)}
@ivar watchers: callbacks invoked by L{notify}
@see: L{trust_db} - the singleton instance of this class"""
__slots__ = ['keys', 'watchers']
def __init__(self):
self.keys = None
self.watchers = []
def is_trusted(self, fingerprint, domain = None):
self.ensure_uptodate()
domains = self.keys.get(fingerprint, None)
if not domains: return False # Unknown key
if domain is None:
return True # Deprecated
return domain in domains or '*' in domains
def get_trust_domains(self, fingerprint):
"""Return the set of domains in which this key is trusted.
If the list includes '*' then the key is trusted everywhere.
@since: 0.27
"""
self.ensure_uptodate()
return self.keys.get(fingerprint, set())
def get_keys_for_domain(self, domain):
"""Return the set of keys trusted for this domain.
@since: 0.27"""
self.ensure_uptodate()
return set([fp for fp in self.keys
if domain in self.keys[fp]])
def trust_key(self, fingerprint, domain = '*'):
"""Add key to the list of trusted fingerprints.
@param fingerprint: base 16 fingerprint without any spaces
@type fingerprint: str
@param domain: domain in which key is to be trusted
@type domain: str
@note: call L{notify} after trusting one or more new keys"""
if self.is_trusted(fingerprint, domain): return
int(fingerprint, 16) # Ensure fingerprint is valid
if fingerprint not in self.keys:
self.keys[fingerprint] = set()
#if domain == '*':
# warn("Calling trust_key() without a domain is deprecated")
self.keys[fingerprint].add(domain)
self.save()
def untrust_key(self, key, domain = '*'):
self.ensure_uptodate()
self.keys[key].remove(domain)
if not self.keys[key]:
# No more domains for this key
del self.keys[key]
self.save()
def save(self):
from xml.dom import minidom
import tempfile
doc = minidom.Document()
root = doc.createElementNS(XMLNS_TRUST, 'trusted-keys')
root.setAttribute('xmlns', XMLNS_TRUST)
doc.appendChild(root)
for fingerprint in self.keys:
keyelem = doc.createElementNS(XMLNS_TRUST, 'key')
root.appendChild(keyelem)
keyelem.setAttribute('fingerprint', fingerprint)
for domain in self.keys[fingerprint]:
domainelem = doc.createElementNS(XMLNS_TRUST, 'domain')
domainelem.setAttribute('value', domain)
keyelem.appendChild(domainelem)
d = basedir.save_config_path(config_site, config_prog)
with tempfile.NamedTemporaryFile(dir = d, prefix = 'trust-', delete = False, mode = 'wt') as tmp:
doc.writexml(tmp, indent = "", addindent = " ", newl = "\n", encoding = 'utf-8')
support.portable_rename(tmp.name, os.path.join(d, 'trustdb.xml'))
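# Note (illustrative comment, not original code): writing to a
# NamedTemporaryFile in the target directory and then renaming it over
# trustdb.xml makes the update atomic, so a crash mid-write never leaves
# a truncated database behind.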
def notify(self):
"""Call all watcher callbacks.
This should be called after trusting or untrusting one or more new keys.
@since: 0.25"""
for w in self.watchers: w()
def ensure_uptodate(self):
from xml.dom import minidom
# This is a bit inefficient... (could cache things)
self.keys = {}
trust = basedir.load_first_config(config_site, config_prog, 'trustdb.xml')
if trust:
keys = minidom.parse(trust).documentElement
for key in keys.getElementsByTagNameNS(XMLNS_TRUST, 'key'):
domains = set()
self.keys[key.getAttribute('fingerprint')] = domains
for domain in key.getElementsByTagNameNS(XMLNS_TRUST, 'domain'):
domains.add(domain.getAttribute('value'))
else:
# Convert old database to XML format
trust = basedir.load_first_config(config_site, config_prog, 'trust')
if trust:
#print "Loading trust from", trust_db
with open(trust, 'rt') as stream:
for key in stream:
if key:
self.keys[key] = set(['*'])
def domain_from_url(url):
"""Extract the trust domain for a URL.
@param url: the feed's URL
@type url: str
@return: the trust domain
@rtype: str
@since: 0.27
@raise SafeException: the URL can't be parsed"""
try:
import urlparse
except ImportError:
from urllib import parse as urlparse # Python 3
if os.path.isabs(url):
raise SafeException(_("Can't get domain from a local path: '%s'") % url)
domain = urlparse.urlparse(url)[1]
if domain and domain != '*':
return domain
raise SafeException(_("Can't extract domain from URL '%s'") % url)
trust_db = TrustDB()
class TrustMgr(object):
"""A TrustMgr handles the process of deciding whether to trust new keys
(contacting the key information server, prompting the user, accepting automatically, etc)
@since: 0.53"""
__slots__ = ['config', '_current_confirm']
def __init__(self, config):
self.config = config
self._current_confirm = None # (a lock to prevent asking the user multiple questions at once)
@tasks.async
def confirm_keys(self, pending):
"""We don't trust any of the signatures yet. Collect information about them and add the keys to the
trusted list, possibly after confirming with the user (via config.handler).
Updates the L{trust} database, and then calls L{trust.TrustDB.notify}.
@since: 0.53
@arg pending: an object holding details of the updated feed
@type pending: L{PendingFeed}
@return: A blocker that triggers when the user has chosen, or None if already done.
@rtype: None | L{Blocker}"""
assert pending.sigs
from zeroinstall.injector import gpg
valid_sigs = [s for s in pending.sigs if isinstance(s, gpg.ValidSig)]
if not valid_sigs:
def format_sig(sig):
msg = str(sig)
if sig.messages:
msg += "\nMessages from GPG:\n" + sig.messages
return msg
raise SafeException(_('No valid signatures found on "%(url)s". Signatures:%(signatures)s') %
{'url': pending.url, 'signatures': ''.join(['\n- ' + format_sig(s) for s in pending.sigs])})
# Start downloading information about the keys...
fetcher = self.config.fetcher
kfs = {}
for sig in valid_sigs:
kfs[sig] = fetcher.fetch_key_info(sig.fingerprint)
# Wait up to KEY_INFO_TIMEOUT seconds for key information to arrive. Avoids having the dialog
# box update while the user is looking at it, and may allow it to be skipped completely in some
# cases.
timeout = tasks.TimeoutBlocker(KEY_INFO_TIMEOUT, "key info timeout")
while True:
key_info_blockers = [sig_info.blocker for sig_info in kfs.values() if sig_info.blocker is not None]
if not key_info_blockers:
break
logger.info("Waiting for response from key-info server: %s", key_info_blockers)
yield [timeout] + key_info_blockers
if timeout.happened:
logger.info("Timeout waiting for key info response")
break
# If we're already confirming something else, wait for that to finish...
while self._current_confirm is not None:
logger.info("Waiting for previous key confirmations to finish")
yield self._current_confirm
domain = domain_from_url(pending.url)
if self.config.auto_approve_keys:
existing_feed = self.config.iface_cache.get_feed(pending.url)
if not existing_feed:
changes = False
for sig, kf in kfs.items():
for key_info in kf.info:
if key_info.getAttribute("vote") == "good":
logger.info(_("Automatically approving key for new feed %s based on response from key info server"), pending.url)
trust_db.trust_key(sig.fingerprint, domain)
changes = True
if changes:
trust_db.notify()
# Check whether we still need to confirm. The user may have
# already approved one of the keys while dealing with another
# feed, or we may have just auto-approved it.
for sig in kfs:
is_trusted = trust_db.is_trusted(sig.fingerprint, domain)
if is_trusted:
return
# Take the lock and confirm this feed
self._current_confirm = lock = tasks.Blocker('confirm key lock')
try:
done = self.config.handler.confirm_import_feed(pending, kfs)
if done is not None:
yield done
tasks.check(done)
finally:
self._current_confirm = None
lock.trigger()
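# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of the trust API. The fingerprint and URL below are
# hypothetical, and trust_key() writes to the user's trustdb.xml, so this
# is shown for illustration rather than casual running.
if __name__ == "__main__":
    url = "http://example.com/feeds/prog.xml"
    domain = domain_from_url(url)            # -> "example.com"
    fingerprint = "0123456789ABCDEF0123456789ABCDEF01234567"
    trust_db.trust_key(fingerprint, domain)
    assert trust_db.is_trusted(fingerprint, domain)
    print(trust_db.get_trust_domains(fingerprint))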
|
timdiels/0install
|
zeroinstall/injector/trust.py
|
Python
|
lgpl-2.1
| 8,547
|
[
"VisIt"
] |
b964cf3b94b0f311ffc499b1c731ab9facb0e972e00caf8bcc6b0bf3ad81bbc5
|
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2014 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Batch
Batches
=======
Batches allow you to optimize the number of GL calls by using pyglet's batch rendering.
"""
from __future__ import division, print_function, unicode_literals
__docformat__ = 'restructuredtext'
from cocos.cocosnode import CocosNode
import pyglet
from pyglet.graphics import OrderedGroup
from pyglet import image
from pyglet.gl import *
__all__ = ['BatchNode','BatchableNode']
def ensure_batcheable(node):
if not isinstance(node, BatchableNode):
raise Exception("Children node of a batch must be of class BatchableNode")
for c in node.get_children():
ensure_batcheable(c)
class BatchNode( CocosNode ):
def __init__(self):
super(BatchNode, self).__init__()
self.batch = pyglet.graphics.Batch()
self.groups = {}
def add(self, child, z=0, name=None):
ensure_batcheable(child)
child.set_batch(self.batch, self.groups, z)
super(BatchNode, self).add(child, z, name)
def visit(self):
""" All children are placed in to self.batch, so nothing to visit """
if not self.visible:
return
glPushMatrix()
self.transform()
self.batch.draw()
glPopMatrix()
def remove(self, child):
if isinstance(child, str):
child_node = self.get(child)
else:
child_node = child
child_node.set_batch(None)
super(BatchNode, self).remove(child)
def draw(self):
pass # All drawing done in visit!
class BatchableNode( CocosNode ):
def add(self, child, z=0, name=None):
batchnode = self.get_ancestor(BatchNode)
if not batchnode:
# this node was added, but there's no BatchNode in the
# hierarchy, so we proceed as normal
super(BatchableNode, self).add(child, z, name)
return
ensure_batcheable(child)
super(BatchableNode, self).add(child, z, name)
child.set_batch(self.batch, batchnode.groups, z)
def remove(self, child):
if isinstance(child, str):
child_node = self.get(child)
else:
child_node = child
child_node.set_batch(None)
super(BatchableNode, self).remove(child)
def set_batch(self, batch, groups=None, z=0):
self.batch = batch
if batch is None:
self.group = None
else:
group = groups.get(z)
if group is None:
group = pyglet.graphics.Group()
groups[z] = group
self.group = group
for childZ, child in self.children:
child.set_batch(self.batch, groups, z + childZ)
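# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of batching sprites under a BatchNode. It assumes
# cocos.sprite.Sprite is a BatchableNode subclass (as in cocos2d) and
# that a "ball.png" image file exists; both are assumptions here.
if __name__ == "__main__":
    import cocos
    from cocos.director import director

    director.init()
    layer = cocos.layer.Layer()
    batch = BatchNode()
    layer.add(batch)
    for x in range(0, 600, 60):
        batch.add(cocos.sprite.Sprite("ball.png", position=(x, 240)))
    # all sprites are now drawn with a single batch.draw() call in visit()
    director.run(cocos.scene.Scene(layer))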
|
google-code-export/los-cocos
|
cocos/batch.py
|
Python
|
bsd-3-clause
| 4,493
|
[
"VisIt"
] |
a03b01e2b9b59574cd9edb9c27ca16eccc76fc66ef0a435c8d410b9a1f76a05f
|
"""
Definition of the direction class
"""
import os
import logging
from astropy.coordinates import Angle
import numpy as np
import lsmtool
from lsmtool.operations_lib import radec2xy
import matplotlib.path as mplPath
from scipy.ndimage import gaussian_filter
from scipy.special import erf
import sys
import glob
class Direction(object):
"""
Generic direction class
A direction object holds all the parameters needed for an operation in a
given direction (facet).
Note:
All attributes needed by the pipeline templates should be set on the class
instance so that they can be passed with self.__dict__
Parameters
----------
name : str
Name of direction
ra : float
RA in degrees of calibrator center
dec : float
Dec in degrees of calibrator center
mscale_selfcal_do : bool
Use multiscale clean for selfcal?
mscale_facet_do : bool
Use multiscale clean for facet field?
cal_imsize : int
Size of calibrator image in 1.5 arcsec pixels
solint_p : int
Solution interval for phase calibration (# of time slots)
solint_a : int
Solution interval for amplitude calibration (# of time slots)
dynamic_range : str
LD (low dynamic range) or HD (high dynamic range)
region_selfcal : str
Region for clean mask for calibrator selfcal
region_field : str
Region for clean mask for facet image
peel_skymodel : str
Sky model for peeling
outlier_do : bool
If True, peel source without selfcal
factor_working_dir : str
Full path of working directory
cal_size_deg : float, optional
Size in degrees of calibrator source(s)
"""
def __init__(self, name, ra, dec, mscale_selfcal_do=False, mscale_facet_do=False,
cal_imsize=512, solint_p=1, solint_a=30, dynamic_range='LD',
region_selfcal='empty', region_field='empty', peel_skymodel='empty',
outlier_do=False, factor_working_dir='', cal_size_deg=None):
self.name = name
self.log = logging.getLogger('factor:{0}'.format(self.name))
if type(ra) is str:
ra = Angle(ra).to('deg').value
if type(dec) is str:
dec = Angle(dec).to('deg').value
self.ra = ra
self.dec = dec
self.mscale_selfcal_do = mscale_selfcal_do
self.mscale_facet_do = mscale_facet_do
self.cal_imsize = cal_imsize
self.solint_time_p = solint_p
self.solint_time_a = solint_a
self.dynamic_range = dynamic_range
if self.dynamic_range.lower() not in ['ld', 'hd']:
self.log.error('Dynamic range is "{}" but must be either "LD" or "HD".'.format(self.dynamic_range))
sys.exit(1)
if region_selfcal.lower() == 'empty':
# Set to empty list (casa format)
self.region_selfcal = '[]'
else:
if not os.path.exists(region_selfcal):
self.log.error('Calibrator clean-mask region file {} not found.'.format(region_selfcal))
sys.exit(1)
self.region_selfcal = '["{0}"]'.format(region_selfcal)
self.log.info('Using calibrator clean-mask region file {}'.format(self.region_selfcal))
self.region_field = region_field
if self.region_field.lower() == 'empty':
self.region_field = '[]'
elif not os.path.exists(self.region_field):
self.log.error('Facet region file {} not found.'.format(self.region_field))
sys.exit(1)
else:
self.log.info('Using facet clean-mask region file {}'.format(self.region_field))
self.peel_skymodel = peel_skymodel
if self.peel_skymodel.lower() == 'empty':
self.peel_skymodel = None
elif not os.path.exists(self.peel_skymodel):
self.log.error('Peel sky model file {} not found.'.format(self.peel_skymodel))
sys.exit(1)
else:
self.log.info('Using sky model file {} for selfcal/peeling'.format(self.peel_skymodel))
self.is_outlier = outlier_do
self.cal_size_deg = cal_size_deg
# Initialize some parameters to default/initial values
self.selfcal_ok = False # whether selfcal succeeded
self.wsclean_nchannels = 1 # set number of wide-band channels
self.use_new_sub_data = False # set flag that tells which subtracted-data column to use
self.cellsize_verify_deg = 0.00833 # verify subtract cell size
self.target_rms_rad = 0.2 # preaverage target rms
self.subtracted_data_colname = 'SUBTRACTED_DATA_ALL' # name of empty data column
self.use_compression = False # whether to use Dysco compression
self.pre_average = False # whether to use baseline averaging
self.peel_calibrator = False # whether to peel calibrator before imaging
self.solve_all_correlations = False # whether to solve for all corrs for slow gain
self.do_reset = False # whether to reset this direction
self.is_patch = False # whether direction is just a patch (not full facet)
self.skymodel = None # direction's sky model
self.use_existing_data = False # whether to use existing data for reimaging
self.full_res_facetimage_freqstep = None # frequency step of existing data
self.full_res_facetimage_timestep = None # time step of existing data
self.average_image_data = False # whether to average the existing data before imaging them
self.facet_imsize = None # size of facet image (None for patch and field directions)
self.started_operations = []
self.completed_operations = []
self.reset_operations = []
self.cleanup_mapfiles = []
self.preapply_phase_cal = False
self.create_preapply_h5parm = False
self.contains_target = False # whether this direction contains the target (if any)
self.skip_selfcal_source_detection = False # whether to do source detection to update supplied clean mask
# Define some directories and files
self.working_dir = factor_working_dir
self.save_file = os.path.join(self.working_dir, 'state',
self.name+'_save.pkl')
self.vertices_file = self.save_file
def set_cal_size(self, selfcal_cellsize_arcsec):
"""
Sets the calibrator image size from the calibrator size (or vice versa)
Parameters
----------
selfcal_cellsize_arcsec : float
Cellsize for selfcal imaging
"""
self.cellsize_selfcal_deg = selfcal_cellsize_arcsec / 3600.0
if self.cal_size_deg is None:
# Set calibrator size from cal_imsize assuming 50% padding
if self.cal_imsize == 0:
self.log.error('The cal_imsize must be specified in the directions '
'file if cal_size_deg is not specified')
sys.exit(1)
else:
self.cal_size_deg = self.cal_imsize * self.cellsize_selfcal_deg / 1.5
self.cal_imsize = max(512, self.get_optimum_size(self.cal_size_deg
/ self.cellsize_selfcal_deg / 0.6))
else:
if self.cal_imsize == 0:
# Set image size to size of calibrator, padded to 40% extra
# (the padded region is not cleaned)
self.cal_imsize = max(512, self.get_optimum_size(self.cal_size_deg
/ self.cellsize_selfcal_deg / 0.6))
self.cal_imsize = max(512, self.cal_imsize) # ensure size is at least 512
self.cal_radius_deg = self.cal_size_deg / 2.0
self.cal_rms_box = self.cal_size_deg / self.cellsize_selfcal_deg
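# A worked example of the sizing above (illustrative, not original code):
# with selfcal_cellsize_arcsec = 1.5 and cal_size_deg = 0.1, the cellsize
# is 1.5/3600 deg, so cal_size_deg / cellsize / 0.6 = 400 pixels, and the
# max(512, ...) floor gives cal_imsize = 512 (modulo any rounding done by
# get_optimum_size).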
def set_imcal_parameters(self, parset, bands, facet_cellsize_arcsec=None,
facet_robust=None, facet_taper_arcsec=None, facet_min_uv_lambda=None,
imaging_only=False, use_existing_data=False, existing_data_freqstep=None,
existing_data_timestep=None):
"""
Sets various parameters for imaging and calibration
Parameters
----------
parset : dict
Parset of operation
bands : list of Band objects
Bands for this operation
facet_cellsize_arcsec : float, optional
Pixel size in arcsec for facet imaging
facet_robust : float, optional
Briggs robust parameter for facet imaging
facet_taper_arcsec : float, optional
Taper in arcsec for facet imaging
imaging_only : bool, optional
If True, set only imaging-related parameters
use_existing_data : bool, optional
If True, existing, potentially averaged, data are to be used instead
of the unaveraged data
existing_data_freqstep : int, optional
The freqstep used to average the existing data
existing_data_timestep : int, optional
The timestep used to average the existing data
"""
mean_freq_mhz = np.mean([b.freq for b in bands]) / 1e6
min_peak_smearing_factor = 1.0 - parset['imaging_specific']['max_peak_smearing']
padding = parset['imaging_specific']['wsclean_image_padding']
wsclean_nchannels_factor = parset['imaging_specific']['wsclean_nchannels_factor']
chan_width_hz = bands[0].chan_width_hz
nchan = bands[0].nchan
timestep_sec = bands[0].timepersample
ntimes = bands[0].minSamplesPerFile
if (use_existing_data and existing_data_freqstep is not None and
existing_data_timestep is not None):
# Adjust the above values to match the existing data if necessary
chan_width_hz *= existing_data_freqstep
nchan /= existing_data_freqstep
timestep_sec *= existing_data_timestep
ntimes /= existing_data_timestep
nbands, max_gap = self.get_nbands(bands)
preaverage_flux_jy = parset['calibration_specific']['preaverage_flux_jy']
if self.preapply_phase_cal:
# If dir-dependent phase solutions are preapplied, we can solve for
# just a single TEC solution across the whole bandwidth. To do this,
# we can just set the TEC block to a large value
tec_block_mhz = 100.0
else:
tec_block_mhz = parset['calibration_specific']['tec_block_mhz']
peel_flux_jy = parset['calibration_specific']['peel_flux_jy']
self.robust_selfcal = parset['imaging_specific']['selfcal_robust']
self.solve_min_uv_lambda = parset['calibration_specific']['solve_min_uv_lambda']
self.selfcal_min_uv_lambda = parset['imaging_specific']['selfcal_min_uv_lambda']
self.selfcal_multiscale_scales_pixel = parset['imaging_specific']['selfcal_multiscale_scales_pixel']
self.frac_bandwidth_selfcal_facet_image = parset['imaging_specific']['fractional_bandwidth_selfcal_facet_image']
self.nbands_selfcal_facet_image = min(6, len(bands))
self.use_idg = parset['imaging_specific']['wsclean_use_idg']
if self.use_idg:
self.idg_arg = '-use-idg,'
else:
self.idg_arg = ''
self.idg_mode = parset['imaging_specific']['idg_mode']
self.approximatetec = parset['calibration_specific']['approximatetec']
self.maxapproxiter = parset['calibration_specific']['maxapproxiter']
self.maxiter = parset['calibration_specific']['maxiter']
self.propagatesolutions = parset['calibration_specific']['propagatesolutions']
self.stepsize = parset['calibration_specific']['stepsize']
self.tolerance = parset['calibration_specific']['tolerance']
if facet_cellsize_arcsec is None:
facet_cellsize_arcsec = parset['imaging_specific']['selfcal_cellsize_arcsec']
self.cellsize_facet_deg = facet_cellsize_arcsec / 3600.0
self.cellsize_facet_med_deg = self.cellsize_facet_deg * 4.0
self.cellsize_facet_low_deg = self.cellsize_facet_deg * 16.0
if facet_robust is None:
facet_robust = parset['imaging_specific']['selfcal_robust']
self.robust_facet = facet_robust
if facet_taper_arcsec is None:
facet_taper_arcsec = 0.0
self.taper_facet_arcsec = facet_taper_arcsec
if facet_min_uv_lambda is None:
facet_min_uv_lambda = 80.0
self.facet_min_uv_lambda = facet_min_uv_lambda
self.facet_multiscale_scales_pixel = parset['imaging_specific']['facet_multiscale_scales_pixel']
self.set_imaging_parameters(nbands, self.nbands_selfcal_facet_image,
self.frac_bandwidth_selfcal_facet_image, padding)
self.set_averaging_steps_and_solution_intervals(chan_width_hz, nchan,
timestep_sec, ntimes, nbands, mean_freq_mhz, self.skymodel,
preaverage_flux_jy, min_peak_smearing_factor, tec_block_mhz,
peel_flux_jy, imaging_only=imaging_only)
# Set channelsout for wide-band imaging with WSClean. Note that the
# number of WSClean channels must be an even divisor of the total number
# of channels in the full bandwidth after averaging to prevent
# mismatches during the predict step on the unaveraged data. For selfcal,
# we want 2 bands per WSClean channel and fit a third-order polynomial
# using 3 averaged channels
#
# Also define the image suffixes (which depend on whether or not
# wide-band clean is done)
if wsclean_nchannels_factor < max_gap + 1:
# Ensure we can cover any frequency gaps with a single WSClean channel
wsclean_nchannels_factor = max_gap + 1
self.log.info('Detected a frequency gap of {0} bands; setting wsclean_nchannels_factor '
'to {1} to avoid having a fully flagged WSClean channel'.format(max_gap, wsclean_nchannels_factor))
if self.use_wideband:
self.wsclean_nchannels = max(1, int(np.ceil(nbands / float(wsclean_nchannels_factor))))
nchan_after_avg = nchan * nbands / self.facetimage_freqstep
self.nband_pad = 0 # padding to allow self.wsclean_nchannels to be a divisor
while nchan_after_avg % self.wsclean_nchannels:
self.nband_pad += 1
nchan_after_avg = nchan * (nbands + self.nband_pad) / self.facetimage_freqstep
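# Worked example (illustrative numbers): with nbands = 11 and
# wsclean_nchannels_factor = 2, wsclean_nchannels = ceil(11/2) = 6; for
# nchan = 64 and facetimage_freqstep = 16, nchan_after_avg = 64*11/16 = 44,
# which is not divisible by 6, so one padding band is added (nband_pad = 1)
# to reach 48 channels = 6 x 8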
if self.wsclean_nchannels > nbands:
self.wsclean_nchannels = nbands
if hasattr(self, 'facetselfcal_freqstep'):
wsclean_nchannels_factor_selfcal = max(2, max_gap + 1)
self.wsclean_nchannels_selfcal = max(1, int(np.ceil(nbands / float(wsclean_nchannels_factor_selfcal))))
self.nband_pad_selfcal = 0 # padding to allow self.wsclean_nchannels to be a divisor
nchan_after_avg = nchan * nbands / self.facetselfcal_freqstep
while nchan_after_avg % self.wsclean_nchannels_selfcal:
self.nband_pad_selfcal += 1
nchan_after_avg = nchan * (nbands + self.nband_pad_selfcal) / self.facetselfcal_freqstep
self.wsclean_suffix = '-MFS-image.fits'
else:
self.wsclean_nchannels = 1
self.wsclean_nchannels_selfcal = 1
self.nband_pad = 0
self.nband_pad_selfcal = 0
self.wsclean_suffix = '-image.fits'
# Set the channel values for the shallow facet imaging steps
fb = self.frac_bandwidth_selfcal_facet_image
self.startchan_selfcal_facet_image = int((1.0 - fb) * nchan / 2.0)
self.nchan_selfcal_facet_image = int(fb * nchan)
while self.nchan_selfcal_facet_image % self.facetimage_freqstep:
self.nchan_selfcal_facet_image += 1
while self.nchan_selfcal_facet_image/self.facetimage_freqstep % self.facetimage_low_freqstep:
self.facetimage_low_freqstep -= 1
if self.nbands_selfcal_facet_image > 1:
self.wsclean_selfcal_facet_image_suffix = '-MFS-image.fits'
else:
self.wsclean_selfcal_facet_image_suffix = '-image.fits'
# Set the baseline-dependent averaging limit for WSClean
# Note: IDG does not work with baseline-dependent averaging
if (hasattr(self, 'facetselfcal_timestep_sec') and
parset['imaging_specific']['wsclean_bl_averaging'] and not self.use_idg):
self.facetselfcal_wsclean_nwavelengths = self.get_nwavelengths(self.cellsize_selfcal_deg,
self.facetselfcal_timestep_sec)
else:
self.facetselfcal_wsclean_nwavelengths = 0
if (hasattr(self, 'facetimage_timestep_sec') and
parset['imaging_specific']['wsclean_bl_averaging'] and not self.use_idg):
self.facetimage_wsclean_nwavelengths = self.get_nwavelengths(self.cellsize_facet_deg,
self.facetimage_timestep_sec)
else:
self.facetimage_wsclean_nwavelengths = 0
if (hasattr(self, 'facetimage_low_timestep_sec') and
parset['imaging_specific']['wsclean_bl_averaging'] and not self.use_idg):
self.facetimage_medlow_wsclean_nwavelengths = self.get_nwavelengths(self.cellsize_facet_deg,
self.facetimage_low_timestep_sec)
else:
self.facetimage_medlow_wsclean_nwavelengths = 0
def get_nwavelengths(self, cellsize_deg, timestep_sec):
"""
Returns nwavelengths for WSClean BL-based averaging
The value depends on the integration time given the specified maximum
allowed smearing. We scale it from the imaging cell size assuming normal
sampling as:
max baseline in nwavelengths = 1 / theta_rad ~= 1 / (cellsize_deg * 3 * pi / 180)
nwavelengths = max baseline in nwavelengths * 2 * pi * integration time in seconds / (24 * 60 * 60) / 4
Parameters
----------
cellsize_deg : float
Pixel size of image in degrees
timestep_sec : float
Length of one timestep in seconds
"""
max_baseline = 1 / (3 * cellsize_deg * np.pi / 180)
wsclean_nwavelengths_time = int(max_baseline * 2*np.pi * timestep_sec /
(24 * 60 * 60) / 4)
return wsclean_nwavelengths_time
def set_imaging_parameters(self, nbands, nbands_selfcal, frac_bandwidth_selfcal,
padding=1.05):
"""
Sets various parameters for images in facetselfcal and facetimage pipelines
Parameters
----------
nbands : int
Number of bands
nbands_selfcal : int
Number of bands for full facet image during selfcal
frac_bandwidth_selfcal : float
Fractional bandwidth for full facet image during selfcal
padding : float, optional
Padding factor by which size of facet is multiplied to determine
the facet image size
"""
# Set facet image size
if hasattr(self, 'width'):
self.facet_imsize_nopadding = int(self.width / self.cellsize_facet_deg)
self.facet_imsize = max(512, self.get_optimum_size(self.width
/ self.cellsize_facet_deg * padding))
self.facet_med_imsize_nopadding = int(self.width / self.cellsize_facet_deg / 4.0)
self.facet_med_imsize = max(512, self.get_optimum_size(self.width
/ self.cellsize_facet_deg / 4.0 * padding))
self.facet_low_imsize_nopadding = int(self.width / self.cellsize_facet_deg / 16.0)
self.facet_low_imsize = max(512, self.get_optimum_size(self.width
/ self.cellsize_facet_deg / 16.0 * padding))
# Determine whether the total bandwidth is large enough that wide-band
# imaging is needed
if nbands > 5:
self.use_wideband = True
else:
self.use_wideband = False
# Set number of iterations and threshold for full facet image, scaled to
# the number of bands. We use 3 times more iterations for the full2
# image to ensure the imager has a reasonable chance to reach the
# threshold first (which is set by the masking step)
scaling_factor = np.sqrt(np.float(nbands))
scaling_factor_selfcal = np.sqrt(np.float(nbands_selfcal)*frac_bandwidth_selfcal)
self.wsclean_selfcal_full_image_niter = int(4000 * scaling_factor_selfcal)
self.wsclean_selfcal_full_image_threshold_jy = 1.5e-3 * 0.7 / scaling_factor_selfcal
self.wsclean_full1_image_niter = int(4000 * scaling_factor)
self.wsclean_full1_image_threshold_jy = 1.5e-3 * 0.7 / scaling_factor
self.wsclean_full2_image_niter = int(12000 * scaling_factor)
# Set multiscale imaging parameters: Get source sizes and check for large
# sources (anything above 4 arcmin -- the CC sky model was convolved
# with a Gaussian of 1 arcmin, so unresolved sources have sizes of ~ 1
# arcmin). For the target facet, always turn on multiscale
if self.contains_target:
self.mscale_facet_do = True
large_size_arcmin = 4.0
if self.mscale_selfcal_do is None:
sizes_arcmin = self.get_source_sizes(cal_only=True)
if sizes_arcmin is not None and any([s > large_size_arcmin for s in sizes_arcmin]):
self.mscale_selfcal_do = True
else:
self.mscale_selfcal_do = False
if self.mscale_facet_do is None:
sizes_arcmin = self.get_source_sizes()
if sizes_arcmin is not None and any([s > large_size_arcmin for s in sizes_arcmin]):
self.mscale_facet_do = True
else:
self.mscale_facet_do = False
if self.mscale_facet_do:
# Set some parameters, needed only for full facet imaging since
# they are always set in selfcal
self.wsclean_multiscale = '-multiscale,'
self.wsclean_full1_image_niter /= 2 # fewer iterations are needed
self.wsclean_full2_image_niter /= 2 # fewer iterations are needed
else:
self.wsclean_multiscale = ''
# Let the user know what we are doing
if self.mscale_selfcal_do and self.mscale_facet_do:
self.log.debug("Will do multiscale-cleaning for calibrator and facet.")
elif self.mscale_selfcal_do:
self.log.debug("Will do multiscale-cleaning for calibrator only.")
elif self.mscale_facet_do:
self.log.debug("Will do multiscale-cleaning for facet only.")
def get_optimum_size(self, size):
"""
Gets the nearest optimum image size
Taken from the casa source code (cleanhelper.py)
Parameters
----------
size : int
Target image size in pixels
Returns
-------
optimum_size : int
Optimum image size nearest to target size
"""
import numpy
def prime_factors(n, douniq=True):
""" Return the prime factors of the given number. """
factors = []
lastresult = n
sqlast=int(numpy.sqrt(n))+1
if n == 1:
return [1]
c=2
while 1:
if (lastresult == 1) or (c > sqlast):
break
sqlast=int(numpy.sqrt(lastresult))+1
while 1:
if(c > sqlast):
c=lastresult
break
if lastresult % c == 0:
break
c += 1
factors.append(c)
lastresult /= c
if (factors==[]): factors=[n]
return numpy.unique(factors).tolist() if douniq else factors
n = int(size)
if (n%2 != 0):
n+=1
fac=prime_factors(n, False)
for k in range(len(fac)):
if (fac[k] > 7):
val=fac[k]
while (numpy.max(prime_factors(val)) > 7):
val +=1
fac[k]=val
newlarge=numpy.product(fac)
for k in range(n, newlarge, 2):
if ((numpy.max(prime_factors(k)) < 8)):
return k
return newlarge
def set_skymodel(self, skymodel):
"""
Sets the direction sky model
The sky model is filtered to include only those sources within the
direction facet
Parameters
----------
skymodel : LSMTool SkyModel object
CC sky model
"""
x, y, midRA, midDec = skymodel._getXY()
xv, yv = radec2xy(self.vertices[0], self.vertices[1], midRA, midDec)
xyvertices = np.array([[xp, yp] for xp, yp in zip(xv, yv)])
bbPath = mplPath.Path(xyvertices)
inside = np.zeros(len(skymodel), dtype=bool)
for i in range(len(skymodel)):
inside[i] = bbPath.contains_point((x[i], y[i]))
skymodel.select(inside, force=True)
self.skymodel = skymodel
def get_source_sizes(self, cal_only=False):
"""
Returns list of source sizes in arcmin
"""
skymodel = self.skymodel.copy()
if cal_only:
dist = skymodel.getDistance(self.ra, self.dec, byPatch=True)
skymodel.select(dist < self.cal_radius_deg, aggregate=True)
sizes = skymodel.getPatchSizes(units='arcmin', weight=False)
return sizes
def get_cal_fluxes(self, fwhmArcsec=25.0, threshold=0.1):
"""
Returns total flux density in Jy and max peak flux density in
Jy per beam for calibrator
Parameters
----------
fwhmArcsec : float, optional
Smoothing scale
threshold : float, optional
Threshold
Returns
-------
tot_flux_jy, peak_flux_jy_bm : float, float
Total flux density in Jy and max peak flux density in
Jy per beam for calibrator
"""
dist = self.skymodel.getDistance(self.ra, self.dec)
skymodel = self.skymodel.copy()
skymodel.select(dist < self.cal_radius_deg)
# Generate image grid with 1 pix = FWHM / 4
x, y, midRA, midDec = skymodel._getXY(crdelt=fwhmArcsec/4.0/3600.0)
fluxes_jy = skymodel.getColValues('I', units='Jy')
sizeX = int(np.ceil(1.2 * (max(x) - min(x)))) + 1
sizeY = int(np.ceil(1.2 * (max(y) - min(y)))) + 1
image = np.zeros((sizeX, sizeY))
xint = np.array(x, dtype=int)
xint += -1 * min(xint)
yint = np.array(y, dtype=int)
yint += -1 * min(yint)
for xi, yi, f in zip(xint, yint, fluxes_jy):
image[xi, yi] = f
# Convolve with Gaussian of FWHM = 4 pixels
image_blur = gaussian_filter(image, [4.0/2.35482, 4.0/2.35482])
beam_area_pix = 1.1331*(4.0)**2
return np.sum(fluxes_jy), np.max(image_blur)*beam_area_pix
def set_averaging_steps_and_solution_intervals(self, chan_width_hz, nchan,
timestep_sec, ntimes_min, nbands, mean_freq_mhz, initial_skymodel=None,
preaverage_flux_jy=0.0, min_peak_smearing_factor=0.95, tec_block_mhz=10.0,
peel_flux_jy=25.0, imaging_only=False):
"""
Sets the averaging step sizes and solution intervals
The averaging is set by the need to keep product of bandwidth and time
smearing below 1 - min_peak_smearing_factor. Averaging is limited to
a maximum of ~ 120 sec and ~ 2 MHz.
The solution-interval scaling is done so that sources with total flux
densities below 1.4 Jy at the highest frequency have a fast interval of
8 time slots and a slow interval of 240 time slots for a bandwidth of 4
bands. The fast intervals are scaled with the bandwidth and flux as
nbands^-0.5 and flux^2. The slow intervals are scaled as flux^2.
Note: the frequency step for averaging must be an even divisor of the
number of channels
Parameters
----------
chan_width_hz : float
Channel width in Hz
nchan : int
Number of channels per band
timestep_sec : float
Time step
ntimes_min : int
Minimum number of timeslots in a chunk
nbands : int
Number of bands
mean_freq_mhz : float
Mean frequency in MHz of full bandwidth
initial_skymodel : LSMTool SkyModel object, optional
Sky model used to check source sizes
preaverage_flux_jy : float, optional
Use baseline-dependent averaging and solint_time_p = 1 for phase-only
calibration for sources below this flux value
min_peak_smearing_factor : float, optional
Min allowed peak flux density reduction due to smearing at the mean
frequency (facet imaging only)
tec_block_mhz : float, optional
Size of frequency block in MHz over which a single TEC solution is
fit
peel_flux_jy : float, optional
Peel calibrators with fluxes above this value
imaging_only : bool, optional
If True, set only imaging-related parameters
"""
# generate a (numpy-)array with the divisors of nchan
tmp_divisors = []
for step in range(nchan, 0, -1):
if (nchan % step) == 0:
tmp_divisors.append(step)
freq_divisors = np.array(tmp_divisors)
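# e.g. for nchan = 64, freq_divisors = [64, 32, 16, 8, 4, 2, 1]; candidate
# averaging steps below are snapped to the nearest entry so that the
# averaged channels evenly tile each band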
# For selfcal, use the size of the calibrator to set the averaging
# steps
if not imaging_only and self.cal_size_deg is not None:
# Set min allowable smearing reduction factor for bandwidth and time
# smearing so that they are equal and their product is 0.85
min_peak_smearing_factor_selfcal = 0.85
peak_smearing_factor = np.sqrt(min_peak_smearing_factor_selfcal)
# Get target time and frequency averaging steps
delta_theta_deg = self.cal_size_deg / 2.0
self.log.debug('Calibrator is {} deg across'.format(delta_theta_deg*2.0))
resolution_deg = 3.0 * self.cellsize_selfcal_deg # assume normal sampling of restoring beam
target_timewidth_s = min(120.0, self.get_target_timewidth(delta_theta_deg,
resolution_deg, peak_smearing_factor))
if self.dynamic_range.lower() == 'hd':
# For high-dynamic range calibration, we use 0.2 MHz per channel
target_bandwidth_mhz = 0.2
else:
target_bandwidth_mhz = min(2.0, self.get_target_bandwidth(mean_freq_mhz,
delta_theta_deg, resolution_deg, peak_smearing_factor))
self.log.debug('Target timewidth for selfcal is {} s'.format(target_timewidth_s))
self.log.debug('Target bandwidth for selfcal is {} MHz'.format(target_bandwidth_mhz))
# Find averaging steps for given target values
self.facetselfcal_freqstep = max(1, min(int(round(target_bandwidth_mhz * 1e6 / chan_width_hz)), nchan))
self.facetselfcal_freqstep = freq_divisors[np.argmin(np.abs(freq_divisors - self.facetselfcal_freqstep))]
self.facetselfcal_timestep = max(1, int(round(target_timewidth_s / timestep_sec)))
self.facetselfcal_timestep_sec = self.facetselfcal_timestep * timestep_sec
self.log.debug('Using averaging steps of {0} channels and {1} time slots '
'for selfcal'.format(self.facetselfcal_freqstep, self.facetselfcal_timestep))
# For selfcal verify, average to 2 MHz per channel and 120 sec per time
# slot
self.verify_freqstep = max(1, min(int(round(2.0 * 1e6 / chan_width_hz)), nchan))
self.verify_freqstep = freq_divisors[np.argmin(np.abs(freq_divisors - self.verify_freqstep))]
self.verify_timestep = max(1, int(round(120.0 / timestep_sec)))
# For facet imaging, use the facet image size (before padding) to set the averaging steps
if self.facet_imsize is not None:
# Set min allowable smearing reduction factor for bandwidth and time
# smearing so that they are equal and their product is
# min_peak_smearing_factor
peak_smearing_factor = np.sqrt(min_peak_smearing_factor)
# Get target time and frequency averaging steps
delta_theta_deg = self.facet_imsize_nopadding * self.cellsize_facet_deg / 2.0
self.log.debug('Facet image before padding is {0} x {0} pixels ({1} x {1} deg)'.format(
self.facet_imsize_nopadding, delta_theta_deg*2.0))
resolution_deg = 3.0 * self.cellsize_facet_deg # assume normal sampling of restoring beam
target_timewidth_s = min(120.0, self.get_target_timewidth(delta_theta_deg,
resolution_deg, peak_smearing_factor))
target_bandwidth_mhz = min(2.0, self.get_target_bandwidth(mean_freq_mhz,
delta_theta_deg, resolution_deg, peak_smearing_factor))
self.log.debug('Target timewidth for facet imaging is {} s'.format(target_timewidth_s))
self.log.debug('Target bandwidth for facet imaging is {} MHz'.format(target_bandwidth_mhz))
# Find averaging steps for given target values
self.facetimage_freqstep = max(1, min(int(round(target_bandwidth_mhz * 1e6 / chan_width_hz)), nchan))
self.facetimage_freqstep = freq_divisors[np.argmin(np.abs(freq_divisors - self.facetimage_freqstep))]
self.facetimage_timestep = max(1, int(round(target_timewidth_s / timestep_sec)))
self.facetimage_timestep_sec = self.facetimage_timestep * timestep_sec
self.log.debug('Using averaging steps of {0} channels and {1} time slots '
'for facet imaging'.format(self.facetimage_freqstep, self.facetimage_timestep))
# Do the same for the low-resolution facet image. Note that these steps
# are in addition to the full-res steps
# generate another (numpy-)array with the divisors of nchan after averaging
nchan_after_facetimage = nchan / self.facetimage_freqstep
chan_width_hz_after_facetimage = chan_width_hz * self.facetimage_freqstep
tmp_divisors = []
for step in range(nchan_after_facetimage, 0, -1):
if (nchan_after_facetimage % step) == 0:
tmp_divisors.append(step)
freq_divisors_low = np.array(tmp_divisors)
low_res_factor = 4.0 # how much lower resolution is than high-res image
resolution_low_deg = 3.0 * low_res_factor * self.cellsize_facet_deg # assume normal sampling of restoring beam
target_timewidth_s = min(120.0, self.get_target_timewidth(delta_theta_deg,
resolution_low_deg, peak_smearing_factor))
target_bandwidth_mhz = min(2.0, self.get_target_bandwidth(mean_freq_mhz,
delta_theta_deg, resolution_low_deg, peak_smearing_factor))
self.facetimage_low_freqstep = max(1, min(int(round(target_bandwidth_mhz * 1e6 /
chan_width_hz_after_facetimage)), nchan_after_facetimage))
self.facetimage_low_freqstep = freq_divisors_low[np.argmin(np.abs(
freq_divisors_low - self.facetimage_low_freqstep))]
self.facetimage_low_timestep = max(1, int(round(target_timewidth_s /
self.facetimage_timestep_sec)))
self.facetimage_low_timestep_sec = (self.facetimage_low_timestep *
self.facetimage_timestep_sec)
# Set time intervals for selfcal solve steps
if not imaging_only:
# Calculate the effective flux density. This is the one used to set the
# intervals. It is the peak flux density adjusted to account for cases
# in which the total flux density is larger than the peak flux density
# would indicate (either due to source being extended or to multiple
# calibrator sources). In these cases, we can use a higher effective
# flux density to set the intervals. A scaling with a power of 1/1.5
# seems to work well
total_flux_jy, peak_flux_jy_bm = self.get_cal_fluxes()
if total_flux_jy == 0.0 or peak_flux_jy_bm == 0.0:
self.log.warn('Could not find source components for the calibrator '
'in one or more of the direction-independent sky models. Using '
'largest allowable solution intervals')
effective_flux_jy = 0.0
else:
effective_flux_jy = peak_flux_jy_bm * (total_flux_jy / peak_flux_jy_bm)**0.667
self.log.debug('Total flux density of calibrator: {} Jy'.format(total_flux_jy))
self.log.debug('Peak flux density of calibrator: {} Jy/beam'.format(peak_flux_jy_bm))
self.log.debug('Effective flux density of calibrator: {} Jy'.format(effective_flux_jy))
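# e.g. a resolved calibrator with a peak of 1 Jy/beam and a total of 8 Jy
# would get effective_flux_jy = 1 * 8**0.667 ~ 4 Jy (illustrative values),
# shortening its solution intervals relative to its peak flux alone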
# Set baseline-dependent pre-averaging flag
if effective_flux_jy < preaverage_flux_jy:
self.pre_average = True
self.log.debug('Preaveraging in frequency and time will be done')
else:
self.pre_average = False
# Set fast (phase-only) solution time interval
ref_flux_jy = 1.4 * (4.0 / nbands)**0.5
if self.solint_time_p == 0:
if self.pre_average:
# Set solution interval to 1 timeslot and vary the target rms per
# solution interval instead (which affects the width of the
# preaveraging Gaussian)
self.solint_time_p = 1
if effective_flux_jy > 0.0:
self.target_rms_rad = 0.5 * (ref_flux_jy / effective_flux_jy)**2
else:
self.target_rms_rad = 0.5
if self.target_rms_rad > 0.5:
self.target_rms_rad = 0.5
else:
if effective_flux_jy > 0.0:
target_timewidth_s = 16.0 * (ref_flux_jy / effective_flux_jy)**2
else:
target_timewidth_s = 16.0
if target_timewidth_s > 16.0:
target_timewidth_s = 16.0
self.solint_time_p = max(1, int(round(target_timewidth_s / timestep_sec)))
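# Illustrative scaling (assumed values): for nbands = 16, ref_flux_jy =
# 0.7 Jy; a 2 Jy calibrator then gives target_timewidth_s =
# 16*(0.7/2)**2 ~ 2 s, while sources fainter than ref_flux_jy hit the
# 16 s cap above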
# Set slow (gain) solution time interval
if self.solint_time_a == 0:
# Slow gain solve is per band, so don't scale the interval with
# the number of bands but only with the effective flux. Also,
# avoid cases in which the last solution interval is much smaller
# than the target interval (for the smallest time chunk; this
# assumes that the chunks are all about the same length)
ref_flux_jy = 1.4
if effective_flux_jy > 0.0:
target_timewidth_s = 1200.0 * (ref_flux_jy / effective_flux_jy)**2
else:
target_timewidth_s = 1200.0
if target_timewidth_s < 240.0:
target_timewidth_s = 240.0
if target_timewidth_s > 1200.0:
target_timewidth_s = 1200.0
solint_time_a = int(round(target_timewidth_s / timestep_sec))
solint_time_a_lower = solint_time_a
solint_time_a_upper = solint_time_a
while (ntimes_min % solint_time_a_lower > 0 and
ntimes_min % solint_time_a_lower < solint_time_a_lower / 2.0):
solint_time_a_lower -= 1
while (ntimes_min % solint_time_a_upper > 0 and
ntimes_min % solint_time_a_upper < solint_time_a_upper / 2.0):
solint_time_a_upper += 1
if solint_time_a - solint_time_a_lower <= solint_time_a_upper - solint_time_a:
self.solint_time_a = solint_time_a_lower
else:
self.solint_time_a = solint_time_a_upper
self.solint_time_a += 1
self.log.debug('Using solution intervals of {0} (fast) and {1} '
'(slow) time slots'.format(self.solint_time_p, self.solint_time_a))
# Set frequency intervals for selfcal solve steps. The interval for
# slow (amp) selfcal should be the number of channels in a band after
# averaging.
num_chan_per_band_after_avg = nchan / self.facetselfcal_freqstep
if self.dynamic_range.lower() == 'hd':
# For high-dynamic range solve, the interval for slow (amp) selfcal
# should be 1 (every channel)
self.solint_freq_a = 1
else:
self.solint_freq_a = num_chan_per_band_after_avg
# The interval for fast (phase) selfcal should be the number of
# channels in tec_block_mhz, but no less than 2 MHz
min_block_mhz = 2.0
if tec_block_mhz < min_block_mhz:
self.log.warn('Setting TEC block size to minimum allowed value of {} MHz'.format(min_block_mhz))
tec_block_mhz = min_block_mhz
mhz_per_chan_after_avg = self.facetselfcal_freqstep * chan_width_hz / 1e6
total_bandwidth_mhz = nchan * nbands * chan_width_hz / 1e6
num_cal_blocks = np.ceil(total_bandwidth_mhz / tec_block_mhz)
nchan_per_block = np.ceil(num_chan_per_band_after_avg * nbands /
num_cal_blocks)
# Check for a partial block, and adjust the number to ensure that
# it is at least half of the desired block size
num_cal_blocks_lower = num_cal_blocks
num_cal_blocks_upper = num_cal_blocks
partial_block_mhz = self.calc_partial_block(num_chan_per_band_after_avg,
nbands, num_cal_blocks, mhz_per_chan_after_avg)
while (partial_block_mhz > 0.0 and partial_block_mhz < tec_block_mhz/2.0):
num_cal_blocks_lower -= 1
partial_block_mhz = self.calc_partial_block(num_chan_per_band_after_avg,
nbands, num_cal_blocks_lower, mhz_per_chan_after_avg)
partial_block_mhz = self.calc_partial_block(num_chan_per_band_after_avg,
nbands, num_cal_blocks, mhz_per_chan_after_avg)
while (partial_block_mhz > 0.0 and partial_block_mhz < tec_block_mhz/2.0):
num_cal_blocks_upper += 1
partial_block_mhz = self.calc_partial_block(num_chan_per_band_after_avg,
nbands, num_cal_blocks_upper, mhz_per_chan_after_avg)
if num_cal_blocks - num_cal_blocks_lower < num_cal_blocks_upper - num_cal_blocks:
num_cal_blocks = num_cal_blocks_lower
else:
num_cal_blocks = num_cal_blocks_upper
if num_cal_blocks < 1:
num_cal_blocks = 1
self.num_cal_blocks = int(num_cal_blocks)
self.num_bands_per_cal_block = int(np.ceil(nbands / float(num_cal_blocks)))
self.solint_freq_p = int(np.ceil(num_chan_per_band_after_avg * nbands /
float(num_cal_blocks)))
def calc_partial_block(self, num_chan_per_band_after_avg, nbands,
num_cal_blocks, mhz_per_chan_after_avg):
"""
Returns size of partial block in MHz
"""
nband_per_block = np.ceil(nbands / num_cal_blocks)
partial_block_mhz = (num_chan_per_band_after_avg * nbands %
nband_per_block) * mhz_per_chan_after_avg
return partial_block_mhz
def get_target_timewidth(self, delta_theta, resolution, reduction_factor):
"""
Returns the time width for given peak flux density reduction factor
Parameters
----------
delta_theta : float
Distance from phase center
resolution : float
Resolution of restoring beam
reduction_factor : float
Ratio of pre-to-post averaging peak flux density
Returns
-------
delta_time : float
Time width in seconds for target reduction_factor
"""
delta_time = np.sqrt( (1.0 - reduction_factor) /
(1.22E-9 * (delta_theta / resolution)**2.0) )
return delta_time
def get_bandwidth_smearing_factor(self, freq, delta_freq, delta_theta, resolution):
"""
Returns peak flux density reduction factor due to bandwidth smearing
Parameters
----------
freq : float
Frequency at which averaging will be done
delta_freq : float
Bandwidth over which averaging will be done
delta_theta : float
Distance from phase center
resolution : float
Resolution of restoring beam
Returns
-------
reduction_factor : float
Ratio of pre-to-post averaging peak flux density
"""
beta = (delta_freq/freq) * (delta_theta/resolution)
gamma = 2*(np.log(2)**0.5)
reduction_factor = ((np.pi**0.5)/(gamma * beta)) * (erf(beta*gamma/2.0))
return reduction_factor
def get_target_bandwidth(self, freq, delta_theta, resolution, reduction_factor):
"""
Returns the bandwidth for given peak flux density reduction factor
Parameters
----------
freq : float
Frequency at which averaging will be done
delta_theta : float
Distance from phase center
resolution : float
Resolution of restoring beam
reduction_factor : float
Ratio of pre-to-post averaging peak flux density
Returns
-------
delta_freq : float
Bandwidth over which averaging will be done
"""
# Increase delta_freq until we drop below target reduction_factor
delta_freq = 1e-3 * freq
while self.get_bandwidth_smearing_factor(freq, delta_freq, delta_theta,
resolution) > reduction_factor:
delta_freq *= 1.1
return delta_freq
def find_peel_skymodel(self):
"""
Searches for an appropriate sky model for peeling
"""
if self.peel_skymodel is not None:
return
max_separation_arcmin = 1.0
factor_lib_dir = os.path.dirname(os.path.abspath(__file__))
skymodel_dir = os.path.join(os.path.split(factor_lib_dir)[0], 'skymodels')
skymodels = glob.glob(os.path.join(skymodel_dir, '*.skymodel'))
for skymodel in skymodels:
try:
s = lsmtool.load(skymodel)
dist_deg = s.getDistance(self.ra, self.dec)
if any(dist_deg*60.0 < max_separation_arcmin):
self.peel_skymodel = skymodel
break
except IOError:
pass
def get_nbands(self, bands):
"""
Returns total number of bands including missing ones
Parameters
----------
bands : list of Band objects
Bands for this operation
Returns
-------
nbands, max_gap : int, int
The total number of bands (including gaps) and the size in number
of bands of the maximum gap
"""
freqs_hz = [b.freq for b in bands]
chan_width_hz = bands[0].chan_width_hz
nchan = bands[0].nchan
freq_width_hz = chan_width_hz * nchan
# Find gaps, if any
missing_bands = []
max_gap = 0
for i, (freq1, freq2) in enumerate(zip(freqs_hz[:-1], freqs_hz[1:])):
ngap = int(round((freq2 - freq1)/freq_width_hz))
if ngap - 1 > max_gap:
max_gap = ngap - 1
missing_bands.extend([i + j + 1 for j in range(ngap-1)])
nbands = len(bands) + len(missing_bands)
return (nbands, max_gap)
def save_state(self):
"""
Saves the direction state to a file
"""
import pickle
with open(self.save_file, 'wb') as f:
# Remove log and skymodel objects, as they cannot be pickled
save_dict = self.__dict__.copy()
save_dict.pop('log')
save_dict.pop('skymodel')
pickle.dump(save_dict, f)
def load_state(self):
"""
Loads the direction state from a file
Note: only state attributes are loaded to avoid overwriting
non-state attributes
Returns
-------
success : bool
True if state was successfully loaded, False if not
"""
import pickle
try:
with open(self.save_file, 'rb') as f:  # binary mode matches the 'wb' used in save_state
d = pickle.load(f)
# Load list of started operations
if 'started_operations' in d:
self.started_operations = d['started_operations']
# Load list of completed operations
if 'completed_operations' in d:
self.completed_operations = d['completed_operations']
# Load mapfiles needed for facetsubreset
if 'converted_parmdb_mapfile' in d:
self.converted_parmdb_mapfile = d['converted_parmdb_mapfile']
if 'sourcedb_new_facet_sources' in d:
self.sourcedb_new_facet_sources = d['sourcedb_new_facet_sources']
# Load mapfile of imaging data
if 'image_data_mapfile' in d:
self.image_data_mapfile = d['image_data_mapfile']
return True
except Exception:
return False
def reset_state(self, op_names=None):
"""
Resets the direction to allow reprocessing
Currently, this means just deleting the results directories,
but it could be changed to delete only a subset of selfcal steps (by
modifying the selfcal pipeline statefile).
Parameters
----------
op_names : list of str, optional
List of names of operations to reset. Reset is done only if the
operation name appears in self.reset_operations. If None, all
started and completed operations that are in self.reset_operations
are reset
"""
if op_names is None:
op_names = self.completed_operations[:] + self.started_operations[:]
elif type(op_names) is str:
op_names = [op_names]
op_names_reset = [op for op in op_names if op in self.reset_operations]
if len(op_names_reset) > 0:
self.log.info('Resetting state for operation(s): {}'.format(', '.join(op_names_reset)))
else:
return
# Reset selfcal flag
if 'facetselfcal' in op_names_reset:
self.selfcal_ok = False
# Remove operation name from lists of started and completed operations
# and delete the results directories
for op_name in op_names_reset:
while op_name.lower() in self.completed_operations:
self.completed_operations.remove(op_name.lower())
while op_name.lower() in self.started_operations:
self.started_operations.remove(op_name.lower())
# Delete results directory for this operation
op_dir = os.path.join(self.working_dir, 'results', op_name.lower(), self.name)
if os.path.exists(op_dir):
os.system('rm -rf {0}'.format(op_dir))
self.save_state()
def cleanup(self):
"""
Cleans up unneeded data
"""
from lofarpipe.support.data_map import DataMap
import glob
for mapfile in self.cleanup_mapfiles:
try:
datamap = DataMap.load(mapfile)
for item in datamap:
# Handle case in which item.file is a Python list
if item.file[0] == '[' and item.file[-1] == ']':
files = item.file.strip('[]').split(',')
else:
files = [item.file]
for f in files:
if os.path.exists(f):
os.system('rm -rf {0} &'.format(f))
# Also delete associated "_CONCAT" files that result
# from virtual concatenation
extra_files = glob.glob(f+'_CONCAT')
for e in extra_files:
if os.path.exists(e):
os.system('rm -rf {0} &'.format(e))
# Deal with special case of f being a WSClean image
if f.endswith('MFS-image.fits'):
# Search for related images and delete if found
image_root = f.split('MFS-image.fits')[0]
extra_files = glob.glob(image_root+'*.fits')
for e in extra_files:
if os.path.exists(e):
os.system('rm -rf {0} &'.format(e))
elif f.endswith('-image.fits'):
# Search for related images and delete if found
image_root = f.split('-image.fits')[0]
extra_files = glob.glob(image_root+'*.fits')
for e in extra_files:
if os.path.exists(e):
os.system('rm -rf {0} &'.format(e))
except IOError:
pass
|
revoltek/factor
|
factor/lib/direction.py
|
Python
|
gpl-2.0
| 54,010
|
[
"Gaussian"
] |
d30c90e2b07a781739a66560680ca8621b40b5dd4321be3fe0e585a380284271
|
#!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# vizplotgui_vmd.py
# Purpose: viz running LAMMPS simulation via VMD with plot and GUI
# Syntax: vizplotgui_vmd.py in.lammps Nfreq compute-ID
# in.lammps = LAMMPS input script
# Nfreq = plot data point and viz snapshot every this many steps
# compute-ID = ID of compute that calculates temperature
# (or any other scalar quantity)
from __future__ import print_function
import sys,time
sys.path.append("./pizza")
# methods called by GUI
def run():
global runflag
runflag = 1
def stop():
global runflag
runflag = 0
def settemp(value):
global temptarget
temptarget = slider.get()
def quit():
global breakflag
breakflag = 1
# method called by timestep loop every Nfreq steps
# read dump snapshot and viz it, update plot with compute value
def update(ntimestep):
d.next()
d.unscale()
p.single(ntimestep)
v.append('tmp.pdb','pdb')
value = lmp.extract_compute(compute,0,0)
xaxis.append(ntimestep)
yaxis.append(value)
gn.plot(xaxis,yaxis)
# parse command line
argv = sys.argv
if len(argv) != 4:
print("Syntax: vizplotgui_vmd.py in.lammps Nfreq compute-ID")
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
compute = sys.argv[3]
me = 0
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
# dump a file in native LAMMPS dump format for Pizza.py dump tool
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
lmp.command("dump python all atom %d tmp.dump" % nfreq)
# initial 0-step run to generate initial 1-point plot, dump file, and image
lmp.command("run 0 pre yes post no")
value = lmp.extract_compute(compute,0,0)
ntimestep = 0
xaxis = [ntimestep]
yaxis = [value]
breakflag = 0
runflag = 0
temptarget = 1.0
# wrapper on VMD window via Pizza.py vmd tool
# just proc 0 handles reading of dump file and viz
if me == 0:
from vmd import vmd
v = vmd()
v('menu main off')
v.rep('VDW')
from dump import dump
from pdbfile import pdbfile
d = dump('tmp.dump',0)
p = pdbfile(d)
d.next()
d.unscale()
p.single(ntimestep)
v.new('tmp.pdb','pdb')
# display GUI with run/stop buttons and slider for temperature
if me == 0:
try:
from Tkinter import *
except ImportError:
from tkinter import *
tkroot = Tk()
tkroot.withdraw()
root = Toplevel(tkroot)
root.title("LAMMPS GUI")
frame = Frame(root)
Button(frame,text="Run",command=run).pack(side=LEFT)
Button(frame,text="Stop",command=stop).pack(side=LEFT)
slider = Scale(frame,from_=0.0,to=5.0,resolution=0.1,
orient=HORIZONTAL,label="Temperature")
slider.bind('<ButtonRelease-1>',settemp)
slider.set(temptarget)
slider.pack(side=LEFT)
Button(frame,text="Quit",command=quit).pack(side=RIGHT)
frame.pack()
tkroot.update()
# wrapper on GnuPlot via Pizza.py gnu tool
if me == 0:
from gnu import gnu
gn = gnu()
gn.plot(xaxis,yaxis)
gn.title(compute,"Timestep","Temperature")
# endless loop, checking status of GUI settings every Nfreq steps
# run with pre yes/no and post yes/no depending on go/stop status
# re-invoke fix langevin with new seed when temperature slider changes
# after re-invoke of fix langevin, run with pre yes
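# pre/post usage (illustrative summary of the transitions below):
# start running: "run N pre yes post no" (rebuild neighbor lists once)
# keep running: "run N pre no post no" (cheapest continuation)
# stop running: "run N pre no post yes" (print final statistics)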
running = 0
temp = temptarget
seed = 12345
lmp.command("fix 2 all langevin %g %g 0.1 %d" % (temp,temp,seed))
while 1:
if me == 0: tkroot.update()
if temp != temptarget:
temp = temptarget
seed += me+1
lmp.command("fix 2 all langevin %g %g 0.1 12345" % (temp,temp))
running = 0
if runflag and running:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
elif runflag and not running:
lmp.command("run %d pre yes post no" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
elif not runflag and running:
lmp.command("run %d pre no post yes" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
if breakflag: break
if runflag: running = 1
else: running = 0
time.sleep(0.01)
lmp.command("run 0 pre no post yes")
|
akohlmey/lammps
|
python/examples/vizplotgui_vmd.py
|
Python
|
gpl-2.0
| 4,114
|
[
"LAMMPS",
"VMD"
] |
db8e103443a5bd804975c6c0c5269f6fc908ab8163644162285b627a991afd0a
|
from __future__ import division, print_function
# Multicut Pipeline implemented with luigi
# Blockwise solver tasks
import luigi
from .pipelineParameter import PipelineParameter
from .dataTasks import ExternalSegmentation
from .customTargets import HDF5DataTarget
from .multicutProblemTasks import MulticutProblem
from .blocking_helper import NodesToBlocks
from .tools import config_logger, run_decorator
from .nifty_helper import run_nifty_solver, string_to_factory, available_factorys
import os
import logging
import time
import numpy as np
from concurrent import futures
# import the proper nifty version
try:
import nifty
except ImportError:
try:
import nifty_with_cplex as nifty
except ImportError:
import nifty_with_gurobi as nifty
# init the workflow logger
workflow_logger = logging.getLogger(__name__)
config_logger(workflow_logger)
# parent class for blockwise solvers
class BlockwiseSolver(luigi.Task):
pathToSeg = luigi.Parameter()
globalProblem = luigi.TaskParameter()
numberOfLevels = luigi.Parameter()
keyToSeg = luigi.Parameter(default='data')
def requires(self):
# block size in first hierarchy level
initialBlockShape = PipelineParameter().multicutBlockShape
# block overlap, for now same for each hierarchy lvl
block_overlap = PipelineParameter().multicutBlockOverlap
problems = [self.globalProblem]
block_factor = 1
for level in range(self.numberOfLevels):
# TODO check that we don't get larger than the actual shape here
block_shape = list(map(lambda x: x * block_factor, initialBlockShape))
problems.append(ReducedProblem(pathToSeg=self.pathToSeg,
problem=problems[-1],
blockShape=block_shape,
blockOverlap=block_overlap,
level=level,
keyToSeg=self.keyToSeg))
block_factor *= 2
return problems
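# Illustrative hierarchy (assumed values): initialBlockShape [50, 512, 512]
# with numberOfLevels = 2 yields [globalProblem,
# ReducedProblem(level=0, blockShape=[50, 512, 512]),
# ReducedProblem(level=1, blockShape=[100, 1024, 1024])],
# i.e. each level doubles the block edge lengths via block_factor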
def run(self):
raise NotImplementedError("BlockwiseSolver is abstract and does not implement a run functionality!")
# map back to the global solution
def map_node_result_to_global(self, problems, reduced_node_result, reduced_problem_index=-1):
n_nodes_global = problems[0].read('number_of_nodes')
reduced_problem = problems[reduced_problem_index]
to_global_nodes = reduced_problem.read("new2global")
# TODO vectorize
node_result = np.zeros(n_nodes_global, dtype='uint32')
for node_id, node_res in enumerate(reduced_node_result):
node_result[to_global_nodes[node_id]] = node_res
return node_result
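# Vectorized alternative (sketch, assuming to_global_nodes is a sequence
# of index arrays):
#   idx = np.concatenate(to_global_nodes)
#   rep = np.repeat(reduced_node_result,
#                   [len(t) for t in to_global_nodes])
#   node_result[idx] = rep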
def output(self):
raise NotImplementedError("BlockwiseSolver is abstract and does not implement the output functionality!")
# produce reduced global graph from subproblems
# solve global multicut problem on the reduced graph
class BlockwiseMulticutSolver(BlockwiseSolver):
@run_decorator
def run(self):
problems = self.input()
# we solve the problem for the costs and the edges of the last level of hierarchy
reduced_problem = problems[-1]
reduced_graph = nifty.graph.UndirectedGraph()
reduced_graph.deserialize(reduced_problem.read("graph"))
reduced_costs = reduced_problem.read("costs")
reduced_objective = nifty.graph.opt.multicut.multicutObjective(reduced_graph, reduced_costs)
#
# run global multicut inference
#
# we use fm with kl as default backend, because this shows the best scaling behaviour
solver_type = PipelineParameter().globalSolverType
inf_params = dict(sigma=PipelineParameter().multicutSigmaFusion,
number_of_iterations=PipelineParameter().multicutNumIt,
n_stop=PipelineParameter().multicutNumItStopGlobal,
n_threads=PipelineParameter().multicutNThreadsGlobal,
n_fuse=PipelineParameter().multicutNumFuse,
seed_fraction=PipelineParameter().multicutSeedFractionGlobal)
workflow_logger.info("BlockwiseMulticutSolver: Solving problems with solver %s" % solver_type)
workflow_logger.info(
"BlockwiseMulticutSolver: With Params %s" % ' '.join(
['%s, %s,' % (str(k), str(v)) for k, v in inf_params.items()]
)
)
# we set visit-nth to 1 for the fusion move solvers and to 100 for kernighan lin
# NOTE: we will not use ilp here, so it does not matter that it is handled incorrectly
visit_nth = 1 if solver_type.startswith('fm') else 100
factory = string_to_factory(reduced_objective, solver_type, inf_params)
reduced_node_result, energy, t_inf = run_nifty_solver(
reduced_objective,
factory,
verbose=1,
time_limit=PipelineParameter().multicutGlobalTimeLimit,
visit_nth=visit_nth
)
workflow_logger.info(
"BlockwiseMulticutSolver: Inference of reduced problem for the whole volume took: %f s" % t_inf[-1]
)
workflow_logger.info(
"BlockwiseMulticutSolver: Problem solved with energy %f"
% energy[-1]
)
# TODO change to debug
if workflow_logger.isEnabledFor(logging.INFO):
assert len(energy) == len(t_inf)
workflow_logger.info(
"BlockwiseMulticutSolver: logging energy during inference:"
)
for ii in range(len(energy)):
workflow_logger.info(
"BlockwiseMulticutSolver: t: %f s energy: %f" % (t_inf[ii], energy[ii])
)
node_result = self.map_node_result_to_global(problems, reduced_node_result)
self.output().write(node_result)
def output(self):
save_name = "BlockwiseMulticutSolver_L%i_%s_%s_%s.h5" % (
self.numberOfLevels,
'_'.join(map(str, PipelineParameter().multicutBlockShape)),
'_'.join(map(str, PipelineParameter().multicutBlockOverlap)),
"modified" if PipelineParameter().defectPipeline else "standard"
)
save_path = os.path.join(PipelineParameter().cache, save_name)
return HDF5DataTarget(save_path)
class ReducedProblem(luigi.Task):
pathToSeg = luigi.Parameter()
problem = luigi.TaskParameter()
blockShape = luigi.ListParameter()
blockOverlap = luigi.ListParameter()
level = luigi.Parameter()
keyToSeg = luigi.Parameter(default='data')
def requires(self):
return {"sub_solution": BlockwiseSubSolver(pathToSeg=self.pathToSeg,
problem=self.problem,
blockShape=self.blockShape,
blockOverlap=self.blockOverlap,
level=self.level,
keyToSeg=self.keyToSeg),
"problem": self.problem}
# TODO we need to recover the edges between blocks for the stitching solver
@run_decorator
def run(self):
workflow_logger.info(
"ReducedProblem: Reduced problem for level %i with block shape: %s"
% (self.level, str(self.blockShape))
)
inp = self.input()
problem = inp["problem"]
cut_edges = inp["sub_solution"].read()
g = nifty.graph.UndirectedGraph()
g.deserialize(problem.read("graph"))
# merge the edges that are not cuts
# and find the new nodes as well as the mapping
# from new 2 old nodes
# we time all the stuff for benchmarking
t_merge = time.time()
uv_ids = g.uvIds()
ufd = nifty.ufd.ufd(g.numberOfNodes)
merge_nodes = uv_ids[cut_edges == 0]
ufd.merge(merge_nodes)
old2new_nodes = ufd.elementLabeling()
new2old_nodes = ufd.representativesToSets()
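# e.g. merging uv-pairs (0, 1) and (1, 2) in a 4-node graph leaves the
# components {0, 1, 2} and {3}: old2new_nodes = [0, 0, 0, 1] and
# new2old_nodes = [[0, 1, 2], [3]] (illustrative; exact labels depend on
# the ufd implementation)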
# number of nodes for the new problem
number_of_new_nodes = len(new2old_nodes)
workflow_logger.info("ReducedProblem: Merging nodes took: %f s" % (time.time() - t_merge))
t_edges = time.time()
# find new edges and costs
uv_ids_new = self.find_new_edges_and_costs(uv_ids,
problem,
cut_edges,
number_of_new_nodes,
old2new_nodes)
workflow_logger.info("ReducedProblem: Computing new edges took: %f s" % (time.time() - t_edges))
# serialize the node converter
t_serialize = time.time()
self.serialize_node_conversions(problem, old2new_nodes, new2old_nodes, number_of_new_nodes)
workflow_logger.info("ReducedProblem: Serializing node converters took: %f s" % (time.time() - t_serialize))
workflow_logger.info("ReucedProblem: Nodes: From %i to %i" % (g.numberOfNodes, number_of_new_nodes))
workflow_logger.info("ReucedProblem: Edges: From %i to %i" % (g.numberOfEdges, len(uv_ids_new)))
def find_new_edges_and_costs(self,
uv_ids,
problem,
cut_edges,
number_of_new_nodes,
old2new_nodes):
# find mapping from new to old edges with nifty impl
edge_mapping = nifty.tools.EdgeMapping(len(uv_ids))
edge_mapping.initializeMapping(uv_ids, old2new_nodes)
# get the new uv-ids
uv_ids_new = edge_mapping.getNewUvIds()
# map the old costs to new costs
costs = problem.read("costs")
new_costs = edge_mapping.mapEdgeValues(costs)
# map the old outer edges to new outer edges
outer_edge_ids = self.input()["sub_solution"].read("outer_edges")
with futures.ThreadPoolExecutor(max_workers=PipelineParameter().nThreads) as tp:
tasks = [tp.submit(edge_mapping.getNewEdgeIds, oeids) for oeids in outer_edge_ids]
new_outer_edges = np.array([np.array(t.result()) for t in tasks])
assert len(new_costs) == len(uv_ids_new)
reduced_graph = nifty.graph.UndirectedGraph(number_of_new_nodes)
reduced_graph.insertEdges(uv_ids_new)
out = self.output()
out.write(reduced_graph.serialize(), "graph")
out.write(reduced_graph.numberOfNodes, 'number_of_nodes')
out.write(new_costs, "costs")
out.writeVlen(new_outer_edges, 'outer_edges')
return uv_ids_new
def serialize_node_conversions(self, problem, old2new_nodes, new2old_nodes, number_of_new_nodes):
if self.level == 0:
global2new = old2new_nodes
new2global = new2old_nodes
else:
global2new_last = problem.read("global2new").astype(np.uint32)
new2global_last = problem.read("new2global")
global2new = -1 * np.ones_like(global2new_last, dtype=np.int32)
new2global = []
# TODO vectorize
for newNode in range(number_of_new_nodes):
oldNodes = new2old_nodes[newNode]
globalNodes = np.concatenate(new2global_last[oldNodes])
global2new[globalNodes] = newNode
new2global.append(globalNodes)
assert -1 not in global2new
global2new = global2new.astype('uint32')
out = self.output()
out.write(global2new, "global2new")
# need to serialize this differently, due to list of list
new2old_nodes = np.array([np.array(n2o, dtype='uint32') for n2o in new2old_nodes])
out.writeVlen(new2old_nodes, "new2old")
new2global = np.array([np.array(n2g, dtype='uint32') for n2g in new2global])
out.writeVlen(new2global, "new2global")
def output(self):
save_name = "ReducedProblem_L%i_%s_%s_%s.h5" % (
self.level,
'_'.join(map(str, self.blockShape)),
'_'.join(map(str, self.blockOverlap)),
"modified" if PipelineParameter().defectPipeline else "standard"
)
save_path = os.path.join(PipelineParameter().cache, save_name)
return HDF5DataTarget(save_path)
class BlockwiseSubSolver(luigi.Task):
pathToSeg = luigi.Parameter()
problem = luigi.TaskParameter()
blockShape = luigi.ListParameter()
blockOverlap = luigi.ListParameter()
level = luigi.Parameter()
# needs to be true if we want to use the stitching - by overlap solver
serializeSubResults = luigi.Parameter(default=False)
# will outer edges be cut ?
# should be left at true, because results seem to degrade if false
cutOuterEdges = luigi.Parameter(default=True)
keyToSeg = luigi.Parameter(default='data')
def requires(self):
initialShape = PipelineParameter().multicutBlockShape
overlap = PipelineParameter().multicutBlockOverlap
nodes2blocks = NodesToBlocks(self.pathToSeg,
initialShape,
overlap,
keyToSeg=self.keyToSeg)
return {"seg": ExternalSegmentation(self.pathToSeg),
"problem": self.problem,
"nodes2blocks": nodes2blocks}
@run_decorator
def run(self):
# Input
inp = self.input()
seg = inp["seg"]
seg.open(self.keyToSeg)
problem = inp["problem"]
costs = problem.read("costs")
nodes2blocks = inp["nodes2blocks"].read()
graph = nifty.graph.UndirectedGraph()
graph.deserialize(problem.read("graph"))
number_of_edges = graph.numberOfEdges
global2new_nodes = None if self.level == 0 else problem.read("global2new")
workflow_logger.info("BlockwiseSubSolver: Starting extraction of subproblems.")
t_extract = time.time()
subproblems = self._run_subproblems_extraction(seg, graph, nodes2blocks, global2new_nodes)
workflow_logger.info("BlockwiseSubSolver: Extraction time for subproblems %f s" % (time.time() - t_extract,))
workflow_logger.info("BlockwiseSubSolver: Starting solvers for subproblems.")
t_inf_total = time.time()
self._solve_subproblems(costs, subproblems, number_of_edges)
workflow_logger.info(
"BlockwiseSubSolver: Inference time total for subproblems %f s" % (time.time() - t_inf_total,)
)
seg.close()
# extract all sub-problems for current level
def _run_subproblems_extraction(self, seg, graph, nodes2blocks, global2new_nodes):
# get the initial blocking
# block size in first hierarchy level
initial_block_shape = PipelineParameter().multicutBlockShape
initial_overlap = list(PipelineParameter().multicutBlockOverlap)
initial_blocking = nifty.tools.blocking(roiBegin=[0, 0, 0],
roiEnd=list(seg.shape(key=self.keyToSeg)),
blockShape=list(initial_block_shape))
# function for subproblem extraction
# extraction only for level 0
def extract_subproblem(block_id, sub_blocks):
node_list = np.unique(np.concatenate([nodes2blocks[sub_id] for sub_id in sub_blocks]))
if self.level != 0:
node_list = np.unique(global2new_nodes[node_list])
workflow_logger.debug(
"BlockwiseSubSolver: block id %i: Number of nodes %i" % (block_id, len(node_list.shape))
)
inner_edges, outer_edges, subgraph = graph.extractSubgraphFromNodes(node_list.tolist())
# we can get 0 inner edges, if we have a subblock with just a single node
# or a subblock with only ignore edges.
# we filter these blocks, because they mess up the sub-solver
if len(inner_edges) <= 1:
return False
return np.array(inner_edges), np.array(outer_edges), subgraph, node_list
block_overlap = list(self.blockOverlap)
blocking = nifty.tools.blocking(roiBegin=[0, 0, 0],
roiEnd=list(seg.shape(key=self.keyToSeg)),
blockShape=self.blockShape)
number_of_blocks = blocking.numberOfBlocks
workflow_logger.info(
"BlockwiseSubSolver: Extracting sub-problems with current blocking of shape %s with overlaps %s."
% (str(self.blockShape), str(block_overlap))
)
n_workers = min(number_of_blocks, PipelineParameter().nThreads)
# parallel
with futures.ThreadPoolExecutor(max_workers=n_workers) as executor:
tasks = []
for block_id in range(number_of_blocks):
# get the current block with additional overlap
block = blocking.getBlockWithHalo(block_id, block_overlap).outerBlock
block_begin, block_end = block.begin, block.end
workflow_logger.debug(
"BlockwiseSubSolver: block id %i start %s end %s" % (block_id, str(block_begin), str(block_end))
)
# if we are past level 0, we must assemble the initial blocks from which this block is made up
# otherwise we simply schedule this block
if self.level > 0:
sub_blocks = initial_blocking.getBlockIdsInBoundingBox(block_begin, block_end, initial_overlap)
else:
sub_blocks = [block_id]
tasks.append(executor.submit(extract_subproblem, block_id, sub_blocks))
sub_problems = [task.result() for task in tasks]
sub_problems = [sub_prob for sub_prob in sub_problems if sub_prob]
out = self.output()
out.writeVlen(np.array([sub_prob[1] for sub_prob in sub_problems]),
'outer_edges')
# if we serialize the sub-results, write out the block positions and the sub nodes here
if self.serializeSubResults:
out.write(
np.concatenate([
np.array(blocking.getBlockWithHalo(block_id, block_overlap).outerBlock.begin)[None, :]
for block_id in range(number_of_blocks)],
axis=0
),
'block_begins'
)
out.write(
np.concatenate([
np.array(blocking.getBlockWithHalo(block_id, block_overlap).outerBlock.end)[None, :]
for block_id in range(number_of_blocks)],
axis=0
),
'block_ends'
)
out.writeVlen(
np.array([sub_prob[3] for sub_prob in sub_problems]),
'sub_nodes'
)
return sub_problems
def _solve_subproblems(self, costs, sub_problems, number_of_edges):
sub_solver_type = PipelineParameter().subSolverType
if sub_solver_type in ('fm-ilp', 'fm-kl'):
solver_params = dict(
sigma=PipelineParameter().multicutSigmaFusion,
number_of_iterations=PipelineParameter().multicutNumIt,
n_stop=PipelineParameter().multicutNumItStopGlobal,
n_threads=0,
n_fuse=PipelineParameter().multicutNumFuse,
seed_fraction=PipelineParameter().multicutSeedFraction
)
else:
solver_params = dict()
workflow_logger.info("BlockwiseSubSolver: Solving sub-problems with solver %s" % sub_solver_type)
workflow_logger.info(
"BlockwiseSubSolver: With Params %s" % ' '.join(
['%s, %s,' % (str(k), str(v)) for k, v in solver_params.items()]
)
)
def _solve_mc(g, costs, block_id):
workflow_logger.debug(
"BlockwiseSubSolver: Solving MC Problem for block %i with %i nodes / %i edges"
% (block_id, g.numberOfNodes, g.numberOfEdges)
)
obj = nifty.graph.opt.multicut.multicutObjective(g, costs)
factory = string_to_factory(obj, sub_solver_type, solver_params)
solver = factory.create(obj)
t_inf = time.time()
res = solver.optimize()
workflow_logger.debug(
"BlockwiseSubSolver: Inference for block %i with fusion moves solver in %f s"
% (block_id, time.time() - t_inf)
)
return res
# sequential for debugging
# sub_results = []
# for block_id, sub_problem in enumerate(sub_problems):
# print("Sequential prediction for block id:", block_id)
# print(type(sub_problem[0]))
# print(sub_problem[0].shape)
# print(sub_problem[0].dtype)
# sub_results.append(_solve_mc(sub_problem[2], costs[sub_problem[0]], block_id))
n_workers = min(len(sub_problems), PipelineParameter().nThreads)
with futures.ThreadPoolExecutor(max_workers=n_workers) as executor:
tasks = [executor.submit(_solve_mc,
sub_problem[2],
costs[sub_problem[0]],
block_id) for block_id, sub_problem in enumerate(sub_problems)]
sub_results = [task.result() for task in tasks]
cut_edges = np.zeros(number_of_edges, dtype=np.uint8)
assert len(sub_results) == len(sub_problems), str(len(sub_results)) + " , " + str(len(sub_problems))
for block_id in range(len(sub_problems)):
# get the cut edges from the subproblem
node_result = sub_results[block_id]
sub_uv_ids = sub_problems[block_id][2].uvIds()
edge_result = node_result[sub_uv_ids[:, 0]] != node_result[sub_uv_ids[:, 1]]
cut_edges[sub_problems[block_id][0]] += edge_result
# add up outer edges
if self.cutOuterEdges:
cut_edges[sub_problems[block_id][1]] += 1
# all edges which are cut at least once will be cut
out = self.output()
cut_edges[cut_edges >= 1] = 1
out.write(cut_edges)
# if we serialize the sub results, write them here
if self.serializeSubResults:
out.writeVlen(
np.array([sub_res for sub_res in sub_results]),
'sub_results'
)
def output(self):
save_name = "BlockwiseSubSolver_L%i_%s_%s_%s.h5" % (
self.level,
'_'.join(map(str, self.blockShape)),
'_'.join(map(str, self.blockOverlap)),
"modified" if PipelineParameter().defectPipeline else "standard"
)
save_path = os.path.join(PipelineParameter().cache, save_name)
return HDF5DataTarget(save_path)
# only works for level 1 for now!
class TestSubSolver(luigi.Task):
pathToSeg = luigi.Parameter()
pathToClassifier = luigi.Parameter()
blockShape = luigi.ListParameter(default=PipelineParameter().multicutBlockShape)
blockOverlap = luigi.ListParameter(default=PipelineParameter().multicutBlockOverlap)
def requires(self):
nodes2blocks = NodesToBlocks(self.pathToSeg, self.blockShape, self.blockOverlap)
return {
"seg": ExternalSegmentation(self.pathToSeg),
"problem": MulticutProblem(self.pathToSeg, self.pathToClassifier),
"nodes2blocks": nodes2blocks
}
@run_decorator
def run(self):
# Input
inp = self.input()
seg = inp["seg"]
seg.open()
problem = inp["problem"]
costs = problem.read("costs")
nodes2blocks = inp["nodes2blocks"].read()
graph = nifty.graph.UndirectedGraph()
graph.deserialize(problem.read("graph"))
workflow_logger.info("TestSubSolver: Starting extraction of subproblems.")
subproblems = self._run_subproblems_extraction(seg, graph, nodes2blocks)
workflow_logger.info("TestSubSolver: Starting solvers for subproblems.")
self._solve_subproblems(costs, subproblems)
seg.close()
def _run_subproblems_extraction(self, seg, graph, nodes2blocks):
# function for subproblem extraction
# extraction only for level 0
def extract_subproblem(blockId):
node_list = nodes2blocks[blockId]
inner_edges, outer_edges, subgraph = graph.extractSubgraphFromNodes(node_list.tolist())
return np.array(inner_edges), np.array(outer_edges), subgraph
blocking = nifty.tools.blocking(roiBegin=[0, 0, 0], roiEnd=seg.shape(), blockShape=self.blockShape)
number_of_blocks = blocking.numberOfBlocks
# sample block-ids corresponding to the number of threads
n_threads = PipelineParameter().nThreads
sampled_blocks = np.random.choice(number_of_blocks, min(n_threads, number_of_blocks), replace=False)
# parallel
with futures.ThreadPoolExecutor(max_workers=PipelineParameter().nThreads) as executor:
tasks = [executor.submit(extract_subproblem, block_id) for block_id in sampled_blocks]
sub_problems = [task.result() for task in tasks]
assert len(sub_problems) == len(sampled_blocks), "%i, %i" % (len(sub_problems), len(sampled_blocks))
return sub_problems
def _solve_subproblems(self, costs, sub_problems):
def _test_mc(g, costs, sub_solver_type):
if sub_solver_type in ('fm-ilp', 'fm-kl'):
solver_params = dict(
sigma=PipelineParameter().multicutSigmaFusion,
number_of_iterations=PipelineParameter().multicutNumIt,
n_stop=PipelineParameter().multicutNumItStopGlobal,
n_threads=0,
n_fuse=PipelineParameter().multicutNumFuse,
seed_fraction=PipelineParameter().multicutSeedFraction
)
else:
solver_params = dict()
obj = nifty.graph.opt.multicut.multicutObjective(g, costs)
solver = string_to_factory(obj, sub_solver_type, solver_params).create(obj)
t_inf = time.time()
res = solver.optimize()
t_inf = time.time() - t_inf
return obj.evalNodeLabels(res), t_inf
workflow_logger.info("TestSubSolver: Running sub-block tests for %i blocks" % len(sub_problems))
available = available_factorys()
results = {}
for sub_solver_type in available:
with futures.ThreadPoolExecutor(max_workers=PipelineParameter().nThreads) as executor:
tasks = [executor.submit(
_test_mc,
sub_problem[2],
costs[sub_problem[0]],
sub_solver_type) for sub_problem in sub_problems]
sub_results = [task.result() for task in tasks]
mean_energy = np.mean([rr[0] for rr in sub_results])
mean_time = np.mean([rr[1] for rr in sub_results])
results[sub_solver_type] = (mean_energy, mean_time)
for_serialization = []
for sub_solver_type in available:
res = results[sub_solver_type]
workflow_logger.info(
"TestSubSolver: Result of %s: mean-energy: %f, mean-inference-time: %f"
% (sub_solver_type, res[0], res[1])
)
for_serialization.append([res[0], res[1]])
self.output().write(available, 'solvers')
self.output().write(np.array(for_serialization), 'results')
def output(self):
blcksize_str = "_".join(map(str, list(self.blockShape)))
save_name = "TestSubSolver_%s_%s.h5" \
% (blcksize_str, "modified" if PipelineParameter().defectPipeline else "standard")
save_path = os.path.join(PipelineParameter().cache, save_name)
return HDF5DataTarget(save_path)
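# Illustrative usage sketch (not part of the original module): scheduling the
# benchmark task through luigi.  The segmentation / classifier paths and the
# cache directory below are placeholders that a real pipeline run would have
# to configure via PipelineParameter first.
#
#     import luigi
#     PipelineParameter().cache = '/path/to/cache'  # hypothetical setup
#     luigi.build(
#         [TestSubSolver(pathToSeg='/path/to/segmentation.h5',
#                        pathToClassifier='/path/to/classifier.pkl')],
#         local_scheduler=True
#     )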
|
constantinpape/mc_luigi
|
mc_luigi/blockwiseMulticutTasks.py
|
Python
|
mit
| 28,304
|
[
"VisIt"
] |
cd764d442511f063e1340667daf813c97ac8cec86aa47beb72d463fbece64c04
|
# This module implements classes for peptide chains and proteins.
#
# Written by Konrad Hinsen
# last revision: 2001-4-2
#
import Biopolymers, Bonds, ChemicalObjects, Collection, ConfigIO, Database
import Units, Universe, Utility
from Scientific.Geometry import Vector
import operator, string, umath
from Biopolymers import defineAminoAcidResidue
#
# Residues are special groups
#
class Residue(Biopolymers.Residue):
"""Amino acid residue
A Glossary:Subclass of Class:MMTK.ChemicalObjects.Group.
Amino acid residues are a special kind of group. Like any other
group, they are defined in the chemical database. Each residue
has two subgroups ('peptide' and 'sidechain') and is usually
connected to other residues to form a peptide chain. The database
contains three variants of each residue (N-terminal, C-terminal,
non-terminal) and various models (all-atom, united-atom,
C_alpha).
Constructor: Residue(|kind|, |model|="all")
Arguments:
|kind| -- the name of the residue in the chemical database. This
is the full name of the residue plus the suffix
"_nt" or "_ct" for the terminal variants.
|model| -- one of "all" (all-atom), "none" (no hydrogens),
"polar" (united-atom with only polar hydrogens),
"polar_charmm" (like "polar", but defining
polar hydrogens like in the CHARMM force field),
"polar_opls" (like "polar", but defining
polar hydrogens like in the latest OPLS force field),
"calpha" (only the C_alpha atom)
"""
def __init__(self, name = None, model = 'all'):
if name is not None:
blueprint = _residueBlueprint(name, model)
ChemicalObjects.Group.__init__(self, blueprint)
self.model = model
self._init()
def _init(self):
Biopolymers.Residue._init(self)
# create peptide attribute for calpha model
if self.model == 'calpha':
self.peptide = self
def _makeCystine(self):
if self.model == 'calpha':
return self
if string.lower(self.symbol) != 'cys':
raise ValueError, `self` + " is not cysteine."
new_residue = 'cystine_ss'
if hasattr(self, 'H_3'):
new_residue = new_residue + '_nt'
elif hasattr(self, 'O_2'):
new_residue = new_residue + '_ct'
new_residue = Residue(new_residue, self.model)
for g in ['peptide', 'sidechain']:
g_old = getattr(self, g)
g_new = getattr(new_residue, g)
atoms = map(lambda a: a.name, getattr(g_new, 'atoms'))
for a in atoms:
set_method = getattr(getattr(g_new, a), 'setPosition')
set_method(getattr(getattr(g_old, a), 'position')())
return new_residue
def isSubsetModel(self):
return self.model == 'calpha'
def backbone(self):
"Returns the peptide group."
return self.peptide
def sidechains(self):
"Returns the sidechain group."
return self.sidechain
def phiPsi(self, conf = None):
"Returns the values of the backbone dihedral angles phi and psi."
universe = self.universe()
if universe is None:
universe = Universe.InfiniteUniverse()
C = None
for a in self.peptide.N.bondedTo():
if a.parent != self.peptide:
C = a
if C is None:
phi = None
else:
phi = universe.dihedral(self.peptide.C, self.peptide.C_alpha,
self.peptide.N, C, conf)
N = None
for a in self.peptide.C.bondedTo():
if a.parent != self.peptide:
N = a
if N is None:
psi = None
else:
psi = universe.dihedral(N, self.peptide.C, self.peptide.C_alpha,
self.peptide.N, conf)
return phi, psi
def _residueBlueprint(name, model):
try:
blueprint = _residue_blueprints[(name, model)]
except KeyError:
if model == 'polar':
name = name + '_uni'
elif model == 'polar_charmm':
name = name + '_uni2'
elif model == 'polar_oldopls':
name = name + '_uni3'
elif model == 'none':
name = name + '_noh'
elif model == 'calpha':
name = name + '_calpha'
blueprint = Database.BlueprintGroup(name)
_residue_blueprints[(name, model)] = blueprint
return blueprint
_residue_blueprints = {}
#
# Peptide chains are molecules with added features.
#
class PeptideChain(Biopolymers.ResidueChain):
"""Peptide chain
A Glossary:Subclass of Class:MMTK.Biopolymers.ResidueChain.
Peptide chains consist of amino acid residues that are linked
by peptide bonds. They are a special kind of molecule, i.e.
all molecule operations are available.
Constructor: PeptideChain(|sequence|, **|properties|)
Arguments:
|sequence| -- the amino acid sequence. This can be a string
containing the one-letter codes, or a list
of three-letter codes, or a PDBPeptideChain object.
If a PDBPeptideChain object is supplied, the atomic
positions it contains are assigned to the atoms
of the newly generated peptide chain, otherwise the
positions of all atoms are undefined.
|properties| -- optional keyword properties:
- model: one of "all" (all-atom), "no_hydrogens" or "none" (no hydrogens),
"polar_hydrogens" or "polar" (united-atom with only polar
hydrogens), "polar_charmm" (like "polar", but defining
polar hydrogens like in the CHARMM force field),
"polar_opls" (like "polar", but defining
polar hydrogens like in the latest OPLS force field),
"calpha" (only the C_alpha atom of each residue). Default
is "all".
- n_terminus: 1 if the first residue should be constructed using the
N-terminal variant, 0 if the non-terminal version should
be used. Default is 1.
- c_terminus: 1 if the last residue should be constructed using the
C-terminal variant, 0 if the non-terminal version should
be used. Default is 1.
- circular: 1 if a peptide bond should be constructed between the first
and the last residue. Default is 0.
- name: a name for the chain (a string)
Peptide chains act as sequences of residues. If 'p' is a PeptideChain
object, then
- 'len(p)' yields the number of residues
- 'p[i]' yields residue number 'i' (counting from zero)
- 'p[i:j]' yields the subchain from residue number 'i' up to but excluding
residue number 'j'
"""
def __init__(self, sequence, **properties):
if sequence is not None:
model = 'all'
if properties.has_key('model'):
model = string.lower(properties['model'])
elif properties.has_key('hydrogens'):
model = properties['hydrogens']
if model == 1: model = 'all'
elif model == 0: model = 'none'
else: model = string.lower(model)
if model == 'no_hydrogens':
model = 'none'
elif model == 'polar_hydrogens':
model = 'polar'
n_term = self.binaryProperty(properties, 'n_terminus', 1)
c_term = self.binaryProperty(properties, 'c_terminus', 1)
circular = self.binaryProperty(properties, 'circular', 0)
self.version_spec = {'n_terminus': n_term,
'c_terminus': c_term,
'model': model,
'circular': circular}
if type(sequence[0]) == type(''):
conf = None
numbers = range(len(sequence))
else:
conf = sequence
sequence = conf.sequence()
numbers = map(lambda r: r.number, conf)
sequence = map(Biopolymers._fullName, sequence)
if model != 'calpha':
if n_term:
sequence[0] = sequence[0] + '_nt'
if c_term:
sequence[-1] = sequence[-1] + '_ct'
self.groups = []
n = 0
for residue, number in map(None, sequence, numbers):
n = n + 1
r = Residue(residue, model)
r.name = r.symbol + str(number)
r.sequence_number = n
r.parent = self
self.groups.append(r)
self._setupChain(circular, properties, conf)
is_peptide_chain = 1
def __getslice__(self, first, last):
return SubChain(self, self.groups[first:last])
def sequence(self):
"""Returns the primary sequence as a list of three-letter residue
codes."""
return map(lambda r: r.symbol, self.groups)
def backbone(self):
"Returns a collection containing the peptide groups of all residues."
backbone = Collection.Collection()
for r in self.groups:
try:
backbone.addObject(r.peptide)
except AttributeError:
pass
return backbone
def sidechains(self):
"Returns a collection containing the sidechain groups of all residues."
sidechains = Collection.Collection()
for r in self.groups:
try:
sidechains.addObject(r.sidechain)
except AttributeError:
pass
return sidechains
def phiPsi(self, conf = None):
"""Returns a list of the (phi, psi) backbone angle pairs
for each residue."""
universe = self.universe()
if universe is None:
universe = Universe.InfiniteUniverse()
angles = []
for i in range(len(self)):
r = self[i]
if i == 0:
phi = None
else:
phi = universe.dihedral(r.peptide.C, r.peptide.C_alpha,
r.peptide.N,
self[i-1].peptide.C, conf)
if i == len(self)-1:
psi = None
else:
psi = universe.dihedral(self[i+1].peptide.N,
r.peptide.C, r.peptide.C_alpha,
r.peptide.N, conf)
angles.append((phi, psi))
return angles
def replaceResidue(self, r_old, r_new):
"""Replaces residue |r_old|, which must be a residue object that
is part of the chain, by the residue object |r_new|."""
n = self.groups.index(r_old)
for a in r_old.atoms:
self.atoms.remove(a)
for b in r_old.bonds:
self.bonds.remove(b)
self.atoms = self.atoms + r_new.atoms
self.bonds = self.bonds + r_new.bonds
r_new.sequence_number = n+1
r_new.name = r_new.symbol+`n+1`
r_new.parent = self
self.groups[n] = r_new
if n > 0:
peptide_old = self.bonds.bondsOf(r_old.peptide.N)
self.bonds.remove(peptide_old[0])
self.bonds.append(Bonds.Bond((self.groups[n-1].peptide.C,
self.groups[n].peptide.N)))
if n < len(self.groups)-1:
peptide_old = self.bonds.bondsOf(r_old.peptide.C)
self.bonds.remove(peptide_old[0])
self.bonds.append(Bonds.Bond((self.groups[n].peptide.C,
self.groups[n+1].peptide.N)))
# add sulfur bridges between cysteine residues
def _addSSBridges(self, bonds):
for b in bonds:
cys1 = b[0]
if string.lower(cys1.symbol) == 'cyx':
cys_ss1 = cys1
else:
cys_ss1 = cys1._makeCystine()
self.replaceResidue(cys1, cys_ss1)
cys2 = b[1]
if string.lower(cys2.symbol) == 'cyx':
cys_ss2 = cys2
else:
cys_ss2 = cys2._makeCystine()
self.replaceResidue(cys2, cys_ss2)
self.bonds.append(Bonds.Bond((cys_ss1.sidechain.S_gamma,
cys_ss2.sidechain.S_gamma)))
def _descriptionSpec(self):
kwargs = ''
for name, value in self.version_spec.items():
kwargs = kwargs + name + '=' + `value` + ','
return "S", kwargs[:-1]
def _typeName(self):
return reduce(operator.add, self.sequence())
def _graphics(self, conf, distance_fn, model, module, options):
if model != 'backbone':
return ChemicalObjects.Molecule._graphics(self, conf,
distance_fn, model,
module, options)
color = options.get('color', 'black')
material = module.EmissiveMaterial(color)
objects = []
for i in range(len(self.groups)-1):
a1 = self.groups[i].peptide.C_alpha
a2 = self.groups[i+1].peptide.C_alpha
p1 = a1.position(conf)
p2 = a2.position(conf)
if p1 is not None and p2 is not None:
bond_vector = 0.5*distance_fn(a1, a2, conf)
cut = bond_vector != 0.5*(p2-p1)
if not cut:
objects.append(module.Line(p1, p2, material = material))
else:
objects.append(module.Line(p1, p1+bond_vector,
material = material))
objects.append(module.Line(p2, p2-bond_vector,
material = material))
return objects
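# Illustrative usage sketch (not part of MMTK itself): building a chain whose
# atoms have defined positions (here from a hypothetical PDBPeptideChain
# object named pdb_chain) and inspecting its backbone dihedrals.
#
#     from MMTK.Proteins import PeptideChain
#     chain = PeptideChain(pdb_chain)
#     for phi, psi in chain.phiPsi():
#         print phi, psi   # terminal residues report None for the missing angle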
#
# Subchains are created by slicing chains or extracting a chain from
# a group of connected chains.
#
class SubChain(PeptideChain):
"""A contiguous part of a peptide chain
SubChain objects are the result of slicing operations on
PeptideChain objects. They cannot be created directly.
SubChain objects permit all operations of PeptideChain
objects, but cannot be added to a universe.
"""
def __init__(self, chain=None, groups=None, name = ''):
if chain is not None:
self.groups = groups
self.atoms = []
self.bonds = []
for g in self.groups:
self.atoms = self.atoms + g.atoms
self.bonds = self.bonds + g.bonds
for i in range(len(self.groups)-1):
link1 = self.groups[i].chain_links[1]
link2 = self.groups[i+1].chain_links[0]
self.bonds.append(Bonds.Bond((link1, link2)))
self.bonds = Bonds.BondList(self.bonds)
self.name = name
self.model = chain.model
self.parent = chain.parent
self.type = None
self.configurations = {}
self.part_of = chain
is_incomplete = 1
def __repr__(self):
if self.name == '':
return 'SubChain of ' + repr(self.part_of)
else:
return ChemicalObjects.Molecule.__repr__(self)
__str__ = __repr__
#
# Connected chains are collections of peptide chains connected by s-s bridges.
#
class ConnectedChains(PeptideChain):
# Peptide chains connected by sulfur bridges
#
# A group of peptide chains connected by sulfur bridges must be considered
# a single molecule due to the presence of chemical bonds. Such a molecule
# is represented by a ConnectedChains object. These objects are created
# automatically when a Protein object is assembled. They are normally
# not used directly by application programs. When a chain with sulfur
# bridges to other chains is extracted from a Protein object, the
# return value is a SubChain object that indirectly refers to a
# ConnectedChains object.
def __init__(self, chains=None):
if chains is not None:
self.chains = []
self.groups = []
self.atoms = []
self.bonds = Bonds.BondList([])
self.chain_names = []
self.model = chains[0].model
version_spec = chains[0].version_spec
for c in chains:
if c.version_spec['model'] != version_spec['model']:
raise ValueError, "mixing chains of different model: " + \
c.version_spec['model'] + "/" + \
version_spec['model']
ng = len(self.groups)
self.chains.append((c.name, ng, ng+len(c.groups),
c.version_spec))
self.groups = self.groups + c.groups
self.atoms = self.atoms + c.atoms
self.bonds = self.bonds + c.bonds
try: name = c.name
except AttributeError: name = ''
self.chain_names.append(name)
for g in self.groups:
g.parent = self
self.name = ''
self.parent = None
self.type = None
self.configurations = {}
is_connected_chains = 1
def __len__(self):
return len(self.chains)
def __getitem__(self, item):
c = self.chains[item]
chain = SubChain(self, self.groups[c[1]:c[2]], c[0])
chain.version_spec = c[3]
return chain
def __getslice__(self, first, last):
raise TypeError, "Can't slice connected chains"
def _graphics(self, conf, distance_fn, model, module, options):
if model != 'backbone':
return ChemicalObjects.Molecule._graphics(self, conf,
distance_fn, model,
module, options)
objects = []
for chain in self:
objects = objects + chain._graphics(conf, distance_fn,
model, module, options)
return objects
#
# Proteins are complexes of peptide chains, connected peptide chains,
# and possibly other things.
#
class Protein(ChemicalObjects.Complex):
"""Protein
A Glossary:Subclass of Class:MMTK.Complex.
A Protein object is a special kind of a Complex object which
is made up of peptide chains.
Constructor: Protein(|specification|, **|properties|)
Arguments:
|specification| -- one of:
- a list of peptide chain objects
- a string, which is interpreted as the name of a database definition
for a protein. If that definition does not exist, the string
is taken to be the name of a PDB file, from which all peptide chains
are constructed and assembled into a protein.
|properties| -- optional keyword properties:
- model: one of "all" (all-atom), "no_hydrogens" or "none" (no hydrogens),
"polar_hydrogens" or "polar" (united-atom with only polar
hydrogens), "polar_charmm" (like "polar", but defining
polar hydrogens like in the CHARMM force field),
"polar_opls" (like "polar", but defining
polar hydrogens like in the latest OPLS force field),
"calpha" (only the C_alpha atom of each residue). Default
is "all".
- position: the center-of-mass position of the protein (a vector)
- name: a name for the protein (a string)
If the atoms in the peptide chains that make up a protein have
defined positions, sulfur bridges within chains and between
chains will be constructed automatically during protein generation
    based on a distance criterion between cysteine sidechains.
Proteins act as sequences of chains. If 'p' is a Protein object, then
- 'len(p)' yields the number of chains
- 'p[i]' yields chain number 'i' (counting from zero)
"""
def __init__(self, *items, **properties):
if items == (None,):
return
self.name = ''
if len(items) == 1 and type(items[0]) == type(''):
try:
filename = Database.databasePath(items[0], 'Proteins')
found = 1
except IOError:
found = 0
if found:
blueprint = Database.BlueprintProtein(items[0])
items = blueprint.chains
for attr, value in vars(blueprint).items():
if attr not in ['type', 'chains']:
setattr(self, attr, value)
else:
import PDB
conf = PDB.PDBConfiguration(items[0])
model = properties.get('model', 'all')
items = conf.createPeptideChains(model)
molecules = []
for i in items:
if ChemicalObjects.isChemicalObject(i):
molecules.append(i)
else:
molecules = molecules + list(i)
for m, i in map(None, molecules, range(len(molecules))):
m._numbers = [i]
if not m.name:
m.name = 'chain'+`i`
ss = self._findSSBridges(molecules)
new_mol = {}
for m in molecules:
new_mol[m] = ([m],[])
for bond in ss:
m1 = new_mol[bond[0].topLevelChemicalObject()]
m2 = new_mol[bond[1].topLevelChemicalObject()]
if m1 == m2:
m1[1].append(bond)
else:
combined = (m1[0] + m2[0], m1[1] + m2[1] + [bond])
for m in combined[0]:
new_mol[m] = combined
self.molecules = []
while new_mol:
m = new_mol.values()[0]
for i in m[0]:
del new_mol[i]
bonds = m[1]
if len(m[0]) == 1:
m = m[0][0]
else:
numbers = reduce(operator.add, map(lambda i: i._numbers, m[0]))
m = ConnectedChains(m[0])
m._numbers = numbers
m._addSSBridges(bonds)
m.parent = self
self.molecules.append(m)
self.atoms = []
self.chains = []
for m in self.molecules:
self.atoms = self.atoms + m.atoms
if hasattr(m, 'is_connected_chains'):
for c, name, i in map(None, range(len(m)),
m.chain_names, m._numbers):
self.chains.append((m, c, name, i))
else:
try: name = m.name
except AttributeError: name = ''
self.chains.append((m, None, name, m._numbers[0]))
self.chains.sort(lambda c1, c2: cmp(c1[3], c2[3]))
self.chains = map(lambda c: c[:3], self.chains)
self.parent = None
self.type = None
self.configurations = {}
try:
self.name = properties['name']
del properties['name']
except KeyError: pass
if properties.has_key('position'):
self.translateTo(properties['position'])
del properties['position']
self.addProperties(properties)
undefined = 0
for a in self.atoms:
if a.position() is None:
undefined = undefined + 1
if undefined > 0 and undefined != len(self.atoms):
Utility.warning('Some atoms in a protein ' +
'have undefined positions.')
is_protein = 1
def __len__(self):
return len(self.chains)
def __getitem__(self, item):
if type(item) == type(0):
m, c, name = self.chains[item]
else:
for m, c, name in self.chains:
if name == item:
break
if name != item:
raise ValueError, 'No chain with name ' + item
if c is None:
return m
else:
return m[c]
def residuesOfType(self, *types):
"""Returns a collection that contains all residues whose type
(one- or three-letter code) is contained in |types|."""
rlist = Collection.Collection([])
for m in self.molecules:
if isPeptideChain(m):
rlist = rlist + apply(m.residuesOfType, types)
return rlist
def backbone(self):
"""Returns a collection containing the peptide groups of all residues
in all chains."""
rlist = Collection.Collection([])
for m in self.molecules:
if isPeptideChain(m):
rlist = rlist + m.backbone()
return rlist
def sidechains(self):
"""Returns a collection containing the sidechain groups of all
residues in all chains."""
rlist = Collection.Collection([])
for m in self.molecules:
if isPeptideChain(m):
rlist = rlist + m.sidechains()
return rlist
def residues(self):
"Returns a collection containing all residues in all chains."
rlist = Collection.Collection([])
for m in self.molecules:
if isPeptideChain(m):
rlist = rlist + m.residues()
return rlist
def phiPsi(self, conf = None):
"""Returns a list containing the phi/psi backbone dihedrals for
all chains."""
angles = []
for chain in self:
angles.append(chain.phiPsi(conf))
return angles
_ss_bond_max = 0.25*Units.nm
def _findSSBridges(self, molecules):
molecules = filter(lambda m: hasattr(m, 'is_peptide_chain'), molecules)
cys = Collection.Collection([])
for m in molecules:
if m.version_spec['model'] != 'calpha':
cys = cys + m.residuesOfType('cys') + m.residuesOfType('cyx')
s = cys.map(lambda r: r.sidechain.S_gamma)
ns = len(s)
ss = []
for i in xrange(ns-1):
for j in xrange(i+1,ns):
r1 = s[i].position()
r2 = s[j].position()
if r1 and r2 and (r1-r2).length() < self._ss_bond_max:
ss.append((cys[i], cys[j]))
return ss
def _subunits(self):
return list(self)
def _description(self, tag, index_map, toplevel):
if not toplevel:
raise ValueError
return 'l(' + `self.__class__.__name__` + ',' + `self.name` + ',[' + \
reduce(operator.add,
map(lambda o, t=tag, m=index_map:
o._description(t, m, 1)+',',
self)) + '])'
def _graphics(self, conf, distance_fn, model, module, options):
if model != 'backbone':
return ChemicalObjects.Complex._graphics(self, conf, distance_fn,
model, module, options)
objects = []
for chain in self:
objects = objects + chain._graphics(conf, distance_fn,
model, module, options)
return objects
#
# Type check functions
#
def isPeptideChain(x):
"Returns 1 f |x| is a peptide chain."
return hasattr(x, 'is_peptide_chain')
def isProtein(x):
"Returns 1 f |x| is a protein."
return hasattr(x, 'is_protein')
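# Illustrative usage sketch (not part of the module): assembling a protein
# directly from a PDB file; the file name is a placeholder.  Sulfur bridges
# are added automatically when atom positions are defined (see Protein above).
#
#     from MMTK.Proteins import Protein
#     protein = Protein('insulin.pdb')
#     print len(protein), 'chains'
#     cysteines = protein.residuesOfType('cys')
#     backbone = protein.backbone()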
|
fxia22/ASM_xf
|
PythonD/site_python/MMTK/Proteins.py
|
Python
|
gpl-2.0
| 24,134
|
[
"CHARMM"
] |
d5b0e406e070974e9c911ca40f56fdcbfb2a3d8f9ac6c5e606f9c6b8a775ea3c
|
#!/usr/bin/env python
##########################################################################
#
# Copyright (C) 2015-2016 Sam Westreich
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation;
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##########################################################################
#
# DIAMOND_subsystems_analysis_counter.py
# Created 2/01/2017, this version edited 3/20/2017
# Sam Westreich, stwestreich@ucdavis.edu, github.com/transcript
#
# This program parses through the results file from a DIAMOND annotation run
# (in BLAST m8 format) to get the results into something more compressed
# and readable, against the SUBSYSTEMS database.
#
# Usage:
#
# -I infile specifies the infile (a DIAMOND results file
# in m8 format)
# -D database specifies a reference database to search against
# for results
# -O outfile specifies a name for the outfile (otherwise defaults
# to $name_hierarchy.tsv)
# -P partial partial outfile; a list of all reads with their
# hierarchy results (OPTIONAL)
#
##########################################################################
# imports
import operator, sys, time, gzip, re
# String searching function:
def string_find(usage_term):
for idx, elem in enumerate(sys.argv):
this_elem = elem
next_elem = sys.argv[(idx + 1) % len(sys.argv)]
if elem == usage_term:
return next_elem
t0 = time.time()
# loading starting file
if "-I" in sys.argv:
infile_name = string_find("-I")
else:
sys.exit ("WARNING: infile must be specified using '-I' flag.")
infile = open (infile_name, "r")
# setting up databases
hit_count_db = {}
unique_seq_db = {}
read_id_db = {}
line_counter = 0
# reading through the infile
for line in infile:
line_counter += 1
splitline = line.split("\t")
if line_counter % 1000000 == 0:
t99 = time.time()
print (str(line_counter)[:-6] + "M lines processed so far in " + str(t99-t0) + " seconds.")
unique_seq_db[splitline[0]] = 1
if "-P" in sys.argv:
read_id_db[splitline[0]] = splitline[1]
try:
hit_count_db[splitline[1]] += 1
except KeyError:
hit_count_db[splitline[1]] = 1
continue
t1 = time.time()
# results reporting
print ("\nAnalysis of " + infile_name + " complete.")
print ("Number of total lines: " + str(line_counter))
print ("Number of unique sequences: " + str(len(unique_seq_db)))
print ("Time elapsed: " + str(t1-t0) + " seconds.")
infile.close()
# time to search for these in the reference database
if "-D" in sys.argv:
db_name = string_find("-D")
else:
sys.exit( "No database file indicated; skipping database search step.")
# IO
try:
db = open (db_name, "r", encoding='utf-8', errors='ignore')
except TypeError:
# error catching for Python 2
db = open (db_name, "r")
if "-P" in sys.argv:
partial_outfile_name = string_find("-P")
partial_outfile = open(partial_outfile_name, "w")
print ("\nStarting database analysis now.")
t2 = time.time()
# building a dictionary of the reference database
db_hier_dictionary = {}
db_line_counter = 0
db_error_counter = 0
for line in db:
    if line.startswith(">"):
db_line_counter += 1
splitline = line.split("\t", 1)
# ID, the hit returned in DIAMOND results
db_id = str(splitline[0])[1:]
# name and functional description
if "NO HIERARCHY" in splitline[1]:
db_hier = "NO HIERARCHY"
else:
hier_split = splitline[1].split("\t")
if hier_split[3].strip() != "":
db_hier = hier_split[0] + "\t" + hier_split[1] + "\t" + hier_split[2] + "\t" + hier_split[3]
else:
db_hier = hier_split[0] + "\t" + hier_split[1] + "\t\t" + hier_split[2] + "\t" + hier_split[3]
# add to dictionaries
db_hier_dictionary[db_id] = db_hier
# line counter to show progress
if db_line_counter % 1000000 == 0: # each million
t95 = time.time()
print (str(db_line_counter) + " lines processed so far in " + str(t95-t2) + " seconds.")
t3 = time.time()
print ("\nSuccess!")
print ("Time elapsed: " + str(t3-t2) + " seconds.")
print ("Number of lines: " + str(db_line_counter))
print ("Number of errors: " + str(db_error_counter))
# printing out the partial outfile
if "-P" in sys.argv:
for entry in read_id_db.keys():
partial_outfile.write(entry + "\t" + read_id_db[entry] + "\t" + db_hier_dictionary[read_id_db[entry]] + "\n")
# condensing down the identical matches
condensed_hit_db = {}
for entry in hit_count_db.keys():
org = db_hier_dictionary[entry]
if org in condensed_hit_db.keys():
condensed_hit_db[org] += hit_count_db[entry]
else:
condensed_hit_db[org] = hit_count_db[entry]
# dictionary output and summary
print ("\nDictionary database assembled.")
print ("Time elapsed: " + str(t3-t2) + " seconds.")
print ("Number of errors: " + str(db_error_counter))
print ("\nTop ten hierarchy matches:")
for k, v in sorted(condensed_hit_db.items(), key=lambda kv: -kv[1])[:10]:
try:
print (str(v) + "\t" + k )
except KeyError:
print (str(v) + "\tWARNING: Key not found for " + k)
continue
# creating the outfiles
if "-O" in sys.argv:
outfile_name = string_find("-O")
else:
outfile_name = infile_name[:-4] + ".hierarchy"
outfile = open (outfile_name, "w")
# writing the output
error_counter = 0
for k, v in sorted(condensed_hit_db.items(), key=lambda kv: -kv[1]):
try:
q = v * 100 / float(line_counter)
outfile.write (str(q) + "\t" + str(v) + "\t" + k + "\n")
except KeyError:
outfile.write (str(q) + "\t" + str(v) + "\tWARNING: Key not found for " + k + "\n")
error_counter += 1
continue
print ("\nAnnotations saved to file: '" + outfile_name + "'.")
print ("Number of errors: " + str(error_counter))
db.close()
outfile.close()
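# Example invocation (illustrative; the file names below are placeholders):
#
#   python DIAMOND_subsystems_analysis_counter.py \
#       -I sample.m8 -D subsystems_db.fa -O sample.hierarchy -P sample.partial
#
# -I and -D are required above; -O is optional (a default name is derived
# from the infile) and -P optionally writes the per-read hierarchy list.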
|
transcript/samsa_v2
|
python_scripts/DIAMOND_subsystems_analysis_counter.py
|
Python
|
gpl-3.0
| 6,159
|
[
"BLAST"
] |
77a5cb808ee37c2b026b964ca3883e157b2b3783aa94daca028ddd7e8a4358ec
|
class User(object):
def __init__(self):
pass
def schema():
_schema = {
"title" : "User",
"type" : "object",
"properties" : {
"id" : {"type": "string"},
"apikey" : {
"description" : "User apikey for webservice calls",
"type" : "string"
},
"created_on" : {"type" : "date-time"},
"last_modified" : {"type" : "date-time"},
"email" : {"type" : "email"},
"fullname" : {"type" : "string"},
"password_hash" : {
"description" : "Hash of users password",
"type" : "string"
},
"contact" : {
"description" : "Embedded Contact object",
"type" : "object"
},
"notes" : {
"description" : "List of Notes objects",
"type" : "array",
"minimum" : 0,
"items": {"type" : "object"}
}
}
}
return _schema
class Note(object):
def __init__(self):
pass
def schema():
_schema = {
"type" : "object",
"properties": {
"id" : {"type" : "string"},
"created_by" : {
"description" : "Id of user who created note",
"type" : "string"
},
"created_on" : { "type" : "date-time"},
"key" : { "type" : "string"},
"note" : {
"description" : "The text for the entry",
"type" : "string"
},
"units" : {"type" : "string"}
}
}
return _schema
class Project(object):
def __init__(self):
pass
def schema():
_schema = {
"type" : "object",
"description" : "",
"properties" : {
"id" : {"type" : "string"},
"name" : {"type" : "string"},
"description" : { "type" : "string"},
"datadir" : {
"description" : "id of root dataDir",
"type" : "string"
},
"owner" : {"type" : "string"},
"created_on" : {"type" : "date-time"},
"last_modified" : {"type" : "date-time"},
"notes" : {
"description" : "List of Notes objects",
"type" : "array",
"minimum" : 0,
"items": {"type" : "object"}
},
"tags" : {
"description" : "List of GlobalTag Ids",
"type": "array",
"minimum" : 0,
"items" : {"type" : "string"}
},
"reviews" : {
"description" : "List of Review Ids",
"type" : "array",
"minimum" : 0,
"items" : {"type" : "string"}
}
}
}
return _schema
class GlobalTag(object):
def __init__(self):
pass
def schema():
_schema = {
"description" : "Tags that are global to the system",
"type" : "object",
"properties" : {
"id" : {
"description" : "Because the name is unique, the name is the id",
"type" : "string"
},
"description" : {"type" : "string"},
"notes" : {
"description" : "List of Notes objects",
"type" : "array",
"minimum" : 0,
"items": {"type" : "object"}
},
}
}
return _schema
class UserTag(object):
def __init__(self):
pass
def schema():
_schema = {
"description" : "Tags that are global to the system",
"type" : "object",
"properties" : {
"id" : {
"description" : "Unlike global tags, user tags do not have unique names",
"type" : "string"
},
"name" : {"type" : "string"},
"user" : {
"description" : "Id of user owning this tag",
"type" : "string"
},
"description" : {"type" : "string"},
"notes" : {
"description" : "List of Notes objects",
"type" : "array",
"minimum" : 0,
"items": {"type" : "object"}
},
# Needs a list of items that it is tagged to and moved out of DataDir, and DataFile
}
}
return _schema
class Machine(object):
def __init__(self):
pass
def schema():
_schema = {
"description" : "hardware such as a computer, microscope, etc... where you generated data",
"type" : "object",
"properties" : {
"id" : {"type" : "string"},
"name" : {
"description" : "machine name, eg 'flux'",
"type" : "hostname"
},
"fullname": {
"description" : "machine long name, eg flux-log.engin.umich.edu",
"type": "hostname"
},
"contact" : {
"description" : "Embedded Contact object",
"type" : "object"
},
"notes" : {
"description" : "List of Notes objects",
"type" : "array",
"minimum" : 0,
"items": {"type" : "object"}
}
}
}
return _schema
class Contact(object):
def __init__(self):
pass
def schema():
_schema = {
"description" : "Embedded object, not a database table",
"type" : "object",
"properties" : {
"fullname" : {"type" : "string"},
"email" : {"type" : "email"},
"phone" : {"type" : "string"},
"website" : {"type" : "uri"},
"avatar" : {
"description" : "http or file reference for picture",
"type" : "uri"
},
"description" : {
"description" : "Contact description/bio",
"type" : "string"
},
"affiliation" : {
"description" : "Contact affiliation, eg University of Michigan",
"type" : "string"
},
"notes" : {
"description" : "List of Notes objects",
"type" : "array",
"minimum" : 0,
"items": {"type" : "object"}
},
}
}
return _schema
class UserGroup(object):
def __init__(self):
pass
def schema():
_schema = {
"description" : "",
"type" : "object",
"properties" : {
"id" : {"type" : "string"},
"created_on" : {"type" : "date-time"},
"last_modified" : {"type" : "date-time"},
"owner" : {
"description" : "The Id for the user who created the group",
"type" : "string"
},
"name" : {"type" : "string"},
"users" : {
"description" : "List of user (ids) who are in the group",
"type" : "array",
"minimum" : 0,
"items" : {"type" : "string"}
}
}
}
return _schema
class Process(object):
def __init__(self):
pass
def schema():
_schema = {
"description" : "",
"type" : "object",
"properties" : {
"id" : {"type" : "string"},
"name" : {
"description" : "Name given by user",
"type" : "string"
},
"created_on" : {"type" : "date-time"},
"last_modified" : {"type" : "date-time"},
"created_by" : {
"description" : "The id of user who created the process",
"type" : "string"
},
"machine" : {
"description" : "id of machine process was performed on",
"type" : "string"
},
"process_type" : {
"description" : "Type of process, eg 'VASP', 'GCMC', etc...",
"type" : "string"
},
"version" : {"type" : "string"},
"parent" : {
# What is this?
"description:" : "Parent process id this process was based on",
"type" : "string"
},
"notes" : {
"description" : "List of Notes objects",
"type" : "array",
"minimum" : 0,
"items": {"type" : "object"}
},
"input_dataitems" : {
"description" : "List of DataItem ids",
"type" : "array",
"minimum" : 0,
"items" : {"type" : "string"}
},
"input_datasets" : {
"description" : "List of DataSet ids",
"type" : "array",
"minimum" : 0,
"items" : {"type" : "string"}
},
"output_dataitems" : {
"description" : "List of DataItem ids",
"type" : "array",
"minimum" : 0,
"items" : {"type" : "string"}
},
"output_datasets" : {
"description" : "List of DataSet ids",
"type" : "array",
"minimum" : 0,
"items" : {"type" : "string"}
},
"runs" : {
"description" : "List of Run objects",
"type" : "array",
"minimum" : 0,
"items" : {
"type" : "object",
"properties" : {
"startTime" : {"type" : "date-time"},
"stopTime" : {"type" : "date-time"},
"errorMessages" : {
"description" : "List of error messages",
"type" : "array",
"minimum" : 0,
"items" : {"type" : "string"}
},
},
                    },
                },
"citations" : {
# What to do with this - separate into an object?
"description" : "List of Citation objects",
"type" : "array",
"minimum" : 0,
"items" : {"type" : "object"}
},
"status" : {"enum" : ["aborted", "successful", "running", "cancelled", "queued"]}
# For prov, also want:
# created_by_name,
# machine_name,
# input_dataitem_names,
# input_dataset_names,
# output_dataitem_names,
# output_dataset_names
}
}
return _schema
class Run(object):
def __init__(self):
pass
def schema():
_schema = {
"description" : "Start time, stop time, and error messages associated with one run of a Process.",
"type" : "object",
"properties" : {
"started" : {"type" : "date-time"},
"stopped" : {"type" : "date-time"}
"error_messages" : {
"type" : "array",
"minimum" : 0,
"items" : {"type" : "string"}
}
}
}
return _schema
class Citation(object):
def __init__(self):
pass
def schema():
_schema = {
"description" : "",
"type" : "object",
"properties" : {
# what should go here? BibTex string? or BibTex data items?
"text" : {"type" : "string"}
}
}
return _schema
class DataDir(object):
def __init__(self):
pass
def schema():
_schema = {
"description" : "",
"type" : "object",
"properties" : {
"id" : {"type" : "string"},
"name" : {"type" : "string"},
"owner" : {
"description" : "Id of user who owns this object",
"type" : "string"
},
"access" : {
"description" : "Is it private or public",
# Are there other access types?
"enum" : ["private", "public"]
},
"dataitems" : {
"description" : "List of DataItem Ids",
"type" : "string"
},
"created_on" : {"type" : "date-time"},
"last_modified" : {"type" : "date-time"},
"marked_for_review" : {"type" : "boolean"},
"my_tags" : {
# must be filtered for a particular user
"description" : "List of ids for tags users privately create",
"type" : "array",
"minimum" : 0,
"items" : {"type" : "string"}
},
"tags" : {
"description" : "List of ids for global tags",
"type" : "array",
"minimum" : 0,
"items" : {"type" : "string"}
},
"parent_datadir" : {
"description" : "parent data dir id",
"type" : "string"
},
"reviews" : {
"description" : "List of review object ids",
"type" : "array",
"minimum" : 0,
"items" : {"type", "string"}
}
}
}
return _schema
class Review(object):
def __init__(self):
pass
def schema():
_schema = {
"description" : "",
"type" : "object",
"properties" : {
"id" : {"type" : "string"},
"who" : {
"description" : "Who should conduct the review",
"type" : "string"
},
"owner" : {
"description" : "Who requested the review",
"type" : "string"
},
"note" : {
"description" : "A note describing what you want out of the review",
"type" : "string"
},
"type" {
"description" : "The type of review requested.",
# Are there other types?
"enum" : ["dataparam", "datafile", "datadir", "project"]
},
"item_name" : {
"description" : "Name of item the review is for",
"type" : "string"
},
"item_id" : {
"description" : "Id of item to review",
"type" : "string"
},
"marked_on" : {
"description" : "Date item was marked for review",
"type" : "date-time"
}
}
}
return _schema
class DataItem(object):
def __init__(self):
pass
def schema():
_schema = {
"description" : "A data file and/or parameters",
"type" : "object",
"properties" : {
## Static items -----------------------------------------------
## These must not change after creating the DataItem
## Provenance is unchanging, trustworthy
## The file this points at should not change, though it's location might
"id" : {"type" : "string"},
"checksum" : {
"description" : "file checksum",
"type" : "string"
},
"size" : {"type" : "integer"},
"created_on" : {"type" : "date-time"}, # is the file or the DataItem object? I assume object?
"owner" : {
"description" : "Id of user who owns this object",
"type" : "string"
},
"machine" : {
"description" : "ID of machine dataitem was created with",
"type" : "string"
},
"media_type" : {
"description" : "Type of file as determined by Tika",
"type" : "string"
},
"params" : {
"description" : "List of Notes objects used as parameters",
"type" : "array",
"minimum" : 0,
"items": {"type" : "object"}
},
## Mutable items -----------------------------------------------
## These may change after creating the DataItem
## Should not use for provenance
"name" : {"type" : "string"},
"description" : {"type" : "string"},
"last_modified" : {"type" : "date-time"}, # is the file or the DataItem object? I assume object?
"location" : {
"description" : "Location on disk, relative to some base",
"type" : "string"
},
"access" : {
"description" : "Is it private or public",
# Are there other access types?
"enum" : ["private", "public"]
},
"datadirs" : {
"description" : "List of dataDir ids this file can be found in",
"type" : "array",
"minimum" : 1,
"items" : {"type" : "string"}
},
"creator" : {
"description" : "The ID of the Process that created this",
"type" : "string"
},
"my_tags" : {
# must be filtered for a particular user
"description" : "List of ids for tags users privately create",
"type" : "array",
"minimum" : 0,
"items" : {"type" : "string"}
},
"tags" : {
"description" : "List of ids for global tags",
"type" : "array",
"minimum" : 0,
"items" : {"type" : "string"}
},
"meta_tags" : {
"description" : "Dictionary of key/value pairs 'name'/'value'",
"type" : "array",
"items" : {"type" : "object"}
},
"marked_for_review" : {"type" : "boolean"},
"reviews" : {
"description" : "List of review object ids",
"type" : "array",
"minimum" : 0,
"items" : {"type", "string"}
},
"notes" : {
"description" : "List of Notes objects",
"type" : "array",
"minimum" : 0,
"items": {"type" : "object"}
},
"text" : {
"description" : "Text extracted from document",
# We may not need to store this
"type" : "string"
}
# For prov, also want:
# owner_name,
# machine_name,
# input_dataitem_names,
# input_dataset_names,
# output_dataitem_names,
# output_dataset_names
}
}
return _schema
class DataSet(object):
def __init__(self):
pass
def schema():
_schema = {
"description" : "",
"type" : "object",
"properties" : {
## Static items -----------------------------------------------
## These must not change after creating the DataSet
## Provenance is unchanging, trustworthy
## The file this points at should not change, though it's location might
"id" : {"type" : "string"},
"name": {"type" : "string"},
"created_on" : {"type" : "date-time"},
"dataitems" : {
"description" : "List of DataFile ids",
"type" : "array",
"minimum" : 0,
"items" : {"type" : "string"}
},
"owner" : {
"description" : "User id who created this DataSet"
"type" : "string"
},
## Mutable items -----------------------------------------------
## These may change after creating the DataItem
## Should not use for provenance
"notes" : {
"description" : "List of Notes objects",
"type" : "array",
"minimum" : 0,
"items": {"type" : "object"}
}
# For prov, also want:
# dataitem_names,
# owner_name
}
}
return _schema
class News(object):
def __init__(self):
pass
def schema():
_schema = {
"description" : "",
"type" : "object",
"properties" : {
"id" : {"type" : "string"},
"date" : {"type" : "date-time"},
"title" : {"type" : "string"},
"body" : {"type" : "string"}
}
}
return _schema
class UDJob(object):
def __init__(self):
pass
def schema():
_schema = {
# Needs work
"description" : "Upload/Download Job",
"type" : "object",
"properties" : {
"id" : {"type" : "string"},
"name" : {"type" : "string"},
"type" : {
"description" : "Type of job",
"enum" : ["upload", "download"]
},
"created_by" : {
"description" : "User id who created",
"type" : "string"
},
"host" : {
"description" : "Host to do upload/download to",
"type" : "ipv4",
},
"path" : {
"description" : "Path to load from/to",
"type" : "string"
},
"status" : {
"enum" : ["TBD"]
},
"note" : {"type" : "string"},
"submitted_on" : {"type" : "date-time"},
"datadir" : {
"description" : "DataDir Id to upload to/download from"
"type" : "string"
}
"logs" : {
"description" : "List of Log entries"
"type" : "array",
"minimum" : 0,
"items" : {
"type" : "object",
"properties" : {
"tstamp" : {"type" : "date-time"},
"entry" : {"type" : "string"}
}
}
}
}
}
return _schema
class Template(object):
def __init__(self):
pass
def schema():
_schema = {
"description" : "Template for various types - TBD",
"type" : "object",
"properties" : {
"id" : {"type" : "string"},
}
}
return _schema
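# Illustrative sketch (not part of the original module): validating a document
# with the third-party `jsonschema` package -- an assumption, this module has
# no such dependency.  Citation is used because its schema only contains
# standard JSON Schema types; most of the other schemas above use non-standard
# types ("date-time", "email", "uri", ...) that a strict validator would
# reject unless they were mapped to JSON Schema formats first.  Also assumes
# Python 3, where Citation.schema() is callable directly (under Python 2 the
# schema methods would need @staticmethod).
def _example_validate_citation(document):
    import jsonschema  # assumed to be installed
    jsonschema.validate(document, Citation.schema())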
|
materials-commons/materialscommons-clients
|
python/materialscommons/dm.py
|
Python
|
gpl-2.0
| 25,238
|
[
"VASP"
] |
798bf5d3760a623603e5ad0a799bec21ed2c1395fb85cb422f466d44069c1bc0
|
#!/bin/env python3
import sys
import argparse
import math
import re
import traceback
from directives import DirectiveProcessor, D_Pragma
from tokens import Tokenizer
from renderers import *
from sdscp_errors import *
import statements
import getpass
VERSION = '1.5.0'
# ==================== Command Line Arguments processing =======================
parser = argparse.ArgumentParser(
description='SDS-C macro preprocessor v%s' % VERSION,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
+-------------------------------------------------------+
| For bug reporting and more info, please visit: |
| https://github.com/MightyPork/sdscp |
| |
| Complete documentation in Czech can be viewed here: |
| https://goo.gl/mZ1oOg (Google Docs) |
| |
| SDSCP (c) Ondřej Hruška, 2014-2017 |
+-------------------------------------------------------+
"""
)
parser.add_argument(
'source',
help='The source file to process'
)
parser.add_argument(
'-V', '--version',
help='Show the SDSCP version.',
action='version',
version=VERSION
)
parser.add_argument(
'-o', '--output',
help='The output file; %%v in the name will be replaced with the \
program\'s version. To just print the output, use -d instead.',
action='store',
)
parser.add_argument(
'-p', '--pragma',
help='Set a pragma value (syntax like #pragma). All pragmas are \
also accessible to the program as defines named __NAME__',
action='append',
nargs='+',
default=[]
)
parser.add_argument(
'-d', '--display',
action='store_true',
default=False,
help='Show the final source (Works together with -o)'
)
parser.add_argument(
'-v', '--verbose',
action='store_true',
default=False,
help='Show all optional debug info.'
)
parser.add_argument(
'-G', '--show-generated',
action='store_true',
default=False,
help='Show the code generated from statements.'
)
parser.add_argument(
'-O', '--show-original',
action='store_true',
default=False,
help='Show original source (only main file)'
)
parser.add_argument(
'-M', '--show-macros',
action='store_true',
default=False,
help='List all macros'
)
parser.add_argument(
'-R', '--show-resolved',
action='store_true',
default=False,
help='Show code after processing includes and # branching.'
)
parser.add_argument(
'-P', '--show-processed',
action='store_true',
default=False,
help='Show code after replacing macros'
)
parser.add_argument(
'-T', '--show-tokens',
action='store_true',
default=False,
help='Show tokens (source divided to pieces).'
)
parser.add_argument(
'-S', '--show-statements',
action='store_true',
default=False,
help='Show statements (high-level code abstraction).'
)
parser.add_argument(
'-x', '--error-trace',
action='store_true',
default=False,
help='Show stack trace for SDSCP syntax errors (for debugging)'
)
args = parser.parse_args()
SRC = args.source
DEST = args.output
SHOW_ORIGINAL = args.verbose or args.show_original
SHOW_RESOLVED = args.verbose or args.show_resolved
SHOW_MACROS = args.verbose or args.show_macros
SHOW_PROCESSED = args.verbose or args.show_processed
SHOW_TOKENS = args.verbose or args.show_tokens
SHOW_STATEMENTS = args.verbose or args.show_statements
SHOW_GENERATED = args.verbose or args.show_generated
SHOW_OUTPUT = args.verbose or args.display
SHOW_STRACE = args.error_trace
pragmas_args = {}
for p in args.pragma:
pr = D_Pragma('#pragma ' + ' '.join(p))
pragmas_args[pr.name] = pr.value
# ==================== Utils =======================
def banner(text, fill='-', length=80):
""" Show a banner line """
blob = (fill*length + ' ' + text + ' ' + fill*length)
overlap = len(blob)-80
print('\n' + blob[ math.floor(overlap/2) : math.floor(-overlap/2)] + '\n')
def prep4disp(code):
c = ' ' + re.sub(r'\n', '\n ', code)
c = re.sub(r'\t', ' ', c)
return c
# ==================== MAIN TASK =======================
try:
banner('SDS-C Preprocessor', ':')
print('Reading file:', SRC)
# read the file
dproc = DirectiveProcessor(SRC, pragmas_args)
if SHOW_ORIGINAL:
banner('SOURCE', '-')
print(prep4disp( dproc.source ) + '\n')
# ---------------- Resolve directives ------------------
print('Resolving directives...')
# include files, resolve branching, find macros...
dproc.process()
# -------------------- Apply macros --------------------
pragmas = dproc.get_pragmas()
pragmas['main_file'] = SRC
pragmas['sdscp_version'] = VERSION
if 'name' not in pragmas.keys():
pragmas['name'] = SRC
if 'author' not in pragmas.keys():
try:
pragmas['author'] = getpass.getuser()
except Exception:
pass
if SHOW_MACROS:
banner('MACROS', '-')
print('List of all found macros, in definition order:\n')
for d in dproc.get_defines().values():
for m in d:
print(' ' + str(m))
print()
banner('PRAGMAS', '-')
print('List of all #pragma config key-value pairs\n')
for (k, v) in dproc.get_pragmas().items():
print('%s = %s' % (k, v))
print()
if SHOW_RESOLVED:
banner('RESOLVED', '-')
print('Code after resolving includes, # branching, and extracting macros:\n')
print(prep4disp( dproc.get_output() ) + '\n')
print('Applying macros...')
# perform macro replacements
dproc.apply_macros()
# get output code
processed = dproc.get_output()
if SHOW_PROCESSED:
banner('PROCESSED', '-')
print('Code after replacing macros:\n')
print(prep4disp(processed) + '\n')
print('Tokenizing code...')
tk = Tokenizer(processed)
tokens = tk.tokenize()
sts = statements.parse(tokens)
if SHOW_TOKENS:
banner('TOKENIZED', '-')
print('Tokenization of the processed code:\n')
tk.show()
print('')
if SHOW_STATEMENTS:
banner('STATEMENTS', '-')
print('Source code abstraction:\n')
for s in sts:
print(str(s))
if SHOW_GENERATED:
banner('GENERATED', '-')
print('Code generated from statements:\n')
rndr = CSyntaxRenderer(sts)
print(prep4disp(rndr.render()))
if DEST != None or SHOW_OUTPUT:
# perform tweaks to match some of SDS-C's broken syntax
rtype = pragmas.get('renderer', 'asm')
if rtype in ['sds', 'simple']:
rtype = 'simple'
rndr = SimpleSdsRenderer(sts)
elif rtype in ['sds2', 'asm']:
rtype = 'asm'
rndr = AsmSdsRenderer(sts)
elif rtype in ['debug']:
rndr = CSyntaxRenderer(sts)
else:
raise Exception('Unknown renderer: "%s"' % rtype)
rndr.set_pragmas(pragmas)
print('Rendering to SDS-C using "%s" renderer...' % rtype)
for_sds = rndr.render()
if SHOW_OUTPUT:
banner('OUTPUT SDS-C CODE', '-')
print(prep4disp(for_sds) + '\n')
if DEST != None:
if 'version' in pragmas:
DEST = DEST.replace("%V", pragmas.get('version'))
print('Writing to file: %s' % DEST)
f = open(DEST, 'w')
f.write(for_sds)
f.close()
else:
print('No output file specified.')
print('\nDone.\n')
except Exception as e:
errname = type(e).__name__
disp_errname = errname
is_custom = False
if errname == 'SdscpSyntaxError':
disp_errname = 'SYNTAX ERROR'
is_custom = True
elif errname == 'CompatibilityError':
disp_errname = 'COMPATIBILITY ERROR'
is_custom = True
elif errname == 'IsADirectoryError':
disp_errname = 'INVALID INPUT FILE'
is_custom = True
elif errname == 'FileNotFoundError':
disp_errname = 'FILE NOT FOUND'
is_custom = True
else:
disp_errname = 'COMPILATION FAILED'
banner(disp_errname, '#')
# Extra debug info requested
if SHOW_STRACE or not is_custom:
type_, value_, traceback_ = sys.exc_info()
ex = traceback.format_exception(type_, value_, traceback_)
for line in ex:
# discard useless junk
if (('raise %s' % errname) in line) or ('File "<string>", line None' in line):
continue
print(line)
else:
print(str(e) + '\n')
print('To see debug info, please use the -x flag.\n')
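# Example invocation (illustrative; the file names are placeholders):
#
#   python3 sdscp.py program.c -o build/program-%v.c -p renderer asm -v
#
# This compiles program.c, writes the result with the program's #pragma
# version substituted into the output name, forces the "asm" renderer via a
# command-line pragma, and prints all debug output, matching the argparse
# definitions above.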
|
naro/sdscp
|
sdscp.py
|
Python
|
gpl-2.0
| 7,989
|
[
"VisIt"
] |
77857521bc1548a979afadf757f3c28c9a97525eb57373fab1d7d8f32f2e2f7a
|
# $Id$
#
# Copyright (c) 2010, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Greg Landrum, October 2006
#
import os, weakref, re
from rdkit.six.moves import cStringIO as StringIO
from rdkit import RDConfig
class FGHierarchyNode(object):
children = None
name = ""
label = ""
pattern = None
smarts = ""
rxnSmarts = ""
parent = None
removalReaction = None
def __init__(self, name, patt, smarts="", label="", rxnSmarts="", parent=None):
self.name = name
self.pattern = patt
if parent:
self.parent = weakref.ref(parent)
self.label = label
self.smarts = smarts
self.children = []
self.rxnSmarts = rxnSmarts
def __len__(self):
res = 1
for child in self.children:
res += len(child)
return res
class FuncGroupFileParseError(ValueError):
pass
groupDefns = {}
hierarchy = None
lastData = None
lastFilename = None
def BuildFuncGroupHierarchy(fileNm=None, data=None, force=False):
global groupDefns, hierarchy, lastData, lastFilename
if not force and hierarchy and (not data or data==lastData) and \
(not fileNm or fileNm==lastFilename):
return hierarchy[:]
lastData = data
splitter = re.compile('\t+')
from rdkit import Chem
if not fileNm and not data:
fileNm = os.path.join(RDConfig.RDDataDir, 'Functional_Group_Hierarchy.txt')
if fileNm:
inF = open(fileNm, 'r')
lastFilename = fileNm
elif data:
inF = StringIO(data)
else:
raise ValueError("need data or filename")
groupDefns = {}
res = []
lineNo = 0
for line in inF.readlines():
lineNo += 1
line = line.strip()
line = line.split('//')[0]
if not line:
continue
splitL = splitter.split(line)
if len(splitL) < 3:
raise FuncGroupFileParseError("Input line %d (%s) is not long enough." % (lineNo, repr(line)))
label = splitL[0].strip()
if label in groupDefns:
raise FuncGroupFileParseError("Duplicate label on line %d." % lineNo)
labelHierarchy = label.split('.')
if len(labelHierarchy) > 1:
for i in range(len(labelHierarchy) - 1):
tmp = '.'.join(labelHierarchy[:i + 1])
if not tmp in groupDefns:
raise FuncGroupFileParseError("Hierarchy member %s (line %d) not found." % (tmp, lineNo))
parent = groupDefns['.'.join(labelHierarchy[:-1])]
else:
parent = None
smarts = splitL[1]
patt = Chem.MolFromSmarts(smarts)
if not patt:
raise FuncGroupFileParseError('Smarts "%s" (line %d) could not be parsed.' % (smarts, lineNo))
name = splitL[2].strip()
rxnSmarts = ''
if len(splitL) > 3:
rxnSmarts = splitL[3]
node = FGHierarchyNode(name, patt, smarts=smarts, label=label, parent=parent,
rxnSmarts=rxnSmarts)
if parent:
parent.children.append(node)
else:
res.append(node)
groupDefns[label] = node
hierarchy = res[:]
return res
def _SetNodeBits(mol, node, res, idx):
ms = mol.GetSubstructMatches(node.pattern)
count = 0
seen = {}
for m in ms:
if m[0] not in seen:
count += 1
seen[m[0]] = 1
if count:
res[idx] = count
idx += 1
for child in node.children:
idx = _SetNodeBits(mol, child, res, idx)
else:
idx += len(node)
return idx
def CreateMolFingerprint(mol, hierarchy):
totL = 0
for entry in hierarchy:
totL += len(entry)
res = [0] * totL
idx = 0
for entry in hierarchy:
idx = _SetNodeBits(mol, entry, res, idx)
return res
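# Illustrative usage sketch (not part of the original module): build the
# default hierarchy shipped with RDKit and fingerprint a molecule.  The
# SMILES string is an arbitrary placeholder.
def _exampleFingerprint(smiles='CC(=O)Nc1ccccc1'):
  from rdkit import Chem
  hier = BuildFuncGroupHierarchy()  # loads Functional_Group_Hierarchy.txt
  mol = Chem.MolFromSmiles(smiles)
  return CreateMolFingerprint(mol, hier)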
|
jandom/rdkit
|
rdkit/Chem/FunctionalGroups.py
|
Python
|
bsd-3-clause
| 5,073
|
[
"RDKit"
] |
06d2d98598fc35bbe02e3787eae51764430f61c33eb431840ac99b8e1cc864a4
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
def to_string(x):
if x.__class__.__name__ == 'bytes':
return x.decode('utf-8')
return x
import unittest, regex, os
from __init__ import oprex, OprexSyntaxError
class TestErrorHandling(unittest.TestCase):
def given(self, oprex_source, expect_error):
oprex_source = to_string(oprex_source)
expect_error = to_string(expect_error)
if expect_error:
expect_error = '\n' + expect_error
try:
oprex(oprex_source)
except Exception as err:
got_error = str(err)
else:
got_error = ''
if got_error != expect_error:
msg = 'For input: %s\n----------------------------- Got Error: -----------------------------%s\n\n-------------------------- Expected Error: ---------------------------%s'
raise AssertionError(msg % (
oprex_source or '(empty string)',
got_error or '\n(no error)',
expect_error or '\n(no error)',
))
def test_white_guards(self):
self.given('one-liner input',
expect_error=b'Line 1: First line must be blank, not: one-liner input')
self.given(b'''something in the first line
''',
expect_error=b'Line 1: First line must be blank, not: something in the first line')
self.given(b'''
something in the last line''',
expect_error=b'Line 2: Last line must be blank, not: something in the last line')
def test_unknown_symbol(self):
self.given(b'''
`@$%^&;{}[]\\
''',
expect_error=b'Line 2: Syntax error at or near: `@$%^&;{}[]\\')
def test_unexpected_token(self):
self.given(b'''
/to/be/?
''',
expect_error=b'''Line 2: Unexpected QUESTMARK
/to/be/?
^''')
self.given(b'''
root
branch
''',
expect_error=b'''Line 3: Unexpected NEWLINE
branch
^''')
self.given(b'''
root
root = '/'
root2
''',
expect_error=b'''Line 4: Unexpected VARNAME
root2
^''')
self.given(b'''
root
root = '/'\nroot2
''',
expect_error=b'Line 4: Unexpected VARNAME\nroot2\n^')
self.given(b'''
*) /warming/and/warming/
''',
expect_error=b'Line 2: Unexpected GLOBALMARK\n*) /warming/and/warming/\n^')
self.given(b'''
/greeting/world/
greeting = 'hello'
world = 'world'
''',
expect_error="Line 4: 'world' is defined but not used (by its parent expression)")
def test_indentation_error(self):
self.given(b'''
/greeting/world/
greeting = 'hello'
world = 'world'
''',
expect_error="Line 4: 'world' is defined but not used (by its parent expression)")
self.given(b'''
root
branch
misaligned
''',
expect_error=b'Line 4: Indentation error')
self.given(b'''
root
branch
hyperroot
''',
expect_error=b'Line 4: Indentation error')
def test_correct_error_line_numbering(self):
self.given(b'''
/greeting/world/
greeting = 'hello'
world = 'world'
''',
expect_error="Line 5: 'world' is defined but not used (by its parent expression)")
self.given(b'''
/greeting/world/
greeting = 'hello'
world = 'world'
''',
expect_error="Line 8: 'world' is defined but not used (by its parent expression)")
self.given(b'''
/greeting/world/
greeting = 'hello'
world = 'world'
''',
expect_error=b'Line 6: Indentation error')
self.given(b'''
warming
*) warming = 'global'
''',
expect_error="Line 5: The GLOBALMARK *) must be put at the line's beginning")
def test_mixed_indentation(self):
self.given(b'''
\tthis_line_mixes_tab_and_spaces_for_indentation
''',
expect_error=b'Line 2: Cannot mix space and tab for indentation')
self.given(b'''
/tabs/vs/spaces/
\t\ttabs = 'this line is tabs-indented'
spaces = 'this line is spaces-indented'
''',
expect_error=b'Line 3: Inconsistent indentation character')
def test_undefined_variable(self):
self.given(b'''
bigfoot
''',
expect_error="Line 2: 'bigfoot' is not defined")
self.given(b'''
/horses/and/unicorns/
horses = 'Thoroughbreds'
and = ' and '
''',
expect_error="Line 2: 'unicorns' is not defined")
self.given(b'''
/unicorns/and/horses/
horses = 'Thoroughbreds'
and = ' and '
''',
expect_error="Line 2: 'unicorns' is not defined")
def test_illegal_variable_name(self):
self.given(b'''
101dalmatians
101dalmatians = 101 of 'dalmatians'
''',
expect_error=b'''Line 2: Unexpected VARNAME
101dalmatians
^''')
self.given(b'''
/101dalmatians/
101dalmatians = 101 of 'dalmatians'
''',
expect_error=b'''Line 2: Unexpected NUMBER
/101dalmatians/
^''')
self.given(b'''
_
''',
expect_error=b'''Line 2: Unexpected NEWLINE
_
^''')
self.given(b'''
/_/
''',
expect_error=b'''Line 2: Unexpected UNDERSCORE
/_/
^''')
self.given(b'''
underscore
_ = '_'
''',
expect_error=b'''Line 3: Unexpected UNDERSCORE
_ = '_'
^''')
self.given(b'''
<<|
|_
''',
expect_error=b'''Line 3: Unexpected NEWLINE
|_
^''')
self.given(b'''
@|
|/_/
''',
expect_error=b'''Line 3: Unexpected UNDERSCORE
|/_/
^''')
self.given(b'''
<@>
_
''',
expect_error=b'''Line 3: Unexpected UNDERSCORE
_
^''')
self.given(b'''
<@>
|_|
''',
expect_error=b'''Line 3: Unexpected UNDERSCORE
|_|
^''')
def test_duplicate_variable(self):
self.given(u'''
dejavu
dejavu = 'Déjà vu'
dejavu = 'Déjà vu'
''',
expect_error="Line 4: Names must be unique within a scope, 'dejavu' is already defined (previous definition at line 3)")
self.given(u'''
dejavu
dejavu = dejavu = 'Déjà vu'
''',
expect_error="Line 3: Names must be unique within a scope, 'dejavu' is already declared (previous declaration at line 3)")
self.given(u'''
dejavu
dejavu = 'Déjà vu'
dejavu = dejavu
''',
expect_error="Line 4: Names must be unique within a scope, 'dejavu' is already defined (previous definition at line 3)")
self.given(u'''
/de/jade/
de = 'de'
jade = /ja/de/
ja = 'JA'
de = 'DE'
''',
expect_error="Line 6: Names must be unique within a scope, 'de' is already defined (previous definition at line 3)")
self.given(u'''
/deja/de/
deja = /de/ja/
de = 'de' -- different scope
ja = 'JA'
de = 'DE' -- different scope, so should be no error
''',
expect_error=b'')
self.given(u'''
chicken
chicken = /egg/hatches/
egg = /chicken/lays/
chicken = /velociraptor/evolves/
''',
expect_error="Line 5: Names must be unique within a scope, 'chicken' is already declared (previous declaration at line 3)")
self.given(b'''
/subject/predicate/object/
subject = /article/adjective/noun/
*) article = 'the'
*) adjective = /speed/color/
speed = 'quick'
color = 'brown'
*) noun = 'fox'
predicate = /verb/adverb/
verb = 'jumps'
adverb = 'over'
object = /article/adjective/noun/
article = 'an'
''',
expect_error="Line 13: Names must be unique within a scope, 'article' is already defined (previous definition at line 4)")
def test_unused_variable(self):
self.given(b'''
/alice/bob/
alice = 'alice'
bob = 'bob'
trudy = 'trudy'
''',
expect_error="Line 5: 'trudy' is defined but not used (by its parent expression)")
self.given(b'''
/alice/bob/
alice = 'alice'
bob = robert
robert = 'bob'
doe = 'doe'
''',
expect_error="Line 6: 'doe' is defined but not used (by its parent expression)")
self.given(b'''
non-vowel
vowel: a i u e o
''',
expect_error=b'') # vowel should be counted as used
def test_invalid_atomizer(self):
self.given(b'''
@alpha -- atomizer only applicable to chained lookup
''',
expect_error=b'''Line 2: Unexpected VARNAME
@alpha -- atomizer only applicable to chained lookup
^''')
def test_unclosed_literal(self):
self.given(b'''
mcd
mcd = 'McDonald's
''',
expect_error=b'''Line 3: Unexpected VARNAME
mcd = 'McDonald's
^''')
self.given(b'''
"she said \\"Hi\\"
''',
expect_error=b'Line 2: Syntax error at or near: "she said \\"Hi\\"')
self.given(b'''
quotes_mismatch
quotes_mismatch = "'
''',
expect_error="""Line 3: Syntax error at or near: "'""")
def test_invalid_string_escape(self):
self.given(b'''
'\N{KABAYAN}'
''',
expect_error="Line 2: undefined character name 'KABAYAN'")
self.given(u'''
'\N{APOSTROPHE}'
''',
expect_error="Line 2: Syntax error at or near: '")
def test_invalid_global_mark(self):
self.given(b'''
*)
''',
expect_error="Line 2: The GLOBALMARK *) must be put at the line's beginning")
self.given(b'''
*)
''',
expect_error=b'Line 2: Syntax error: *)')
self.given(b'''
*)\t
''',
expect_error=b'Line 2: Unexpected GLOBALMARK\n*) \n^')
self.given(b'''
*)warming
''',
expect_error=b'Line 2: Syntax error: *)')
self.given(b'''
*) warming
''',
expect_error=b'Line 2: Unexpected GLOBALMARK\n*) warming\n^')
self.given(b'''
warming
*)warming = 'global'
''',
expect_error="Line 3: The GLOBALMARK *) must be put at the line's beginning")
self.given(b'''
warming
*) warming = 'global'
''',
expect_error="Line 3: The GLOBALMARK *) must be put at the line's beginning")
self.given(b'''
warming
warming*) = 'global'
''',
expect_error="Line 3: Syntax error at or near: *) = 'global'")
self.given(b'''
warming
warming = global *)
''',
expect_error="Line 3: Syntax error: warming = global *)")
self.given(b'''
warming
warming = *) 'global'
''',
expect_error="Line 3: Syntax error: warming = *) 'global'")
self.given(b'''
warming
warming *) = 'global'
''',
expect_error="Line 3: Syntax error: warming *) = 'global'")
self.given(b'''
warming
*) *) warming = 'global'
''',
expect_error=b'Line 3: Syntax error: *) *) ')
self.given(b'''
warming
*) warming*) = 'global'
''',
expect_error="Line 3: Syntax error at or near: *) = 'global'")
self.given(b'''
warming
*) warming = global *)
''',
expect_error="Line 3: Syntax error: *) warming = global *)")
self.given(b'''
warming
warming = 'global'
*)
''',
expect_error="Line 4: Unexpected NEWLINE\n*) \n ^")
self.given(b'''
warming
warming = 'global'
*)
''',
expect_error="Line 4: Syntax error: *)")
self.given(b'''
warming
warming = 'global'
*) junk
''',
expect_error="Line 4: Unexpected NEWLINE\n*) junk\n ^")
self.given(b'''
warming
warming = 'global'
*) *)junk
''',
expect_error="Line 4: Syntax error: *) *)")
self.given(b'''
warming
warming = 'global'
*) *)
''',
expect_error="Line 4: Syntax error: *) *)")
self.given(b'''
warming
warming = 'global'
*) *)
''',
expect_error="Line 4: Syntax error: *) *)")
def test_global_aliasing(self):
self.given(b'''
/oneoneone/oneone/one/
oneoneone = /satu/uno/ichi/
satu = '1'
*) uno = ichi = satu
oneone = /uno/ichi/
one = ichi
ichi: 1
''',
expect_error="Line 8: Names must be unique within a scope, 'ichi' is already defined (previous definition at line 5)")
self.given(b'''
/oneoneone/oneone/one/
oneoneone = /satu/uno/ichi/
satu = '1'
*) uno = ichi = satu
oneone = /uno/ichi/
one = uno
uno: 1
''',
expect_error="Line 8: Names must be unique within a scope, 'uno' is already defined (previous definition at line 5)")
self.given(b'''
/oneoneone/oneone/one/
oneoneone = /satu/uno/ichi/
satu = '1'
*) uno = ichi = satu
oneone = /uno/ichi/
one = satu
''',
expect_error="Line 7: 'satu' is not defined")
self.given(b'''
/oneoneone/oneone/one/
oneoneone = /satu/uno/ichi/
*) satu = '1'
uno = ichi = satu
one = satu
oneone = /uno/ichi/
''',
expect_error="Line 7: 'uno' is not defined")
def test_invalid_charclass(self):
self.given(b'''
empty_charclass
empty_charclass:
''',
expect_error=b'''Line 3: Unexpected NEWLINE
empty_charclass:
^''')
self.given(b'''
noSpaceAfterColon
noSpaceAfterColon:n o
''',
expect_error=b'''Line 3: Unexpected CHAR
noSpaceAfterColon:n o
^''')
self.given(b'''
diphtong
diphtong: ae au
''',
expect_error="Line 3: Cannot include 'ae': not defined")
self.given(b'''
miscolon
miscolon: /colon/should/be/equal/sign/
''',
expect_error=b'Line 3: /colon compiles to \p{colon} which is rejected by the regex engine with error message: unknown property at position 10')
self.given(b'''
miscolon
miscolon: /alphabetic/colon/should/be/equal/sign/
''',
expect_error=b'Line 3: /colon compiles to \p{colon} which is rejected by the regex engine with error message: unknown property at position 10')
self.given(b'''
miscolon
miscolon: 'colon should be equal sign'
''',
expect_error=b'''Line 3: Unexpected CHAR
miscolon: 'colon should be equal sign'
^''')
self.given(b'''
/A/a/
A: a: A a
''',
expect_error=b'''Line 3: Unexpected CHAR
A: a: A a
^''')
self.given(b'''
/A/a/
A: a = A a
''',
expect_error="Line 2: 'a' is not defined")
self.given(b'''
/A/a/
A: a = A
''',
expect_error="Line 2: 'a' is not defined")
self.given(b'''
/shouldBeColon/
shouldBeColon = A a
''',
expect_error=b'''Line 3: Unexpected VARNAME
shouldBeColon = A a
^''')
self.given(b'''
mixedAssignment
mixedAssignment : = x
''',
expect_error=b'''Line 3: Unexpected COLON
mixedAssignment : = x
^''')
self.given(b'''
mixedAssignment
mixedAssignment := x
''',
expect_error=b'''Line 3: Unexpected COLON
mixedAssignment := x
^''')
self.given(b'''
mixedAssignment
mixedAssignment:= x
''',
expect_error=b'''Line 3: Unexpected CHAR
mixedAssignment:= x
^''')
self.given(b'''
mixedAssignment
mixedAssignment=: x
''',
expect_error=b'''Line 3: Unexpected COLON
mixedAssignment=: x
^''')
self.given(b'''
mixedAssignment
mixedAssignment =: x
''',
expect_error=b'''Line 3: Unexpected COLON
mixedAssignment =: x
^''')
self.given(b'''
x
x: /IsAwesome
''',
expect_error=b'Line 3: /IsAwesome compiles to \p{IsAwesome} which is rejected by the regex engine with error message: unknown property at position 14')
self.given(b'''
x
x: :KABAYAN_SABA_KOTA
''',
expect_error=b'Line 3: :KABAYAN_SABA_KOTA compiles to \N{KABAYAN SABA KOTA} which is rejected by the regex engine with error message: undefined character name at position 22')
self.given(br'''
x
x: \N{KABAYAN}
''',
expect_error=b'Line 3: \N{KABAYAN} compiles to \N{KABAYAN} which is rejected by the regex engine with error message: undefined character name at position 12')
self.given(br'''
x
x: \o
''',
expect_error=b'Line 3: Bad escape sequence: \o')
self.given(br'''
x
x: \w
''',
expect_error=b'Line 3: Bad escape sequence: \w')
self.given(br'''
x
x: \'
''',
expect_error="Line 3: Bad escape sequence: \\'")
self.given(br'''
x
x: \"
''',
expect_error=b'Line 3: Bad escape sequence: \\"')
self.given(br'''
x
x: \ron
''',
expect_error=br'Line 3: Bad escape sequence: \ron')
self.given(br'''
x
x: \u123
''',
expect_error=b'Line 3: Bad escape sequence: \u123')
self.given(br'''
x
x: \U1234
''',
expect_error=b'Line 3: Bad escape sequence: \U1234')
self.given(br'''
x
x: \u12345
''',
expect_error=b'Line 3: Bad escape sequence: \u12345')
def test_invalid_char(self):
self.given(b'''
x
x: u1234
''',
expect_error="Line 3: Cannot include 'u1234': not defined")
self.given(b'''
x
x: \uab
''',
expect_error=b'Line 3: Bad escape sequence: \uab')
self.given(b'''
x
x: \u123z
''',
expect_error=b'Line 3: Bad escape sequence: \u123z')
self.given(b'''
x
x: \U1234567z
''',
expect_error=b'Line 3: Bad escape sequence: \U1234567z')
self.given(b'''
x
x: \U123456789
''',
expect_error=b'Line 3: Bad escape sequence: \U123456789')
self.given(b'''
x
x: \U
''',
expect_error=b'Line 3: Bad escape sequence: \U')
self.given(b'''
x
x: :YET_ANOTHER_CHARACTER_THAT_SHOULD_NOT_BE_IN_UNICODE
''',
expect_error=b'Line 3: :YET_ANOTHER_CHARACTER_THAT_SHOULD_NOT_BE_IN_UNICODE compiles to \N{YET ANOTHER CHARACTER THAT SHOULD NOT BE IN UNICODE} which is rejected by the regex engine with error message: undefined character name at position 56')
# unicode character name should be in uppercase
self.given(b'''
x
x: check-mark
''',
expect_error=b'''Line 3: Unexpected CHAR
x: check-mark
^''')
self.given(b'''
x
x: @omic
''',
expect_error=b'''Line 3: Unexpected CHAR
x: @omic
^''')
self.given(b'''
x
x: awe$ome
''',
expect_error=b'''Line 3: Unexpected CHAR
x: awe$ome
^''')
def test_invalid_range(self):
self.given(b'''
x
x: ..
''',
expect_error=b'''Line 3: Unexpected DOT
x: ..
^''')
self.given(b'''
x
x: ...,
''',
expect_error=b'''Line 3: Unexpected DOT
x: ...,
^''')
self.given(b'''
x
x: ,...
''',
expect_error=b'''Line 3: Unexpected DOT
x: ,...
^''')
self.given(b'''
x
x: ;..,
''',
expect_error=b'Line 3: ;.., compiles to [;-,] which is rejected by the regex engine with error message: bad character range at position 4')
self.given(b'''
x
x: x....
''',
expect_error=b'''Line 3: Unexpected DOT
x: x....
^''')
self.given(b'''
x
x: infinity..
''',
expect_error=b'''Line 3: Unexpected NEWLINE
x: infinity..
^''')
self.given(b'''
x
x: ..bigbang
''',
expect_error=b'''Line 3: Unexpected DOT
x: ..bigbang
^''')
self.given(b'''
x
x: bigcrunch..bigbang
''',
expect_error=b'Line 3: Invalid character range: bigcrunch..bigbang')
self.given(b'''
x
x: A...Z
''',
expect_error=b'''Line 3: Unexpected DOT
x: A...Z
^''')
self.given(b'''
x
x: 1..2..3
''',
expect_error=b'''Line 3: Unexpected DOT
x: 1..2..3
^''')
self.given(b'''
x
x: /IsAlphabetic..Z
''',
expect_error=b'Line 3: Invalid character range: /IsAlphabetic..Z')
self.given(b'''
x
x: +alpha..Z
''',
expect_error=b'Line 3: Invalid character range: +alpha..Z')
self.given(b'''
aB
aB: a..B
''',
expect_error=b'Line 3: a..B compiles to [a-B] which is rejected by the regex engine with error message: bad character range at position 4')
self.given(br'''
BA
BA: \u0042..A
''',
expect_error=b'Line 3: \u0042..A compiles to [\u0042-A] which is rejected by the regex engine with error message: bad character range at position 9')
self.given(br'''
BA
BA: \U00000042..\u0041
''',
expect_error=b'Line 3: \U00000042..\u0041 compiles to [\U00000042-\u0041] which is rejected by the regex engine with error message: bad character range at position 18')
self.given(br'''
BA
BA: \x42..\U00000041
''',
expect_error=br'Line 3: \x42..\U00000041 compiles to [\x42-\U00000041] which is rejected by the regex engine with error message: bad character range at position 16')
self.given(br'''
BA
BA: \102..\x41
''',
expect_error=br'Line 3: \102..\x41 compiles to [\102-\x41] which is rejected by the regex engine with error message: bad character range at position 10')
self.given(br'''
BA
BA: \N{LATIN CAPITAL LETTER B}..\101
''',
expect_error=br'Line 3: \N{LATIN CAPITAL LETTER B}..\101 compiles to [\N{LATIN CAPITAL LETTER B}-\101] which is rejected by the regex engine with error message: bad character range at position 32')
self.given(b'''
BA
BA: :LATIN_CAPITAL_LETTER_B..\N{LATIN CAPITAL LETTER A}
''',
expect_error=b'Line 3: :LATIN_CAPITAL_LETTER_B..\N{LATIN CAPITAL LETTER A} compiles to [\N{LATIN CAPITAL LETTER B}-\N{LATIN CAPITAL LETTER A}] which is rejected by the regex engine with error message: bad character range at position 54')
self.given(b'''
BA
BA: \N{LATIN CAPITAL LETTER B}..:LATIN_CAPITAL_LETTER_A
''',
expect_error=b'Line 3: \N{LATIN CAPITAL LETTER B}..:LATIN_CAPITAL_LETTER_A compiles to [\N{LATIN CAPITAL LETTER B}-\N{LATIN CAPITAL LETTER A}] which is rejected by the regex engine with error message: bad character range at position 54')
self.given(br'''
aZ
aZ: \N{LATIN SMALL LETTER A}..:LATIN_CAPITAL_LETTER_Z
''',
expect_error=b'Line 3: \N{LATIN SMALL LETTER A}..:LATIN_CAPITAL_LETTER_Z compiles to [\N{LATIN SMALL LETTER A}-\N{LATIN CAPITAL LETTER Z}] which is rejected by the regex engine with error message: bad character range at position 52')
def test_invalid_charclass_include(self):
self.given(b'''
x
x: +1
''',
expect_error="Line 3: Cannot include '1': not defined")
self.given(b'''
x
x: +7even
''',
expect_error="Line 3: Cannot include '7even': not defined")
self.given(b'''
x
x: +7even
7even: 7
''',
expect_error=b'''Line 4: Unexpected NUMBER
7even: 7
^''')
self.given(b'''
x
x: +bang!
''',
expect_error=b'''Line 3: Unexpected CHAR
x: +bang!
^''')
self.given(b'''
x
x: ++
''',
expect_error=b'''Line 3: Unexpected CHAR
x: ++
^''')
self.given(b'''
x
x: +!awe+some
''',
expect_error=b'''Line 3: Unexpected CHAR
x: +!awe+some
^''')
self.given(b'''
x
x: y
y: m i s n g +
''',
expect_error="Line 4: 'y' is defined but not used (by its parent expression)")
self.given(b'''
x
x: +y
y = 'should be a charclass'
''',
expect_error="Line 3: Cannot include 'y': not a character class")
self.given(u'''
vowhex
vowhex: +!vowel +hex
vowel: a i u e o A I U E O
hex: 0..9 a..f A..F
''',
expect_error=b'''Line 3: Unexpected CHAR
vowhex: +!vowel +hex
^''')
self.given(u'''
/x/y/
x = 'x'
y: +x
''',
expect_error="Line 4: Cannot include 'x': not a character class")
self.given(u'''
/plus/minus/pmz/
plus: +
minus = '-' -- gotcha: exactly-same output with "minus: -" but not includable
pmz: +plus +minus z
''',
expect_error="Line 5: Cannot include 'minus': not a character class")
self.given(u'''
/plus/minus/pmz/
plus: +
minus = '-'
pmz: +plus +dash z
dash: +minus
''',
expect_error="Line 6: Cannot include 'minus': not a character class")
def test_invalid_charclass_operation(self):
self.given(u'''
missing_arg
missing_arg: /Alphabetic and
''',
expect_error="Line 3: Invalid use of binary 'and' operator")
self.given(u'''
missing_arg
missing_arg: and /Alphabetic
''',
expect_error="Line 3: Invalid use of binary 'and' operator")
self.given(u'''
missing_arg
missing_arg: /Alphabetic not
''',
expect_error="Line 3: Invalid use of binary 'not' operator")
self.given(u'''
missing_arg
missing_arg: not /Alphabetic
''',
expect_error="Line 3: Invalid use of binary 'not' operator")
self.given(u'''
missing_args
missing_args: and
''',
expect_error="Line 3: Invalid use of binary 'and' operator")
self.given(u'''
missing_args
missing_args: not
''',
expect_error="Line 3: Invalid use of binary 'not' operator")
self.given(u'''
missing_args
missing_args: not:
''',
expect_error="Line 3: Invalid use of unary 'not:' operator")
self.given(u'''
1 of: x and not y
''',
expect_error="Line 2: Bad set operation 'and not'")
self.given(u'''
1 of: x not and y
''',
expect_error="Line 2: Bad set operation 'not and'")
self.given(u'''
1 of: x not not y
''',
expect_error="Line 2: Bad set operation 'not not'")
self.given(u'''
1 of: x and and y
''',
expect_error="Line 2: Bad set operation 'and and'")
self.given(u'''
1 of: not: not x
''',
expect_error="Line 2: Bad set operation 'not: not'")
self.given(u'''
1 of: not: and x
''',
expect_error="Line 2: Bad set operation 'not: and'")
def test_invalid_quantifier(self):
self.given(b'''
3 of
''',
expect_error=b'''Line 2: Unexpected VARNAME
3 of
^''')
self.given(b'''
3 of
of = 'trailing spaces above after the "of"'
''',
expect_error=b'''Line 2: Unexpected VARNAME
3 of
^''')
self.given(b'''
3 of -- 3 of what?
''',
expect_error=b'''Line 2: Unexpected VARNAME
3 of -- 3 of what?
^''')
self.given(b'''
3 of-- 3 of what?
''',
expect_error=b'''Line 2: Unexpected VARNAME
3 of-- 3 of what?
^''')
self.given(b'''
3 of of--
''',
expect_error=b'''Line 2: Unexpected MINUS
3 of of--
^''')
self.given(b'''
3 alpha
''',
expect_error=b'''Line 2: Unexpected VARNAME
3 alpha
^''')
self.given(b'''
3 ofalpha
''',
expect_error=b'''Line 2: Unexpected VARNAME
3 ofalpha
^''')
self.given(b'''
3of alpha
''',
expect_error=b'''Line 2: Unexpected VARNAME
3of alpha
^''')
self.given(b'''
3 o falpha
''',
expect_error=b'''Line 2: Unexpected VARNAME
3 o falpha
^''')
self.given(b'''
3 office alpha
''',
expect_error=b'''Line 2: Unexpected VARNAME
3 office alpha
^''')
self.given(b'''
3. of alpha
''',
expect_error=b'''Line 2: Unexpected WHITESPACE
3. of alpha
^''')
self.given(b'''
3... of alpha
''',
expect_error=b'''Line 2: Unexpected DOT
3... of alpha
^''')
self.given(b'''
3+ of alpha
''',
expect_error=b'''Line 2: Unexpected PLUS
3+ of alpha
^''')
self.given(b'''
3+3 of alpha
''',
expect_error=b'''Line 2: Unexpected PLUS
3+3 of alpha
^''')
self.given(b'''
@3..2 of alpha
''',
expect_error=b'Line 2: Repeat max must be > min')
self.given(b'''
@2..2 of alpha
''',
expect_error=b'Line 2: Repeat max must be > min')
self.given(b'''
@1..1 of alpha
''',
expect_error=b'Line 2: Repeat max must be > min')
self.given(b'''
@0..0 of alpha
''',
expect_error=b'Line 2: Repeat max must be > min')
self.given(b'''
1 ..3 of alpha
''',
expect_error=b'''Line 2: Unexpected DOT
1 ..3 of alpha
^''')
self.given(b'''
1.. 3 of alpha
''',
expect_error=b'''Line 2: Unexpected NUMBER
1.. 3 of alpha
^''')
self.given(b'''
1 .. of alpha
''',
expect_error=b'''Line 2: Unexpected DOT
1 .. of alpha
^''')
self.given(b'''
1 <<- of alpha
''',
expect_error=b'''Line 2: Unexpected MINUS
1 <<- of alpha
^''')
self.given(b'''
1 <<+ of alpha
''',
expect_error=b'''Line 2: Unexpected WHITESPACE
1 <<+ of alpha
^''')
self.given(b'''
1 <<+..0 of alpha
''',
expect_error=b'Line 2: Repeat max must be > min')
self.given(b'''
0 <<+..0 of alpha
''',
expect_error=b'Line 2: Repeat max must be > min')
self.given(b'''
1 <<+..1 of alpha
''',
expect_error=b'Line 2: Repeat max must be > min')
self.given(b'''
2 <<+..2 of alpha
''',
expect_error=b'Line 2: Repeat max must be > min')
self.given(b'''
2..1 <<- of alpha
''',
expect_error=b'Line 2: Repeat max must be > min')
self.given(b'''
? <<- of alpha
''',
expect_error=b'''Line 2: Unexpected LT
? <<- of alpha
^''')
self.given(b'''
1.. of alpha
''',
expect_error=b'''Line 2: Unexpected OF
1.. of alpha
^''')
self.given(b'''
1..2 of alpha
''',
expect_error=b'''Line 2: Unexpected OF
1..2 of alpha
^''')
self.given(b'''
.. of alpha
''',
expect_error=b'''Line 2: Unexpected DOT
.. of alpha
^''')
self.given(b'''
..1 of alpha
''',
expect_error=b'''Line 2: Unexpected DOT
..1 of alpha
^''')
self.given(b'''
..2 <<- of alpha
''',
expect_error=b'''Line 2: Unexpected DOT
..2 <<- of alpha
^''')
self.given(b'''
@.. of alpha
''',
expect_error=b'''Line 2: Unexpected DOT
@.. of alpha
^''')
self.given(b'''
@..1 of alpha
''',
expect_error=b'''Line 2: Unexpected DOT
@..1 of alpha
^''')
self.given(b'''
@..2 <<- of alpha
''',
expect_error=b'''Line 2: Unexpected DOT
@..2 <<- of alpha
^''')
self.given(b'''
@? of alpha
''',
expect_error=b'''Line 2: Unexpected QUESTMARK
@? of alpha
^''')
def test_commenting_error(self):
self.given(b'''
- this comment is missing another - prefix
''',
expect_error=b'''Line 2: Unexpected MINUS
- this comment is missing another - prefix
^''')
self.given(b'''
1 of vowel - this comment is missing another - prefix
vowel: a i u e o
''',
expect_error=b'''Line 2: Unexpected WHITESPACE
1 of vowel - this comment is missing another - prefix
^''')
self.given(b'''
1 of vowel- this comment is missing another - prefix
vowel: a i u e o
''',
expect_error=b'''Line 2: Unexpected MINUS
1 of vowel- this comment is missing another - prefix
^''')
self.given(b'''
1 of vowel
vowel: a i u e o - this comment is missing another - prefix
''',
expect_error="Line 3: Cannot include 'this': not defined")
self.given(b'''
1 of vowel
vowel: a i u e o- this comment is missing another - prefix
''',
expect_error=b'''Line 3: Unexpected CHAR
vowel: a i u e o- this comment is missing another - prefix
^''')
self.given(b'''
/comment/-- whitespace required before the "--"
comment = 'first'
''',
expect_error=b'''Line 2: Unexpected MINUS
/comment/-- whitespace required before the "--"
^''')
self.given(b'''
/comment/--
comment = 'first'
''',
expect_error=b'''Line 2: Unexpected MINUS
/comment/--
^''')
def test_invalid_reference(self):
self.given(b'''
=missing
''',
expect_error="Line 2: Bad Backreference: 'missing' is not defined/not a capturing group")
self.given(b'''
=missing?
''',
expect_error="Line 2: Bad Backreference: 'missing' is not defined/not a capturing group")
self.given(b'''
=alpha
''',
expect_error="Line 2: Bad Backreference: 'alpha' is not defined/not a capturing group")
self.given(b'''
/bang/=bang/
bang: b a n g !
''',
expect_error="Line 2: Bad Backreference: 'bang' is not defined/not a capturing group")
def test_invalid_boundaries(self):
self.given(b'''
/cat./
cat = 'cat'
''',
expect_error=b'''Line 2: Unexpected DOT
/cat./
^''')
self.given(b'''
/.cat/
cat = 'cat'
''',
expect_error=b'''Line 2: Unexpected DOT
/.cat/
^''')
self.given(b'''
/cat_/
cat = 'cat'
''',
expect_error="Line 2: 'cat_' is not defined")
self.given(b'''
/cat/
cat = 'cat' .
''',
expect_error=b'''Line 3: Unexpected WHITESPACE
cat = 'cat' .
^''')
self.given(b'''
/cat/
cat = 'cat'__
''',
expect_error=b'''Line 3: Unexpected DOUBLEUNDERSCORE
cat = 'cat'__
^''')
self.given(b'''
/_/
_ = 'underscore'
''',
expect_error=b'''Line 2: Unexpected UNDERSCORE
/_/
^''')
self.given(b'''
/_./
''',
expect_error=b'''Line 2: Unexpected UNDERSCORE
/_./
^''')
def test_invalid_flags(self):
self.given(b'''
(pirate) 'carribean'
''',
expect_error="Line 2: Unknown flag 'pirate'. Supported flags are: ascii bestmatch dotall enhancematch fullcase ignorecase locale multiline reverse unicode verbose version0 version1 word")
self.given(b'''
(-pirate) 'carribean'
''',
expect_error="Line 2: Unknown flag '-pirate'. Supported flags are: ascii bestmatch dotall enhancematch fullcase ignorecase locale multiline reverse unicode verbose version0 version1 word")
self.given(b'''
(--ignorecase) 'carribean'
''',
expect_error="Line 2: Unknown flag '--ignorecase'. Supported flags are: ascii bestmatch dotall enhancematch fullcase ignorecase locale multiline reverse unicode verbose version0 version1 word")
self.given(b'''
(unicode-ignorecase)
alpha
''',
expect_error="Line 2: Unknown flag 'unicode-ignorecase'. Supported flags are: ascii bestmatch dotall enhancematch fullcase ignorecase locale multiline reverse unicode verbose version0 version1 word")
self.given(b'''
(unicode) alpha
''',
expect_error="Line 2: 'unicode' is a global flag and must be set using global flag syntax, not scoped.")
self.given(b'''
(ignorecase)alpha
''',
expect_error=b'''Line 2: Unexpected VARNAME
(ignorecase)alpha
^''')
self.given(b'''
(ignorecase)
alpha
''',
expect_error=b'Line 3: Unexpected INDENT')
self.given(b'''
(ignorecase -ignorecase) alpha
''',
expect_error=b'Line 2: (ignorecase -ignorecase) compiles to (?i-i) which is rejected by the regex engine with error message: bad inline flags: flag turned on and off at position 10')
self.given(b'''
(-ignorecase ignorecase) alpha
''',
expect_error=b'Line 2: (-ignorecase ignorecase) compiles to (?i-i) which is rejected by the regex engine with error message: bad inline flags: flag turned on and off at position 10')
self.given(b'''
(-ignorecase ignorecase unicode)
alpha
''',
expect_error=b'Line 2: (-ignorecase ignorecase unicode) compiles to (?iu-i) which is rejected by the regex engine with error message: bad inline flags: flag turned on and off at position 11')
self.given(b'''
(-ignorecase unicode ignorecase)
alpha
''',
expect_error=b'Line 2: (-ignorecase unicode ignorecase) compiles to (?ui-i) which is rejected by the regex engine with error message: bad inline flags: flag turned on and off at position 11')
self.given(b'''
(-unicode)
alpha
''',
expect_error=b'Line 2: (-unicode) compiles to (?-u) which is rejected by the regex engine with error message: bad inline flags: cannot turn off global flag at position 9')
self.given(b'''
(ignorecase)
(-ignorecase)
''',
expect_error=b'''Line 3: Unexpected NEWLINE
(-ignorecase)
^''')
self.given(b'''
(unicode ignorecase)
(-ignorecase)
''',
expect_error=b'''Line 3: Unexpected NEWLINE
(-ignorecase)
^''')
self.given(b'''
(ascii unicode)
''',
expect_error=b'Line 2: (ascii unicode) compiles to (?au) which is rejected by the regex engine with error message: ASCII, LOCALE and UNICODE flags are mutually incompatible')
self.given(b'''
(unicode ascii)
''',
expect_error=b'Line 2: (unicode ascii) compiles to (?ua) which is rejected by the regex engine with error message: ASCII, LOCALE and UNICODE flags are mutually incompatible')
self.given(b'''
(ascii locale)
''',
expect_error=b'Line 2: (ascii locale) compiles to (?aL) which is rejected by the regex engine with error message: ASCII, LOCALE and UNICODE flags are mutually incompatible')
self.given(b'''
(unicode locale)
''',
expect_error=b'Line 2: (unicode locale) compiles to (?uL) which is rejected by the regex engine with error message: ASCII, LOCALE and UNICODE flags are mutually incompatible')
self.given(b'''
(version0 version1)
''',
expect_error=b'Line 2: (version0 version1) compiles to (?V0V1) which is rejected by the regex engine with error message: 8448')
self.given(b'''
(version1 version0)
''',
expect_error=b'Line 2: (version1 version0) compiles to (?V1V0) which is rejected by the regex engine with error message: 8448')
def test_invalid_orblock(self):
self.given(b'''
empty_orblock_not_allowed
empty_orblock_not_allowed = @|
''',
expect_error=b'Line 4: Unexpected END_OF_ORBLOCK')
self.given(b'''
/empty_orblock/not_allowed/
empty_orblock = @|
not_allowed = 'NOTALLOWED'
''',
expect_error=b'Line 5: Unexpected END_OF_ORBLOCK')
self.given(b'''
<<|
''',
expect_error=b'Line 3: Unexpected END_OF_ORBLOCK')
self.given(b'''
/x/y/
x = @|
|'AM'
|'PM'
y = 'forgot empty line to terminate the orblock'
''',
expect_error=b'''Line 6: Unexpected VARNAME (forgot to close ORBLOCK?)
y = 'forgot empty line to terminate the orblock'
^''')
self.given(b'''
@|
|am
|pm
am = 'AM'
pm = 'PM
''',
expect_error=b'''Line 5: Unexpected VARNAME (forgot to close ORBLOCK?)
am = 'AM'
^''')
self.given(b'''
/trailing/bar/
trailing = <<|
|'choice 1'
|'choice 2'
|
bar: |
''',
expect_error=b'''Line 7: Unexpected VARNAME (forgot to close ORBLOCK?)
bar: |
^''')
self.given(b'''
<<|
|'alignment check'
|'this one is bad'
''',
expect_error=b'Line 4: Misaligned OR')
self.given(b'''
@|
|'also misalignment'
''',
expect_error=b'Line 3: Misaligned OR')
self.given(b'''
orblock_type
orblock_type = | -- atomic? backtrack? must specify
|'to be'
|'not to be'
''',
expect_error=b'''Line 3: Unexpected BAR
orblock_type = | -- atomic? backtrack? must specify
^''')
self.given(b'''
syntax_err
syntax_err = <<|'to be' -- choices should start in second line
|'not to be'
''',
expect_error=b'''Line 3: Unexpected STRING
syntax_err = <<|'to be' -- choices should start in second line
^''')
self.given(b'''
<<|
|missing/a/slash/
''',
expect_error=b'''Line 3: Unexpected SLASH
|missing/a/slash/
^''')
self.given(b'''
nested_orblock
nested_orblock = @|
|'nested orblock not allowed'
|@|
|'make it a var'
|'then lookup'
''',
expect_error=b'Line 5: ORBLOCK cannot contain ORBLOCK')
self.given(b'''
nested_orblock
nested_orblock = <<|
|@|
''',
expect_error=b'Line 4: ORBLOCK cannot contain ORBLOCK')
self.given(b'''
nested_orblock
nested_orblock = @|
|<<|
''',
expect_error=b'Line 4: ORBLOCK cannot contain ORBLOCK')
self.given(b'''
nested_orblock
nested_orblock = <<|
|<<|
''',
expect_error=b'Line 4: ORBLOCK cannot contain ORBLOCK')
self.given(b'''
orblock_containing_lookblock
orblock_containing_lookblock = <<|
|<@>
|!/allowed/>
''',
expect_error=b'Line 4: ORBLOCK cannot contain LOOKAROUND')
def test_invalid_conditionals(self):
self.given(b'''
<<|
|[capt]?'whitespace needed around the ?'
''',
expect_error=b'''Line 3: Unexpected QUESTMARK
|[capt]?'whitespace needed around the ?'
^''')
self.given(b'''
<<|
|[capt]? 'whitespace needed around the ?'
''',
expect_error=b'''Line 3: Unexpected QUESTMARK
|[capt]? 'whitespace needed around the ?'
^''')
self.given(b'''
<<|
|[capt] ?'whitespace needed around the ?'
''',
expect_error=b'''Line 3: Unexpected STRING
|[capt] ?'whitespace needed around the ?'
^''')
self.given(b'''
<<|
|[capt] ? 'the capture must be defined'
|
''',
expect_error="Line 3: Bad CaptureCondition: 'capt' is not defined/not a capturing group")
self.given(b'''
<<|
|[alpha] ? 'the capture must be a capture'
|
''',
expect_error="Line 3: Bad CaptureCondition: 'alpha' is not defined/not a capturing group")
self.given(b'''
<<|
|[capt] ? 'last branch must not be conditional'
''',
expect_error=b'Line 3: The last branch of OR-block must not be conditional')
self.given(b'''
/currency/amount/
currency = <<|
|dollar
|euro
[dollar]: $
[euro]: :EURO_SIGN
amount = <<|
|[dollar] ? /digits/dot/digits/
|[euro] ? /digits/comma/digits/
digits = @1.. of digit
dot: .
comma: ,
''',
expect_error=b'Line 12: The last branch of OR-block must not be conditional')
def test_invalid_lookaround(self):
self.given(b'''
empty_lookaround_not_allowed
empty_lookaround_not_allowed = <@>
''',
expect_error=b'Line 4: Unexpected END_OF_LOOKAROUND')
self.given(b'''
/empty_lookaround/not_allowed/
empty_lookaround = <@>
not_allowed = 'NOTALLOWED'
''',
expect_error=b'Line 5: Unexpected END_OF_LOOKAROUND')
self.given(b'''
<@>
|> -- empty lookahead
''',
expect_error=b'''Line 3: Unexpected GT
|> -- empty lookahead
^''')
self.given(b'''
<@>
<| -- empty lookbehind
''',
expect_error=b'''Line 3: Unexpected BAR
<| -- empty lookbehind
^''')
self.given(b'''
<@>
|| -- empty base
''',
expect_error=b'''Line 3: Unexpected BAR
|| -- empty base
^''')
self.given(b'''
<@>
''',
expect_error=b'Line 3: Unexpected END_OF_LOOKAROUND')
self.given(b'''
/x/y/
x = <@>
<behind|
|ahead>
y = 'forgot empty line to terminate the lookaround'
''',
expect_error=b'''Line 6: Unexpected VARNAME (forgot to close LOOKAROUND?)
y = 'forgot empty line to terminate the lookaround'
^''')
self.given(b'''
<@>
<past|
|future>
past = 'behind'
future = 'ahead'
''',
expect_error=b'''Line 5: Unexpected VARNAME (forgot to close LOOKAROUND?)
past = 'behind'
^''')
self.given(b'''
alignment_check
alignment_check = <@>
<mis|
|align>
''',
expect_error=b'Line 5: Misaligned |')
self.given(b'''
<@>
<mis|
|align>
''',
expect_error=b'Line 4: Misaligned |')
self.given(b'''
<@>
<mis|
|align>
''',
expect_error=b'Line 4: Misaligned |')
self.given(b'''
<@>
<this_is_good|
|alignment|
<this_is_bad|
''',
expect_error=b'Line 5: Misaligned |')
self.given(b'''
<@>
|this_is_good>
|alignment|
|this_is_bad>
''',
expect_error=b'Line 5: Misaligned |')
self.given(b'''
<@>
|wrong|
|chaining|
''',
expect_error=b'Line 4: Misaligned |')
self.given(b'''
check_indent
check_indent = <@>
|/this/line/OK/|
</this/line/needs/deeper/indentation/|
''',
expect_error=b'Line 5: needs deeper indentation')
self.given(b'''
check_indent
check_indent = <@>
|this_line_OK|
</this/line/needs/deeper/indentation/|
''',
expect_error=b'Line 5: needs deeper indentation')
self.given(b'''
check_indent
check_indent = <@>
<this_line_needs_deeper_indentation|
|/this/one/OK/|
''',
expect_error=b'Line 4: needs deeper indentation')
self.given(b'''
<@>
|/more/indent/please/|
''',
expect_error=b'Line 3: needs deeper indentation')
self.given(b'''
<@>
|more_indent_please>
''',
expect_error=b'Line 3: needs deeper indentation')
self.given(b'''
<@>
<!enough_indent|
''',
expect_error=b'Line 3: needs deeper indentation')
self.given(b'''
syntax_err
syntax_err = <@>|ahead>
''',
expect_error=b'''Line 3: Unexpected BAR
syntax_err = <@>|ahead>
^''')
self.given(b'''
syntax_err
syntax_err = <@>
<missing/a/slash/|
''',
expect_error=b'''Line 4: Unexpected SLASH
<missing/a/slash/|
^''')
self.given(b'''
syntax_err
syntax_err = <@>
</missing/a/slash|
''',
expect_error=b'''Line 4: Unexpected BAR
</missing/a/slash|
^''')
self.given(b'''
syntax_err
syntax_err = <@>
<!missing/a/slash/|
''',
expect_error=b'''Line 4: Unexpected SLASH
<!missing/a/slash/|
^''')
self.given(b'''
syntax_err
syntax_err = <@>
|!missing/a/slash>
''',
expect_error=b'''Line 4: Unexpected SLASH
|!missing/a/slash>
^''')
self.given(b'''
syntax_err
syntax_err = <@>
|missing/slashes|
''',
expect_error=b'''Line 4: Unexpected SLASH
|missing/slashes|
^''')
self.given(b'''
syntax_err
syntax_err = <@>
</ahead/behind/>
''',
expect_error=b'''Line 4: Unexpected GT
</ahead/behind/>
^''')
self.given(b'''
syntax_err
syntax_err = <@>
<behind<
''',
expect_error=b'''Line 4: Unexpected LT
<behind<
^''')
self.given(b'''
syntax_err
syntax_err = <@>
</missing/bar/
''',
expect_error=b'''Line 4: Unexpected NEWLINE
</missing/bar/
^''')
self.given(b'''
syntax_err
syntax_err = <@>
/missing/bar/>
''',
expect_error=b'''Line 4: Unexpected SLASH
/missing/bar/>
^''')
self.given(b'''
syntax_err
syntax_err = <@>
!/missing/bar/>
''',
expect_error=b'''Line 4: Unexpected EXCLAMARK
!/missing/bar/>
^''')
self.given(b'''
syntax_err
syntax_err = <@>
missing_bar>
''',
expect_error=b'''Line 4: Unexpected VARNAME (forgot to close LOOKAROUND?)
missing_bar>
^''')
self.given(b'''
syntax_err
syntax_err = <@>
|missing_bar_or_gt
''',
expect_error=b'''Line 4: Unexpected NEWLINE
|missing_bar_or_gt
^''')
self.given(b'''
nested_orblock
nested_orblock = <@>
|nested_lookaround>
|<@>
|!allowed>
''',
expect_error=b'Line 5: LOOKAROUND cannot contain LOOKAROUND')
self.given(b'''
lookblock_containing_orblock
lookblock_containing_orblock = <@>
@|
|"can't"
''',
expect_error=b'Line 4: LOOKAROUND cannot contain ORBLOCK')
def test_invalid_non_op(self):
self.given(b'''
non-BOS
''',
expect_error="Line 2: 'non-BOS': 'BOS' is not a character-class")
self.given(b'''
non-any
''',
expect_error="Line 2: 'non-any': 'any' is not a character-class")
self.given(b'''
non-vowel
vowel = (ignorecase) 1 of: a i u e o
''',
expect_error="Line 2: 'non-vowel': 'vowel' is not a character-class")
self.given(b'''
non-pin
pin = @4..6 of digit
''',
expect_error="Line 2: 'non-pin': 'pin' is not a character-class")
self.given(b'''
non-digits
digits = @1.. of digit
''',
expect_error="Line 2: 'non-digits': 'digits' is not a character-class")
self.given(b'''
non-digits
digits = 1.. <<- of digit
''',
expect_error="Line 2: 'non-digits': 'digits' is not a character-class")
self.given(b'''
non-digits
digits = 1 <<+.. of digit
''',
expect_error="Line 2: 'non-digits': 'digits' is not a character-class")
self.given(b'''
non-non-alpha
''',
expect_error=b'''Line 2: Unexpected NON
non-non-alpha
^''')
def test_invalid_anchor_sugar(self):
self.given(b'''
./
''',
expect_error=b'''Line 2: Unexpected NEWLINE
./
^''')
self.given(b'''
/.
''',
expect_error=b'''Line 2: Unexpected DOT
/.
^''')
self.given(b'''
.//.
''',
expect_error=b'''Line 2: Unexpected SLASH
.//.
^''')
self.given(b'''
./alpha./
''',
expect_error=b'''Line 2: Unexpected DOT
./alpha./
^''')
self.given(b'''
/.alpha/.
''',
expect_error=b'''Line 2: Unexpected DOT
/.alpha/.
^''')
self.given(b'''
//
''',
expect_error=b'''Line 2: Unexpected NEWLINE
//
^''')
self.given(b'''
////
''',
expect_error=b'''Line 2: Unexpected SLASH
////
^''')
self.given(b'''
/alpha//digit/
''',
expect_error=b'''Line 2: Unexpected VARNAME
/alpha//digit/
^''')
self.given(b'''
/alpha/.digit/
''',
expect_error=b'''Line 2: Unexpected VARNAME
/alpha/.digit/
^''')
def test_invalid_numrange_shortcut(self):
self.given(b'''
123..456 -- the numbers should be as string
''',
expect_error=b'''Line 2: Unexpected NEWLINE
123..456 -- the numbers should be as string
^''')
self.given(b'''
'456'..'123'
''',
expect_error="Line 2: Bad number-range format: '456'..'123' (start > end)")
self.given(b'''
'000'..'fff' -- only supports decimal for now
''',
expect_error="Line 2: Bad number-range format: 'fff'")
self.given(b'''
'I'..'MCMXCVIII' -- only supports decimal for now
''',
expect_error="Line 2: Bad number-range format: 'I'")
self.given(b'''
'one'..'ten'
''',
expect_error="Line 2: Bad number-range format: 'one'")
self.given(b'''
'2.718'..'3.14'
''',
expect_error=r"Line 2: Bad number-range format: '2\.718'")
self.given(b'''
'2.718'..'3.14'..'0.001'
''',
expect_error=b'''Line 2: Unexpected DOT
'2.718'..'3.14'..'0.001'
^''')
self.given(b'''
'3,14'..'2,718'
''',
expect_error=r"Line 2: Bad number-range format: '3,14'")
self.given(b'''
'-1'..'-10' -- negative numbers not supported for now
''',
expect_error="Line 2: Bad number-range format: '-1'")
self.given(b'''
'1'..'99'..'2'
''',
expect_error=b'''Line 2: Unexpected DOT
'1'..'99'..'2'
^''')
self.given(b'''
'1'...'10'
''',
expect_error=b'''Line 2: Unexpected DOT
'1'...'10'
^''')
self.given(b'''
''..''
''',
expect_error="Line 2: Bad number-range format: ''")
self.given(b'''
'0'..''
''',
expect_error="Line 2: Bad number-range format: ''")
self.given(b'''
''..'1'
''',
expect_error="Line 2: Bad number-range format: ''")
self.given(b'''
'1'..'1oo'
''',
expect_error="Line 2: Bad number-range format: '1oo'")
self.given(b'''
'o01'..'999'
''',
expect_error="Line 2: Bad number-range format: 'o01' (ambiguous leading-zero spec)")
self.given(b'''
'0o1'..'999'
''',
expect_error="Line 2: Bad number-range format: '0o1'")
self.given(b'''
'o'..'999' -- should be '0'..'999'
''',
expect_error="Line 2: Bad number-range format: 'o'")
self.given(b'''
'ooo'..'999' -- should be 'oo0'..'999'
''',
expect_error="Line 2: Bad number-range format: 'ooo'")
self.given(b'''
'01'..'999' -- should be '001'..'999'
''',
expect_error="Line 2: Bad number-range format: '01'..'999' (lengths must be the same if using leading-zero/o format)")
self.given(b'''
'o1'..'999' -- should be 'oo1'..'999'
''',
expect_error="Line 2: Bad number-range format: 'o1'..'999' (lengths must be the same if using leading-zero/o format)")
self.given(b'''
'oo1'..'099'
''',
expect_error="Line 2: Bad number-range format: 'oo1'..'099' (one cannot be o-led while the other is zero-led)")
self.given(b'''
'09'..'o1'
''',
expect_error="Line 2: Bad number-range format: '09'..'o1' (one cannot be o-led while the other is zero-led)")
def test_invalid_infinite_numrange(self):
self.given(b'''
'00'..
''',
expect_error="Line 2: Infinite range cannot have (non-optional) leading zero: '00'..")
self.given(b'''
'01'..
''',
expect_error="Line 2: Infinite range cannot have (non-optional) leading zero: '01'..")
self.given(b'''
'0123'..
''',
expect_error="Line 2: Infinite range cannot have (non-optional) leading zero: '0123'..")
self.given(b'''
'oo0'..
''',
expect_error="Line 2: Infinite range: excessive leading-o: 'oo0'..")
self.given(b'''
'oo1'..
''',
expect_error="Line 2: Infinite range: excessive leading-o: 'oo1'..")
self.given(b'''
'oo123'..
''',
expect_error="Line 2: Infinite range: excessive leading-o: 'oo123'..")
def test_invalid_wordchar_redef(self):
self.given(b'''
.'cat'.
wordchar: A..z
''',
expect_error=b'Line 3: Redefining wordchar: must be global')
self.given(b'''
.'cat'.
*) wordchar = 'A-z'
''',
expect_error=b'Line 3: Redefining wordchar: wordchar must be a charclass')
self.given(b'''
/cat/
cat = 'cat'
*) wordchar: A..z
''',
expect_error=b'Line 4: Redefining wordchar: must be the first/before any other definition')
self.given(b'''
/cat/
cat = 'cat'
*) wordchar: A..z
''',
expect_error=b'Line 4: Redefining wordchar: must be the first/before any other definition')
class TestOutput(unittest.TestCase):
def given(self, oprex_source, expect_regex):
oprex_source = to_string(oprex_source)
expect_regex = to_string(expect_regex)
default_flags = '(?V1w)'
regex_source = oprex(oprex_source)
regex_source = regex_source.replace(default_flags, '', 1)
if regex_source != expect_regex:
msg = 'For input: %s\n---------------------------- Got Output: -----------------------------\n%s\n\n------------------------- Expected Output: ---------------------------\n%s'
raise AssertionError(msg % (
oprex_source or '(empty string)',
regex_source or '(empty string)',
expect_regex or '(empty string)',
))
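# given() strips the default '(?V1w)' flag prefix once before comparing,
# so the expected regexes below are written flag-free. A sketch of
# exercising a result directly (regex is the engine oprex targets):
#   regex.fullmatch(oprex("\n'lorem ipsum'\n"), 'lorem ipsum')  # matches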
def test_empties(self):
self.given('',
expect_regex=b'')
self.given(b'''
''',
expect_regex=b'')
self.given(b'''
''',
expect_regex=b'')
self.given(b'''
''',
expect_regex=b'')
def test_indentation(self):
# indentation using space
self.given(b'''
/weather/warming/
weather = 'local'
*) warming = 'global'
''',
expect_regex=b'localglobal')
# indentation using tab
self.given(b'''
/weather/warming/
\tweather = 'local'
*)\twarming = 'global'
''',
expect_regex=b'localglobal')
def test_escaping_output(self):
self.given(b'''
stars
stars = '***'
''',
expect_regex=br'\*\*\*')
self.given(b'''
add
add: +plus
plus: +
''',
expect_regex=br'\+')
def test_assignment_whitespace(self):
self.given(b'''
/a/b/c/d/e/f/g/h/i/j/k/l/m/
a='a'
b= 'b'
c ='c'
d = 'd'
e ='e'
f = 'f'
g = 'g'
h= 'h'
i = 'i'
j = 'j'
k = l = m = 'z'
''',
expect_regex=b'abcdefghijzzz')
self.given(b'''
add
add: +plus
plus: +
''',
expect_regex=br'\+')
def test_character_class(self):
self.given(b'''
papersize
papersize = /series/size/
series: A B C
size: 0 1 2 3 4 5 6 7 8
''',
expect_regex=b'[ABC][012345678]')
self.given(b'''
/A/a/
A = a: A a
''',
expect_regex=b'[Aa][Aa]')
self.given(b'''
/A/a/
A = a: A a
''',
expect_regex=b'[Aa][Aa]')
self.given(b'''
x
x: [ ^ \\ ]
''',
expect_regex=b'[\\[\\^\\\\\\]]')
def test_char(self):
self.given(b'''
x
x: /Alphabetic /Script=Latin /InBasicLatin /IsCyrillic /Script=Cyrillic
''',
expect_regex=b'[\p{Alphabetic}\p{Script=Latin}\p{InBasicLatin}\p{IsCyrillic}\p{Script=Cyrillic}]')
self.given(b'''
x
x: \u00ab \u00AB \U000000ab \u00Ab
''',
expect_regex=b'[\u00ab\u00AB\U000000ab\u00Ab]')
self.given(b'''
x
x: \u12ab \u12AB \u12Ab
''',
expect_regex=b'[\u12ab\u12AB\u12Ab]')
try:
self.given(b'''
x
x: \U0001234a \U0001234A \U0001234a
''',
expect_regex=b'[\U0001234a\U0001234A\U0001234a]')
except ValueError as e:
if 'narrow Python build' in str(e):
pass
else:
raise
self.given(b'''
x
x: :SKULL_AND_CROSSBONES :BIOHAZARD_SIGN :CANCER
''',
expect_regex=b'[\N{SKULL AND CROSSBONES}\N{BIOHAZARD SIGN}\N{CANCER}]')
def test_character_range_output(self):
self.given(br'''
AB
AB: A..\u0042
''',
expect_regex=br'[A-\u0042]')
self.given(br'''
AB
AB: \u0041..\U00000042
''',
expect_regex=br'[\u0041-\U00000042]')
self.given(br'''
AB
AB: \U00000041..\x42
''',
expect_regex=br'[\U00000041-\x42]')
self.given(br'''
AB
AB: \x41..\102
''',
expect_regex=br'[\x41-\102]')
self.given(br'''
AB
AB: \101..\N{LATIN CAPITAL LETTER B}
''',
expect_regex=br'[\101-\N{LATIN CAPITAL LETTER B}]')
self.given(b'''
AB
AB: \N{LATIN CAPITAL LETTER A}..:LEFT_RIGHT_OPEN-HEADED_ARROW
''',
expect_regex=br'[\N{LATIN CAPITAL LETTER A}-\N{LEFT RIGHT OPEN-HEADED ARROW}]')
self.given(b'''
AB
AB: :LATIN_CAPITAL_LETTER_A..\N{LEFT RIGHT OPEN-HEADED ARROW}
''',
expect_regex=br'[\N{LATIN CAPITAL LETTER A}-\N{LEFT RIGHT OPEN-HEADED ARROW}]')
self.given(br'''
colon_to_semi
colon_to_semi: :..;
''',
expect_regex=b'[:-;]')
self.given(b'''
need_escape
need_escape: [ ^ a - z ]
''',
expect_regex=br'[\[\^a\-z\]]')
def test_charclass_include_output(self):
self.given(u'''
op
op: +add +sub +mul +div
add: +
sub: -
mul: * ×
div: / ÷ :
''',
expect_regex=r'[+\-*×/÷:]')
self.given(u'''
aUmlaut
aUmlaut: +a_with_diaeresis
a_with_diaeresis: ä
''',
expect_regex=u'ä')
self.given(u'''
aUmlaut
aUmlaut: +small_a_umlaut
small_a_umlaut: +a_with_diaeresis
a_with_diaeresis: ä
''',
expect_regex=u'ä')
self.given(b'''
aUmlaut
aUmlaut: +a_with_diaeresis
a_with_diaeresis: \u00E4
''',
expect_regex=br'\u00E4')
self.given(u'''
aUmlaut
aUmlaut: +a_with_diaeresis
a_with_diaeresis: :LATIN_SMALL_LETTER_A_WITH_DIAERESIS
''',
expect_regex=br'\N{LATIN SMALL LETTER A WITH DIAERESIS}')
self.given(u'''
alphabetic
alphabetic: +is_alphabetic
is_alphabetic: /Alphabetic
''',
expect_regex=br'\p{Alphabetic}')
self.given(u'''
lowaz
lowaz: +lowerAZ
lowerAZ: a..z
''',
expect_regex=b'[a-z]')
self.given(u'''
/hex/
hex: +hexx +hexy +hexz
hexx = hexdigit
hexdigit: 0..9
hexy = hexz = hexalpha
hexalpha: a..f A..F
''',
expect_regex=b'[0-9a-fA-Fa-fA-F]')
self.given(u'''
/xx/dup/
xx: x X
dup: +xx
''',
expect_regex=b'[xX][xX]')
self.given(u'''
/cc/
cc: +yy a
yy: y not Y
''',
expect_regex=b'[[y--Y]a]')
self.given(u'''
/cc/
cc: a +yy
yy: y not Y
''',
expect_regex=b'[a[y--Y]]')
self.given(u'''
/cc/
cc: +xx +yy
xx: x not X
yy: y not Y
''',
expect_regex=b'[[x--X][y--Y]]')
self.given(u'''
/cc/
cc: +xx a +yy
xx: x not X
yy: y not Y
''',
expect_regex=b'[[x--X]a[y--Y]]')
self.given(u'''
/notX/
notX: not: X
''',
expect_regex=b'[^X]')
self.given(u'''
not: X
''',
expect_regex=b'[^X]')
self.given(u'''
not: X Y Z
''',
expect_regex=b'[^XYZ]')
self.given(u'''
/not_X/
not_X: notX
notX: not: X
''',
expect_regex=b'[^X]')
self.given(u'''
/notNotX/
notNotX: not: +notX
notX: not: X
''',
expect_regex=b'X')
self.given(u'''
not: not: X
''',
expect_regex=b'X')
self.given(u'''
1 of: not: not: X
''',
expect_regex=b'X')
self.given(u'''
not: not: not: X
''',
expect_regex=b'[^X]')
self.given(u'''
not: not: not: not: X
''',
expect_regex=b'X')
self.given(u'''
not: not: -
''',
expect_regex=b'-')
self.given(u'''
not: -
''',
expect_regex=br'[^\-]')
self.given(u'''
/plus/minus/pmz/
plus: +
minus: -
pmz: +plus +minus z
''',
expect_regex=b'\+-[+\-z]')
self.given(u'''
vowhex
vowhex: +vowel +hex
vowel: a i u e o A I U E O
hex: 0..9 a..f A..F
''',
expect_regex=b'[aiueoAIUEO0-9a-fA-F]')
self.given(b'''
x
x: +__special__
__special__: x
''',
expect_regex=b'x')
self.given(b'''
x
x: __special__
__special__: x
''',
expect_regex=b'x')
self.given(b'''
/dot/period/
period: .
dot: . period
''',
expect_regex=b'[..]\.')
def test_charclass_operation_output(self):
self.given(u'''
xb123
xb123: X x +hex not c..f C..D :LATIN_CAPITAL_LETTER_F +vowel and 1 2 3 /Alphabetic
hex: 0..9 a..f A..F
vowel: a i u e o A I U E O
''',
expect_regex=b'[Xx0-9a-fA-F--c-fC-D\N{LATIN CAPITAL LETTER F}aiueoAIUEO&&123\p{Alphabetic}]')
self.given(u'''
allButU
allButU: not: U
''',
expect_regex=b'[^U]')
self.given(u'''
nonalpha
nonalpha: not: /Alphabetic
''',
expect_regex=b'\P{Alphabetic}')
self.given(u'''
not: digit
''',
expect_regex=b'\D')
self.given(u'''
not: /Alphabetic
''',
expect_regex=b'\P{Alphabetic}')
self.given(u'''
otherz
otherz: +nonz
nonz: not: z
''',
expect_regex=b'[^z]')
self.given(u'''
a_or_consonant
a_or_consonant: A a +consonant
consonant: a..z A..Z not a i u e o A I U E O
''',
expect_regex=b'[Aa[a-zA-Z--aiueoAIUEO]]')
self.given(u'''
maestro
maestro: m +ae s t r o
ae: +vowel and +hex not +upper
hex: +digit a..f A..F
vowel: a i u e o A I U E O
''',
expect_regex=br'[m[aiueoAIUEO&&\da-fA-F--A-Z]stro]')
def test_charclass_escape_output(self):
self.given(b'''
1 of: \u0061 \U00000061 \x61 \61
''',
expect_regex=b'[\u0061\U00000061\x61\61]')
self.given(br'''
1 of: \u0061 \U00000061 \x61 \61
''',
expect_regex=br'[\u0061\U00000061\x61\61]')
self.given(u'''
1 of: \u0061 \U00000061 \x61 \61
''',
expect_regex=u'[\u0061\U00000061\x61\61]')
self.given(r'''
1 of: \u0061 \U00000061 \x61 \61
''',
expect_regex=r'[\u0061\U00000061\x61\61]')
self.given(br'''
allowed_escape
allowed_escape: \n \r \t \a \b \v \f
''',
expect_regex=br'[\n\r\t\a\b\v\f]')
self.given(b'''
backspace
backspace: \\
''',
expect_regex=br'\\')
self.given(br'''
unicode_charname
unicode_charname: \N{AMPERSAND} :AMPERSAND \N{BIOHAZARD SIGN} :BIOHAZARD_SIGN
''',
expect_regex=b'[\N{AMPERSAND}\N{AMPERSAND}\N{BIOHAZARD SIGN}\N{BIOHAZARD SIGN}]')
def test_string_literal(self):
self.given(b'''
'lorem ipsum'
''',
expect_regex=b'lorem ipsum')
self.given(b'''
"lorem ipsum"
''',
expect_regex=b'lorem ipsum')
self.given(b'''
"Ron's"
''',
expect_regex="Ron's")
self.given(br'''
'Ron\'s'
''',
expect_regex="Ron's")
self.given(b'''
'Ron\\'s'
''',
expect_regex="Ron's")
self.given(b'''
'said "Hi"'
''',
expect_regex=b'said "Hi"')
self.given(br'''
"said \"Hi\""
''',
expect_regex=b'said "Hi"')
self.given(br'''
"name:\toprex\nawesome:\tyes"
''',
expect_regex=b'name:\\toprex\\nawesome:\\tyes')
def test_string_interpolation(self):
self.given(b'''
/p/pXs/s/
p = '%'
s = 's'
pXs = /p/X/s/
[X] = 'X'
''',
expect_regex=b'%%(?P<X>X)ss')
self.given(b'''
/p/pXs/s/
p = '%'
s = 's'
[pXs] = /p/X/s/
[X] = 'X'
''',
expect_regex=b'%(?P<pXs>%(?P<X>X)s)s')
self.given(b'''
greeting
greeting = 'Hello %(name)s'
''',
expect_regex=b'Hello %\(name\)s')
self.given(b'''
message
message = /greeting/name/
greeting = 'Hello%'
name = /salutation/first/last/
[salutation] = 'Sir/Madam'
first = 's%(first)s'
last = '%(last)s'
''',
expect_regex=b'Hello%(?P<salutation>Sir/Madam)s%\(first\)s%\(last\)s')
def test_scoping(self):
self.given(b'''
/subject/predicate/object/
subject = /article/adjective/noun/
article = 'the'
adjective = /speed/color/
speed = 'quick'
color = 'brown'
noun = 'fox'
predicate = /verb/adverb/
verb = 'jumps'
adverb = 'over'
object = /article/adjective/noun/
article = 'the'
adjective = 'lazy'
noun = 'dog'
''',
expect_regex=b'thequickbrownfoxjumpsoverthelazydog')
self.given(b'''
/subject/predicate/object/
subject = /article/adjective/noun/
*) article = 'the'
*) adjective = /speed/color/
speed = 'quick'
color = 'brown'
*) noun = 'fox'
predicate = /verb/adverb/
verb = 'jumps'
adverb = 'over'
object = /article/adjective/noun/
''',
expect_regex=b'thequickbrownfoxjumpsoverthequickbrownfox')
self.given(b'''
/grosir/banana4/papaya4/
grosir = /ciek/empat/sekawan/
ciek: 1
*) empat = sekawan: 4
banana4 = /gedang/sekawan/
gedang = 'banana'
papaya4 = /gedang/opat/
gedang = 'papaya'
opat = empat
''',
expect_regex=b'144banana4papaya4')
self.given(b'''
/oneoneone/oneone/one/
oneoneone = /satu/uno/ichi/
satu = '1'
*) uno = ichi = satu
oneone = /uno/ichi/
one = satu
satu: 1
''',
expect_regex=b'111111')
def test_aliases(self):
self.given(b'''
/griffin/griffon/gryphon/alce/keythong/opinicus/
griffin = griffon = 'protoceratops'
gryphon = griffon
alce = keythong = opinicus = griffin
''',
expect_regex=b'protoceratopsprotoceratopsprotoceratopsprotoceratopsprotoceratopsprotoceratops')
self.given(b'''
/X/deadeye/ten/unknown_thing/wrong_answer/
deadeye = X: X
ten = X
unknown_thing = wrong_answer = X
''',
expect_regex=b'XXXXX')
def test_empty_lines_ok(self):
self.given(b'''
/subject/predicate/object/
subject = /article/adjective/noun/
article = 'the'
adjective = /speed/color/
speed = 'quick'
color = 'brown'
noun = 'fox'
predicate = /verb/adverb/
verb = 'jumps'
adverb = 'over'
object = /article/adjective/noun/
article = 'the'
adjective = 'lazy'
noun = 'dog'
''',
expect_regex=b'thequickbrownfoxjumpsoverthelazydog')
def test_captures_output(self):
self.given(b'''
/extra/extra?/
[extra] = 'icing'
''',
expect_regex=b'(?P<extra>icing)(?P<extra>icing)?')
self.given(b'''
/defcon/level/
defcon = 'DEFCON'
[level]: 1 2 3 4 5
''',
expect_regex=br'DEFCON(?P<level>[12345])')
self.given(b'''
captured?
[captured] = /L/R/
[L] = 'Left'
[R] = 'Right'
''',
expect_regex=br'(?P<captured>(?P<L>Left)(?P<R>Right))?')
self.given(b'''
uncaptured?
uncaptured = /L?/R/
[L] = 'Left'
[R] = 'Right'
''',
expect_regex=br'(?:(?P<L>Left)?(?P<R>Right))?')
def test_atomic_grouping_output(self):
self.given(b'''
@/alpha/ -- possible though pointless
''',
expect_regex=br'(?>[a-zA-Z])')
self.given(b'''
@/alpha/digit/ -- possible though pointless
''',
expect_regex=br'(?>[a-zA-Z]\d)')
self.given(b'''
@/digits?/even/
digits = 1.. <<- of digit
even: 0 2 4 6 8
''',
expect_regex=br'(?>\d*[02468])')
self.given(b'''
./digits?/even/.
digits = 1.. <<- of digit
even: 0 2 4 6 8
''',
expect_regex=br'\A\d*[02468]\Z')
self.given(b'''
//digits?/even//
digits = 1.. <<- of digit
even: 0 2 4 6 8
''',
expect_regex=br'(?m:^)\d*[02468](?m:$)')
def test_builtin_output(self):
self.given(b'''
/alpha/upper/lower/digit/alnum/
''',
expect_regex=br'[a-zA-Z][A-Z][a-z]\d[a-zA-Z0-9]')
self.given(b'''
(unicode)
/alpha/upper/lower/digit/alnum/
''',
expect_regex=br'(?V1wu)\p{Alphabetic}\p{Uppercase}\p{Lowercase}\d\p{Alphanumeric}')
self.given(b'''
/BOS/EOS/BOL/EOL/BOW/EOW/WOB/
''',
expect_regex=br'\A\Z(?m:^)(?m:$)\m\M\b')
self.given(b'''
(multiline)
/BOS/EOS/BOL/EOL/BOW/EOW/WOB/
''',
expect_regex=br'(?V1wm)\A\Z^$\m\M\b')
self.given(b'''
(-multiline)
/BOS/EOS/BOL/EOL/BOW/EOW/WOB/
''',
expect_regex=br'(?V1w-m)\A\Z(?m:^)(?m:$)\m\M\b')
self.given(b'''
/any/uany/
''',
expect_regex=br'(?s:.)\X')
self.given(b'''
(dotall)
/any/uany/
''',
expect_regex=br'(?V1ws).\X')
self.given(b'''
(-dotall)
/any/uany/
''',
expect_regex=br'(?V1w-s)(?s:.)\X')
self.given(b'''
/backslash/wordchar/whitechar/linechar/padchar/space/tab/
''',
expect_regex=br'\\\w\s[\r\n\x0B\x0C][ \t] \t')
self.given(b'''
(word verbose)
/backslash/wordchar/whitechar/linechar/padchar/space/tab/
''',
expect_regex=br'(?V1wx)\\\w\s[\r\n\x0B\x0C][ \t][ ]\t')
self.given(b'''
(-word -verbose)
/backslash/wordchar/whitechar/linechar/padchar/space/tab/
''',
expect_regex=br'(?V1-wx)\\\w\s\n[ \t] \t')
self.given(b'''
/non-alpha/non-upper/non-lower/non-digit/non-alnum/
''',
expect_regex=br'[^a-zA-Z][^A-Z][^a-z]\D[^a-zA-Z0-9]')
self.given(b'''
(unicode)
/non-alpha/non-upper/non-lower/non-digit/non-alnum/
''',
expect_regex=br'(?V1wu)\P{Alphabetic}\P{Uppercase}\P{Lowercase}\D\P{Alphanumeric}')
self.given(b'''
/non-WOB/
''',
expect_regex=br'\B')
self.given(b'''
/non-backslash/non-wordchar/non-whitechar/non-linechar/non-padchar/non-space/non-tab/
''',
expect_regex=br'[^\\]\W\S.[^ \t][^ ][^\t]')
self.given(b'''
(word verbose)
/non-backslash/non-wordchar/non-whitechar/non-linechar/non-padchar/non-space/non-tab/
''',
expect_regex=br'(?V1wx)[^\\]\W\S.[^ \t][^ ][^\t]')
self.given(b'''
(-word -verbose)
/non-backslash/non-wordchar/non-whitechar/non-linechar/non-padchar/non-space/non-tab/
''',
expect_regex=br'(?V1-wx)[^\\]\W\S.[^ \t][^ ][^\t]')
def test_quantifier_output(self):
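# quantifier flavors: plain "n of" repeats exactly, @min..max is possessive ({m,n}+),
# min..max <<- is greedy-backtrackable ({m,n}), min <<+..max is lazy ({m,n}?),
# and "? of" makes the whole expression optional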
self.given(b'''
0 of alpha
''',
expect_regex=b'')
self.given(b'''
1 of alpha
''',
expect_regex=b'[a-zA-Z]')
self.given(b'''
2 of alpha
''',
expect_regex=b'[a-zA-Z]{2}')
self.given(b'''
0 of: alpha
''',
expect_regex=b'')
self.given(b'''
1 of: alpha
''',
expect_regex=b'[a-zA-Z]')
self.given(b'''
2 of: alpha
''',
expect_regex=b'[a-zA-Z]{2}')
self.given(b'''
@0 of alpha
''',
expect_regex=b'')
self.given(b'''
@1 of alpha
''',
expect_regex=b'[a-zA-Z]')
self.given(b'''
@2 of alpha
''',
expect_regex=b'[a-zA-Z]{2}')
self.given(b'''
@0 of: alpha
''',
expect_regex=b'')
self.given(b'''
@1 of: alpha
''',
expect_regex=b'[a-zA-Z]')
self.given(b'''
@2 of: alpha
''',
expect_regex=b'[a-zA-Z]{2}')
self.given(b'''
@0.. of alpha
''',
expect_regex=b'[a-zA-Z]*+')
self.given(b'''
@1.. of alpha
''',
expect_regex=b'[a-zA-Z]++')
self.given(b'''
@2.. of alpha
''',
expect_regex=b'[a-zA-Z]{2,}+')
self.given(b'''
@0..2 of alpha
''',
expect_regex=b'[a-zA-Z]{,2}+')
self.given(b'''
@0..1 of alpha
''',
expect_regex=b'[a-zA-Z]?+')
self.given(b'''
@3..4 of alpha
''',
expect_regex=b'[a-zA-Z]{3,4}+')
self.given(b'''
0.. <<- of alpha
''',
expect_regex=b'[a-zA-Z]*')
self.given(b'''
1.. <<- of alpha
''',
expect_regex=b'[a-zA-Z]+')
self.given(b'''
2.. <<- of alpha
''',
expect_regex=b'[a-zA-Z]{2,}')
self.given(b'''
0..2 <<- of alpha
''',
expect_regex=b'[a-zA-Z]{,2}')
self.given(b'''
0..1 <<- of alpha
''',
expect_regex=b'[a-zA-Z]?')
self.given(b'''
0 <<+..1 of alpha
''',
expect_regex=b'[a-zA-Z]??')
self.given(b'''
3..4 <<- of alpha
''',
expect_regex=b'[a-zA-Z]{3,4}')
self.given(b'''
0 <<+.. of alpha
''',
expect_regex=b'[a-zA-Z]*?')
self.given(b'''
1 <<+.. of alpha
''',
expect_regex=b'[a-zA-Z]+?')
self.given(b'''
2 <<+.. of alpha
''',
expect_regex=b'[a-zA-Z]{2,}?')
self.given(b'''
0 <<+..1 of alpha
''',
expect_regex=b'[a-zA-Z]??')
self.given(b'''
0 <<+..2 of alpha
''',
expect_regex=b'[a-zA-Z]{,2}?')
self.given(b'''
1 <<+..2 of alpha
''',
expect_regex=b'[a-zA-Z]{1,2}?')
self.given(b'''
alphas?
alphas = @1.. of alpha
''',
expect_regex=b'[a-zA-Z]*+')
self.given(b'''
alphas?
alphas = @0.. of alpha
''',
expect_regex=b'(?:[a-zA-Z]*+)?')
self.given(b'''
opt_alpha?
opt_alpha = @0..1 of alpha
''',
expect_regex=b'(?:[a-zA-Z]?+)?')
self.given(b'''
opt_alpha?
opt_alpha = 0..1 <<- of alpha
''',
expect_regex=b'(?:[a-zA-Z]?)?')
self.given(b'''
opt_alpha?
opt_alpha = 0 <<+..1 of alpha
''',
expect_regex=b'(?:[a-zA-Z]??)?')
self.given(b'''
@0..1 of @0..1 of alpha
''',
expect_regex=b'(?:[a-zA-Z]?+)?+')
self.given(b'''
@0..1 of 0..1 <<- of alpha
''',
expect_regex=b'(?:[a-zA-Z]?)?+')
self.given(b'''
0 <<+..1 of @0..1 of alpha
''',
expect_regex=b'(?:[a-zA-Z]?+)??')
self.given(b'''
0..1 <<- of 0 <<+..1 of alpha
''',
expect_regex=b'(?:[a-zA-Z]??)?')
self.given(b'''
2 of @0..1 of alpha
''',
expect_regex=b'(?:[a-zA-Z]?+){2}')
self.given(b'''
@0..1 of 2 of alpha
''',
expect_regex=b'(?:[a-zA-Z]{2})?+')
self.given(b'''
@0..1 of @1..3 of alpha
''',
expect_regex=b'(?:[a-zA-Z]{1,3}+)?+')
self.given(b'''
@1..3 of @0..1 of alpha
''',
expect_regex=b'(?:[a-zA-Z]?+){1,3}+')
self.given(b'''
1..3 <<- of @5..7 of alpha
''',
expect_regex=b'(?:[a-zA-Z]{5,7}+){1,3}')
self.given(b'''
@1..3 of 5..7 <<- of alpha
''',
expect_regex=b'(?:[a-zA-Z]{5,7}){1,3}+')
self.given(b'''
1..3 <<- of 5..7 <<- of alpha
''',
expect_regex=b'(?:[a-zA-Z]{5,7}){1,3}')
self.given(b'''
1 <<+..3 of 5..7 <<- of alpha
''',
expect_regex=b'(?:[a-zA-Z]{5,7}){1,3}?')
self.given(b'''
1..3 <<- of 5 <<+..7 of alpha
''',
expect_regex=b'(?:[a-zA-Z]{5,7}?){1,3}')
self.given(b'''
1 <<+..3 of 5 <<+..7 of alpha
''',
expect_regex=b'(?:[a-zA-Z]{5,7}?){1,3}?')
self.given(b'''
2 of @3..4 of alpha
''',
expect_regex=b'(?:[a-zA-Z]{3,4}+){2}')
self.given(b'''
2 of 3..4 <<- of alpha
''',
expect_regex=b'(?:[a-zA-Z]{3,4}){2}')
self.given(b'''
2 of 3 <<+..4 of alpha
''',
expect_regex=b'(?:[a-zA-Z]{3,4}?){2}')
self.given(b'''
@2..3 of 4 of alpha
''',
expect_regex=b'(?:[a-zA-Z]{4}){2,3}+')
self.given(b'''
2..3 <<- of 4 of alpha
''',
expect_regex=b'(?:[a-zA-Z]{4}){2,3}')
self.given(b'''
2 <<+..3 of 4 of alpha
''',
expect_regex=b'(?:[a-zA-Z]{4}){2,3}?')
self.given(b'''
css_color
css_color = 6 of hex
hex: 0..9 a..f
''',
expect_regex=b'[0-9a-f]{6}')
self.given(b'''
css_color
css_color = 3 of 2 of hex
hex: 0..9 a..f
''',
expect_regex=b'[0-9a-f]{6}')
self.given(b'''
css_color
css_color = 3 of hexbyte
hexbyte = 2 of: 0..9 a..f
''',
expect_regex=b'[0-9a-f]{6}')
self.given(b'''
DWORD_speak
DWORD_speak = @1.. of 4 of hex
hex: 0..9 A..F
''',
expect_regex=b'(?:[0-9A-F]{4})++')
self.given(b'''
? of alpha
''',
expect_regex=b'[a-zA-Z]?')
self.given(b'''
? of alphas
alphas = @1.. of alpha
''',
expect_regex=b'[a-zA-Z]*+')
self.given(b'''
? of alphas
alphas = 1.. <<- of alpha
''',
expect_regex=b'[a-zA-Z]*')
self.given(b'''
? of alphas
alphas = 1 <<+.. of alpha
''',
expect_regex=b'[a-zA-Z]*?')
def test_commenting(self):
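# "--" starts a comment; comments and comment-only lines must not affect the output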
self.given(b'''
-- comments should be ignored
''',
expect_regex=b'')
self.given(b'''
-- comments should be ignored
''',
expect_regex=b'')
self.given(b'''
-- comments should be ignored
--comments should be ignored
''',
expect_regex=b'')
self.given(b'''
-- comments should be ignored
--comments should be ignored
''',
expect_regex=b'')
self.given(b'''
--comments should be ignored
--comments should be ignored
-- comments should be ignored
''',
expect_regex=b'')
self.given(b'''
--comments should be ignored
-- comments should be ignored
-- comments should be ignored
--comments should be ignored
''',
expect_regex=b'')
self.given(b'''-- first line containing comments, and only comments, is OK
-- so is last line''',
expect_regex=b'')
self.given(b'''--
-- ''',
expect_regex=b'')
self.given(b''' --
--''',
expect_regex=b'')
self.given(b'''
--
''',
expect_regex=b'')
self.given(b'''
--
''',
expect_regex=b'')
self.given(b'''
---
''',
expect_regex=b'')
self.given(b'''
--
--
''',
expect_regex=b'')
self.given(b'''
--
--
''',
expect_regex=b'')
self.given(b'''
--
--
--
''',
expect_regex=b'')
self.given(b'''
--
--
--
--
''',
expect_regex=b'')
self.given(b'''
/comment/ -- should be ignored
comment = 'first'
''',
expect_regex=b'first')
self.given(b'''
/comment/ --should be ignored
comment = 'first'
''',
expect_regex=b'first')
self.given(b'''
/comment/ --
comment = 'first'
''',
expect_regex=b'first')
self.given(b'''
-- begin
/social_symbol/literally/literal/ --comments should be ignored
social_symbol: @ # -- the social media symbols
literally = 'literally' -- string literal
literal = literally --alias
--end
''',
expect_regex=b'[@#]literallyliterally')
def test_reference_output(self):
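# =name is a backreference to the capture [name], compiling to (?P=name)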
self.given(u'''
/bang/=bang/
[bang]: b a n g !
''',
expect_regex=b'(?P<bang>[bang!])(?P=bang)')
self.given(u'''
/=bang/bang/
[bang]: b a n g !
''',
expect_regex=b'(?P=bang)(?P<bang>[bang!])')
self.given(u'''
/bang/=bang?/
[bang]: b a n g !
''',
expect_regex=b'(?P<bang>[bang!])(?P=bang)?')
self.given(u'''
/=bang?/bang/
[bang]: b a n g !
''',
expect_regex=b'(?P=bang)?(?P<bang>[bang!])')
def test_wordchar_boundary_output(self):
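# a dot before/after a literal asserts a word boundary (\b), an underscore asserts a non-boundary (\B); WOB/BOW/EOW are the builtin boundary names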
self.given(b'''
/wordchar/WOB/non-WOB/BOW/EOW/
''',
expect_regex=br'\w\b\B\m\M')
self.given(b'''
realworld_wordchar
realworld_wordchar: +wordchar - not +digit _
''',
expect_regex=br'[\w\---\d_]')
self.given(b'''
cat
cat = .'cat'.
''',
expect_regex=br'\bcat\b')
self.given(b'''
/WOB/cat/WOB/
cat = 'cat'
''',
expect_regex=br'\bcat\b')
self.given(b'''
/BOW/cat/EOW/
cat = 'cat'
''',
expect_regex=br'\mcat\M')
self.given(b'''
/WOB/cat/WOB/
cat = .'cat'.
''',
expect_regex=br'\b\bcat\b\b')
self.given(b'''
/anti/non-WOB/
anti = 'anti'
''',
expect_regex=br'anti\B')
self.given(b'''
somethingtastic
somethingtastic = _'tastic'
''',
expect_regex=br'\Btastic')
self.given(b'''
expletification
expletification = _'bloody'_
''',
expect_regex=br'\Bbloody\B')
self.given(b'''
non-WOB
''',
expect_regex=br'\B')
self.given(b'''
WOB
''',
expect_regex=br'\b')
self.given(b'''
2 of WOB
''',
expect_regex=br'\b{2}')
self.given(b'''
bdry
bdry = @/WOB/
''',
expect_regex=br'(?>\b)')
self.given(b'''
bdry
[bdry] = WOB
''',
expect_regex=br'(?P<bdry>\b)')
self.given(b'''
bdries
bdries = 1 of 2 of 3 of WOB
''',
expect_regex=br'\b{6}')
self.given(b'''
bdries?
bdries = @1.. of WOB
''',
expect_regex=br'\b*+')
def test_string_escape_output(self):
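# in non-raw sources Python resolves the escape before oprex sees it; raw sources pass the escape through into the regex verbatim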
self.given(br'''
@3.. of '\n'
''',
expect_regex=br'\n{3,}+')
self.given(br'''
@3.. of '\t'
''',
expect_regex=br'\t{3,}+')
self.given(b'''
@3.. of '\t'
''',
expect_regex=b'\t{3,}+')
self.given(br'''
@3.. of '\x61'
''',
expect_regex=br'\x61{3,}+')
self.given(b'''
@3.. of '\x61'
''',
expect_regex=b'a{3,}+')
self.given(u'''
@3.. of '\U00000061'
''',
expect_regex=b'a{3,}+')
self.given(br'''
@3.. of '\u0061'
''',
expect_regex=br'\u0061{3,}+')
self.given(br'''
@3.. of '\61'
''',
expect_regex=br'\61{3,}+')
self.given(u'''
@3.. of '\N{AMPERSAND}'
''',
expect_regex=b'&{3,}+')
self.given(br'''
@3.. of '\N{AMPERSAND}'
''',
expect_regex=br'\N{AMPERSAND}{3,}+')
self.given(br'''
@3.. of '\N{LEFTWARDS ARROW}'
''',
expect_regex=br'\N{LEFTWARDS ARROW}{3,}+')
self.given(u'''
@3.. of 'M\N{AMPERSAND}M\\\N{APOSTROPHE}s'
''',
expect_regex="(?:M&M's){3,}+")
self.given(r'''
@3.. of 'M\N{AMPERSAND}M\N{APOSTROPHE}s'
''',
expect_regex=br'(?:M\N{AMPERSAND}M\N{APOSTROPHE}s){3,}+')
self.given(r'''
@3.. of '\r\n'
''',
expect_regex=br'(?:\r\n){3,}+')
self.given(br'''
'\a\b\f\v\t'
''',
expect_regex=br'\x07\x08\x0C\x0B\t')
self.given(br'''
'.\w\b\s\X\n'
''',
expect_regex=br'\.\\w\x08\\s\\X\n')
def test_flagging_output(self):
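# a flags-only line sets global flags, e.g. (?V1wu); "(flag) expr" applies the flag as a scoped group, e.g. (?i:...)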
self.given(b'''
(unicode)
''',
expect_regex=b'(?V1wu)')
self.given(b'''
(ascii version0)
''',
expect_regex=b'(?waV0)')
self.given(b'''
(bestmatch dotall enhancematch fullcase ignorecase locale multiline reverse verbose version1 word)
''',
expect_regex=b'(?bsefiLmrxV1w)')
self.given(b'''
(multiline)
''',
expect_regex=b'(?V1wm)')
self.given(b'''
(-multiline)
''',
expect_regex=b'(?V1w-m)')
self.given(b'''
(-word)
''',
expect_regex=b'(?V1-w)')
self.given(b'''
(ignorecase)
''',
expect_regex=b'(?V1wi)')
self.given(b'''
(-ignorecase)
''',
expect_regex=b'(?V1w-i)')
self.given(b'''
(unicode ignorecase)
''',
expect_regex=b'(?V1wui)')
self.given(b'''
(unicode)
(ignorecase) alpha
''',
expect_regex=b'(?V1wu)(?i:\p{Alphabetic})')
self.given(b'''
(unicode ignorecase)
(-ignorecase) lower
''',
expect_regex=b'(?V1wui)(?-i:\p{Lowercase})')
self.given(b'''
(ignorecase) .'giga'_
''',
expect_regex=br'(?i:\bgiga\B)')
self.given(b'''
(ignorecase) /super/uppers/
super = 'super'
uppers = (-ignorecase) @1.. of upper
''',
expect_regex=b'(?i:super(?-i:[A-Z]++))')
self.given(b'''
hex?
hex = (ignorecase) 1 of: +digit a..f
''',
expect_regex=br'(?i:[\da-f])?')
self.given(b'''
(ignorecase) 2 of 'yadda'
''',
expect_regex=br'(?i:(?:yadda){2})')
self.given(b'''
2 of (ignorecase) 'yadda'
''',
expect_regex=br'(?i:yadda){2}')
self.given(b'''
2 of (ignorecase) 3 of 4 of (ignorecase) 'yadda'
''',
expect_regex=br'(?i:(?i:yadda){12}){2}')
def test_variable_named_of(self):
self.given(b'''
2 of of -- a variable named "of"
of = 'of'
''',
expect_regex=br'(?:of){2}')
self.given(b'''
1 of 2 of of
of = 'of'
''',
expect_regex=br'(?:of){2}')
self.given(b'''
1 of 2 of 3 of of
of: o f
''',
expect_regex=br'[of]{6}')
self.given(b'''
2 of /digit/of/digit/
of: o f
''',
expect_regex=br'(?:\d[of]\d){2}')
self.given(b'''
1 of 2 of '3 of alpha'
''',
expect_regex=br'(?:3 of alpha){2}')
def test_flag_dependent_charclass_output(self):
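# builtins such as BOL/EOL, any, and linechar compile differently depending on the multiline/dotall/verbose/word/unicode flags in effect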
self.given(b'''
/BOL/EOL/line2/BOS/EOS/
line2 = (multiline) /BOL/EOL/BOS/EOS/
''',
expect_regex=b'(?m:^)(?m:$)(?m:^$\A\Z)\A\Z')
self.given(b'''
(multiline)
/BOL/EOL/line2/BOS/EOS/
line2 = (multiline) /BOL/EOL/BOS/EOS/
''',
expect_regex=b'(?V1wm)^$(?m:^$\A\Z)\A\Z')
self.given(b'''
(-multiline)
/BOL/EOL/line2/BOS/EOS/
line2 = (multiline) /BOL/EOL/BOS/EOS/
''',
expect_regex=b'(?V1w-m)(?m:^)(?m:$)(?m:^$\A\Z)\A\Z')
self.given(b'''
/BOL/EOL/line2/BOS/EOS/
line2 = (-multiline) /BOL/EOL/BOS/EOS/
''',
expect_regex=b'(?m:^)(?m:$)(?-m:(?m:^)(?m:$)\A\Z)\A\Z')
self.given(b'''
(multiline)
/BOL/EOL/line2/BOS/EOS/
line2 = (-multiline) /BOL/EOL/BOS/EOS/
''',
expect_regex=b'(?V1wm)^$(?-m:(?m:^)(?m:$)\A\Z)\A\Z')
self.given(b'''
(-multiline)
/BOL/EOL/line2/BOS/EOS/
line2 = (-multiline) /BOL/EOL/BOS/EOS/
''',
expect_regex=b'(?V1w-m)(?m:^)(?m:$)(?-m:(?m:^)(?m:$)\A\Z)\A\Z')
self.given(b'''
/BOL/EOL/BOS/EOS/line2/
line2 = (dotall) /BOL/EOL/BOS/EOS/ -- should be unaffected
''',
expect_regex=b'(?m:^)(?m:$)\A\Z(?s:(?m:^)(?m:$)\A\Z)')
self.given(b'''
/any/any2/any3/
any2 = (dotall) any
any3 = (-dotall) any
''',
expect_regex=b'(?s:.)(?s:.)(?-s:(?s:.))')
self.given(b'''
/space/tab/spacetab2/spacetab3/
spacetab2 = (verbose) /space/tab/
spacetab3 = (-verbose) /space/tab/
''',
expect_regex=br' \t(?x:[ ]\t)(?-x: \t)')
self.given(b'''
/spacetab/spacetab2/
spacetab = (verbose) 1 of: +space +tab
spacetab2 = (-verbose) 1 of: +space +tab
''',
expect_regex=br'(?x:[ \t])(?-x:[ \t])')
self.given(b'''
/linechar/lf/
lf = (-word) 1 of: +linechar
''',
expect_regex=br'[\r\n\x0B\x0C](?-w:\n)')
self.given(b'''
(unicode)
linechar
''',
expect_regex=br'(?V1wu)[\r\n\x0B\x0C\x85\u2028\u2029]')
self.given(b'''
(word unicode)
linechar
''',
expect_regex=br'(?V1wu)[\r\n\x0B\x0C\x85\u2028\u2029]')
self.given(b'''
(unicode word)
linechar
''',
expect_regex=br'(?V1uw)[\r\n\x0B\x0C\x85\u2028\u2029]')
self.given(b'''
(unicode)
(word) linechar
''',
expect_regex=br'(?V1wu)(?w:[\r\n\x0B\x0C\x85\u2028\u2029])')
self.given(b'''
(unicode -word)
/linechar/line2/
line2 = (word) linechar
''',
expect_regex=br'(?V1u-w)\n(?w:[\r\n\x0B\x0C\x85\u2028\u2029])')
self.given(b'''
(-word unicode)
(word) /linechar/line2/
line2 = (-word) linechar
''',
expect_regex=br'(?V1u-w)(?w:[\r\n\x0B\x0C\x85\u2028\u2029](?-w:\n))')
self.given(b'''
(unicode)
(-word) /linechar/line2/
line2 = (word) linechar
''',
expect_regex=br'(?V1wu)(?-w:\n(?w:[\r\n\x0B\x0C\x85\u2028\u2029]))')
def test_orblock_output(self):
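# ORBLOCK: <<| is backtrackable alternation, @| is atomic (?>...|...); a "[capture] ? expr" entry compiles to a conditional (?(capture)...)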
self.given(b'''
@|
|'cat'
|'dog'
''',
expect_regex=b'(?>cat|dog)')
self.given(b'''
<<|
|'tea'
|'coffee'
''',
expect_regex=b'tea|coffee')
self.given(b'''
backtrackable_choice
backtrackable_choice = <<|
|'catastrophy'
|'catass trophy'
|'cat'
''',
expect_regex=b'catastrophy|catass trophy|cat')
self.given(b'''
no_backtrack
no_backtrack = @|
|'red pill'
|'blue pill'
''',
expect_regex=b'(?>red pill|blue pill)')
self.given(b'''
/digit/space/ampm/
ampm = (ignorecase) <<|
|'AM'
|'PM'
''',
expect_regex=b'\d (?i:AM|PM)')
self.given(b'''
2 of <<|
|'fast'
|'good'
|'cheap'
''',
expect_regex=b'(?:fast|good|cheap){2}')
self.given(b'''
<<|
|2 of 'ma'
|2 of 'pa'
|2 of 'bolo'
''',
expect_regex=b'(?:ma){2}|(?:pa){2}|(?:bolo){2}')
self.given(b'''
/blood_type/rhesus/
blood_type =<<|
|'AB'
|1 of: A B O
rhesus = <<|
|'+'
|'-'
| -- allow empty/unknown rhesus
''',
expect_regex=b'(?:AB|[ABO])(?:\+|-|)')
self.given(b'''
subexpr_types
subexpr_types = <<|
|'string literal'
|(ignorecase) 1 of: a i u e o
|2..3 <<- of X
|/alpha/digit/
|alpha
X = 'X'
''',
expect_regex=b'string literal|(?i:[aiueo])|X{2,3}|[a-zA-Z]\d|[a-zA-Z]')
self.given(b'''
<<| -- comment here is ok
|'android'
|'ios'
''',
expect_regex=b'android|ios')
self.given(b'''
/nature/side/
nature = @|
|'lawful ' -- mind the trailing space
|'chaotic '
|'neutral '
--allow comment on ORBLOCK "breaker" line
side = @|
|'good'
|'evil'
|'neutral'
''',
expect_regex=b'(?>lawful |chaotic |neutral )(?>good|evil|neutral)')
self.given(b'''
any_color_as_long_as_it_is_
any_color_as_long_as_it_is_ = <<|
|'black'
-- single-entry "choice" is OK
''',
expect_regex=b'black')
self.given(b'''-- nested ORBLOCKs
<<|
|coffee
|tea
|'cendol'
coffee = <<|
|'espresso'
|'cappuccino'
|'kopi tubruk'
tea = <<|
|'earl grey'
|'ocha'
|'teh tarik'
''',
expect_regex=b'espresso|cappuccino|kopi tubruk|earl grey|ocha|teh tarik|cendol')
self.given(b'''
/currency/amount/
currency = <<|
|dollar
|euro
[dollar]: $
[euro]: :EURO_SIGN
amount = <<|
|[dollar] ? /digits/dot/digits/
|[euro] ? /digits/comma/digits/
|
digits = @1.. of digit
dot: .
comma: ,
''',
expect_regex=b'(?:(?P<dollar>\$)|(?P<euro>\N{EURO SIGN}))(?(dollar)\d++\.\d++|(?(euro)\d++,\d++))')
self.given(b'''
/currency/amount/
currency = <<|
|dollar
|euro
[dollar]: $
[euro]: :EURO_SIGN
amount = <<|
|[dollar] ? /digits/dot/digits/
|[euro] ? /digits/comma/digits/
|FAIL!
digits = @1.. of digit
dot: .
comma: ,
''',
expect_regex=b'(?:(?P<dollar>\$)|(?P<euro>\N{EURO SIGN}))(?(dollar)\d++\.\d++|(?(euro)\d++,\d++|(?!)))')
self.given(b'''
/alpha/or/
or = @|
|alpha
|digit
''',
expect_regex=b'[a-zA-Z](?>[a-zA-Z]|\d)')
self.given(b'''
/or/alpha/
or = @|
|alpha
|digit
''',
expect_regex=b'(?>[a-zA-Z]|\d)[a-zA-Z]')
self.given(b'''
/alpha/or/
or = <<|
|alpha
|digit
''',
expect_regex=b'[a-zA-Z](?:[a-zA-Z]|\d)')
self.given(b'''
/or/alpha/
or = <<|
|alpha
|digit
''',
expect_regex=b'(?:[a-zA-Z]|\d)[a-zA-Z]')
self.given(b'''
/az?/or/
[az]: a..z
or = <<|
|[az] ? alpha
|digit
''',
expect_regex=b'(?P<az>[a-z])?(?(az)[a-zA-Z]|\d)')
self.given(b'''
/or/az/
[az]: a..z
or = <<|
|[az] ? alpha
|digit
''',
expect_regex=b'(?(az)[a-zA-Z]|\d)(?P<az>[a-z])')
self.given(b'''
/az?/or/
[az]: a..z
or = <<|
|[az] ? alpha
|
''',
expect_regex=b'(?P<az>[a-z])?(?(az)[a-zA-Z])')
self.given(b'''
/az?/or/
[az]: a..z
or = <<|
|[az] ?
|digit
''',
expect_regex=b'(?P<az>[a-z])?(?(az)|\d)')
self.given(b'''
/az?/or/
[az]: a..z
or = <<|
|[az] ?
|
''',
expect_regex=b'(?P<az>[a-z])?(?(az))')
def test_lookaround_output(self):
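# LOOKAROUND block: <expr| is lookbehind, |expr> is lookahead, a ! negates, and |expr| is the actual match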
self.given(b'''
<@>
<yamaha|
<!/yang/lain/|
|semakin|
|/di/depan/>
|!ketinggalan>
yamaha = 'yamaha'
yang = 'yang'
lain = 'lain'
semakin = 'semakin'
di = 'di'
depan = 'depan'
ketinggalan = 'ketinggalan'
''',
expect_regex=b'(?<=yamaha)(?<!yanglain)semakin(?=didepan)(?!ketinggalan)')
self.given(b'''
actually_no_lookaround
actually_no_lookaround = <@>
|alpha| -- possible, though pointless
''',
expect_regex=b'[a-zA-Z]')
self.given(b'''
<@>
|anyam>
|anyaman|
<nyaman|
anyam = 'anyam'
anyaman = 'anyaman'
nyaman = 'nyaman'
''',
expect_regex=b'(?=anyam)anyaman(?<=nyaman)')
self.given(b'''
<@>
|mixed_case>
|has_number>
|has_symbol>
|./len_8_to_255/.|
len_8_to_255 = @8..255 of any
mixed_case = <@>
|has_upper>
|has_lower>
has_upper = /non_uppers?/upper/
non_uppers = @1.. of: not: upper
has_lower = /non_lowers?/lower/
non_lowers = @1.. of: not: lower
has_number = /non_digits?/digit/
non_digits = @1.. of: not: digit
has_symbol = /non_symbols?/symbol/
symbol: not: /Alphanumeric
non_symbols = @1.. of: not: symbol
''',
expect_regex=br'(?=(?=[^A-Z]*+[A-Z])(?=[^a-z]*+[a-z]))(?=\D*+\d)(?=\p{Alphanumeric}*+\P{Alphanumeric})\A(?s:.){8,255}+\Z')
self.given(b'''
word_ends_with_s
word_ends_with_s = <@>
|wordchars|
<s|
wordchars = @1.. of wordchar
s = 's'
''',
expect_regex=b'\w++(?<=s)')
self.given(b'''
un_x_able
un_x_able = <@>
|un>
|unxable|
<able|
un = 'un'
unxable = @1.. of wordchar
able = 'able'
''',
expect_regex=b'(?=un)\w++(?<=able)')
self.given(b'''
escape
escape = <@>
<backslash|
|any|
''',
expect_regex=br'(?<=\\)(?s:.)')
self.given(b'''
money_digits
money_digits = <<|
|dollar_digits
|digits_buck
dollar_digits = <@>
<dollar|
|digits|
dollar = '$'
*) [digits] = @1.. of digit
digits_buck = <@>
|digits|
|buck>
buck = ' buck'
''',
expect_regex=b'(?<=\$)(?P<digits>\d++)|(?P<digits>\d++)(?= buck)')
self.given(b'''
/begin/msg/end/
begin = .'BEGIN'.
end = .'END'.
msg = @1.. of <<|
|@1.. of: not: E
|E_not_END
E_not_END = <@>
|E|
|!ND>
E = .'E'
ND = 'ND'.
''',
expect_regex=br'\bBEGIN\b(?:[^E]++|\bE(?!ND\b))++\bEND\b')
def test_non_op_output(self):
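# the non- prefix negates a character class; negating twice restores the original class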
self.given(b'''
/non-alpha/non-digit/non-whitechar/non-wordchar/non-WOB/
''',
expect_regex=br'[^a-zA-Z]\D\S\W\B')
self.given(b'''
non_digits
non_digits = @1.. of non-digit
''',
expect_regex=br'\D++')
self.given(b'''
non-alphabetic
alphabetic: /Alphabetic
''',
expect_regex=b'\P{Alphabetic}')
self.given(b'''
non-minus
minus: -
''',
expect_regex=br'[^\-]')
self.given(b'''
non-caret
caret: ^
''',
expect_regex=br'[^\^]')
self.given(b'''
/non-non_alpha/non-non_digit/
non_alpha = non-alpha
non_digit = non-digit
''',
expect_regex=br'[a-zA-Z]\d')
self.given(b'''
non-consonant
consonant: alpha not vowel
vowel: a i u e o A I U E O
''',
expect_regex=br'[^a-zA-Z--aiueoAIUEO]')
self.given(b'''
/lower/non-lower/non_lower/non-non_lower/nonnon_lower/non-nonnon_lower/
non_lower: not: lower
nonnon_lower: not: non_lower
''',
expect_regex=br'[a-z][^a-z][^a-z][a-z][a-z][^a-z]')
self.given(b'''
/digit/non-digit/non_digit/non-non_digit/nonnon_digit/non-nonnon_digit/
non_digit: not: digit
nonnon_digit: not: non_digit
''',
expect_regex=br'\d\D\D\d\d\D')
self.given(b'''
/ex/non-ex/non_ex/non-non_ex/nonnon_ex/non-nonnon_ex/
ex: X
non_ex: not: ex
nonnon_ex: not: non_ex
''',
expect_regex=br'X[^X][^X]XX[^X]')
self.given(b'''
/minus/non-minus/non_minus/non-non_minus/nonnon_minus/non-nonnon_minus/
minus: -
non_minus: not: minus
nonnon_minus: not: non_minus
''',
expect_regex=br'-[^\-][^\-]--[^\-]')
self.given(b'''
/plus/non-plus/non_plus/non-non_plus/nonnon_plus/non-nonnon_plus/
plus: +
non_plus: not: plus
nonnon_plus: not: non_plus
''',
expect_regex=br'\+[^+][^+]\+\+[^+]')
self.given(b'''
/caret/non-caret/non_caret/non-non_caret/nonnon_caret/non-nonnon_caret/
caret: ^
non_caret: not: caret
nonnon_caret: not: non_caret
''',
expect_regex=br'\^[^\^][^\^]\^\^[^\^]')
def test_recursion_output(self):
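# a variable referring to itself compiles to a recursive named group, (?P<name>... (?&name) ...)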
self.given(b'''
singularity
singularity = singularity
''',
expect_regex=b'(?P<singularity>(?&singularity))')
self.given(b'''
./palindrome/.
palindrome = <<|
|/letter/palindrome/=letter/
|/letter/=letter/
|letter
[letter]: alpha
''',
expect_regex=b'\A(?P<palindrome>(?P<letter>[a-zA-Z])(?&palindrome)(?P=letter)|(?P<letter>[a-zA-Z])(?P=letter)|(?P<letter>[a-zA-Z]))\Z')
self.given(b'''
csv
csv = /value?/more_values?/
value = @1.. of non-separator
*) separator: ,
more_values = /separator/value?/more_values?/
''',
expect_regex=b'[^,]*+(?P<more_values>,[^,]*+(?&more_values)?)?')
self.given(b'''
text_in_parens
text_in_parens = /open/text/close/
open: (
close: )
text = @1.. of <<|
|non-open
|non-close
|text_in_parens
''',
expect_regex=b'(?P<text_in_parens>\((?:[^(]|[^)]|(?&text_in_parens))++\))')
def test_anchor_sugar_output(self):
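# anchor sugar: a leading/trailing dot pins to \A/\Z, a doubled slash pins to BOL/EOL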
self.given(b'''
//wordchar/
''',
expect_regex=br'(?m:^)\w')
self.given(b'''
/wordchar//
''',
expect_regex=br'\w(?m:$)')
self.given(b'''
//wordchar//
''',
expect_regex=br'(?m:^)\w(?m:$)')
self.given(b'''
./wordchar/
''',
expect_regex=br'\A\w')
self.given(b'''
/wordchar/.
''',
expect_regex=br'\w\Z')
self.given(b'''
./wordchar/.
''',
expect_regex=br'\A\w\Z')
self.given(b'''
./wordchar//
''',
expect_regex=br'\A\w(?m:$)')
self.given(b'''
//wordchar/.
''',
expect_regex=br'(?m:^)\w\Z')
self.given(b'''
@//wordchar/
''',
expect_regex=br'(?>(?m:^)\w)')
self.given(b'''
@./wordchar/
''',
expect_regex=br'(?>\A\w)')
self.given(b'''
@//wordchar//
''',
expect_regex=br'(?>(?m:^)\w(?m:$))')
self.given(b'''
@./wordchar//
''',
expect_regex=br'(?>\A\w(?m:$))')
self.given(b'''
@//wordchar/.
''',
expect_regex=br'(?>(?m:^)\w\Z)')
self.given(b'''
@./wordchar/.
''',
expect_regex=br'(?>\A\w\Z)')
self.given(b'''
./wordchar/digit//
''',
expect_regex=br'\A\w\d(?m:$)')
self.given(b'''
@//wordchar/digit/.
''',
expect_regex=br'(?>(?m:^)\w\d\Z)')
self.given(b'''
//wordchar/digit//
''',
expect_regex=br'(?m:^)\w\d(?m:$)')
self.given(b'''
@./wordchar/digit/.
''',
expect_regex=br'(?>\A\w\d\Z)')
self.given(b'''
<<|
|./wordchar//
|//wordchar/.
''',
expect_regex=br'\A\w(?m:$)|(?m:^)\w\Z')
self.given(b'''
(multiline)
<@>
|./wordchar//>
|./wordchar/.>
|//wordchar//>
|./wordchar//>
|./wordchar/.|
|//wordchar//|
|//wordchar/.|
|./wordchar//|
<./wordchar/.|
<//wordchar/.|
<@//wordchar//|
<@./wordchar//|
''',
expect_regex=br'(?V1wm)(?=\A\w$)(?=\A\w\Z)(?=^\w$)(?=\A\w$)\A\w\Z^\w$^\w\Z\A\w$(?<=\A\w\Z)(?<=^\w\Z)(?<=(?>^\w$))(?<=(?>\A\w$))')
def test_fail_output(self):
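# FAIL! compiles to (?!), which can never match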
self.given(b'''
FAIL!
''',
expect_regex=b'(?!)')
self.given(b'''
/FAIL!/
''',
expect_regex=b'(?!)')
self.given(b'''
/alpha/FAIL!/
''',
expect_regex=b'[a-zA-Z](?!)')
self.given(b'''
2 of FAIL!
''',
expect_regex=b'(?!){2}')
self.given(b'''
<<|
|FAIL!
''',
expect_regex=b'(?!)')
self.given(b'''
<<|
|FAIL!
|alpha
''',
expect_regex=b'(?!)|[a-zA-Z]')
self.given(b'''
<<|
|alpha
|FAIL!
''',
expect_regex=b'[a-zA-Z]|(?!)')
self.given(b'''
/opener?/contents?/closer/
opener = <<|
|paren
|curly
|square
|chevron
[paren]: (
[curly]: {
[square]: [
[chevron]: <
contents = 1.. <<- of any
closer = <<|
|[paren] ? 1 of: )
|[curly] ? 1 of: }
|[square] ? 1 of: ]
|[chevron] ? 1 of: >
|FAIL!
''',
expect_regex=b'(?:(?P<paren>\()|(?P<curly>\{)|(?P<square>\[)|(?P<chevron><))?(?s:.)*(?(paren)\)|(?(curly)\}|(?(square)\]|(?(chevron)>|(?!)))))')
self.given(b'''
<@>
|FAIL!>
''',
expect_regex=b'(?=(?!))')
self.given(b'''
<@>
|!FAIL!>
''',
expect_regex=b'(?!(?!))')
def test_numrange_shortcut_output(self):
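# 'm'..'n' compiles to an alternation covering the integers m..n, guarded by (?!\d) so a longer number cannot half-match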
self.given(u'''
'0'..'1'
''',
expect_regex=br'[01](?!\d)')
self.given(u'''
'0'..'2'
''',
expect_regex=br'[0-2](?!\d)')
self.given(u'''
'0'..'9'
''',
expect_regex=br'\d(?!\d)')
self.given(u'''
'1'..'2'
''',
expect_regex=br'[12](?!\d)')
self.given(u'''
'1'..'9'
''',
expect_regex=br'[1-9](?!\d)')
self.given(u'''
'2'..'9'
''',
expect_regex=br'[2-9](?!\d)')
self.given(u'''
'8'..'9'
''',
expect_regex=br'[89](?!\d)')
self.given(u'''
'0'..'10'
''',
expect_regex=br'(?>10|\d)(?!\d)')
self.given(u'''
'1'..'10'
''',
expect_regex=br'(?>10|[1-9])(?!\d)')
self.given(u'''
'2'..'10'
''',
expect_regex=br'(?>10|[2-9])(?!\d)')
self.given(u'''
'8'..'10'
''',
expect_regex=br'(?>10|[89])(?!\d)')
self.given(u'''
'9'..'10'
''',
expect_regex=br'(?>10|9)(?!\d)')
self.given(u'''
'0'..'11'
''',
expect_regex=br'(?>1[01]|\d)(?!\d)')
self.given(u'''
'1'..'11'
''',
expect_regex=br'(?>1[01]|[1-9])(?!\d)')
self.given(u'''
'9'..'11'
''',
expect_regex=br'(?>1[01]|9)(?!\d)')
self.given(u'''
'10'..'11'
''',
expect_regex=br'1[01](?!\d)')
self.given(u'''
'0'..'12'
''',
expect_regex=br'(?>1[0-2]|\d)(?!\d)')
self.given(u'''
'1'..'12'
''',
expect_regex=br'(?>1[0-2]|[1-9])(?!\d)')
self.given(u'''
'2'..'12'
''',
expect_regex=br'(?>1[0-2]|[2-9])(?!\d)')
self.given(u'''
'0'..'19'
''',
expect_regex=br'(?>1\d|\d)(?!\d)')
self.given(u'''
'1'..'19'
''',
expect_regex=br'(?>1\d|[1-9])(?!\d)')
self.given(u'''
'9'..'19'
''',
expect_regex=br'(?>1\d|9)(?!\d)')
self.given(u'''
'10'..'19'
''',
expect_regex=br'1\d(?!\d)')
self.given(u'''
'0'..'20'
''',
expect_regex=br'(?>20|1\d|\d)(?!\d)')
self.given(u'''
'2'..'20'
''',
expect_regex=br'(?>20|1\d|[2-9])(?!\d)')
self.given(u'''
'10'..'20'
''',
expect_regex=br'(?>20|1\d)(?!\d)')
self.given(u'''
'19'..'20'
''',
expect_regex=br'(?>20|19)(?!\d)')
self.given(u'''
'0'..'29'
''',
expect_regex=br'(?>[12]\d|\d)(?!\d)')
self.given(u'''
'2'..'29'
''',
expect_regex=br'(?>[12]\d|[2-9])(?!\d)')
self.given(u'''
'9'..'29'
''',
expect_regex=br'(?>[12]\d|9)(?!\d)')
self.given(u'''
'2'..'42'
''',
expect_regex=br'(?>4[0-2]|[1-3]\d|[2-9])(?!\d)')
self.given(u'''
'12'..'42'
''',
expect_regex=br'(?>4[0-2]|[23]\d|1[2-9])(?!\d)')
self.given(u'''
'24'..'42'
''',
expect_regex=br'(?>4[0-2]|3\d|2[4-9])(?!\d)')
self.given(u'''
'38'..'42'
''',
expect_regex=br'(?>4[0-2]|3[89])(?!\d)')
self.given(u'''
'0'..'90'
''',
expect_regex=br'(?>90|[1-8]\d|\d)(?!\d)')
self.given(u'''
'9'..'90'
''',
expect_regex=br'(?>90|[1-8]\d|9)(?!\d)')
self.given(u'''
'10'..'90'
''',
expect_regex=br'(?>90|[1-8]\d)(?!\d)')
self.given(u'''
'0'..'98'
''',
expect_regex=br'(?>9[0-8]|[1-8]\d|\d)(?!\d)')
self.given(u'''
'1'..'98'
''',
expect_regex=br'(?>9[0-8]|[1-8]\d|[1-9])(?!\d)')
self.given(u'''
'0'..'99'
''',
expect_regex=br'(?>[1-9]\d?+|0)(?!\d)')
self.given(u'''
'1'..'99'
''',
expect_regex=br'[1-9]\d?+(?!\d)')
self.given(u'''
'2'..'99'
''',
expect_regex=br'(?>[1-9]\d|[2-9])(?!\d)')
self.given(u'''
'9'..'99'
''',
expect_regex=br'(?>[1-9]\d|9)(?!\d)')
self.given(u'''
'10'..'99'
''',
expect_regex=br'[1-9]\d(?!\d)')
self.given(u'''
'11'..'99'
''',
expect_regex=br'(?>[2-9]\d|1[1-9])(?!\d)')
self.given(u'''
'19'..'99'
''',
expect_regex=br'(?>[2-9]\d|19)(?!\d)')
self.given(u'''
'20'..'99'
''',
expect_regex=br'[2-9]\d(?!\d)')
self.given(u'''
'29'..'99'
''',
expect_regex=br'(?>[3-9]\d|29)(?!\d)')
self.given(u'''
'46'..'99'
''',
expect_regex=br'(?>[5-9]\d|4[6-9])(?!\d)')
self.given(u'''
'80'..'99'
''',
expect_regex=br'[89]\d(?!\d)')
self.given(u'''
'89'..'99'
''',
expect_regex=br'(?>9\d|89)(?!\d)')
self.given(u'''
'90'..'99'
''',
expect_regex=br'9\d(?!\d)')
self.given(u'''
'0'..'100'
''',
expect_regex=br'(?>100|[1-9]\d?+|0)(?!\d)')
self.given(u'''
'10'..'100'
''',
expect_regex=br'(?>100|[1-9]\d)(?!\d)')
self.given(u'''
'90'..'100'
''',
expect_regex=br'(?>100|9\d)(?!\d)')
self.given(u'''
'99'..'100'
''',
expect_regex=br'(?>100|99)(?!\d)')
self.given(u'''
'1'..'101'
''',
expect_regex=br'(?>10[01]|[1-9]\d?+)(?!\d)')
self.given(u'''
'99'..'101'
''',
expect_regex=br'(?>10[01]|99)(?!\d)')
self.given(u'''
'100'..'101'
''',
expect_regex=br'10[01](?!\d)')
self.given(u'''
'1'..'109'
''',
expect_regex=br'(?>10\d|[1-9]\d?+)(?!\d)')
self.given(u'''
'9'..'109'
''',
expect_regex=br'(?>10\d|[1-9]\d|9)(?!\d)')
self.given(u'''
'10'..'109'
''',
expect_regex=br'(?>10\d|[1-9]\d)(?!\d)')
self.given(u'''
'99'..'109'
''',
expect_regex=br'(?>10\d|99)(?!\d)')
self.given(u'''
'100'..'109'
''',
expect_regex=br'10\d(?!\d)')
self.given(u'''
'1'..'110'
''',
expect_regex=br'(?>1(?>10|0\d)|[1-9]\d?+)(?!\d)')
self.given(u'''
'10'..'110'
''',
expect_regex=br'(?>1(?>10|0\d)|[1-9]\d)(?!\d)')
self.given(u'''
'11'..'110'
''',
expect_regex=br'(?>1(?>10|0\d)|[2-9]\d|1[1-9])(?!\d)')
self.given(u'''
'100'..'110'
''',
expect_regex=br'1(?>10|0\d)(?!\d)')
self.given(u'''
'1'..'111'
''',
expect_regex=br'(?>1(?>1[01]|0\d)|[1-9]\d?+)(?!\d)')
self.given(u'''
'11'..'111'
''',
expect_regex=br'(?>1(?>1[01]|0\d)|[2-9]\d|1[1-9])(?!\d)')
self.given(u'''
'1'..'119'
''',
expect_regex=br'(?>1[01]\d|[1-9]\d?+)(?!\d)')
self.given(u'''
'11'..'119'
''',
expect_regex=br'(?>1[01]\d|[2-9]\d|1[1-9])(?!\d)')
self.given(u'''
'19'..'119'
''',
expect_regex=br'(?>1[01]\d|[2-9]\d|19)(?!\d)')
self.given(u'''
'1'..'123'
''',
expect_regex=br'(?>1(?>2[0-3]|[01]\d)|[1-9]\d?+)(?!\d)')
self.given(u'''
'12'..'123'
''',
expect_regex=br'(?>1(?>2[0-3]|[01]\d)|[2-9]\d|1[2-9])(?!\d)')
self.given(u'''
'23'..'123'
''',
expect_regex=br'(?>1(?>2[0-3]|[01]\d)|[3-9]\d|2[3-9])(?!\d)')
self.given(u'''
'1'..'199'
''',
expect_regex=br'(?>1\d{2}|[1-9]\d?+)(?!\d)')
self.given(u'''
'10'..'199'
''',
expect_regex=br'(?>1\d{2}|[1-9]\d)(?!\d)')
self.given(u'''
'19'..'199'
''',
expect_regex=br'(?>1\d{2}|[2-9]\d|19)(?!\d)')
self.given(u'''
'99'..'199'
''',
expect_regex=br'(?>1\d{2}|99)(?!\d)')
self.given(u'''
'100'..'199'
''',
expect_regex=br'1\d{2}(?!\d)')
self.given(u'''
'109'..'199'
''',
expect_regex=br'1(?>[1-9]\d|09)(?!\d)')
self.given(u'''
'110'..'199'
''',
expect_regex=br'1[1-9]\d(?!\d)')
self.given(u'''
'190'..'199'
''',
expect_regex=br'19\d(?!\d)')
self.given(u'''
'1'..'200'
''',
expect_regex=br'(?>200|1\d{2}|[1-9]\d?+)(?!\d)')
self.given(u'''
'20'..'200'
''',
expect_regex=br'(?>200|1\d{2}|[2-9]\d)(?!\d)')
self.given(u'''
'100'..'200'
''',
expect_regex=br'(?>200|1\d{2})(?!\d)')
self.given(u'''
'199'..'200'
''',
expect_regex=br'(?>200|199)(?!\d)')
self.given(u'''
'1'..'201'
''',
expect_regex=br'(?>20[01]|1\d{2}|[1-9]\d?+)(?!\d)')
self.given(u'''
'199'..'201'
''',
expect_regex=br'(?>20[01]|199)(?!\d)')
self.given(u'''
'200'..'201'
''',
expect_regex=br'20[01](?!\d)')
self.given(u'''
'1'..'299'
''',
expect_regex=br'(?>[12]\d{2}|[1-9]\d?+)(?!\d)')
self.given(u'''
'100'..'299'
''',
expect_regex=br'[12]\d{2}(?!\d)')
self.given(u'''
'199'..'299'
''',
expect_regex=br'(?>2\d{2}|199)(?!\d)')
self.given(u'''
'200'..'299'
''',
expect_regex=br'2\d{2}(?!\d)')
self.given(u'''
'290'..'299'
''',
expect_regex=br'29\d(?!\d)')
self.given(u'''
'1'..'300'
''',
expect_regex=br'(?>300|[12]\d{2}|[1-9]\d?+)(?!\d)')
self.given(u'''
'1'..'399'
''',
expect_regex=br'(?>[1-3]\d{2}|[1-9]\d?+)(?!\d)')
self.given(u'''
'123'..'456'
''',
expect_regex=br'(?>4(?>5[0-6]|[0-4]\d)|[23]\d{2}|1(?>[3-9]\d|2[3-9]))(?!\d)')
self.given(u'''
'1'..'901'
''',
expect_regex=br'(?>90[01]|[1-8]\d{2}|[1-9]\d?+)(?!\d)')
self.given(u'''
'0'..'999'
''',
expect_regex=br'(?>[1-9]\d{,2}+|0)(?!\d)')
self.given(u'''
'1'..'999'
''',
expect_regex=br'[1-9]\d{,2}+(?!\d)')
self.given(u'''
'9'..'999'
''',
expect_regex=br'(?>[1-9]\d{1,2}+|9)(?!\d)')
self.given(u'''
'10'..'999'
''',
expect_regex=br'[1-9]\d{1,2}+(?!\d)')
self.given(u'''
'99'..'999'
''',
expect_regex=br'(?>[1-9]\d{2}|99)(?!\d)')
self.given(u'''
'100'..'999'
''',
expect_regex=br'[1-9]\d{2}(?!\d)')
self.given(u'''
'900'..'999'
''',
expect_regex=br'9\d{2}(?!\d)')
self.given(u'''
'0'..'1000'
''',
expect_regex=br'(?>1000|[1-9]\d{,2}+|0)(?!\d)')
self.given(u'''
'1'..'1000'
''',
expect_regex=br'(?>1000|[1-9]\d{,2}+)(?!\d)')
self.given(u'''
'10'..'1000'
''',
expect_regex=br'(?>1000|[1-9]\d{1,2}+)(?!\d)')
self.given(u'''
'100'..'1000'
''',
expect_regex=br'(?>1000|[1-9]\d{2})(?!\d)')
self.given(u'''
'999'..'1000'
''',
expect_regex=br'(?>1000|999)(?!\d)')
self.given(u'''
'1'..'1001'
''',
expect_regex=br'(?>100[01]|[1-9]\d{,2}+)(?!\d)')
self.given(u'''
'11'..'1001'
''',
expect_regex=br'(?>100[01]|[1-9]\d{2}|[2-9]\d|1[1-9])(?!\d)')
self.given(u'''
'101'..'1001'
''',
expect_regex=br'(?>100[01]|[2-9]\d{2}|1(?>[1-9]\d|0[1-9]))(?!\d)')
self.given(u'''
'998'..'1001'
''',
expect_regex=br'(?>100[01]|99[89])(?!\d)')
self.given(u'''
'1000'..'1001'
''',
expect_regex=br'100[01](?!\d)')
self.given(u'''
'1000'..'1099'
''',
expect_regex=br'10\d{2}(?!\d)')
self.given(u'''
'1'..'1999'
''',
expect_regex=br'(?>1\d{3}|[1-9]\d{,2}+)(?!\d)')
self.given(u'''
'10'..'1999'
''',
expect_regex=br'(?>1\d{3}|[1-9]\d{1,2}+)(?!\d)')
self.given(u'''
'100'..'1999'
''',
expect_regex=br'(?>1\d{3}|[1-9]\d{2})(?!\d)')
self.given(u'''
'999'..'1999'
''',
expect_regex=br'(?>1\d{3}|999)(?!\d)')
self.given(u'''
'1000'..'1999'
''',
expect_regex=br'1\d{3}(?!\d)')
self.given(u'''
'1000'..'2000'
''',
expect_regex=br'(?>2000|1\d{3})(?!\d)')
self.given(u'''
'1999'..'2000'
''',
expect_regex=br'(?>2000|1999)(?!\d)')
self.given(u'''
'1998'..'2001'
''',
expect_regex=br'(?>200[01]|199[89])(?!\d)')
self.given(u'''
'999'..'2999'
''',
expect_regex=br'(?>[12]\d{3}|999)(?!\d)')
self.given(u'''
'1999'..'2999'
''',
expect_regex=br'(?>2\d{3}|1999)(?!\d)')
self.given(u'''
'0'..'9999'
''',
expect_regex=br'(?>[1-9]\d{,3}+|0)(?!\d)')
self.given(u'''
'1'..'9999'
''',
expect_regex=br'[1-9]\d{,3}+(?!\d)')
self.given(u'''
'10'..'9999'
''',
expect_regex=br'[1-9]\d{1,3}+(?!\d)')
self.given(u'''
'99'..'9999'
''',
expect_regex=br'(?>[1-9]\d{2,3}+|99)(?!\d)')
self.given(u'''
'100'..'9999'
''',
expect_regex=br'[1-9]\d{2,3}+(?!\d)')
self.given(u'''
'999'..'9999'
''',
expect_regex=br'(?>[1-9]\d{3}|999)(?!\d)')
self.given(u'''
'1000'..'9999'
''',
expect_regex=br'[1-9]\d{3}(?!\d)')
self.given(u'''
'1999'..'9999'
''',
expect_regex=br'(?>[2-9]\d{3}|1999)(?!\d)')
self.given(u'''
'2999'..'9999'
''',
expect_regex=br'(?>[3-9]\d{3}|2999)(?!\d)')
self.given(u'''
'7999'..'9999'
''',
expect_regex=br'(?>[89]\d{3}|7999)(?!\d)')
self.given(u'''
'8999'..'9999'
''',
expect_regex=br'(?>9\d{3}|8999)(?!\d)')
self.given(u'''
'9000'..'9999'
''',
expect_regex=br'9\d{3}(?!\d)')
self.given(u'''
'0'..'10000'
''',
expect_regex=br'(?>10000|[1-9]\d{,3}+|0)(?!\d)')
self.given(u'''
'1'..'10000'
''',
expect_regex=br'(?>10000|[1-9]\d{,3}+)(?!\d)')
self.given(u'''
'10'..'10000'
''',
expect_regex=br'(?>10000|[1-9]\d{1,3}+)(?!\d)')
self.given(u'''
'100'..'10000'
''',
expect_regex=br'(?>10000|[1-9]\d{2,3}+)(?!\d)')
self.given(u'''
'1000'..'10000'
''',
expect_regex=br'(?>10000|[1-9]\d{3})(?!\d)')
self.given(u'''
'9000'..'10000'
''',
expect_regex=br'(?>10000|9\d{3})(?!\d)')
self.given(u'''
'9999'..'10000'
''',
expect_regex=br'(?>10000|9999)(?!\d)')
self.given(u'''
'9999'..'10001'
''',
expect_regex=br'(?>1000[01]|9999)(?!\d)')
self.given(u'''
'10000'..'10001'
''',
expect_regex=br'1000[01](?!\d)')
def test_00numrange_shortcut_output(self):
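# zero-padded endpoints ('00'..'20' etc.) keep the padding: the leading zeros are mandatory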
self.given(u'''
'00'..'01'
''',
expect_regex=br'0[01](?!\d)')
self.given(u'''
'000'..'001'
''',
expect_regex=br'00[01](?!\d)')
self.given(u'''
'00'..'02'
''',
expect_regex=br'0[0-2](?!\d)')
self.given(u'''
'00'..'09'
''',
expect_regex=br'0\d(?!\d)')
self.given(u'''
'01'..'02'
''',
expect_regex=br'0[12](?!\d)')
self.given(u'''
'01'..'09'
''',
expect_regex=br'0[1-9](?!\d)')
self.given(u'''
'02'..'09'
''',
expect_regex=br'0[2-9](?!\d)')
self.given(u'''
'08'..'09'
''',
expect_regex=br'0[89](?!\d)')
self.given(u'''
'00'..'10'
''',
expect_regex=br'(?>10|0\d)(?!\d)')
self.given(u'''
'01'..'10'
''',
expect_regex=br'(?>10|0[1-9])(?!\d)')
self.given(u'''
'001'..'010'
''',
expect_regex=br'0(?>10|0[1-9])(?!\d)')
self.given(u'''
'02'..'10'
''',
expect_regex=br'(?>10|0[2-9])(?!\d)')
self.given(u'''
'08'..'10'
''',
expect_regex=br'(?>10|0[89])(?!\d)')
self.given(u'''
'09'..'10'
''',
expect_regex=br'(?>10|09)(?!\d)')
self.given(u'''
'00'..'11'
''',
expect_regex=br'(?>1[01]|0\d)(?!\d)')
self.given(u'''
'01'..'11'
''',
expect_regex=br'(?>1[01]|0[1-9])(?!\d)')
self.given(u'''
'09'..'11'
''',
expect_regex=br'(?>1[01]|09)(?!\d)')
self.given(u'''
'010'..'011'
''',
expect_regex=br'01[01](?!\d)')
self.given(u'''
'01'..'12'
''',
expect_regex=br'(?>1[0-2]|0[1-9])(?!\d)')
self.given(u'''
'000'..'012'
''',
expect_regex=br'0(?>1[0-2]|0\d)(?!\d)')
self.given(u'''
'02'..'12'
''',
expect_regex=br'(?>1[0-2]|0[2-9])(?!\d)')
self.given(u'''
'00'..'19'
''',
expect_regex=br'[01]\d(?!\d)')
self.given(u'''
'01'..'19'
''',
expect_regex=br'(?>1\d|0[1-9])(?!\d)')
self.given(u'''
'09'..'19'
''',
expect_regex=br'(?>1\d|09)(?!\d)')
self.given(u'''
'010'..'019'
''',
expect_regex=br'01\d(?!\d)')
self.given(u'''
'00'..'20'
''',
expect_regex=br'(?>20|[01]\d)(?!\d)')
self.given(u'''
'02'..'20'
''',
expect_regex=br'(?>20|1\d|0[2-9])(?!\d)')
self.given(u'''
'010'..'020'
''',
expect_regex=br'0(?>20|1\d)(?!\d)')
self.given(u'''
'019'..'020'
''',
expect_regex=br'0(?>20|19)(?!\d)')
def test_oonumrange_shortcut_output(self):
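# an 'o' in an endpoint marks an optional leading zero; 'oo' allows up to two, and so on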
self.given(u'''
'o0'..'o1'
''',
expect_regex=br'0?[01](?!\d)')
self.given(u'''
'oo0'..'oo1'
''',
expect_regex=br'0{,2}[01](?!\d)')
self.given(u'''
'o0'..'o2'
''',
expect_regex=br'0?[0-2](?!\d)')
self.given(u'''
'o0'..'o9'
''',
expect_regex=br'0?\d(?!\d)')
self.given(u'''
'o1'..'o2'
''',
expect_regex=br'0?+[12](?!\d)')
self.given(u'''
'o1'..'o9'
''',
expect_regex=br'0?+[1-9](?!\d)')
self.given(u'''
'o2'..'o9'
''',
expect_regex=br'0?+[2-9](?!\d)')
self.given(u'''
'o8'..'o9'
''',
expect_regex=br'0?+[89](?!\d)')
self.given(u'''
'o0'..'10'
''',
expect_regex=br'(?>10|0?\d)(?!\d)')
self.given(u'''
'o1'..'10'
''',
expect_regex=br'(?>10|0?+[1-9])(?!\d)')
self.given(u'''
'oo1'..'o10'
''',
expect_regex=br'(?>0?+10|0{,2}+[1-9])(?!\d)')
self.given(u'''
'o2'..'10'
''',
expect_regex=br'(?>10|0?+[2-9])(?!\d)')
self.given(u'''
'o8'..'10'
''',
expect_regex=br'(?>10|0?+[89])(?!\d)')
self.given(u'''
'o9'..'10'
''',
expect_regex=br'(?>10|0?+9)(?!\d)')
self.given(u'''
'o0'..'11'
''',
expect_regex=br'(?>1[01]|0?\d)(?!\d)')
self.given(u'''
'o1'..'11'
''',
expect_regex=br'(?>1[01]|0?+[1-9])(?!\d)')
self.given(u'''
'o9'..'11'
''',
expect_regex=br'(?>1[01]|0?+9)(?!\d)')
self.given(u'''
'o10'..'o11'
''',
expect_regex=br'0?+1[01](?!\d)')
self.given(u'''
'o1'..'12'
''',
expect_regex=br'(?>1[0-2]|0?+[1-9])(?!\d)')
self.given(u'''
'oo0'..'o12'
''',
expect_regex=br'(?>0?1[0-2]|0{,2}\d)(?!\d)')
self.given(u'''
'o2'..'12'
''',
expect_regex=br'(?>1[0-2]|0?+[2-9])(?!\d)')
self.given(u'''
'o0'..'19'
''',
expect_regex=br'(?>1\d|0?\d)(?!\d)')
self.given(u'''
'o1'..'19'
''',
expect_regex=br'(?>1\d|0?+[1-9])(?!\d)')
self.given(u'''
'o9'..'19'
''',
expect_regex=br'(?>1\d|0?+9)(?!\d)')
self.given(u'''
'o10'..'o19'
''',
expect_regex=br'0?+1\d(?!\d)')
self.given(u'''
'o0'..'20'
''',
expect_regex=br'(?>20|1\d|0?\d)(?!\d)')
self.given(u'''
'o2'..'20'
''',
expect_regex=br'(?>20|1\d|0?+[2-9])(?!\d)')
self.given(u'''
'o10'..'o20'
''',
expect_regex=br'0?+(?>20|1\d)(?!\d)')
self.given(u'''
'o19'..'o20'
''',
expect_regex=br'0?+(?>20|19)(?!\d)')
def test_norange_shortcut_output(self):
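# degenerate ranges ('n'..'n') reduce to the literal number, still guarded by (?!\d)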
self.given(u'''
'0'..'0'
''',
expect_regex=br'0(?!\d)')
self.given(u'''
'00'..'00'
''',
expect_regex=br'00(?!\d)')
self.given(u'''
'000'..'000'
''',
expect_regex=br'000(?!\d)')
self.given(u'''
'1'..'1'
''',
expect_regex=br'1(?!\d)')
self.given(u'''
'2'..'2'
''',
expect_regex=br'2(?!\d)')
self.given(u'''
'9'..'9'
''',
expect_regex=br'9(?!\d)')
self.given(u'''
'10'..'10'
''',
expect_regex=br'10(?!\d)')
self.given(u'''
'99'..'99'
''',
expect_regex=br'99(?!\d)')
self.given(u'''
'100'..'100'
''',
expect_regex=br'100(?!\d)')
self.given(u'''
'123'..'123'
''',
expect_regex=br'123(?!\d)')
self.given(u'''
'12345'..'12345'
''',
expect_regex=br'12345(?!\d)')
self.given(u'''
'9999999'..'9999999'
''',
expect_regex=br'9999999(?!\d)')
self.given(u'''
'o0'..'o0'
''',
expect_regex=br'0?0(?!\d)')
self.given(u'''
'oo0'..'oo0'
''',
expect_regex=br'0{,2}0(?!\d)')
self.given(u'''
'ooo0'..'ooo0'
''',
expect_regex=br'0{,3}0(?!\d)')
self.given(u'''
'o1'..'o1'
''',
expect_regex=br'0?+1(?!\d)')
self.given(u'''
'oo9'..'oo9'
''',
expect_regex=br'0{,2}+9(?!\d)')
self.given(u'''
'o10'..'o10'
''',
expect_regex=br'0?+10(?!\d)')
self.given(u'''
'oo100'..'oo100'
''',
expect_regex=br'0{,2}+100(?!\d)')
self.given(u'''
'o9999'..'o9999'
''',
expect_regex=br'0?+9999(?!\d)')
self.given(u'''
'ooo9999'..'ooo9999'
''',
expect_regex=br'0{,3}+9999(?!\d)')
def test_infinite_numrange_output(self):
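# an open-ended range 'n'.. matches any integer >= n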
self.given(u'''
'0'..
''',
expect_regex=br'(?!0\d)\d++')
self.given(u'''
'1'..
''',
expect_regex=br'[1-9]\d*+')
self.given(u'''
'2'..
''',
expect_regex=br'(?>[1-9]\d++|[2-9])')
self.given(u'''
'10'..
''',
expect_regex=br'[1-9]\d++')
self.given(u'''
'20'..
''',
expect_regex=br'(?>[1-9]\d{2,}+|[2-9]\d)')
self.given(u'''
'46'..
''',
expect_regex=br'(?>[1-9]\d{2,}+|[5-9]\d|4[6-9])')
self.given(u'''
'100'..
''',
expect_regex=br'[1-9]\d{2,}+')
self.given(u'''
'200'..
''',
expect_regex=br'(?>[1-9]\d{3,}+|[2-9]\d{2})')
self.given(u'''
'234'..
''',
expect_regex=br'(?>[1-9]\d{3,}+|[3-9]\d{2}|2(?>[4-9]\d|3[4-9]))')
self.given(u'''
? of '1'..
''',
expect_regex=br'(?:[1-9]\d*+)?')
self.given(u'''
? of '2'..
''',
expect_regex=br'(?>[1-9]\d++|[2-9])?')
def test_infinite_onumrange_output(self):
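# open-ended with optional zeros: 'oN'.. allows any amount of leading-zero padding before the number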
self.given(u'''
'o0'..
''',
expect_regex=br'\d++')
self.given(u'''
'o1'..
''',
expect_regex=br'0*+[1-9]\d*+')
self.given(u'''
'o2'..
''',
expect_regex=br'0*+(?>[1-9]\d++|[2-9])')
self.given(u'''
'o10'..
''',
expect_regex=br'0*+[1-9]\d++')
self.given(u'''
'o20'..
''',
expect_regex=br'0*+(?>[1-9]\d{2,}+|[2-9]\d)')
self.given(u'''
'o46'..
''',
expect_regex=br'0*+(?>[1-9]\d{2,}+|[5-9]\d|4[6-9])')
self.given(u'''
'o100'..
''',
expect_regex=br'0*+[1-9]\d{2,}+')
self.given(u'''
'o200'..
''',
expect_regex=br'0*+(?>[1-9]\d{3,}+|[2-9]\d{2})')
self.given(u'''
'o234'..
''',
expect_regex=br'0*+(?>[1-9]\d{3,}+|[3-9]\d{2}|2(?>[4-9]\d|3[4-9]))')
self.given(u'''
amount?
amount = 'o0'..
''',
expect_regex=br'\d*+')
self.given(u'''
? of 'o0'..
''',
expect_regex=br'\d*+')
self.given(u'''
? of 'o1'..
''',
expect_regex=br'(?:0*+[1-9]\d*+)?')
def test_numrange_optimization_output(self):
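# the (?!\d) guard is dropped where the number is immediately followed by something that cannot be a digit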
self.given(u'''
/xnum/x/numx/num/
x: x
num = '1'..'10'
numx = /num/x/
xnum = /x/num/
''',
expect_regex=br'x(?>10|[1-9])(?!\d)x(?>10|[1-9])x(?>10|[1-9])(?!\d)')
def test_wordchar_redef_output(self):
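# a global "*) wordchar" redefinition changes what counts as a word character, so boundaries compile to equivalent lookarounds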
self.given(u'''
/wordchar/pads/WOB/pads/str/pads/non-WOB/
*) wordchar: digit
str = .' '_
pads = ' '
''',
expect_regex=br'\d (?>(?<=\d)(?!\d)|(?<!\d)(?=\d)) (?>(?<=\d)(?!\d)|(?<!\d)(?=\d)) (?>(?<=\d)(?=\d)|(?<!\d)(?!\d)) (?>(?<=\d)(?=\d)|(?<!\d)(?!\d))')
self.given(u'''
WOB
*) wordchar = lower
''',
expect_regex=br'(?>(?<=[a-z])(?![a-z])|(?<![a-z])(?=[a-z]))')
self.given(u'''
.'cat'_
*) wordchar: upper lower -
''',
expect_regex=br'(?>(?<=[A-Za-z\-])(?![A-Za-z\-])|(?<![A-Za-z\-])(?=[A-Za-z\-]))cat(?>(?<=[A-Za-z\-])(?=[A-Za-z\-])|(?<![A-Za-z\-])(?![A-Za-z\-]))')
def test_lazydotstar_output(self):
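# __ is a lazy match-anything (.+?); when the following element is known, it is optimized into a possessive negated run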
self.given(u'''
__
''',
expect_regex=br'.+?')
self.given(u'''
__?
''',
expect_regex=br'.*?')
self.given(u'''
/__?/
''',
expect_regex=br'.*?')
self.given(u'''
/__/__?/
''',
expect_regex=br'.+?.*?')
self.given(u'''
/__?/__/
''',
expect_regex=br'.*?.+?')
self.given(u'''
/__/alpha/
''',
expect_regex=br'[^a-zA-Z]++[a-zA-Z]')
self.given(u'''
/alpha/__/
''',
expect_regex=br'[a-zA-Z].+?')
self.given(u'''
/lazydotstar/alpha/
lazydotstar = __
''',
expect_regex=br'.+?[a-zA-Z]')
self.given(u'''
(unicode)
/__/alpha/
''',
expect_regex=br'(?V1wu)\P{Alphabetic}++\p{Alphabetic}')
self.given(u'''
(unicode)
/__/non-alpha/
''',
expect_regex=br'(?V1wu)\p{Alphabetic}++\P{Alphabetic}')
self.given(u'''
/__/non-alpha/
''',
expect_regex=br'[a-zA-Z]++[^a-zA-Z]')
self.given(u'''
/__/digit/
''',
expect_regex=br'\D++\d')
self.given(u'''
/__/non-digit/
''',
expect_regex=br'\d++\D')
self.given(u'''
(unicode)
/__/non-digit/
''',
expect_regex=br'(?V1wu)\d++\D')
self.given(u'''
/__/limiter/
limiter = ''
''',
expect_regex=br'.+?')
self.given(u'''
/__/limiter/
limiter = .''
''',
expect_regex=br'.+?\b')
self.given(u'''
/__/limiter/
limiter = _''
''',
expect_regex=br'.+?\B')
self.given(u'''
/__/limiter/
limiter = 'END'
''',
expect_regex=br'(?:[^E]++|E(?!ND))++END')
self.given(u'''
/__/limiter/
limiter = .'END'
''',
expect_regex=br'(?:[^E]++|(?<!\b)E|E(?!ND))++\bEND')
self.given(u'''
/__/limiter/
limiter = _'END'
''',
expect_regex=br'(?:[^E]++|(?<!\B)E|E(?!ND))++\BEND')
self.given(u'''
/__/limiter/
limiter = '.'
''',
expect_regex=br'[^.]++\.')
self.given(u'''
/__/limiter/
limiter = .'.'
''',
expect_regex=br'(?:[^.]++|(?<!\b)\.)++\b\.')
self.given(u'''
/__/limiter/
limiter = _'.'
''',
expect_regex=br'(?:[^.]++|(?<!\B)\.)++\B\.')
class TestMatches(unittest.TestCase):
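# Runtime checks: compile the oprex source, then verify that each expect_full_match
# text matches entirely, each no_match text does not match at all, and each
# partial_match key matches exactly its associated value.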
def given(self, oprex_source, fn=regex.match, expect_full_match=[], no_match=[], partial_match={}):
oprex_source = to_string(oprex_source)
regex_source = oprex(oprex_source)
for text in expect_full_match:
text = to_string(text)
match = fn(regex_source, text)
partial = match and match.group(0) != text
if not match or partial:
raise AssertionError(u'%s\nis expected to fully match: %s\n%s\nThe regex is: %s' % (
oprex_source or u'(empty string)',
text or u'(empty string)',
u'It does match, but only partially. The match is: ' + (match.group(0) or u'(empty string)') if partial else u"But it doesn't match at all.",
regex_source or u'(empty string)',
))
for text in no_match:
text = to_string(text)
match = fn(regex_source, text)
if match:
raise AssertionError(u'%s\nis expected NOT to match: %s\n%s\nThe regex is: %s' % (
oprex_source or u'(empty string)',
text or u'(empty string)',
u'But it does match. The match is: ' + (match.group(0) or u'(empty string)'),
regex_source or u'(empty string)',
))
for text, partmatch in partial_match.items():
text = to_string(text)
match = fn(regex_source, text)
partial = match and match.group(0) != text and match.group(0) == partmatch
if not match or not partial:
if match and match.group(0) == text:
raise AssertionError(u"%s\nis expected to partially match: %s\nBut instead it's a full-match.\nThe regex is: %s" % (
oprex_source or u'(empty string)',
text or u'(empty string)',
regex_source or u'(empty string)',
))
else:
raise AssertionError(u'%s\nis expected to partially match: %s\n%s\nThe regex is: %s' % (
oprex_source or u'(empty string)',
text or u'(empty string)',
u"But it doesn't match at all." if not match else u'The expected partial match is: %s\nBut the resulting match is: %s' % (
partmatch or u'(empty string)',
match.group(0) or u'(empty string)'
),
regex_source or u'(empty string)',
))
def test_unicode(self):
self.given(u'''
'Déjà vu'
''',
expect_full_match=[u'Déjà vu'])
def test_simple_optional(self):
self.given(b'''
/a?/ether/
ether = /e/ther/
e = 'e'
ther = 'ther'
a = 'a'
''',
expect_full_match=[b'ether', b'aether'])
self.given(b'''
/air/man?/ship?/
air = 'air'
man = 'man'
ship = 'ship'
''',
expect_full_match=[b'air', b'airman', b'airship', b'airmanship'],
no_match=[b'manship'],
partial_match={'airma' : 'air'})
self.given(b'''
/ultra?/nagog/
ultra = "ultra"
nagog = 'nagog'
''',
expect_full_match=[b'ultranagog', b'nagog'],
no_match=[b'ultra'])
self.given(b'''
/cat?/fish?/
cat = 'cat'
fish = 'fish'
''',
expect_full_match=[b'catfish', b'cat', b'fish', b''],
partial_match={
'catfishing' : 'catfish',
'cafis' : '',
})
self.given(b'''
/very?/very?/nice/
very = 'very '
nice = "nice"
''',
expect_full_match=[b'nice', b'very nice', b'very very nice'])
def test_escaping(self):
self.given(b'''
orly
orly = "O RLY?"
''',
expect_full_match=[b'O RLY?'],
no_match=[b'O RLY', b'O RL'])
self.given(b'''
stars
stars = '***'
''',
expect_full_match=[b'***'],
partial_match={'****' : '***'},
no_match=[b'', b'*'])
self.given(b'''
add
add: +plus
plus: +
''',
expect_full_match=[b'+'],
no_match=[b''])
def test_character_class(self):
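# class members may be plain characters, \u/\x escapes, :UNICODE_CHARACTER_NAMEs, or /Properties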
self.given(b'''
papersize
papersize = /series/size/
series: A B C
size: 0 1 2 3 4 5 6 7 8
''',
expect_full_match=[b'A3', b'A4'],
no_match=[b'Legal', b'Folio'])
self.given(b'''
x
x: \u0041 \u0042 \u0043 \u0044 \u0045
''',
expect_full_match=[b'A', b'B', b'C', b'D', b'E'],
no_match=[b'a', b'b', b'c', b'd', b'e'])
self.given(b'''
x
x: :SKULL_AND_CROSSBONES :BIOHAZARD_SIGN :CANCER
''',
expect_full_match=[u'☠', u'☣', u'♋'])
self.given(b'''
x
x: /Letter /Number
''',
expect_full_match=[b'A', b'1'],
no_match=[b'?', b'$'])
self.given(b'''
x
x: /Symbol
''',
expect_full_match=[b'$'],
no_match=[b'A', b'1'])
# uppercase or greek
self.given(b'''
x
x: /Lu /Greek
''',
expect_full_match=[b'A', u'γ', u'Γ'],
no_match=[b'a'])
# not uppercase or not greek == not(uppercase and greek)
self.given(b'''
x
x: /Uppercase_Letter /IsGreek
''',
expect_full_match=[b'A', u'γ', u'Γ'],
no_match=[b'a'])
self.given(b'''
/open/bs/caret/close/
open: [
bs: \\
caret: ^
close: ]
''',
expect_full_match=[b'[\\^]'])
def test_character_range(self):
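# x..y is an inclusive character range; either endpoint may use any escape form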
self.given(br'''
AZ
AZ: \x41..Z
''',
expect_full_match=[b'A', b'B', b'C', b'X', b'Y', b'Z'],
no_match=[b'a', b'x', b'z', b'1', b'0'])
self.given(br'''
AB
AB: A..\u0042
''',
expect_full_match=[b'A', b'B'],
no_match=[b'a', b'b'])
self.given(br'''
AB
AB: \u0041..\U00000042
''',
expect_full_match=[b'A', b'B'],
no_match=[b'a', b'b'])
self.given(br'''
AB
AB: \U00000041..\x42
''',
expect_full_match=[b'A', b'B'],
no_match=[b'a', b'b'])
self.given(br'''
AB
AB: \x41..\102
''',
expect_full_match=[b'A', b'B'],
no_match=[b'a', b'b'])
self.given(br'''
AB
AB: \101..\N{LATIN CAPITAL LETTER B}
''',
expect_full_match=[b'A', b'B'],
no_match=[b'a', b'b'])
self.given(b'''
arrows
arrows: :LEFTWARDS_ARROW..:LEFT_RIGHT_OPEN-HEADED_ARROW
''',
expect_full_match=[u'←', u'→', u'⇶', u'⇿'],
no_match=[b'>'])
self.given(b'''
need_escape
need_escape: [ ^ a - z ]
''',
expect_full_match=[b'[', b'^', b'a', b'-', b'z', b']'],
no_match=[b'b', b'A'])
self.given(b'''
1 of: a -..\\
''',
expect_full_match=[b'a', b'-', b'\\', b'.'],
no_match=[b','])
self.given(b'''
1 of: [..]
''',
expect_full_match=[b'[', b']'],
no_match=[b'-'])
def test_charclass_include(self):
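# +name includes the members of another character class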
self.given(u'''
/op/op/op/op/
op: +add +sub +mul +div
add: +
sub: -
mul: * ×
div: / ÷ :
''',
expect_full_match=[b'++++', b'+-*/', u'×÷*/'],
no_match=[u'×××x', b'+++'])
self.given(u'''
binary
binary: +bindigit
bindigit: 0..1
''',
expect_full_match=[b'0', b'1'],
no_match=[b'2', b'I', b''],
partial_match={
'0-1' : '0',
'0..1' : '0',
})
self.given(u'''
/hex/hex/hex/
hex: +hexdigit
hexdigit: 0..9 a..f A..F
''',
expect_full_match=[b'AcE', b'12e', b'fff'],
no_match=[b'WOW', b'hi!', b'...'])
self.given(u'''
aUmlaut
aUmlaut: +a_with_diaeresis
a_with_diaeresis: ä
''',
expect_full_match=[u'ä'])
self.given(u'''
aUmlaut
aUmlaut: +small_a_umlaut
small_a_umlaut: +a_with_diaeresis
a_with_diaeresis: ä
''',
expect_full_match=[u'ä'])
self.given(b'''
aUmlaut
aUmlaut: +a_with_diaeresis
a_with_diaeresis: \u00E4
''',
expect_full_match=[u'ä'])
self.given(u'''
aUmlaut
aUmlaut: +a_with_diaeresis
a_with_diaeresis: :LATIN_SMALL_LETTER_A_WITH_DIAERESIS
''',
expect_full_match=[u'ä'])
self.given(u'''
alphabetic
alphabetic: +is_alphabetic
is_alphabetic: /Alphabetic
''',
expect_full_match=[u'ä', b'a'])
self.given(u'''
lowaz
lowaz: +lowerAZ
lowerAZ: a..z
''',
expect_full_match=[b'a', b'b', b'c', b'z'],
no_match=[b'A', b'Z', u'ä'])
self.given(u'''
/hex/
hex: +hexx +hexy +hexz
hexx = hexdigit
hexdigit: 0..9
hexy = hexz = hexalpha
hexalpha: a..f A..F
''',
expect_full_match=[b'a', b'b', b'f', b'A', b'B', b'F', b'0', b'1', b'9'],
no_match=[b'z', b'Z', u'ä', b'$'])
self.given(u'''
/plus/minus/pmz/
plus: +
minus: -
pmz: +plus +minus z
''',
expect_full_match=[b'+-+', b'+--', b'+-z'],
no_match=[b'+-a'])
self.given(u'''
vowhex
vowhex: +vowel +hex
vowel: a i u e o A I U E O
hex: 0..9 a..f A..F
''',
expect_full_match=[b'a', b'b', b'f', b'A', b'B', b'F', b'0', b'1', b'9', b'i', b'u', b'E', b'O'],
no_match=[b'$', b'z', b'k'])
self.given(u'''
1 of: . , ;
''',
expect_full_match=[b'.', b',', b';'])
def test_charclass_operation(self):
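# set operations inside a class: "not" subtracts members, "not:" negates, "and" intersects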
self.given(u'''
xb123
xb123: X x +hex not c..f C..D :LATIN_CAPITAL_LETTER_F +vowel and 1 2 3 /Alphabetic
hex: 0..9 a..f A..F
vowel: a i u e o A I U E O
''',
expect_full_match=[b'x', b'X', b'b', b'B', b'1', b'2', b'3'],
no_match=[b'a', b'A', b'c', b'C', b'd', b'D', b'e', b'E', b'f', b'F', b'y', b'Y', b'z', b'Z', b'0', b'4', b'9', b'-'])
self.given(u'''
allButU
allButU: not: U
''',
expect_full_match=[b'^', b'u'],
no_match=[b'U', b''])
self.given(u'''
nonalpha
nonalpha: not: /Alphabetic
''',
expect_full_match=[b'-', b'^', b'1'],
no_match=[b'A', b'a', u'Ä', u'ä'])
self.given(u'''
not: /Alphabetic
''',
expect_full_match=[b'-', b'^', b'1'],
no_match=[b'A', b'a', u'Ä', u'ä'])
self.given(u'''
otherz
otherz: +nonz
nonz: not: z
''',
expect_full_match=[b'-', b'^', b'1', b'A', b'a', u'Ä', u'ä', b'Z'],
no_match=[b'z'])
self.given(u'''
a_or_consonant
a_or_consonant: A a +consonant
consonant: a..z A..Z not a i u e o A I U E O
''',
expect_full_match=[b'A', b'a', b'Z', b'z'],
no_match=[u'Ä', u'ä', b'-', b'^', b'1'])
self.given(u'''
maestro
maestro: m +ae s t r o
ae: +vowel and +hex not +upper
hex: +digit a..f A..F
vowel: a i u e o A I U E O
''',
expect_full_match=[b'm', b'a', b'e', b's', b't', b'r', b'o'],
no_match=[u'Ä', u'ä', b'-', b'^', b'1', b'N', b'E', b'W', b'b'])
def test_charclass_escape(self):
self.given(b'''
1 of: \u0061 \U00000061 \x61 \61
''',
expect_full_match=[u'\u0061', u'\U00000061', b'\x61', b'\61', b'a'],
no_match=[b'\u0061', br'\u0061', br'\x61', br'\61'])
self.given(br'''
1 of: \u0061 \U00000061 \x61 \61
''',
expect_full_match=[u'\u0061', u'\U00000061', b'\x61', b'\61', b'a'],
no_match=[b'\u0061', br'\u0061', br'\x61', br'\61'])
self.given(u'''
1 of: \u0061 \U00000061 \x61 \61
''',
expect_full_match=[u'\u0061', u'\U00000061', b'\x61', b'\61', b'a'],
no_match=[b'\u0061', br'\u0061', br'\x61', br'\61'])
self.given(r'''
1 of: \u0061 \U00000061 \x61 \61
''',
expect_full_match=[u'\u0061', u'\U00000061', b'\x61', b'\61', b'a'],
no_match=[b'\u0061', br'\u0061', br'\x61', br'\61'])
self.given(br'''
allowed_escape
allowed_escape: \n \r \t \a \b \v \f
''',
expect_full_match=[b'\n', b'\r', b'\t', b'\a', b'\b', b'\v', b'\f'],
no_match=[br'\n', br'\r', br'\t', br'\a', br'\b', br'\v', br'\f'])
self.given(br'''
unicode_charname
unicode_charname: \N{AMPERSAND} \N{BIOHAZARD SIGN}
''',
expect_full_match=[b'&', u'\N{AMPERSAND}', u'\N{BIOHAZARD SIGN}', u'☣'],
no_match=[b'\N{AMPERSAND}', br'\N{AMPERSAND}', br'\N{BIOHAZARD SIGN}'])
self.given(br'''
unicode_charname
unicode_charname: \N{AMPERSAND} :AMPERSAND \N{BIOHAZARD SIGN} :BIOHAZARD_SIGN
''',
expect_full_match=[b'&', u'\N{AMPERSAND}', u'\N{BIOHAZARD SIGN}', u'☣'],
no_match=[b'\N{AMPERSAND}', br'\N{AMPERSAND}', br'\N{BIOHAZARD SIGN}'])
def test_atomic_grouping(self):
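# once the atomic group exits, it cannot be re-entered, so the anchored version rejects inputs with trailing digits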
self.given(b'''
@/digits/even/
digits = 0.. <<- of digit
even: 0 2 4 6 8
''',
expect_full_match=[b'0', b'8', b'10', b'42', b'178'],
no_match=[b'', b'1', b'9', b'1337'],
partial_match={'24681' : '2468', b'134579' : '134'})
self.given(b'''
//digits/even//
digits = 0.. <<- of digit
even: 0 2 4 6 8
''',
expect_full_match=[b'0', b'8', b'10', b'42', b'178'],
no_match=[b'', b'1', b'9', b'1337', b'24681', b'134579'])
def test_builtin(self):
self.given(b'''
lowhex
lowhex: +alpha +alnum +lower not G..Z g..z +upper +digit padchar backslash tab whitechar
''',
expect_full_match=[b'a', b'b', b'c', b'd', b'e', b'f'],
no_match=[b'A', b'B', b'F', b'x', b'X', b'z', b'Z', b'0', b'1', b'9'])
def test_fail(self):
self.given(b'''
FAIL!
''',
expect_full_match=[],
no_match=[b'nothing should match', b'', b'not even empty string'])
self.given(b'''
/FAIL!/
''',
expect_full_match=[],
no_match=[b'nothing should match', b'', b'not even empty string'])
self.given(b'''
/alpha/FAIL!/
''',
expect_full_match=[],
no_match=[b'', b'A', b'nothing should match'])
self.given(b'''
2 of FAIL!
''',
expect_full_match=[],
no_match=[b'nothing should match', b'', b'not even empty string'])
self.given(b'''
<<|
|FAIL!
''',
expect_full_match=[],
no_match=[b'nothing should match', b'', b'not even empty string'])
self.given(b'''
<<|
|FAIL!
|alpha
''',
expect_full_match=[b'A', b'a'],
no_match=[b'1', b''])
self.given(b'''
<<|
|alpha
|FAIL!
''',
expect_full_match=[b'A', b'a'],
no_match=[b'1', b''])
self.given(b'''
/opener?/contents?/closer/
opener = <<|
|paren
|curly
|square
|chevron
[paren]: (
[curly]: {
[square]: [
[chevron]: <
contents = 1.. <<- of any
closer = <<|
|[paren] ? 1 of: )
|[curly] ? 1 of: }
|[square] ? 1 of: ]
|[chevron] ? 1 of: >
|FAIL!
''',
expect_full_match=[b'()', b'{}', b'[]', b'<>', b'(riiiight)', b'{()}', b'[! @]', b'<<>>', b'<<<<<<<<<<<>'],
no_match=[b'{]', b'<)', b'(', b'[unclosed'],
partial_match={
'(super) duper' : '(super)',
})
self.given(b'''
<@>
|FAIL!>
''',
expect_full_match=[],
no_match=[b'nothing should match', b'', b'not even empty string'])
self.given(b'''
<@>
|!FAIL!>
''',
expect_full_match=[b''],
no_match=[],
partial_match={
'everything matches' : '',
'though the match is empty string' : '',
})
def test_quantifier(self):
self.given(b'''
1 of alpha
''',
expect_full_match=[b'a', b'B'],
no_match=[b'', b'1', b'$'],
partial_match={'Cd' : 'C'})
self.given(b'''
2 of alpha
''',
expect_full_match=[b'ab', b'Cd'],
no_match=[b'', b'A1', b'$1'],
partial_match={'ABC' : 'AB'})
self.given(b'''
@3.. of alpha
''',
expect_full_match=[b'abc', b'DeFGhij'],
no_match=[b'', b'Aa4'])
self.given(b'''
@4..5 of alpha
''',
expect_full_match=[b'abcd', b'abcDE'],
no_match=[b'', b'ab123'],
partial_match={'ABCDEF' : 'ABCDE'})
self.given(b'''
/prefix/alnum/
prefix = 1..2 <<- of alpha
''',
expect_full_match=[b'A1', b'Ab', b'Ab3', b'abc'],
no_match=[b'', b'99', b'9z'],
partial_match={'YAMM' : 'YAM', b'B52' : 'B5'})
self.given(b'''
/prefix/alnum/
prefix = 3.. <<- of alpha
''',
expect_full_match=[b'AAA1', b'YAMM', b'Fubar', b'YAMM2', b'Fubar4'],
no_match=[b'', b'A1', b'Ab', b'abc', b'Ab3', b'ABC', b'99', b'9z', b'B52'],
partial_match={'Test123' : 'Test1'})
self.given(b'''
/open/content/close/
open: (
close: )
content = 1.. <<- of: +alnum +open +close
''',
expect_full_match=[b'(sic)'],
no_match=[b'f(x)'],
partial_match={'(pow)wow(kaching)zzz' : '(pow)wow(kaching)'})
self.given(b'''
/open/content/close/
open: (
close: )
content = 1 <<+.. of: +alnum +open +close
''',
expect_full_match=[b'(sic)'],
no_match=[b'f(x)'],
partial_match={'(pow)wow(kaching)zzz' : '(pow)'})
self.given(b'''
/open/content/close/
open: (
close: )
content = @1.. of: +alnum +open +close
''',
no_match=[b'(pow)wow(kaching)zzz'])
self.given(b'''
css_color
css_color = 6 of hex
hex: 0..9 a..f
''',
expect_full_match=[b'ff0000', b'cccccc', b'a762b3'],
no_match=[b'', b'black', b'white'])
self.given(b'''
DWORD_speak
DWORD_speak = @1.. of 4 of hex
hex: 0..9 A..F
''',
expect_full_match=[b'CAFEBABE', b'DEADBEEF', b'0FF1CE95'],
no_match=[b'', b'YIKE'])
self.given(b'''
? of alphas
alphas = @1.. of alpha
''',
expect_full_match=[b'', b'Cowabunga'],
partial_match={
'1one' : '',
'hell0' : 'hell',
})
self.given(b'''
? of alphas
alphas = 1.. <<- of alpha
''',
expect_full_match=[b'', b'Cowabunga'],
partial_match={
'1one' : '',
'hell0' : 'hell',
})
self.given(b'''
? of alphas
alphas = 1 <<+.. of alpha
''',
expect_full_match=[b''],
partial_match={
'1one' : '',
'hell0' : '',
'Cowabunga' : '',
})
self.given(b'''
/opt_alphas/.
opt_alphas = ? of alphas
alphas = 1 <<+.. of alpha
''',
expect_full_match=[b'', b'Cowabunga'],
no_match=[b'hell0', b'1one'])
def test_reference(self):
self.given(u'''
/bang/=bang/
[bang]: b a n g !
''',
expect_full_match=[b'bb', b'aa', b'nn', b'gg', b'!!'],
no_match=[b'', b'a', b'ba'])
self.given(u'''
/=bang/bang/
[bang]: b a n g !
''',
no_match=[b'', b'a', b'ba', b'bb', b'aa', b'nn', b'gg', b'!!'])
self.given(u'''
/bang/=bang?/
[bang]: b a n g !
''',
expect_full_match=[b'a', b'bb', b'aa', b'nn', b'gg', b'!!'],
no_match=[b'', b'clang!'],
partial_match={
'ba' : 'b',
'bang!' : 'b',
})
self.given(u'''
/=bang?/bang/
[bang]: b a n g !
''',
expect_full_match=[b'b', b'a', b'n', b'g', b'!'],
no_match=[b'', b'clang!'],
partial_match={'ba' : 'b', b'bb' : 'b', b'aa' : 'a', b'nn' : 'n', b'gg' : 'g', b'!!' : '!', b'bang!' : 'b'})
def test_wordchar_boundary(self):
self.given(b'''
/wordchar/WOB/non-WOB/
''',
expect_full_match=[],
no_match=[b'a', b'b', b'Z', b'_'])
self.given(b'''
/EOW/wordchar/BOW/
''',
expect_full_match=[],
no_match=[b'a', b'b', b'Z', b'_'])
self.given(b'''
realworld_wordchar
realworld_wordchar: +wordchar - not +digit _
''',
expect_full_match=[b'a', b'Z', b'-'],
no_match=[b'0', b'9', b'_'])
self.given(b'''
cat
cat = 'cat'
''',
fn=regex.search,
expect_full_match=[b'cat'],
no_match=[b'garfield'],
partial_match={'tomcat' : 'cat', b'catasthrope' : 'cat', b'complicated' : 'cat', b'cat videos' : 'cat', b'grumpy cat' : 'cat'})
self.given(b'''
cat
cat = 'cat'_
''',
fn=regex.search,
expect_full_match=[],
no_match=[b'cat', b'cat videos', b'grumpy cat', b'tomcat', b'garfield'],
partial_match={'catasthrope' : 'cat', b'complicated' : 'cat'})
self.given(b'''
cat
cat = 'cat'.
''',
fn=regex.search,
expect_full_match=[b'cat'],
no_match=[b'catasthrope', b'complicated', b'garfield'],
partial_match={'tomcat' : 'cat', b'cat videos' : 'cat', b'grumpy cat' : 'cat'})
self.given(b'''
cat
cat = _'cat'
''',
fn=regex.search,
expect_full_match=[],
no_match=[b'cat', b'catasthrope', b'cat videos', b'grumpy cat', b'garfield'],
partial_match={'tomcat' : 'cat', b'complicated' : 'cat'})
self.given(b'''
cat
cat = .'cat'
''',
fn=regex.search,
expect_full_match=[b'cat'],
no_match=[b'tomcat', b'complicated', b'garfield'],
partial_match={'catasthrope' : 'cat', b'cat videos' : 'cat', b'grumpy cat' : 'cat'})
self.given(b'''
cat
cat = _'cat'_
''',
fn=regex.search,
expect_full_match=[],
no_match=[b'cat', b'catasthrope', b'cat videos', b'tomcat', b'grumpy cat', b'garfield'],
partial_match={'complicated' : 'cat'})
self.given(b'''
cat
cat = .'cat'.
''',
fn=regex.search,
expect_full_match=[b'cat'],
no_match=[b'tomcat', b'catasthrope', b'complicated', b'garfield'],
partial_match={'cat videos' : 'cat', b'grumpy cat' : 'cat'})
self.given(b'''
/WOB/cat/WOB/
cat = 'cat'
''',
fn=regex.search,
expect_full_match=[b'cat'],
no_match=[b'tomcat', b'catasthrope', b'complicated', b'garfield'],
partial_match={'cat videos' : 'cat', b'grumpy cat' : 'cat'})
self.given(b'''
/WOB/cat/WOB/
cat = .'cat'.
''',
fn=regex.search,
expect_full_match=[b'cat'],
no_match=[b'tomcat', b'catasthrope', b'complicated', b'garfield'],
partial_match={'cat videos' : 'cat', b'grumpy cat' : 'cat'})
self.given(b'''
/BOW/cat/EOW/
cat = 'cat'
''',
fn=regex.search,
expect_full_match=[b'cat'],
no_match=[b'tomcat', b'catasthrope', b'complicated', b'garfield'],
partial_match={'cat videos' : 'cat', b'grumpy cat' : 'cat'})
self.given(b'''
/BOW/cat/EOW/
cat = .'cat'.
''',
fn=regex.search,
expect_full_match=[b'cat'],
no_match=[b'tomcat', b'catasthrope', b'complicated', b'garfield'],
partial_match={'cat videos' : 'cat', b'grumpy cat' : 'cat'})
self.given(b'''
/anti/non-WOB/
anti = 'anti'
''',
fn=regex.search,
expect_full_match=[],
no_match=[b'anti', b'anti-virus', b'rianti cartwright'],
partial_match={'antivirus' : 'anti', b'meantime' : 'anti'})
self.given(b'''
anti_
anti_ = 'anti'_
''',
fn=regex.search,
expect_full_match=[],
no_match=[b'anti', b'anti-virus', b'rianti cartwright'],
partial_match={'antivirus' : 'anti', b'meantime' : 'anti'})
self.given(b'''
somethingtastic
somethingtastic = _'tastic'
''',
fn=regex.search,
expect_full_match=[],
no_match=[b'tastic', b'tasticism'],
partial_match={'fantastic' : 'tastic', b'fantastico' : 'tastic'})
self.given(b'''
expletification
expletification = _'bloody'_
''',
fn=regex.search,
expect_full_match=[],
no_match=[b'bloody', b'bloody hell'],
partial_match={'absobloodylutely' : 'bloody'})
def test_flags(self):
self.given(b'''
lower
''',
expect_full_match=[b'a'],
no_match=[b'A', u'ä', u'Ä'])
self.given(b'''
(unicode)
lower
''',
expect_full_match=[b'a', u'ä'],
no_match=[b'A', u'Ä'])
self.given(b'''
(ignorecase)
1 of: a i u e o
''',
expect_full_match=[b'a', b'A'],
no_match=[u'Ä', u'ä'])
self.given(b'''
(ignorecase) 1 of: a i u e o
''',
expect_full_match=[b'a', b'A'],
no_match=[u'Ä', u'ä'])
self.given(b'''
(unicode)
(ignorecase) lower
''',
expect_full_match=[b'a', b'A', u'Ä', u'ä'])
self.given(b'''
(unicode ignorecase)
lower
''',
expect_full_match=[b'a', b'A', u'Ä', u'ä'])
self.given(b'''
(unicode ignorecase)
lower
''',
expect_full_match=[b'a', b'A', u'Ä', u'ä'])
self.given(b'''
(ignorecase) 'AAa'
''',
expect_full_match=[b'AAa', b'aAa', b'aaa', b'AAA'])
self.given(b'''
(ignorecase) /VOWEL/BIGVOWEL/
VOWEL: A I U E O
BIGVOWEL = (-ignorecase) VOWEL
''',
expect_full_match=[b'AA', b'aA'],
no_match=[b'Aa', b'aa'])
def test_string_escape(self):
self.given(br'''
'\n'
''',
expect_full_match=[b'\n'],
no_match=[br'\n', b'\\n'])
self.given(br'''
'\t'
''',
expect_full_match=[b'\t', b' '],
no_match=[br'\t', b'\\t'])
self.given(b'''
'\t'
''',
expect_full_match=[b'\t', b' '],
no_match=[br'\t', b'\\t'])
self.given(br'''
'\x61'
''',
expect_full_match=[u'\U00000061', u'\u0061', u'a', b'a', b'\x61', b'\141'],
no_match=[br'\x61', b'\\x61'])
self.given(b'''
'\x61'
''',
expect_full_match=[u'\U00000061', u'\u0061', u'a', b'a', b'\x61', b'\141'],
no_match=[br'\x61', b'\\x61'])
self.given(u'''
'\U00000061'
''',
expect_full_match=[u'\U00000061', u'\u0061', u'a', b'a', b'\x61', b'\141'],
no_match=[br'\U00000061', b'\\U00000061'])
self.given(br'''
'\u0061'
''',
expect_full_match=[u'\U00000061', u'\u0061', u'a', b'a', b'\x61', b'\141'],
no_match=[br'\u0061', b'\\u0061'])
self.given(br'''
'\141'
''',
expect_full_match=[u'\U00000061', u'\u0061', u'a', b'a', b'\x61', b'\141'],
no_match=[br'\141', b'\\141'])
self.given(u'''
'\N{AMPERSAND}'
''',
expect_full_match=[u'\N{AMPERSAND}', u'&', b'&'],
no_match=[br'\N{AMPERSAND}', b'\N{AMPERSAND}', b'\\N{AMPERSAND}'])
self.given(br'''
'\N{AMPERSAND}'
''',
expect_full_match=[u'\N{AMPERSAND}', u'&', b'&'],
no_match=[br'\N{AMPERSAND}', b'\N{AMPERSAND}', b'\\N{AMPERSAND}'])
self.given(br'''
'\N{BIOHAZARD SIGN}'
''',
expect_full_match=[u'\N{BIOHAZARD SIGN}', u'☣'],
no_match=[br'\N{BIOHAZARD SIGN}', b'\N{BIOHAZARD SIGN}', b'\\N{BIOHAZARD SIGN}'])
self.given(br'''
2 of 'M\N{AMPERSAND}M\N{APOSTROPHE}s'
''',
expect_full_match=["M&M'sM&M's"],
no_match=[br'M\N{AMPERSAND}M\N{APOSTROPHE}s'])
self.given(r'''
2 of 'M\N{AMPERSAND}M\N{APOSTROPHE}s'
''',
expect_full_match=["M&M'sM&M's"],
no_match=[br'M\N{AMPERSAND}M\N{APOSTROPHE}s'])
self.given(r'''
3 of '\t\t'
''',
expect_full_match=[b'\t\t\t\t\t\t'],
no_match=[b'\t\t\t\t'])
self.given(br'''
'\a\b\f\v\t'
''',
expect_full_match=[b'\a\b\f\v\t'],
no_match=[br'\a\b\f\v\t'])
self.given(br'''
'.\w\b\s\X\n'
''',
expect_full_match=[b'.\w\b\s\X\n'],
no_match=[br'.\w\b\s\X\n'])
def test_flag_dependents(self):
self.given(br'''
linechar
''',
expect_full_match=[b'\n', b'\r', b'\v', b'\f', b'\x0b', b'\x0C'],
no_match=['\x85', b'\u2028', br'\u2028', u'\u2028', u'\u2029'],
partial_match={'\r\n' : '\r'})
self.given(br'''
(unicode)
linechar
''',
expect_full_match=[b'\n', b'\r', b'\v', b'\f', b'\x0b', b'\x0C', '\x85', u'\u2028', u'\u2029'],
no_match=[b'\u2028', br'\u2028'],
partial_match={'\r\n' : '\r'})
self.given(br'''
(-word)
linechar
''',
expect_full_match=[b'\n'],
no_match=[b'\r', b'\v', b'\f', b'\x0b', b'\x0C', '\x85', u'\u2028', u'\u2029', b'\u2028', br'\u2028'],
partial_match={'\n\r' : '\n'})
self.given(br'''
(unicode -word)
linechar
''',
expect_full_match=[b'\n'],
no_match=[b'\r', b'\v', b'\f', b'\x0b', b'\x0C', '\x85', u'\u2028', u'\u2029', b'\u2028', br'\u2028'],
partial_match={'\n\r' : '\n'})
self.given(br'''
(-word unicode)
linechar
''',
expect_full_match=[b'\n'],
no_match=[b'\r', b'\v', b'\f', b'\x0b', b'\x0C', '\x85', u'\u2028', u'\u2029', b'\u2028', br'\u2028'],
partial_match={'\n\r' : '\n'})
self.given(br'''
(unicode)
(-word) linechar
''',
expect_full_match=[b'\n'],
no_match=[b'\r', b'\v', b'\f', b'\x0b', b'\x0C', '\x85', u'\u2028', u'\u2029', b'\u2028', br'\u2028'],
partial_match={'\n\r' : '\n'})
def test_orblock(self):
self.given(b'''
@|
|'cat'
|'dog'
''',
expect_full_match=[b'cat', b'dog'],
no_match=[b'cadog'],
partial_match={'catdog' : 'cat', b'catog' : 'cat'})
self.given(b'''
<<|
|'tea'
|'coffee'
''',
expect_full_match=[b'tea', b'coffee'],
no_match=[b'tecoffee'],
partial_match={'teacoffee' : 'tea', b'teaoffee' : 'tea'})
self.given(b'''
backtrackable_choice
backtrackable_choice = <<|
|'catastrophy'
|'catass trophy'
|'cat'
''',
expect_full_match=[b'catastrophy', b'catass trophy', b'cat'],
partial_match={'catastrophy cat' : 'catastrophy', b'catass cat' : 'cat'})
self.given(b'''
no_backtrack
no_backtrack = @|
|'red pill'
|'blue pill'
''',
expect_full_match=[b'red pill', b'blue pill'],
no_match=[b'red blue pill'],
partial_match={'red pill pill' : 'red pill'})
self.given(b'''
/digit/space/ampm/
ampm = (ignorecase) <<|
|'AM'
|'PM'
''',
expect_full_match=[b'1 AM', b'2 pm', b'9 pM'],
no_match=[b'10 am', b'1 APM', b'PM'],
partial_match={'5 aMm ' : '5 aM'})
self.given(b'''
2 of <<|
|'fast'
|'good'
|'cheap'
''',
expect_full_match=[b'fastgood', b'fastcheap', b'cheapgood', b'cheapfast', b'goodgood', b'cheapcheap'],
no_match=[b'fast', b'good', b'cheap'],
partial_match={'goodcheapfast' : 'goodcheap'})
self.given(b'''
<<|
|2 of 'ma'
|2 of 'pa'
|2 of 'bolo'
''',
expect_full_match=[b'mama', b'papa', b'bolobolo'],
no_match=[b'ma', b'mapa', b'mabolo', b'boloma', b'pabolo'],
partial_match={'papabolo' : 'papa', b'mamapapa' : 'mama'})
self.given(b'''
/blood_type/rhesus/
blood_type =<<|
|'AB'
|1 of: A B O
rhesus = <<|
|'+'
|'-'
| -- allow empty/unknown rhesus
''',
expect_full_match=[b'A', b'A+', b'B', b'B-', b'AB', b'AB+', b'O', b'O-'],
no_match=[b''],
partial_match={'A+B' : 'A+', b'AAA' : 'A'})
self.given(b'''
subexpr_types
subexpr_types = <<|
|'string literal'
|(ignorecase) 1 of: a i u e o
|2..3 <<- of X
|/alpha/digit/
|alpha
X = 'X'
''',
expect_full_match=[b'string literal', b'E', b'XX', b'R1', b'X'],
no_match=[b'2', b'3'],
partial_match={'aX' : 'a', b'string Z' : 's', b'YY' : 'Y'})
self.given(b'''
<<| -- comment here is ok
|'android'
|'ios'
''',
expect_full_match=[b'android', b'ios'],
no_match=[b'androiios'],
partial_match={'androidos' : 'android'})
self.given(b'''
/nature/side/
nature = @|
|'lawful ' -- mind the trailing space
|'chaotic '
|'neutral '
--allow comment on ORBLOCK "breaker" line
side = @|
|'good'
|'evil'
|'neutral'
''',
expect_full_match=[b'lawful good', b'chaotic good', b'chaotic evil', b'neutral evil', b'neutral neutral'],
no_match=[b'neutral', b'neutral ', b'lawful ', b'good', b'evil', b'chaotic chaotic ', b'evilevil', b' '])
self.given(b'''
any_color_as_long_as_it_is_
any_color_as_long_as_it_is_ = <<|
|'black'
-- single-entry "choice" is OK
''',
expect_full_match=[b'black'],
no_match=[b''],
partial_match={'blackish' : 'black'})
self.given(b'''-- nested ORBLOCKs
<<|
|coffee
|tea
|'cendol'
coffee = <<|
|'espresso'
|'cappuccino'
|'kopi tubruk'
tea = <<|
|'earl grey'
|'ocha'
|'teh tarik'
''',
expect_full_match=[b'cendol', b'kopi tubruk', b'teh tarik', b'ocha', b'cappuccino'],
no_match=[b'kopi earl grey cendol'],
partial_match={'espresso tubruk' : 'espresso'})
self.given(b'''
/alpha/or/
or = @|
|alpha
|digit
''',
expect_full_match=[b'Aa', b'a1'],
no_match=[b'', b'a', b'1'])
self.given(b'''
/or/alpha/
or = @|
|alpha
|digit
''',
expect_full_match=[b'Aa', b'1A'],
no_match=[b'', b'a', b'1'])
self.given(b'''
/alpha/or/
or = <<|
|alpha
|digit
''',
expect_full_match=[b'Aa', b'a1'],
no_match=[b'', b'a', b'1'])
self.given(b'''
/or/alpha/
or = <<|
|alpha
|digit
''',
expect_full_match=[b'aA', b'1a'],
no_match=[b'', b'a', b'1'])
self.given(b'''
/az?/or/
[az]: a..z
or = <<|
|[az] ? alpha
|digit
''',
expect_full_match=[b'aa', b'aA', b'1'],
no_match=[b'', b'a'])
self.given(b'''
/or/az/
[az]: a..z
or = <<|
|[az] ? alpha
|digit
''',
expect_full_match=[b'1a'],
no_match=[b'', b'a', b'1', b'aa', b'Aa', b'aA'])
self.given(b'''
/az?/or/
[az]: a..z
or = <<|
|[az] ? alpha
|
''',
expect_full_match=[b'aA', b'aa', b''],
no_match=[],
partial_match={
'Aa' : '',
'A' : '',
'a' : '',
'1' : '',
'12' : '',
})
self.given(b'''
/az?/or/
[az]: a..z
or = <<|
|[az] ?
|digit
''',
expect_full_match=[b'a', b'1'],
no_match=[b'A', b'', b'Aa'],
partial_match={
'aA' : 'a',
'a1' : 'a',
'12' : '1',
})
self.given(b'''
/az?/or/
[az]: a..z
or = <<|
|[az] ?
|
''',
expect_full_match=[b'a', b''],
no_match=[],
partial_match={
'Aa' : '',
'A' : '',
'1' : '',
'12' : '',
})
def test_lookaround(self):
self.given(b'''
actually_no_lookaround
actually_no_lookaround = <@>
|alpha|
|digit|
|upper|
|lower|
''',
expect_full_match=[b'a1Aa'])
self.given(b'''
<@>
<yamaha|
<!/yang/lain/|
|semakin|
|/di/depan/>
|!ketinggalan>
yamaha = 'yamaha'
yang = 'yang'
lain = 'lain'
semakin = 'semakin'
di = 'di'
depan = 'depan'
ketinggalan = 'ketinggalan'
''',
fn=regex.search,
expect_full_match=[],
no_match=[b'yanglainsemakinketinggalan'],
partial_match={'yamahasemakindidepan' : 'semakin'})
self.given(b'''
<@>
|anyam>
|anyaman|
<nyaman|
anyam = 'anyam'
anyaman = 'anyaman'
nyaman = 'nyaman'
''',
expect_full_match=[b'anyaman'],
partial_match={'anyamanyamannyaman' : 'anyaman'})
self.given(b'''
<@>
|mixed_case>
|has_number>
|has_symbol>
|./len_8_to_255/.|
len_8_to_255 = @8..255 of any
mixed_case = <@>
|has_upper>
|has_lower>
has_upper = /non_uppers?/upper/
non_uppers = @1.. of: not: upper
has_lower = /non_lowers?/lower/
non_lowers = @1.. of: not: lower
has_number = /non_digits?/digit/
non_digits = @1.. of: not: digit
has_symbol = /non_symbols?/symbol/
symbol: not: /Alphanumeric
non_symbols = @1.. of: not: symbol
''',
expect_full_match=[b'AAaa11!!'],
no_match=[b'$h0RT', b'noNumber!', b'noSymb0l', b'n0upcase!', b'N0LOWCASE!'])
self.given(b'''
word_ends_with_s
word_ends_with_s = <@>
|wordchars|
<s|
wordchars = @1.. of wordchar
s = 's'
''',
expect_full_match=[b'boss'],
no_match=[b'sassy'])
self.given(b'''
un_x_able
un_x_able = <@>
|un>
|unxable|
<able|
un = 'un'
unxable = @1.. of wordchar
able = 'able'
''',
expect_full_match=[b'undoable', b'unable'])
self.given(b'''
escape
escape = <@>
<backslash|
|any|
''',
fn=regex.search,
no_match=[b'\t', b'\\'],
partial_match={r'\t' : 't', b'\z': 'z', b'\\\\':'\\', br'\r\n' : 'r', br'r\n' : 'n', b'\wow' : 'w', b'\\\'' : '\''})
self.given(b'''
money_digits
money_digits = <<|
|dollar_digits
|digits_buck
dollar_digits = <@>
<dollar|
|digits|
dollar = '$'
*) [digits] = @1.. of digit
digits_buck = <@>
|digits|
|buck>
buck = ' buck'
''',
fn=regex.search,
no_match=[b'123', b'4 pm'],
partial_match={
'$1' : '1',
'$234' : '234',
'500 bucks' : '500',
'1 buck' : '1',
})
self.given(b'''
/begin/msg/end/
begin = .'BEGIN'.
end = .'END'.
msg = @1.. of <<|
|@1.. of: not: E
|E_not_END
E_not_END = <<|
|check_ahead
|check_behind
check_ahead = <@>
|E|
|!ND>
*) E = 'E'
ND = 'ND'.
check_behind = <@>
<!WOB|
|E|
''',
fn=regex.search,
expect_full_match=[
'BEGIN END',
'BEGIN hey END',
'BEGIN BEGINNER FIRE-BENDER BEND ENDER END',
],
no_match=[b'BEGINNER FIRE-BENDER', b'begin hey end', b'BEGINEND', b'BEGIN ...'],
partial_match={
'BEGIN huge wooden horse END brad pitt' : 'BEGIN huge wooden horse END',
'BEGINNER BEGIN ENDANGERED END' : 'BEGIN ENDANGERED END',
})
def test_non_op(self):
self.given(b'''
/non-alpha/non-digit/non-whitechar/non-wordchar/
''',
expect_full_match=[b'....'])
self.given(b'''
non_digits
non_digits = @1.. of non-digit
''',
expect_full_match=[b'ZERO-ZERO-SEVEN', b'ZEROZEROSEVEN'])
self.given(b'''
non-alphabetic
alphabetic: /Alphabetic
''',
expect_full_match=[b'1', b'!'],
no_match=[b'a', u'ä'])
self.given(b'''
non-minus
minus: -
''',
expect_full_match=[b'a', b'1', b'!'],
no_match=[b'-'])
self.given(b'''
non-caret
caret: ^
''',
expect_full_match=[b'a', b'1', b'!'],
no_match=[b'^'])
self.given(b'''
/non-non_alpha/non-non_digit/
non_alpha = non-alpha
non_digit = non-digit
''',
expect_full_match=[b'a1', b'A9'],
no_match=[b'a', b'1', b'Aa', b'42', b'A+'])
self.given(b'''
non-consonant
consonant: alpha not vowel
vowel: a i u e o A I U E O
''',
expect_full_match=[b'a', b'1', b'!'],
no_match=[b'b', b'Z'])
def test_numrange_shortcut(self):
self.given(u'''
'0'..'1'
''',
expect_full_match=[b'0', b'1'],
no_match=[b'00', b'01', b'10', b'11'])
self.given(u'''
'0'..'2'
''',
expect_full_match=[b'0', b'1', b'2'],
no_match=[b'00', b'11', b'22', b'02'])
self.given(u'''
'0'..'9'
''',
expect_full_match=[b'0', b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9'],
no_match=[b'00', b'11', b'99', b'09', b'911', b'123abc'],
partial_match={
'0xdeadbeef' : '0',
'9z' : '9',
'3.14' : '3',
})
self.given(u'''
'1'..'2'
''',
expect_full_match=[b'1', b'2'],
no_match=[b'0', b'11', b'22', b'12'])
self.given(u'''
'1'..'9'
''',
expect_full_match=[b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9'],
no_match=[b'0', b'11', b'99', b'19'])
self.given(u'''
'2'..'9'
''',
expect_full_match=[b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9'],
no_match=[b'1', b'22', b'99', b'29'])
self.given(u'''
'8'..'9'
''',
expect_full_match=[b'8', b'9'],
no_match=[b'88', b'99', b'89'])
self.given(u'''
'0'..'10'
''',
expect_full_match=[b'0', b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b'10'],
no_match=[b'11', b'010', b'100'])
self.given(u'''
'1'..'10'
''',
expect_full_match=[b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b'10'],
no_match=[b'0', b'11', b'010', b'100', b'110'])
self.given(u'''
'2'..'10'
''',
expect_full_match=[b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b'10'],
no_match=[b'0', b'22', b'010', b'100', b'210'])
self.given(u'''
'8'..'10'
''',
expect_full_match=[b'8', b'9', b'10'],
no_match=[b'88', b'010', b'100', b'810'])
self.given(u'''
'9'..'10'
''',
expect_full_match=[b'9', b'10'],
no_match=[b'99', b'010', b'100', b'910'])
self.given(u'''
'0'..'11'
''',
expect_full_match=[b'0', b'1', b'5', b'10', b'11'],
no_match=[b'01', b'12', b'011', b'111'])
self.given(u'''
'1'..'11'
''',
expect_full_match=[b'1', b'6', b'10', b'11'],
no_match=[b'0', b'01', b'011', b'111'])
self.given(u'''
'9'..'11'
''',
expect_full_match=[b'9', b'10', b'11'],
no_match=[b'90', b'09', b'911'])
self.given(u'''
'10'..'11'
''',
expect_full_match=[b'10', b'11'],
no_match=[b'100', b'101', b'111', b'1011'])
self.given(u'''
'0'..'12'
''',
expect_full_match=[b'0', b'1', b'2', b'5', b'10', b'11', b'12'],
no_match=[b'00', b'012'])
self.given(u'''
'1'..'12'
''',
expect_full_match=[b'1', b'2', b'8', b'9', b'10', b'11', b'12'],
no_match=[b'01', b'012', b'112', b'121'])
self.given(u'''
'2'..'12'
''',
expect_full_match=[b'2', b'8', b'9', b'10', b'11', b'12'],
no_match=[b'20', b'100', b'110', b'120'])
self.given(u'''
'0'..'19'
''',
expect_full_match=[b'0', b'1', b'9', b'10', b'18', b'19'],
no_match=[b'00', b'019', b'190', b'20'])
self.given(u'''
'1'..'19'
''',
expect_full_match=[b'1', b'9', b'10', b'18', b'19'],
no_match=[b'0', b'00', b'019', b'190', b'20'])
self.given(u'''
'9'..'19'
''',
expect_full_match=[b'9', b'10', b'15', b'19'],
no_match=[b'919'])
self.given(u'''
'10'..'19'
''',
expect_full_match=[b'10', b'19'],
no_match=[b'0', b'1', b'9', b'100', b'1019', b'20', b'190'])
self.given(u'''
'0'..'20'
''',
expect_full_match=[b'0', b'1', b'2', b'10', b'19', b'20'],
no_match=[b'00', b'020', b'200'])
self.given(u'''
'2'..'20'
''',
expect_full_match=[b'2', b'10', b'19', b'20'],
no_match=[b'00', b'020', b'200', b'220'])
self.given(u'''
'10'..'20'
''',
expect_full_match=[b'10', b'11', b'15', b'19', b'20'],
no_match=[b'0', b'1', b'010', b'020', b'100', b'200', b'1020'])
self.given(u'''
'19'..'20'
''',
expect_full_match=[b'19', b'20'],
no_match=[b'1', b'019', b'2000', b'1920'])
self.given(u'''
'0'..'29'
''',
expect_full_match=[b'0', b'1', b'2', b'9', b'29'],
no_match=[b'00', b'029', b'299'])
self.given(u'''
'2'..'29'
''',
expect_full_match=[b'2', b'9', b'15', b'21', b'22', b'29'],
no_match=[b'0', b'1', b'229', b'292'])
self.given(u'''
'9'..'29'
''',
expect_full_match=[b'9', b'29', b'19', b'15'],
no_match=[b'92', b'929', b'299'])
self.given(u'''
'2'..'42'
''',
expect_full_match=[b'2', b'4', b'12', b'22', b'39', b'41', b'42'],
no_match=[b'02', b'242', b'422'])
self.given(u'''
'12'..'42'
''',
expect_full_match=[b'12', b'22', b'32', b'42', b'19', b'41', b'35'],
no_match=[b'1', b'2', b'1242', b'4212'])
self.given(u'''
'24'..'42'
''',
expect_full_match=[b'24', b'39', b'40', b'42'],
no_match=[b'2', b'4', b'2442', b'4224'])
self.given(u'''
'38'..'42'
''',
expect_full_match=[b'38', b'39', b'40', b'41', b'42'],
no_match=[b'3', b'4', b'3842'])
self.given(u'''
'0'..'90'
''',
expect_full_match=[b'0', b'1', b'2', b'5', b'7', b'8', b'9', b'10', b'11', b'30', b'42', b'69', b'83', b'88', b'89', b'90'],
no_match=[b'09', b'090', b'900'])
self.given(u'''
'9'..'90'
''',
expect_full_match=[b'9', b'10', b'19', b'42', b'89', b'90'],
no_match=[b'09', b'99', b'900'])
self.given(u'''
'10'..'90'
''',
expect_full_match=[b'10', b'19', b'20', b'42', b'89', b'90'],
no_match=[b'0', b'1', b'100', b'900', b'010'])
self.given(u'''
'0'..'98'
''',
expect_full_match=[b'0', b'1', b'8', b'9', b'18', b'20', b'42', b'89', b'90', b'97', b'98'],
no_match=[b'00', b'09', b'098', b'980'])
self.given(u'''
'1'..'98'
''',
expect_full_match=[b'1', b'8', b'9', b'18', b'20', b'42', b'89', b'90', b'97', b'98'],
no_match=[b'0', b'01', b'09', b'098', b'198', b'980'])
self.given(u'''
'0'..'99'
''',
expect_full_match=[b'0', b'1', b'8', b'9', b'18', b'20', b'42', b'89', b'90', b'97', b'98', b'99'],
no_match=[b'00', b'09', b'099', b'990'],
partial_match={
'0xcafebabe' : '0',
'9z' : '9',
'12ab' : '12',
'3.1415' : '3',
})
self.given(u'''
'1'..'99'
''',
expect_full_match=[b'1', b'8', b'9', b'18', b'20', b'42', b'89', b'90', b'97', b'98', b'99'],
no_match=[b'0', b'01', b'09', b'099', b'199', b'991'])
self.given(u'''
'2'..'99'
''',
expect_full_match=[b'2', b'8', b'9', b'18', b'20', b'42', b'89', b'90', b'97', b'98', b'99'],
no_match=[b'0', b'1', b'02', b'099', b'990'])
self.given(u'''
'9'..'99'
''',
expect_full_match=[b'9', b'18', b'20', b'42', b'89', b'90', b'97', b'98', b'99'],
no_match=[b'0', b'1', b'09', b'099', b'990'])
self.given(u'''
'10'..'99'
''',
expect_full_match=[b'10', b'18', b'19', b'20', b'42', b'89', b'90', b'97', b'98', b'99'],
no_match=[b'1', b'010', b'100', b'099', b'990', b'1099'])
self.given(u'''
'11'..'99'
''',
expect_full_match=[b'11', b'18', b'20', b'42', b'89', b'90', b'97', b'98', b'99'],
no_match=[b'0', b'1', b'9', b'10', b'011', b'110', b'099', b'990', b'1199'])
self.given(u'''
'19'..'99'
''',
expect_full_match=[b'19', b'20', b'42', b'89', b'90', b'97', b'98', b'99'],
no_match=[b'0', b'1', b'9', b'10', b'019', b'190', b'099', b'990', b'1999'])
self.given(u'''
'20'..'99'
''',
expect_full_match=[b'20', b'42', b'89', b'90', b'97', b'98', b'99'],
no_match=[b'2', b'9', b'020', b'200', b'099', b'990', b'2099'])
self.given(u'''
'29'..'99'
''',
expect_full_match=[b'29', b'42', b'89', b'90', b'97', b'98', b'99'],
no_match=[b'2', b'9', b'029', b'290', b'099', b'990', b'2999'])
self.given(u'''
'46'..'99'
''',
expect_full_match=[b'46', b'85', b'90', b'97', b'98', b'99'],
no_match=[b'4', b'9', b'046', b'460', b'099', b'990', b'4699'])
self.given(u'''
'80'..'99'
''',
expect_full_match=[b'80', b'85', b'90', b'97', b'98', b'99'],
no_match=[b'8', b'9', b'080', b'800', b'099', b'990', b'8099'])
self.given(u'''
'89'..'99'
''',
expect_full_match=[b'89', b'90', b'97', b'98', b'99'],
no_match=[b'8', b'9', b'089', b'890', b'099', b'990', b'8099'])
self.given(u'''
'90'..'99'
''',
expect_full_match=[b'90', b'91', b'92', b'95', b'97', b'98', b'99'],
no_match=[b'9', b'090', b'900', b'099', b'990', b'9099'])
self.given(u'''
'0'..'100'
''',
expect_full_match=[b'0', b'1', b'2', b'9', b'10', b'46', b'99', b'100'],
no_match=[b'00', b'010', b'0100', b'1000'])
self.given(u'''
'10'..'100'
''',
expect_full_match=[b'10', b'46', b'99', b'100'],
no_match=[b'1', b'010', b'0100', b'1000'])
self.given(u'''
'90'..'100'
''',
expect_full_match=[b'90', b'91', b'92', b'95', b'97', b'98', b'99', b'100'],
no_match=[b'9', b'090', b'900', b'0100', b'1000'])
self.given(u'''
'99'..'100'
''',
expect_full_match=[b'99', b'100'],
no_match=[b'9', b'1', b'10', b'099', b'0100', b'990', b'1000'])
def test_00numrange_shortcut(self):
self.given(u'''
'00'..'01'
''',
expect_full_match=[b'00', b'01'],
no_match=[b'0', b'1', b'000', b'001', b'010'])
self.given(u'''
'000'..'001'
''',
expect_full_match=[b'000', b'001'],
no_match=[b'0', b'1', b'00', b'01', b'0000', b'0001', b'0010'])
self.given(u'''
'00'..'02'
''',
expect_full_match=[b'00', b'01', b'02'],
no_match=[b'0', b'1', b'2', b'000', b'001', b'002', b'020'])
self.given(u'''
'00'..'09'
''',
expect_full_match=[b'00', b'01', b'02', b'03', b'04', b'05', b'06', b'07', b'08', b'09'],
no_match=[b'0', b'1', b'2', b'9', b'000', b'009', b'090', b'010', b'9z', b'3.14'],
partial_match={
'09z' : '09',
'03.14' : '03',
})
self.given(u'''
'01'..'02'
''',
expect_full_match=[b'01', b'02'],
no_match=[b'0', b'1', b'2', b'001', b'002', b'010', b'020'])
self.given(u'''
'01'..'09'
''',
expect_full_match=[b'01', b'02', b'03', b'04', b'05', b'06', b'07', b'08', b'09'],
no_match=[b'1', b'2', b'3', b'9', b'001', b'009', b'010', b'090'])
self.given(u'''
'02'..'09'
''',
expect_full_match=[b'02', b'03', b'04', b'05', b'06', b'07', b'08', b'09'],
no_match=[b'2', b'9', b'002', b'009', b'020', b'090'])
self.given(u'''
'08'..'09'
''',
expect_full_match=[b'08', b'09'],
no_match=[b'8', b'9', b'008', b'009', b'080', b'090'])
self.given(u'''
'00'..'10'
''',
expect_full_match=[b'00', b'01', b'02', b'03', b'04', b'05', b'06', b'07', b'08', b'09', b'10'],
no_match=[b'0', b'1', b'2', b'9', b'000', b'010', b'100'])
self.given(u'''
'01'..'10'
''',
expect_full_match=[b'01', b'02', b'03', b'04', b'05', b'06', b'07', b'08', b'09', b'10'],
no_match=[b'1', b'2', b'9', b'001', b'010', b'100'])
self.given(u'''
'001'..'010'
''',
expect_full_match=[b'001', b'002', b'003', b'004', b'005', b'006', b'007', b'008', b'009', b'010'],
no_match=[b'1', b'2', b'9', b'10', b'01', b'02', b'0001', b'0010', b'0100'])
self.given(u'''
'02'..'10'
''',
expect_full_match=[b'02', b'03', b'04', b'05', b'06', b'07', b'08', b'09', b'10'],
no_match=[b'2', b'9', b'002', b'020', b'010', b'100'])
self.given(u'''
'08'..'10'
''',
expect_full_match=[b'08', b'09', b'10'],
no_match=[b'8', b'008', b'080', b'010', b'100'])
self.given(u'''
'09'..'10'
''',
expect_full_match=[b'09', b'10'],
no_match=[b'9', b'009', b'090', b'010', b'100'])
self.given(u'''
'00'..'11'
''',
expect_full_match=[b'00', b'01', b'05', b'09', b'10', b'11'],
no_match=[b'0', b'1', b'000', b'001', b'011', b'110'])
self.given(u'''
'01'..'11'
''',
expect_full_match=[b'01', b'05', b'09', b'10', b'11'],
no_match=[b'0', b'1', b'00', b'001', b'010', b'011', b'110'])
self.given(u'''
'09'..'11'
''',
expect_full_match=[b'09', b'10', b'11'],
no_match=[b'0', b'9', b'009', b'090', b'011', b'110'])
self.given(u'''
'010'..'011'
''',
expect_full_match=[b'010', b'011'],
no_match=[b'0', b'01', b'10', b'11', b'0010', b'090', b'0011', b'0100', b'0110'])
self.given(u'''
'01'..'12'
''',
expect_full_match=[b'01', b'02', b'05', b'09', b'10', b'11', b'12'],
no_match=[b'1', b'001', b'012', b'010', b'012'])
self.given(u'''
'02'..'12'
''',
expect_full_match=[b'02', b'08', b'09', b'10', b'11', b'12'],
no_match=[b'2', b'8', b'9', b'002', b'009', b'020', b'012', b'120'])
self.given(u'''
'000'..'012'
''',
expect_full_match=[b'000', b'001', b'002', b'008', b'009', b'010', b'011', b'012'],
no_match=[b'0', b'1', b'2', b'10', b'12', b'0000', b'0012', b'0120'])
self.given(u'''
'00'..'19'
''',
expect_full_match=[b'00', b'01', b'02', b'09', b'10', b'18', b'19'],
no_match=[b'0', b'1', b'000', b'019', b'190'])
self.given(u'''
'01'..'19'
''',
expect_full_match=[b'01', b'02', b'09', b'10', b'18', b'19'],
no_match=[b'0', b'1', b'000', b'019', b'190'])
self.given(u'''
'09'..'19'
''',
expect_full_match=[b'09', b'10', b'18', b'19'],
no_match=[b'0', b'1', b'9', b'009', b'019', b'090', b'190'])
self.given(u'''
'010'..'019'
''',
expect_full_match=[b'010', b'011', b'015', b'018', b'019'],
no_match=[b'0', b'1', b'10', b'11', b'19', b'0010', b'0190', b'0100', b'0190'])
self.given(u'''
'00'..'20'
''',
expect_full_match=[b'00', b'01', b'02', b'10', b'19', b'20'],
no_match=[b'0', b'2', b'000', b'020', b'200'])
self.given(u'''
'02'..'20'
''',
expect_full_match=[b'02', b'10', b'19', b'20'],
no_match=[b'0', b'2', b'002', b'020', b'200'])
self.given(u'''
'010'..'020'
''',
expect_full_match=[b'010', b'011', b'012', b'015', b'018', b'019', b'020'],
no_match=[b'0', b'01', b'02', b'10', b'20', b'0100', b'0200', b'0010', b'0020'])
self.given(u'''
'019'..'020'
''',
expect_full_match=[b'019', b'020'],
no_match=[b'0', b'01', b'02', b'19', b'20', b'0190', b'0200', b'0019', b'0020'])
def test_oonumrange_shortcut(self):
self.given(u'''
'o0'..'o1'
''',
expect_full_match=[b'0', b'1', b'00', b'01'],
no_match=[b'000', b'001', b'010'])
self.given(u'''
'oo0'..'oo1'
''',
expect_full_match=[b'0', b'1', b'00', b'01', b'000', b'001'],
no_match=[b'0000', b'0001', b'0010'])
self.given(u'''
'o0'..'o2'
''',
expect_full_match=[b'0', b'1', b'2', b'00', b'01', b'02'],
no_match=[b'000', b'001', b'002', b'020'])
self.given(u'''
'o0'..'o9'
''',
expect_full_match=[b'0', b'1', b'2', b'9', b'00', b'01', b'02', b'03', b'04', b'05', b'06', b'07', b'08', b'09'],
no_match=[b'000', b'009', b'090', b'010'],
partial_match={
'9z' : '9',
'09z' : '09',
'3.14' : '3',
'03.14' : '03',
})
self.given(u'''
'o1'..'o2'
''',
expect_full_match=[b'1', b'2', b'01', b'02'],
no_match=[b'0', b'001', b'002', b'010', b'020'])
self.given(u'''
'o1'..'o9'
''',
expect_full_match=[b'1', b'2', b'3', b'9', b'01', b'02', b'03', b'04', b'05', b'06', b'07', b'08', b'09'],
no_match=[b'001', b'009', b'010', b'090'])
self.given(u'''
'o2'..'o9'
''',
expect_full_match=[b'2', b'9', b'02', b'03', b'04', b'05', b'06', b'07', b'08', b'09'],
no_match=[b'002', b'009', b'020', b'090'])
self.given(u'''
'o8'..'o9'
''',
expect_full_match=[b'8', b'9', b'08', b'09'],
no_match=[b'008', b'009', b'080', b'090'])
self.given(u'''
'o0'..'10'
''',
expect_full_match=[b'0', b'1', b'2', b'9', b'00', b'01', b'02', b'03', b'04', b'05', b'06', b'07', b'08', b'09', b'10'],
no_match=[b'000', b'010', b'100'])
self.given(u'''
'o1'..'10'
''',
expect_full_match=[b'1', b'2', b'9', b'01', b'02', b'03', b'04', b'05', b'06', b'07', b'08', b'09', b'10'],
no_match=[b'001', b'010', b'100'])
self.given(u'''
'oo1'..'o10'
''',
expect_full_match=[b'1', b'2', b'9', b'10', b'01', b'02', b'001', b'002', b'003', b'004', b'005', b'006', b'007', b'008', b'009', b'010'],
no_match=[b'0001', b'0010', b'0100'])
self.given(u'''
'o2'..'10'
''',
expect_full_match=[b'2', b'9', b'02', b'03', b'04', b'05', b'06', b'07', b'08', b'09', b'10'],
no_match=[b'002', b'020', b'010', b'100'])
self.given(u'''
'o8'..'10'
''',
expect_full_match=[b'8', b'08', b'09', b'10'],
no_match=[b'008', b'080', b'010', b'100'])
self.given(u'''
'o9'..'10'
''',
expect_full_match=[b'9', b'09', b'10'],
no_match=[b'009', b'090', b'010', b'100'])
self.given(u'''
'o0'..'11'
''',
expect_full_match=[b'0', b'1', b'00', b'01', b'05', b'09', b'10', b'11'],
no_match=[b'000', b'001', b'011', b'110'])
self.given(u'''
'o1'..'11'
''',
expect_full_match=[b'1', b'01', b'05', b'09', b'10', b'11'],
no_match=[b'0', b'00', b'001', b'010', b'011', b'110'])
self.given(u'''
'o9'..'11'
''',
expect_full_match=[b'9', b'09', b'10', b'11'],
no_match=[b'0', b'009', b'090', b'011', b'110'])
self.given(u'''
'o10'..'o11'
''',
expect_full_match=[b'10', b'11', b'010', b'011'],
no_match=[b'0', b'01', b'0010', b'090', b'0011', b'0100', b'0110'])
self.given(u'''
'o1'..'12'
''',
expect_full_match=[b'1', b'01', b'02', b'05', b'09', b'10', b'11', b'12'],
no_match=[b'001', b'012', b'010', b'012'])
self.given(u'''
'o2'..'12'
''',
expect_full_match=[b'2', b'8', b'9', b'02', b'08', b'09', b'10', b'11', b'12'],
no_match=[b'002', b'009', b'020', b'012', b'120'])
self.given(u'''
'oo0'..'o12'
''',
expect_full_match=[b'0', b'1', b'2', b'10', b'12', b'000', b'001', b'002', b'008', b'009', b'010', b'011', b'012'],
no_match=[b'0000', b'0012', b'0120'])
self.given(u'''
'o0'..'19'
''',
expect_full_match=[b'0', b'1', b'00', b'01', b'02', b'09', b'10', b'18', b'19'],
no_match=[b'000', b'019', b'190'])
self.given(u'''
'o1'..'19'
''',
expect_full_match=[b'1', b'01', b'02', b'09', b'10', b'18', b'19'],
no_match=[b'0', b'000', b'019', b'190'])
self.given(u'''
'o9'..'19'
''',
expect_full_match=[b'9', b'09', b'10', b'18', b'19'],
no_match=[b'0', b'1', b'009', b'019', b'090', b'190'])
self.given(u'''
'o10'..'o19'
''',
expect_full_match=[b'10', b'11', b'19', b'010', b'011', b'015', b'018', b'019'],
no_match=[b'0', b'1', b'0010', b'0190', b'0100', b'0190'])
self.given(u'''
'o0'..'20'
''',
expect_full_match=[b'0', b'2', b'00', b'01', b'02', b'10', b'19', b'20'],
no_match=[b'000', b'020', b'200'])
self.given(u'''
'o2'..'20'
''',
expect_full_match=[b'2', b'02', b'10', b'19', b'20'],
no_match=[b'0', b'002', b'020', b'200'])
self.given(u'''
'o10'..'o20'
''',
expect_full_match=[b'10', b'20', b'010', b'011', b'012', b'015', b'018', b'019', b'020'],
no_match=[b'0', b'01', b'02', b'0100', b'0200', b'0010', b'0020'])
self.given(u'''
'o19'..'o20'
''',
expect_full_match=[b'19', b'20', b'019', b'020'],
no_match=[b'0', b'01', b'02', b'0190', b'0200', b'0019', b'0020'])
def test_norange_shortcut(self):
self.given(u'''
'0'..'0'
''',
expect_full_match=[b'0'],
no_match=[b'00', b'01', b'1'])
self.given(u'''
'00'..'00'
''',
expect_full_match=[b'00'],
no_match=[b'0', b'01', b'000'])
self.given(u'''
'000'..'000'
''',
expect_full_match=[b'000'],
no_match=[b'0', b'00', b'0000', b'0001', b'001'])
self.given(u'''
'007'..'007'
''',
expect_full_match=[b'007'],
no_match=[b'0', b'00', b'07', b'7', b'0070', b'0007', b'006', b'008'])
self.given(u'''
'1'..'1'
''',
expect_full_match=[b'1'],
no_match=[b'00', b'01', b'0', b'2'])
self.given(u'''
'2'..'2'
''',
expect_full_match=[b'2'],
no_match=[b'0', b'1', b'3', b'02', b'20'])
self.given(u'''
'9'..'9'
''',
expect_full_match=[b'9'],
no_match=[b'0', b'8', b'10', b'09', b'90'])
self.given(u'''
'10'..'10'
''',
expect_full_match=[b'10'],
no_match=[b'0', b'1', b'9', b'11', b'010', b'100'])
self.given(u'''
'99'..'99'
''',
expect_full_match=[b'99'],
no_match=[b'0', b'9', b'98', b'100', b'099', b'990'])
self.given(u'''
'100'..'100'
''',
expect_full_match=[b'100'],
no_match=[b'0', b'1', b'99', b'101', b'0100', b'1000'])
self.given(u'''
'123'..'123'
''',
expect_full_match=[b'123'],
no_match=[b'1', b'12', b'122', b'124', b'023', b'1230'])
self.given(u'''
'12345'..'12345'
''',
expect_full_match=[b'12345'],
no_match=[b'0', b'1', b'12', b'123', b'1234', b'12344', b'12346', b'012345', b'123450'])
self.given(u'''
'9999999'..'9999999'
''',
expect_full_match=[b'9999999'],
no_match=[b'99999999', b'999999', b'99999', b'9999', b'999', b'99', b'9', b'9999998', b'10000000', b'09999999', b'99999990'])
self.given(u'''
'o0'..'o0'
''',
expect_full_match=[b'0', b'00'],
no_match=[b'000', b'1', b'01'])
self.given(u'''
'oo0'..'oo0'
''',
expect_full_match=[b'0', b'00', b'000'],
no_match=[b'0000', b'1', b'01', b'001'])
self.given(u'''
'ooo0'..'ooo0'
''',
expect_full_match=[b'0', b'00', b'000', b'0000'],
no_match=[b'00000', b'1', b'01', b'001', b'0001'])
self.given(u'''
'o1'..'o1'
''',
expect_full_match=[b'1', b'01'],
no_match=[b'001', b'0', b'00', b'02', b'010'])
self.given(u'''
'oo9'..'oo9'
''',
expect_full_match=[b'9', b'09', b'009'],
no_match=[b'0009', b'0', b'00', b'008', b'010', b'0090'])
self.given(u'''
'o10'..'o10'
''',
expect_full_match=[b'10', b'010'],
no_match=[b'0', b'01', b'009', b'011', b'0100', b'0010'])
self.given(u'''
'oo100'..'oo100'
''',
expect_full_match=[b'100', b'0100', b'00100'],
no_match=[b'0', b'1', b'10', b'00', b'01', b'010', b'000', b'001', b'0010', b'99', b'101'])
self.given(u'''
'o9999'..'o9999'
''',
expect_full_match=[b'9999', b'09999'],
no_match=[b'0', b'9', b'09', b'099', b'0999', b'9998', b'10000', b'99990'])
self.given(u'''
'ooo9999'..'ooo9999'
''',
expect_full_match=[b'9999', b'09999', b'009999', b'0009999'],
no_match=[b'0', b'9', b'09', b'099', b'0999', b'009', b'0099', b'00999', b'0009', b'00099', b'000999', b'9998', b'10000'])
def test_infinite_numrange(self):
self.given(u'''
'0'..
''',
expect_full_match=[b'0', b'1', b'2', b'10', b'11', b'20', b'100', b'200', b'1000', b'9999', b'65535', b'4294967295'],
no_match=[b'00', b'01', b'02', b'010', b'011', b'020', b'0100', b'0200', b'01000', b'09999', b'065535', b'04294967295'])
self.given(u'''
'1'..
''',
expect_full_match=[b'1', b'2', b'10', b'11', b'20', b'100', b'200', b'1000', b'9999', b'65535', b'4294967295'],
no_match=[b'0', b'00', b'01', b'02', b'010', b'011', b'020', b'0100', b'0200'])
self.given(u'''
'2'..
''',
expect_full_match=[b'2', b'10', b'11', b'20', b'100', b'200', b'1000', b'9999', b'65535', b'4294967295'],
no_match=[b'0', b'1', b'00', b'01', b'02', b'010', b'011', b'020', b'0100', b'0200'])
self.given(u'''
'10'..
''',
expect_full_match=[b'10', b'11', b'20', b'100', b'200', b'1000', b'9999', b'65535', b'4294967295'],
no_match=[b'0', b'1', b'2', b'00', b'01', b'02', b'010', b'011', b'020', b'0100', b'0200'])
self.given(u'''
'20'..
''',
expect_full_match=[b'20', b'100', b'200', b'1000', b'9999', b'65535', b'4294967295'],
no_match=[b'0', b'1', b'2', b'10', b'11', b'00', b'01', b'02', b'010', b'011', b'020', b'0100', b'0200'])
self.given(u'''
'46'..
''',
expect_full_match=[b'100', b'200', b'1000', b'9999', b'65535', b'4294967295'],
no_match=[b'0', b'1', b'2', b'10', b'11', b'20', b'00', b'01', b'02', b'010', b'011', b'020', b'0100', b'0200'])
self.given(u'''
'100'..
''',
expect_full_match=[b'100', b'200', b'1000', b'9999', b'65535', b'4294967295'],
no_match=[b'0', b'1', b'2', b'10', b'11', b'20', b'00', b'01', b'02', b'010', b'011', b'020', b'0100', b'0200'])
self.given(u'''
'200'..
''',
expect_full_match=[b'200', b'1000', b'9999', b'65535', b'4294967295'],
no_match=[b'0', b'1', b'2', b'10', b'11', b'20', b'100', b'00', b'01', b'02', b'010', b'011', b'020', b'0100', b'0200'])
self.given(u'''
'234'..
''',
expect_full_match=[b'1000', b'9999', b'65535', b'4294967295'],
no_match=[b'0', b'1', b'2', b'10', b'11', b'20', b'100', b'200', b'00', b'01', b'02', b'010', b'011', b'020', b'0100', b'0200'])
self.given(u'''
? of '1'..
''',
expect_full_match=[b'1', b'2', b'10', b'11', b'20', b'100', b'200', b'1000', b'9999', b'65535', b'4294967295'],
partial_match={'0':'', b'00':'', b'01':'', b'02':'', b'010':'', b'011':'', b'020':'', b'0100':'', b'0200':''})
self.given(u'''
? of '2'..
''',
expect_full_match=[b'2', b'10', b'11', b'20', b'100', b'200', b'1000', b'9999', b'65535', b'4294967295'],
partial_match={'0':'', b'1':'', b'00':'', b'01':'', b'02':'', b'010':'', b'011':'', b'020':'', b'0100':'', b'0200':''})
def test_infinite_onumrange(self):
self.given(u'''
'o0'..
''',
expect_full_match=[b'0', b'1', b'2', b'10', b'11', b'20', b'100', b'200', b'1000', b'9999', b'65535', b'4294967295',
'00', b'01', b'02', b'010', b'011', b'020', b'0100', b'0200', b'01000', b'09999', b'065535', b'04294967295',
'000', b'001', b'002', b'0010', b'0011', b'0020', b'00100', b'00200', b'001000', b'009999', b'0065535', b'004294967295',
'0000', b'0001', b'0002', b'00010', b'00011', b'00020', b'000100', b'000200', b'0001000', b'0009999', b'00065535', b'0004294967295'],
no_match=[b''])
self.given(u'''
'o1'..
''',
expect_full_match=[b'1', b'2', b'10', b'11', b'20', b'100', b'200', b'1000', b'9999', b'65535', b'4294967295',
'01', b'02', b'010', b'011', b'020', b'0100', b'0200',
'001', b'002', b'0010', b'0011', b'0020', b'00100', b'00200'],
no_match=[b'', b'0', b'00', b'000', b'0000', b'00000'])
self.given(u'''
'o2'..
''',
expect_full_match=[b'2', b'10', b'11', b'20', b'100', b'200', b'1000', b'9999', b'65535', b'4294967295',
'02', b'010', b'011', b'020', b'0100', b'0200'],
no_match=[b'', b'0', b'1', b'00', b'01', b'000', b'001'])
self.given(u'''
'o10'..
''',
expect_full_match=[b'10', b'11', b'20', b'100', b'200', b'1000', b'9999', b'65535', b'4294967295',
'010', b'011', b'020', b'0100', b'0200'],
no_match=[b'', b'0', b'1', b'2', b'00', b'01', b'02', b'000', b'001', b'002'])
self.given(u'''
'o20'..
''',
expect_full_match=[b'20', b'100', b'200', b'1000', b'9999', b'65535', b'4294967295',
'020', b'0100', b'0200', b'0020', b'00100', b'00200'],
no_match=[b'', b'0', b'1', b'2', b'10', b'11', b'00', b'01', b'02', b'010', b'011', b'000', b'001', b'002'])
self.given(u'''
'o46'..
''',
expect_full_match=[b'100', b'200', b'1000', b'9999', b'65535', b'4294967295',
'0100', b'0200', b'00100', b'00200'],
no_match=[b'', b'0', b'1', b'2', b'10', b'11', b'20', b'00', b'01', b'02', b'010', b'011', b'020', b'000', b'001', b'002'])
self.given(u'''
'o100'..
''',
expect_full_match=[b'100', b'200', b'1000', b'9999', b'65535', b'4294967295',
'0100', b'0200', b'00100', b'00200'],
no_match=[b'', b'0', b'1', b'2', b'10', b'11', b'20', b'00', b'01', b'02', b'010', b'011', b'020', b'000', b'001', b'002'])
self.given(u'''
'o200'..
''',
expect_full_match=[b'200', b'1000', b'9999', b'65535', b'4294967295', b'0200', b'00200'],
no_match=[b'', b'0', b'1', b'2', b'10', b'11', b'20', b'100', b'00', b'01', b'02', b'010', b'011', b'020', b'0100'])
self.given(u'''
'o234'..
''',
expect_full_match=[b'1000', b'9999', b'65535', b'4294967295', b'01000', b'001000'],
no_match=[b'', b'0', b'1', b'2', b'10', b'11', b'20', b'100', b'200', b'00', b'01', b'02', b'010', b'011', b'020', b'0100', b'0200'])
self.given(u'''
? of 'o0'..
''',
expect_full_match=[b'', b'0', b'1', b'2', b'10', b'11', b'20', b'100', b'200', b'1000', b'9999', b'65535', b'4294967295',
'00', b'01', b'02', b'010', b'011', b'020', b'0100', b'0200', b'01000', b'09999', b'065535', b'04294967295',
'000', b'001', b'002', b'0010', b'0011', b'0020', b'00100', b'00200', b'001000', b'009999', b'0065535', b'004294967295',
'0000', b'0001', b'0002', b'00010', b'00011', b'00020', b'000100', b'000200', b'0001000', b'0009999', b'00065535', b'0004294967295'],
no_match=[])
self.given(u'''
? of 'o1'..
''',
expect_full_match=[b'', b'1', b'2', b'10', b'11', b'20', b'100', b'200', b'1000', b'9999', b'65535', b'4294967295',
'01', b'02', b'010', b'011', b'020', b'0100', b'0200', b'01000', b'09999', b'065535', b'04294967295',
'001', b'002', b'0010', b'0011', b'0020', b'00100', b'00200', b'001000', b'009999', b'0065535', b'004294967295',
'0001', b'0002', b'00010', b'00011', b'00020', b'000100', b'000200', b'0001000', b'0009999', b'00065535', b'0004294967295'],
partial_match={'0':'', b'00':'', b'000':'', b'0000':''})
def test_wordchar_redef(self):
self.given(u'''
.'cat'_
*) wordchar: upper lower -
''',
expect_full_match=[],
no_match=[b'cat', b'cat9', b'bobcat', b'that-cat'],
partial_match={
'catasthropic' : 'cat',
'cat-like' : 'cat',
'cat-9' : 'cat',
})
def test_lazydotstar(self):
self.given(u'''
/quote/__?/quote/
quote: "
''',
expect_full_match=[b'"Hi!"', b'""'],
no_match=[b'"unclosed'],
partial_match={
'"Hi!", he said, "How are you?"' : '"Hi!"',
})
self.given(u'''
/quote/__/quote/
quote: "
''',
expect_full_match=[b'"Hi!"'],
no_match=[b'"unclosed', b'""'],
partial_match={
'"Hi!", he said, "How are you?"' : '"Hi!"',
})
self.given(u'''
/open/__/close/
open: (
close: )
''',
expect_full_match=[b'(sic)', b'({})'],
no_match=[b'(unclosed', b'()'],
partial_match={
'((x+y)*z)' : '((x+y)',
})
self.given(u'''
/BEGIN/__?/END/
BEGIN = 'BEGIN'
END = 'END'
''',
expect_full_match=[b'BEGINEND', b'BEGIN END', b'BEGINNING END', b'BEGIN SECRET MESSAGE END'],
partial_match={
'BEGINNINGENDING' : 'BEGINNINGEND',
'BEGIN DONT SEND THE PACKAGE YET END' : 'BEGIN DONT SEND',
})
self.given(u'''
/BEGIN/__?/END/
BEGIN = 'BEGIN'
END = .'END'
''',
expect_full_match=[b'BEGIN END', b'BEGINNING END', b'BEGIN SECRET MESSAGE END', b'BEGIN DONT SEND THE PACKAGE YET END'],
no_match=[b'BEGINEND'])
self.given(u'''
/__/END/
END = '.'
''',
expect_full_match=[b'this.'],
no_match=[b'.', b'.com', b'...'],
partial_match={
'example.com' : 'example.',
'Hmm...' : 'Hmm.',
})
self.given(u'''
/__?/END/
END = 'Z'
''',
expect_full_match=[b'Z', b'WoZ', b'ATOZ', b'A TO Z'],
partial_match={'ZOO' : 'Z', b'PIZZA' : 'PIZ'})
self.given(u'''
/__?/END/
END = .'Z'
''',
expect_full_match=[b'Z', b'A TO Z'],
no_match=[b'WoZ', b'ATOZ', b'PIZZA'],
partial_match={'ZOO' : 'Z'})
self.given(u'''
/__?/END/
END = _'Z'
''',
expect_full_match=[b'WoZ', b'ATOZ'],
no_match=[b'Z', b'A TO Z', b'ZOO'],
partial_match={'PIZZA' : 'PIZ'})
class TestSampleFiles(unittest.TestCase):
def test_sample_files(self):
try:
samples_dir = os.path.join(os.path.dirname(__file__), 'samples')
for fname in os.listdir(samples_dir):
    filename = os.path.join(samples_dir, fname)
    with open(filename) as sample_file:
        contents = sample_file.read()
    oprex(contents)
except OprexSyntaxError as e:
msg = '\nFile: %s\n' % filename
msg += contents
msg += str(e)
raise OprexSyntaxError(None, msg)
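# Hedged usage sketch (not part of the original test file): TestSampleFiles
# above shows oprex() accepting a source string; treating its return value
# as a pattern for the third-party 'regex' module is an assumption drawn
# from the fn=regex.search cases in the tests above.
def _demo_oprex_usage():
    import regex
    pattern = oprex('''
        css_color
            css_color = 6 of hex
                hex: 0..9 a..f
    ''')
    return regex.match(pattern, 'a762b3')  # expected to match, per test_quantifier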
if __name__ == '__main__':
unittest.main()
|
rooney/oprex
|
tests.py
|
Python
|
lgpl-3.0
| 249,199
|
[
"ESPResSo"
] |
7109831d33cf9aa7ad59454a62135a391d69fd433656939318dd9bbd503b86bb
|
# Read in soil temperature data (assumes this is always there)
# ref: http://bigladdersoftware.com/epx/docs/8-2/auxiliary-programs/epw-csv-format-inout.html
soilData = self.header[3]
self.nSoil = int(soilData[1]) # Number of ground temperature depths
self.Tsoil = utilities.zeros(self.nSoil,12) # nSoil x 12 matrix for soil temperature (K)
self.depth = utilities.zeros(self.nSoil,1) # nSoil x 1 matrix for soil depth (m)
# Read monthly data for each layer of soil from EPW file
for i in xrange(self.nSoil):
self.depth[i][0] = float(soilData[2 + (i*16)]) # soil depth for layer i
# Monthly data
for j in xrange(12):
self.Tsoil[i][j] = float(soilData[6 + (i*16) + j]) + 273.15 # 12 months of soil T for specific depth
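# Hedged aside (not part of the original file): per the EPW ground-temperature
# header layout referenced above, each soil layer occupies a 16-field block
# (depth, three soil-property fields, then 12 monthly temperatures), which is
# where the offsets come from:
#   depth of layer i         -> soilData[2 + 16*i]
#   month-j temp of layer i  -> soilData[6 + 16*i + j]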
# Define Road (Assume 0.5m of asphalt)
kRoad = ipd['kRoad'] # road pavement conductivity (W/m K)
cRoad = ipd['cRoad'] # road volumetric heat capacity (J/m^3 K)
emis = 0.93
asphalt = Material(kRoad,cRoad,'asphalt')
road_T_init = 293.
road_horizontal = 1
road_veg_coverage = min(vegCover/(1-bldDensity),1.) # fraction of surface vegetation coverage
# define road layers
road_layer_num = int(math.ceil(d_road/0.05))
thickness_vector = map(lambda r: 0.05, range(road_layer_num)) # 0.5/0.05 ~ 10 x 1 matrix of 0.05 thickness
material_vector = map(lambda n: asphalt, range(road_layer_num))
road = Element(alb_road,emis,thickness_vector,material_vector,road_veg_coverage,road_T_init,road_horizontal)
print road
print '--'
print 'soil properties'
print 'soilnum', self.nSoil
print 'soilDepth: ',
pprint.pprint(self.depth)
print 'soilT summer: ',
pprint.pprint( map(lambda t: round(sum(t[2:6])/float(len(t[2:6]))-273.15,2), self.Tsoil))
print '\n'
#testing alt
#self.depth[0][0] = 0.4
#min_depth_diff = float('Inf')
#for i in xrange(self.nSoil):
# curr_depth_diff = abs(sum(road.layerThickness) - self.depth[i][0])
# if min_depth_diff >= curr_depth_diff:
# min_depth_diff = curr_depth_diff
# soilindex1 = i
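# Hedged sketch (not part of the original file): a working equivalent of the
# commented-out depth selection above, choosing the soil layer whose depth is
# closest to the total road thickness:
depth_diffs = [abs(sum(road.layerThickness) - self.depth[i][0]) for i in xrange(self.nSoil)]
soilindex1 = depth_diffs.index(min(depth_diffs))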
|
saeranv/UWG_Python
|
UWG/roadscratch.py
|
Python
|
gpl-3.0
| 2,019
|
[
"EPW"
] |
df7ff9dbc2df9cb0559f29fe88a2aa57e11083d6ffc3390f1f6f3f97eea2e1c7
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import glob
class Mirdeep2(Package):
"""miRDeep2 is a completely overhauled tool which discovers microRNA genes
by analyzing sequenced RNAs."""
homepage = "https://www.mdc-berlin.de/8551903/en/"
url = "https://www.mdc-berlin.de/system/files/migrated_files/fiona/mirdeep2_0_0_8.zip"
version('0.0.8', 'a707f7d7ad4a2975fb8b2e78c5bcf483')
depends_on('perl', type=('build', 'run'))
depends_on('perl-pdf-api2', type=('build', 'run'))
depends_on('bowtie')
depends_on('viennarna')
depends_on('squid')
depends_on('randfold')
def url_for_version(self, version):
url = 'https://www.mdc-berlin.de/system/files/migrated_files/fiona/mirdeep2_{0}.zip'
return url.format(version.underscored)
def patch(self):
with working_dir('src'):
files = glob.iglob("*.pl")
for file in files:
change = FileFilter(file)
change.filter('usr/bin/perl', 'usr/bin/env perl')
change.filter('perl -W', 'perl')
change.filter("../Rfam_for_miRDeep.fa",
"Rfam_for_miRDeep.fa")
change.filter("../install_successful",
"install_successful")
def install(self, spec, prefix):
install_tree('src', prefix.bin)
install('Rfam_for_miRDeep.fa', prefix.bin)
# miRDeep looks for the install_successful file to check if you used
# their automated install script, which we'll just be kind of hacking
# around
touch(prefix.bin.install_successful)
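# Hedged aside (not part of the original package): url_for_version above
# relies on Spack's Version.underscored, which joins the dotted version
# components with underscores. A minimal standalone re-creation of that
# mapping:
def _underscored(version_string):
    """'0.0.8' -> '0_0_8', matching the mirdeep2_0_0_8.zip URL scheme."""
    return version_string.replace('.', '_')
assert _underscored('0.0.8') == '0_0_8'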
|
tmerrick1/spack
|
var/spack/repos/builtin/packages/mirdeep2/package.py
|
Python
|
lgpl-2.1
| 2,851
|
[
"Bowtie"
] |
65563f24aa7c3c94eeee6b7b79e020ce70043b959d2920157566eab2e48a7ab4
|
import individual
import copy
import numpy as np
class Simulated_Annealing(object):
'''
Simulated Annealing
It is a probabilistic technique for approximating the global optimum of a given function.
Specifically, it is a metaheuristic to approximate global optimization in a large
search space. It is often used when the search space is discrete (e.g., all tours
that visit a given set of cities). For problems where finding an approximate global
optimum is more important than finding a precise local optimum in a fixed amount of
time, simulated annealing may be preferable to alternatives such as gradient descent.
It outperforms hill climbing when there are many suboptimal local points in the search space.
At each step, the SA heuristic considers some neighbouring state s' of the current state s,
and probabilistically decides between moving the system to state s' or staying in state s.
These probabilities ultimately lead the system to move to states of lower energy.
Typically this step is repeated until the system reaches a state that is good enough for the
application, or until a given computation budget has been exhausted.
INPUT:
- Number of iterations
- Target Image
- Number of polygons in solution
- Type of Polygon
'''
def __init__(self, iterations, target, size, type):
# Set hyperparameters
self.iterations = iterations
self.target = target
self.height, self.width = target.shape[:2]
self.size = size
self.type = type
def run(self, *args):
# Run the simulated annealing optimization and print
if len(args) == 0:
# Start with a random initial solution
solution = individual.IndividualGen(self.size, self.height, self.width, self.type, 0.1)
min_err = self.height * self.width
else:
# Start with a precomputed initial solution
solution = individual.IndividualGen(args[0])
data = args[0].split("_")
min_err = float(data[3][:-4])
# Iterative optimization to minimize error
temperature = 100.0
for i in range(self.iterations):
# Store solution
temp = copy.deepcopy(solution)
# Modify one random polygon in solution
temp.mutate()
# Compute dissimilarity
err = temp.fitness(self.target)
# Accept the candidate if it lowers the error
if err < min_err:
    min_err = err
    solution = copy.deepcopy(temp)
# Metropolis criterion: occasionally accept a worse candidate to escape
# local optima (min_err then tracks the accepted state's error)
elif np.exp((min_err-err)/temperature) > np.random.random():
    min_err = err
    solution = copy.deepcopy(temp)
# Print results evolution
if i % 5000 == 0 :
solution.write("SolutionSA_Error_" + str(i) + "_" + str(min_err) + ".jpg")
solution.encode("SolutionSA_Error_" + str(i) + "_" + str(min_err) + ".txt")
# Cooling schedule
temperature *= 0.99
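# Hedged usage sketch (not part of the original file): assumes OpenCV (cv2)
# is installed, that 'target.jpg' exists, and that 'triangle' is a polygon
# type accepted by individual.IndividualGen; all three are illustrative
# assumptions.
if __name__ == '__main__':
    import cv2
    target = cv2.imread('target.jpg')  # target image as a numpy array
    sa = Simulated_Annealing(iterations=50000, target=target, size=50,
                             type='triangle')
    sa.run()  # fresh random start; pass a checkpoint filename to resume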
|
Alfo5123/PolyPic
|
docs/sim_annealing.py
|
Python
|
mit
| 3,207
|
[
"VisIt"
] |
a01a196f77f06895dd695b3bb46937e6a78ecddc28aeb0798c2142f1f004f451
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
A plugin to verify the data against user-adjusted tests.
This is the research tool, not the low-level data integrity check.
Note that this tool has an old heritage (20-Oct-2002 at least) and
so there are vestiges of earlier ways of doing things which have not
been converted to a more modern way. For instance, the way the tool
options are defined (and read in) is not done the way it would be now.
"""
# pylint: disable=not-callable
# pylint: disable=no-self-use
# pylint: disable=undefined-variable
#------------------------------------------------------------------------
#
# standard python modules
#
#------------------------------------------------------------------------
import os
import pickle
from hashlib import md5
#------------------------------------------------------------------------
#
# GNOME/GTK modules
#
#------------------------------------------------------------------------
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import GObject
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gen.errors import WindowActiveError
from gramps.gen.const import URL_MANUAL_PAGE, VERSION_DIR
from gramps.gen.lib import (ChildRefType, EventRoleType, EventType,
FamilyRelType, NameType, Person)
from gramps.gen.lib.date import Today
from gramps.gui.editors import EditPerson, EditFamily
from gramps.gen.utils.db import family_name
from gramps.gui.display import display_help
from gramps.gui.managedwindow import ManagedWindow
from gramps.gen.updatecallback import UpdateCallback
from gramps.gui.plug import tool
from gramps.gui.glade import Glade
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
WIKI_HELP_PAGE = '%s_-_Tools' % URL_MANUAL_PAGE
WIKI_HELP_SEC = _('manual|Verify_the_Data')
#-------------------------------------------------------------------------
#
# temp storage and related functions
#
#-------------------------------------------------------------------------
_person_cache = {}
_family_cache = {}
_event_cache = {}
_today = Today().get_sort_value()
def find_event(db, handle):
""" find an event, given a handle """
if handle in _event_cache:
obj = _event_cache[handle]
else:
obj = db.get_event_from_handle(handle)
_event_cache[handle] = obj
return obj
def find_person(db, handle):
""" find a person, given a handle """
if handle in _person_cache:
obj = _person_cache[handle]
else:
obj = db.get_person_from_handle(handle)
_person_cache[handle] = obj
return obj
def find_family(db, handle):
""" find a family, given a handle """
if handle in _family_cache:
obj = _family_cache[handle]
else:
obj = db.get_family_from_handle(handle)
_family_cache[handle] = obj
return obj
def clear_cache():
""" clear the cache """
_person_cache.clear()
_family_cache.clear()
_event_cache.clear()
#-------------------------------------------------------------------------
#
# helper functions
#
#-------------------------------------------------------------------------
def get_date_from_event_handle(db, event_handle, estimate=False):
""" get a date from an event handle """
if not event_handle:
return 0
event = find_event(db, event_handle)
if event:
date_obj = event.get_date_object()
if (not estimate
and (date_obj.get_day() == 0 or date_obj.get_month() == 0)):
return 0
return date_obj.get_sort_value()
else:
return 0
def get_date_from_event_type(db, person, event_type, estimate=False):
""" get a date from a person's specific event type """
if not person:
return 0
for event_ref in person.get_event_ref_list():
event = find_event(db, event_ref.ref)
if event:
if (event_ref.get_role() != EventRoleType.PRIMARY
and event.get_type() == EventType.BURIAL):
continue
if event.get_type() == event_type:
date_obj = event.get_date_object()
if (not estimate
and (date_obj.get_day() == 0
or date_obj.get_month() == 0)):
return 0
return date_obj.get_sort_value()
return 0
def get_bapt_date(db, person, estimate=False):
""" get a person's baptism date """
return get_date_from_event_type(db, person,
EventType.BAPTISM, estimate)
def get_bury_date(db, person, estimate=False):
""" get a person's burial date """
# check role on burial event
for event_ref in person.get_event_ref_list():
event = find_event(db, event_ref.ref)
if (event
and event.get_type() == EventType.BURIAL
and event_ref.get_role() == EventRoleType.PRIMARY):
return get_date_from_event_type(db, person,
EventType.BURIAL, estimate)
def get_birth_date(db, person, estimate=False):
""" get a person's birth date (or baptism date if 'estimated') """
if not person:
return 0
birth_ref = person.get_birth_ref()
if not birth_ref:
ret = 0
else:
ret = get_date_from_event_handle(db, birth_ref.ref, estimate)
if estimate and (ret == 0):
ret = get_bapt_date(db, person, estimate)
ret = 0 if ret is None else ret
return ret
def get_death(db, person):
"""
boolean whether there is a death event or not
(if a user claims a person is dead, we will believe it even with no date)
"""
if not person:
return False
death_ref = person.get_death_ref()
if death_ref:
return True
else:
return False
def get_death_date(db, person, estimate=False):
""" get a person's death date (or burial date if 'estimated') """
if not person:
return 0
death_ref = person.get_death_ref()
if not death_ref:
ret = 0
else:
ret = get_date_from_event_handle(db, death_ref.ref, estimate)
if estimate and (ret == 0):
ret = get_bury_date(db, person, estimate)
ret = 0 if ret is None else ret
return ret
def get_age_at_death(db, person, estimate):
""" get a person's age at death """
birth_date = get_birth_date(db, person, estimate)
death_date = get_death_date(db, person, estimate)
if (birth_date > 0) and (death_date > 0):
return death_date - birth_date
return 0
def get_father(db, family):
""" get a family's father """
if not family:
return None
father_handle = family.get_father_handle()
if father_handle:
return find_person(db, father_handle)
return None
def get_mother(db, family):
""" get a family's mother """
if not family:
return None
mother_handle = family.get_mother_handle()
if mother_handle:
return find_person(db, mother_handle)
return None
def get_child_birth_dates(db, family, estimate):
""" get a family's children's birth dates """
dates = []
for child_ref in family.get_child_ref_list():
child = find_person(db, child_ref.ref)
child_birth_date = get_birth_date(db, child, estimate)
if child_birth_date > 0:
dates.append(child_birth_date)
return dates
def get_n_children(db, person):
""" get the number of a family's children """
number = 0
for family_handle in person.get_family_handle_list():
family = find_family(db, family_handle)
if family:
number += len(family.get_child_ref_list())
return number
def get_marriage_date(db, family):
""" get a family's marriage date """
if not family:
return 0
for event_ref in family.get_event_ref_list():
event = find_event(db, event_ref.ref)
if (event.get_type() == EventType.MARRIAGE
and (event_ref.get_role() == EventRoleType.FAMILY
or event_ref.get_role() == EventRoleType.PRIMARY)):
date_obj = event.get_date_object()
return date_obj.get_sort_value()
return 0
#-------------------------------------------------------------------------
#
# Actual tool
#
#-------------------------------------------------------------------------
class Verify(tool.Tool, ManagedWindow, UpdateCallback):
"""
A plugin to verify the data against user-adjusted tests.
    This is the research tool, not the low-level data integrity check.
"""
def __init__(self, dbstate, user, options_class, name, callback=None):
""" initialize things """
uistate = user.uistate
self.label = _('Data Verify tool')
self.v_r = None
tool.Tool.__init__(self, dbstate, options_class, name)
ManagedWindow.__init__(self, uistate, [], self.__class__)
if uistate:
UpdateCallback.__init__(self, self.uistate.pulse_progressbar)
self.dbstate = dbstate
if uistate:
self.init_gui()
else:
self.add_results = self.add_results_cli
self.run_the_tool(cli=True)
def add_results_cli(self, results):
""" print data for the user, no GUI """
(msg, gramps_id, name, the_type, rule_id, severity, handle) = results
severity_str = 'S'
if severity == Rule.WARNING:
severity_str = 'W'
elif severity == Rule.ERROR:
severity_str = 'E'
# translators: needed for French+Arabic, ignore otherwise
print(_("%(severity)s: %(msg)s, %(type)s: %(gid)s, %(name)s"
) % {'severity' : severity_str, 'msg' : msg, 'type' : the_type,
'gid' : gramps_id, 'name' : name})
def init_gui(self):
""" Draw dialog and make it handle everything """
self.v_r = None
self.top = Glade()
self.top.connect_signals({
"destroy_passed_object" : self.close,
"on_help_clicked" : self.on_help_clicked,
"on_verify_ok_clicked" : self.on_apply_clicked,
"on_delete_event" : self.close,
})
window = self.top.toplevel
self.set_window(window, self.top.get_object('title'), self.label)
self.setup_configs('interface.verify', 650, 400)
o_dict = self.options.handler.options_dict
for option in o_dict:
if option in ['estimate_age', 'invdate']:
self.top.get_object(option).set_active(o_dict[option])
else:
self.top.get_object(option).set_value(o_dict[option])
self.show()
def build_menu_names(self, obj):
""" build the menu names """
return (_("Tool settings"), self.label)
def on_help_clicked(self, obj):
""" Display the relevant portion of Gramps manual """
display_help(webpage=WIKI_HELP_PAGE, section=WIKI_HELP_SEC)
def on_apply_clicked(self, obj):
""" event handler for user clicking the OK button: start things """
run_button = self.top.get_object('button4')
close_button = self.top.get_object('button5')
run_button.set_sensitive(False)
close_button.set_sensitive(False)
o_dict = self.options.handler.options_dict
for option in o_dict:
if option in ['estimate_age', 'invdate']:
o_dict[option] = self.top.get_object(option).get_active()
else:
o_dict[option] = self.top.get_object(option).get_value_as_int()
try:
self.v_r = VerifyResults(self.dbstate, self.uistate, self.track,
self.top, self.close)
self.add_results = self.v_r.add_results
self.v_r.load_ignored(self.db.full_name)
except WindowActiveError:
pass
except AttributeError: # VerifyResults.load_ignored was not run
self.v_r.ignores = {}
self.uistate.set_busy_cursor(True)
self.uistate.progress.show()
busy_cursor = Gdk.Cursor.new_for_display(Gdk.Display.get_default(),
Gdk.CursorType.WATCH)
self.window.get_window().set_cursor(busy_cursor)
try:
self.v_r.window.get_window().set_cursor(busy_cursor)
except AttributeError:
pass
self.run_the_tool(cli=False)
self.uistate.progress.hide()
self.uistate.set_busy_cursor(False)
try:
self.window.get_window().set_cursor(None)
self.v_r.window.get_window().set_cursor(None)
except AttributeError:
pass
run_button.set_sensitive(True)
close_button.set_sensitive(True)
self.reset()
# Save options
self.options.handler.save_options()
def run_the_tool(self, cli=False):
""" run the tool """
person_handles = self.db.iter_person_handles()
for option, value in self.options.handler.options_dict.items():
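            # create a module-level variable named after each option
            # (e.g. oldage, wedder) for the rule constructors below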
exec('%s = %s' % (option, value), globals())
# TODO my pylint doesn't seem to understand these variables really
# are defined here, so I have disabled the undefined-variable error
if self.v_r:
self.v_r.real_model.clear()
self.set_total(self.db.get_number_of_people() +
self.db.get_number_of_families())
for person_handle in person_handles:
person = find_person(self.db, person_handle)
rule_list = [
BirthAfterBapt(self.db, person),
DeathBeforeBapt(self.db, person),
BirthAfterBury(self.db, person),
DeathAfterBury(self.db, person),
BirthAfterDeath(self.db, person),
BaptAfterBury(self.db, person),
OldAge(self.db, person, oldage, estimate_age),
OldAgeButNoDeath(self.db, person, oldage, estimate_age),
UnknownGender(self.db, person),
MultipleParents(self.db, person),
MarriedOften(self.db, person, wedder),
OldUnmarried(self.db, person, oldunm, estimate_age),
TooManyChildren(self.db, person, mxchilddad, mxchildmom),
Disconnected(self.db, person),
InvalidBirthDate(self.db, person, invdate),
InvalidDeathDate(self.db, person, invdate),
BirthEqualsDeath(self.db, person),
BirthEqualsMarriage(self.db, person),
DeathEqualsMarriage(self.db, person),
]
for rule in rule_list:
if rule.broken():
self.add_results(rule.report_itself())
clear_cache()
if not cli:
self.update()
# Family-based rules
for family_handle in self.db.iter_family_handles():
family = find_family(self.db, family_handle)
rule_list = [
SameSexFamily(self.db, family),
FemaleHusband(self.db, family),
MaleWife(self.db, family),
SameSurnameFamily(self.db, family),
LargeAgeGapFamily(self.db, family, hwdif, estimate_age),
MarriageBeforeBirth(self.db, family, estimate_age),
MarriageAfterDeath(self.db, family, estimate_age),
EarlyMarriage(self.db, family, yngmar, estimate_age),
LateMarriage(self.db, family, oldmar, estimate_age),
OldParent(self.db, family, oldmom, olddad, estimate_age),
YoungParent(self.db, family, yngmom, yngdad, estimate_age),
UnbornParent(self.db, family, estimate_age),
DeadParent(self.db, family, estimate_age),
LargeChildrenSpan(self.db, family, cbspan, estimate_age),
LargeChildrenAgeDiff(self.db, family, cspace, estimate_age),
MarriedRelation(self.db, family),
]
for rule in rule_list:
if rule.broken():
self.add_results(rule.report_itself())
clear_cache()
if not cli:
self.update()
#-------------------------------------------------------------------------
#
# Display the results
#
#-------------------------------------------------------------------------
class VerifyResults(ManagedWindow):
""" GUI class to show the results in another dialog """
IGNORE_COL = 0
WARNING_COL = 1
OBJ_ID_COL = 2
OBJ_NAME_COL = 3
OBJ_TYPE_COL = 4
RULE_ID_COL = 5
OBJ_HANDLE_COL = 6
FG_COLOR_COL = 7
TRUE_COL = 8
SHOW_COL = 9
def __init__(self, dbstate, uistate, track, glade, closeall):
""" initialize things """
self.title = _('Data Verification Results')
ManagedWindow.__init__(self, uistate, track, self.__class__)
self.dbstate = dbstate
self.closeall = closeall
self._set_filename()
self.top = glade
window = self.top.get_object("verify_result")
self.set_window(window, self.top.get_object('title2'), self.title)
self.setup_configs('interface.verifyresults', 500, 300)
window.connect("close", self.close)
close_btn = self.top.get_object("closebutton1")
close_btn.connect("clicked", self.close)
self.warn_tree = self.top.get_object('warn_tree')
self.warn_tree.connect('button_press_event', self.double_click)
self.selection = self.warn_tree.get_selection()
self.hide_button = self.top.get_object('hide_button')
self.hide_button.connect('toggled', self.hide_toggled)
self.mark_button = self.top.get_object('mark_all')
self.mark_button.connect('clicked', self.mark_clicked)
self.unmark_button = self.top.get_object('unmark_all')
self.unmark_button.connect('clicked', self.unmark_clicked)
self.invert_button = self.top.get_object('invert_all')
self.invert_button.connect('clicked', self.invert_clicked)
self.real_model = Gtk.ListStore(GObject.TYPE_BOOLEAN,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_STRING, object,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_BOOLEAN,
GObject.TYPE_BOOLEAN)
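        # three-layer model: ListStore -> filter (controls row visibility)
        # -> sort (backs the tree view)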
self.filt_model = self.real_model.filter_new()
self.filt_model.set_visible_column(VerifyResults.TRUE_COL)
self.sort_model = self.filt_model.sort_new_with_model()
self.warn_tree.set_model(self.sort_model)
self.renderer = Gtk.CellRendererText()
self.img_renderer = Gtk.CellRendererPixbuf()
self.bool_renderer = Gtk.CellRendererToggle()
self.bool_renderer.connect('toggled', self.selection_toggled)
# Add ignore column
ignore_column = Gtk.TreeViewColumn(_('Mark'), self.bool_renderer,
active=VerifyResults.IGNORE_COL)
ignore_column.set_sort_column_id(VerifyResults.IGNORE_COL)
self.warn_tree.append_column(ignore_column)
# Add image column
img_column = Gtk.TreeViewColumn(None, self.img_renderer)
img_column.set_cell_data_func(self.img_renderer, self.get_image)
self.warn_tree.append_column(img_column)
# Add column with the warning text
warn_column = Gtk.TreeViewColumn(_('Warning'), self.renderer,
text=VerifyResults.WARNING_COL,
foreground=VerifyResults.FG_COLOR_COL)
warn_column.set_sort_column_id(VerifyResults.WARNING_COL)
self.warn_tree.append_column(warn_column)
# Add column with object gramps_id
id_column = Gtk.TreeViewColumn(_('ID'), self.renderer,
text=VerifyResults.OBJ_ID_COL,
foreground=VerifyResults.FG_COLOR_COL)
id_column.set_sort_column_id(VerifyResults.OBJ_ID_COL)
self.warn_tree.append_column(id_column)
# Add column with object name
name_column = Gtk.TreeViewColumn(_('Name'), self.renderer,
text=VerifyResults.OBJ_NAME_COL,
foreground=VerifyResults.FG_COLOR_COL)
name_column.set_sort_column_id(VerifyResults.OBJ_NAME_COL)
self.warn_tree.append_column(name_column)
self.show()
self.window_shown = False
def _set_filename(self):
""" set the file where people who will be ignored will be kept """
db_filename = self.dbstate.db.get_save_path()
if isinstance(db_filename, str):
db_filename = db_filename.encode('utf-8')
md5sum = md5(db_filename)
self.ignores_filename = os.path.join(
VERSION_DIR, md5sum.hexdigest() + os.path.extsep + 'vfm')
def load_ignored(self, db_filename):
""" get ready to load the file with the previously-ignored people """
## a new Gramps major version means recreating the .vfm file.
## User can copy over old one, with name of new one, but no guarantee
## that will work.
if not self._load_ignored(self.ignores_filename):
self.ignores = {}
def _load_ignored(self, filename):
""" load the file with the people who were previously ignored """
try:
try:
file = open(filename, 'rb')
except IOError:
return False
self.ignores = pickle.load(file)
file.close()
return True
except (IOError, EOFError):
file.close()
return False
def save_ignored(self, new_ignores):
""" get ready to save the file with the ignored people """
self.ignores = new_ignores
self._save_ignored(self.ignores_filename)
def _save_ignored(self, filename):
""" save the file with the people the user wants to ignore """
try:
with open(filename, 'wb') as file:
pickle.dump(self.ignores, file, 1)
return True
except IOError:
return False
def get_marking(self, handle, rule_id):
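        """ return whether this handle's rule was previously marked ignored """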
if handle in self.ignores:
return rule_id in self.ignores[handle]
else:
return False
def get_new_marking(self):
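        """ collect the currently-marked rows into a handle -> rule-ids map """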
new_ignores = {}
for row_num in range(len(self.real_model)):
path = (row_num,)
row = self.real_model[path]
ignore = row[VerifyResults.IGNORE_COL]
if ignore:
handle = row[VerifyResults.OBJ_HANDLE_COL]
rule_id = row[VerifyResults.RULE_ID_COL]
if handle not in new_ignores:
new_ignores[handle] = set()
new_ignores[handle].add(rule_id)
return new_ignores
def close(self, *obj):
""" close the dialog and write out the file """
new_ignores = self.get_new_marking()
self.save_ignored(new_ignores)
ManagedWindow.close(self, *obj)
self.closeall()
def hide_toggled(self, button):
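        """ hide the marked rows while the toggle button is active """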
if button.get_active():
button.set_label(_("_Show all"))
self.filt_model = self.real_model.filter_new()
self.filt_model.set_visible_column(VerifyResults.SHOW_COL)
self.sort_model = self.filt_model.sort_new_with_model()
self.warn_tree.set_model(self.sort_model)
else:
self.filt_model = self.real_model.filter_new()
self.filt_model.set_visible_column(VerifyResults.TRUE_COL)
self.sort_model = self.filt_model.sort_new_with_model()
self.warn_tree.set_model(self.sort_model)
button.set_label(_("_Hide marked"))
def selection_toggled(self, cell, path_string):
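        """ toggle the mark on one row """
        # map the clicked path back through the sort and filter models
        # to the underlying ListStore row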
sort_path = tuple(map(int, path_string.split(':')))
filt_path = self.sort_model.convert_path_to_child_path(
Gtk.TreePath(sort_path))
real_path = self.filt_model.convert_path_to_child_path(filt_path)
row = self.real_model[real_path]
row[VerifyResults.IGNORE_COL] = not row[VerifyResults.IGNORE_COL]
row[VerifyResults.SHOW_COL] = not row[VerifyResults.IGNORE_COL]
self.real_model.row_changed(real_path, row.iter)
def mark_clicked(self, mark_button):
for row_num in range(len(self.real_model)):
path = (row_num,)
row = self.real_model[path]
row[VerifyResults.IGNORE_COL] = True
row[VerifyResults.SHOW_COL] = False
self.filt_model.refilter()
def unmark_clicked(self, unmark_button):
for row_num in range(len(self.real_model)):
path = (row_num,)
row = self.real_model[path]
row[VerifyResults.IGNORE_COL] = False
row[VerifyResults.SHOW_COL] = True
self.filt_model.refilter()
def invert_clicked(self, invert_button):
for row_num in range(len(self.real_model)):
path = (row_num,)
row = self.real_model[path]
row[VerifyResults.IGNORE_COL] = not row[VerifyResults.IGNORE_COL]
row[VerifyResults.SHOW_COL] = not row[VerifyResults.SHOW_COL]
self.filt_model.refilter()
def double_click(self, obj, event):
""" the user wants to edit the selected person or family """
if event.type == Gdk.EventType._2BUTTON_PRESS and event.button == 1:
(model, node) = self.selection.get_selected()
if not node:
return
sort_path = self.sort_model.get_path(node)
filt_path = self.sort_model.convert_path_to_child_path(sort_path)
real_path = self.filt_model.convert_path_to_child_path(filt_path)
row = self.real_model[real_path]
the_type = row[VerifyResults.OBJ_TYPE_COL]
handle = row[VerifyResults.OBJ_HANDLE_COL]
if the_type == 'Person':
try:
person = self.dbstate.db.get_person_from_handle(handle)
EditPerson(self.dbstate, self.uistate, self.track, person)
except WindowActiveError:
pass
elif the_type == 'Family':
try:
family = self.dbstate.db.get_family_from_handle(handle)
EditFamily(self.dbstate, self.uistate, self.track, family)
except WindowActiveError:
pass
def get_image(self, column, cell, model, iter_, user_data=None):
""" flag whether each line is a person or family """
the_type = model.get_value(iter_, VerifyResults.OBJ_TYPE_COL)
if the_type == 'Person':
cell.set_property('icon-name', 'gramps-person')
elif the_type == 'Family':
cell.set_property('icon-name', 'gramps-family')
def add_results(self, results):
(msg, gramps_id, name, the_type, rule_id, severity, handle) = results
ignore = self.get_marking(handle, rule_id)
if severity == Rule.ERROR:
line_color = 'red'
else:
line_color = None
self.real_model.append(row=[ignore, msg, gramps_id, name,
the_type, rule_id, handle, line_color,
True, not ignore])
if not self.window_shown:
self.window.show()
self.window_shown = True
def build_menu_names(self, obj):
""" build the menu names """
return (self.title, self.title)
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class VerifyOptions(tool.ToolOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, person_id=None):
""" initialize the options """
tool.ToolOptions.__init__(self, name, person_id)
# Options specific for this report
self.options_dict = {
'oldage' : 90,
'hwdif' : 30,
'cspace' : 8,
'cbspan' : 25,
'yngmar' : 17,
'oldmar' : 50,
'oldmom' : 48,
'yngmom' : 17,
'yngdad' : 18,
'olddad' : 65,
'wedder' : 3,
'mxchildmom' : 12,
'mxchilddad' : 15,
'lngwdw' : 30,
'oldunm' : 99,
'estimate_age' : 0,
'invdate' : 1,
}
# TODO these strings are defined in the glade file (more or less, since
# those have accelerators), and so are not translated here, but that
# means that a CLI user who runs gramps in a non-English language and
# says (for instance) "show=oldage" will see "Maximum age" in English
# (but I think such a CLI use is very unlikely and so is low priority,
# especially since the tool's normal CLI output will be translated)
self.options_help = {
'oldage' : ("=num", "Maximum age", "Age in years"),
'hwdif' : ("=num", "Maximum husband-wife age difference",
"Age difference in years"),
'cspace' : ("=num",
"Maximum number of years between children",
"Number of years"),
'cbspan' : ("=num",
"Maximum span of years for all children",
"Span in years"),
'yngmar' : ("=num", "Minimum age to marry", "Age in years"),
'oldmar' : ("=num", "Maximum age to marry", "Age in years"),
'oldmom' : ("=num", "Maximum age to bear a child",
"Age in years"),
'yngmom' : ("=num", "Minimum age to bear a child",
"Age in years"),
'yngdad' : ("=num", "Minimum age to father a child",
"Age in years"),
'olddad' : ("=num", "Maximum age to father a child",
"Age in years"),
'wedder' : ("=num", "Maximum number of spouses for a person",
"Number of spouses"),
'mxchildmom' : ("=num", "Maximum number of children for a woman",
"Number of children"),
'mxchilddad' : ("=num", "Maximum number of children for a man",
"Number of chidlren"),
'lngwdw' : ("=num", "Maximum number of consecutive years "
"of widowhood before next marriage",
"Number of years"),
            'oldunm' : ("=num", "Maximum age for an unmarried person",
                        "Number of years"),
'estimate_age' : ("=0/1",
"Whether to estimate missing or inexact dates",
["Do not estimate", "Estimate dates"],
True),
            'invdate' : ("=0/1", "Whether to check for invalid dates",
                         ["Do not identify invalid dates",
                          "Identify invalid dates"], True),
}
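# For reference, these options are typically supplied from the command line
# in the usual Gramps tool-plugin form (a sketch; the exact syntax is an
# assumption and may vary by Gramps version):
#
#     gramps -O 'Family Tree 1' -a tool -p name=verify,oldage=100,estimate_age=1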
#-------------------------------------------------------------------------
#
# Base classes for different tests -- the rules
#
#-------------------------------------------------------------------------
class Rule:
"""
Basic class for use in this tool.
Other rules must inherit from this.
"""
ID = 0
TYPE = ''
ERROR = 1
WARNING = 2
SEVERITY = WARNING
def __init__(self, db, obj):
""" initialize the rule """
self.db = db
self.obj = obj
def broken(self):
"""
Return boolean indicating whether this rule is violated.
"""
return False
def get_message(self):
""" return the rule's error message """
assert False, "Need to be overriden in the derived class"
def get_name(self):
""" return the person's primary name or the name of the family """
assert False, "Need to be overriden in the derived class"
def get_handle(self):
""" return the object's handle """
return self.obj.handle
def get_id(self):
""" return the object's gramps_id """
return self.obj.gramps_id
def get_rule_id(self):
""" return the rule's identification number, and parameters """
params = self._get_params()
return (self.ID, params)
def _get_params(self):
""" return the rule's parameters """
return tuple()
def report_itself(self):
""" return the details about a rule """
handle = self.get_handle()
the_type = self.TYPE
rule_id = self.get_rule_id()
severity = self.SEVERITY
name = self.get_name()
gramps_id = self.get_id()
msg = self.get_message()
return (msg, gramps_id, name, the_type, rule_id, severity, handle)
class PersonRule(Rule):
"""
Person-based class.
"""
TYPE = 'Person'
def get_name(self):
""" return the person's primary name """
return self.obj.get_primary_name().get_name()
class FamilyRule(Rule):
"""
Family-based class.
"""
TYPE = 'Family'
def get_name(self):
""" return the name of the family """
return family_name(self.obj, self.db)
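#-------------------------------------------------------------------------
#
# Example of extending the framework (illustrative sketch only)
#
#-------------------------------------------------------------------------
# A new check is written by subclassing PersonRule or FamilyRule, giving it
# an ID that does not collide with the rules below, and overriding broken()
# and get_message().  The rule name and threshold here are hypothetical:
#
#     class VeryLongLife(PersonRule):
#         """ test if a person lived more than 120 years (hypothetical) """
#         ID = 99
#         SEVERITY = Rule.WARNING
#         def broken(self):
#             return get_age_at_death(self.db, self.obj, True) / 365 > 120
#         def get_message(self):
#             return _("Extremely long life")
#
# An instance would then be appended to rule_list in run_the_tool().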
#-------------------------------------------------------------------------
#
# Actual rules for testing
#
#-------------------------------------------------------------------------
class BirthAfterBapt(PersonRule):
""" test if a person was baptised before their birth """
ID = 1
SEVERITY = Rule.ERROR
def broken(self):
""" return boolean indicating whether this rule is violated """
birth_date = get_birth_date(self.db, self.obj)
bapt_date = get_bapt_date(self.db, self.obj)
birth_ok = birth_date > 0 if birth_date is not None else False
bapt_ok = bapt_date > 0 if bapt_date is not None else False
return birth_ok and bapt_ok and birth_date > bapt_date
def get_message(self):
""" return the rule's error message """
return _("Baptism before birth")
class DeathBeforeBapt(PersonRule):
""" test if a person died before their baptism """
ID = 2
SEVERITY = Rule.ERROR
def broken(self):
""" return boolean indicating whether this rule is violated """
death_date = get_death_date(self.db, self.obj)
bapt_date = get_bapt_date(self.db, self.obj)
bapt_ok = bapt_date > 0 if bapt_date is not None else False
death_ok = death_date > 0 if death_date is not None else False
return death_ok and bapt_ok and bapt_date > death_date
def get_message(self):
""" return the rule's error message """
return _("Death before baptism")
class BirthAfterBury(PersonRule):
""" test if a person was buried before their birth """
ID = 3
SEVERITY = Rule.ERROR
def broken(self):
""" return boolean indicating whether this rule is violated """
birth_date = get_birth_date(self.db, self.obj)
bury_date = get_bury_date(self.db, self.obj)
birth_ok = birth_date > 0 if birth_date is not None else False
bury_ok = bury_date > 0 if bury_date is not None else False
return birth_ok and bury_ok and birth_date > bury_date
def get_message(self):
""" return the rule's error message """
return _("Burial before birth")
class DeathAfterBury(PersonRule):
""" test if a person was buried before their death """
ID = 4
SEVERITY = Rule.ERROR
def broken(self):
""" return boolean indicating whether this rule is violated """
death_date = get_death_date(self.db, self.obj)
bury_date = get_bury_date(self.db, self.obj)
death_ok = death_date > 0 if death_date is not None else False
bury_ok = bury_date > 0 if bury_date is not None else False
return death_ok and bury_ok and death_date > bury_date
def get_message(self):
""" return the rule's error message """
return _("Burial before death")
class BirthAfterDeath(PersonRule):
""" test if a person died before their birth """
ID = 5
SEVERITY = Rule.ERROR
def broken(self):
""" return boolean indicating whether this rule is violated """
birth_date = get_birth_date(self.db, self.obj)
death_date = get_death_date(self.db, self.obj)
birth_ok = birth_date > 0 if birth_date is not None else False
death_ok = death_date > 0 if death_date is not None else False
return birth_ok and death_ok and birth_date > death_date
def get_message(self):
""" return the rule's error message """
return _("Death before birth")
class BaptAfterBury(PersonRule):
""" test if a person was buried before their baptism """
ID = 6
SEVERITY = Rule.ERROR
def broken(self):
""" return boolean indicating whether this rule is violated """
bapt_date = get_bapt_date(self.db, self.obj)
bury_date = get_bury_date(self.db, self.obj)
bapt_ok = bapt_date > 0 if bapt_date is not None else False
bury_ok = bury_date > 0 if bury_date is not None else False
return bapt_ok and bury_ok and bapt_date > bury_date
def get_message(self):
""" return the rule's error message """
return _("Burial before baptism")
class OldAge(PersonRule):
""" test if a person died beyond the age the user has set """
ID = 7
SEVERITY = Rule.WARNING
def __init__(self, db, person, old_age, est):
""" initialize the rule """
PersonRule.__init__(self, db, person)
self.old_age = old_age
self.est = est
def _get_params(self):
""" return the rule's parameters """
return (self.old_age, self.est)
def broken(self):
""" return boolean indicating whether this rule is violated """
age_at_death = get_age_at_death(self.db, self.obj, self.est)
return age_at_death / 365 > self.old_age
def get_message(self):
""" return the rule's error message """
return _("Old age at death")
class UnknownGender(PersonRule):
""" test if a person is neither a male nor a female """
ID = 8
SEVERITY = Rule.WARNING
def broken(self):
""" return boolean indicating whether this rule is violated """
female = self.obj.get_gender() == Person.FEMALE
male = self.obj.get_gender() == Person.MALE
return not (male or female)
def get_message(self):
""" return the rule's error message """
return _("Unknown gender")
class MultipleParents(PersonRule):
""" test if a person belongs to multiple families """
ID = 9
SEVERITY = Rule.WARNING
def broken(self):
""" return boolean indicating whether this rule is violated """
n_parent_sets = len(self.obj.get_parent_family_handle_list())
return n_parent_sets > 1
def get_message(self):
""" return the rule's error message """
return _("Multiple parents")
class MarriedOften(PersonRule):
""" test if a person was married 'often' """
ID = 10
SEVERITY = Rule.WARNING
def __init__(self, db, person, wedder):
""" initialize the rule """
PersonRule.__init__(self, db, person)
self.wedder = wedder
def _get_params(self):
""" return the rule's parameters """
return (self.wedder,)
def broken(self):
""" return boolean indicating whether this rule is violated """
n_spouses = len(self.obj.get_family_handle_list())
return n_spouses > self.wedder
def get_message(self):
""" return the rule's error message """
return _("Married often")
class OldUnmarried(PersonRule):
""" test if a person was married when they died """
ID = 11
SEVERITY = Rule.WARNING
def __init__(self, db, person, old_unm, est):
""" initialize the rule """
PersonRule.__init__(self, db, person)
self.old_unm = old_unm
self.est = est
def _get_params(self):
""" return the rule's parameters """
return (self.old_unm, self.est)
def broken(self):
""" return boolean indicating whether this rule is violated """
age_at_death = get_age_at_death(self.db, self.obj, self.est)
n_spouses = len(self.obj.get_family_handle_list())
return age_at_death / 365 > self.old_unm and n_spouses == 0
def get_message(self):
""" return the rule's error message """
return _("Old and unmarried")
class TooManyChildren(PersonRule):
""" test if a person had 'too many' children """
ID = 12
SEVERITY = Rule.WARNING
def __init__(self, db, obj, mx_child_dad, mx_child_mom):
""" initialize the rule """
PersonRule.__init__(self, db, obj)
self.mx_child_dad = mx_child_dad
self.mx_child_mom = mx_child_mom
def _get_params(self):
""" return the rule's parameters """
return (self.mx_child_dad, self.mx_child_mom)
def broken(self):
""" return boolean indicating whether this rule is violated """
n_child = get_n_children(self.db, self.obj)
        if (self.obj.get_gender() == Person.MALE
                and n_child > self.mx_child_dad):
            return True
        if (self.obj.get_gender() == Person.FEMALE
                and n_child > self.mx_child_mom):
            return True
return False
def get_message(self):
""" return the rule's error message """
return _("Too many children")
class SameSexFamily(FamilyRule):
""" test if a family's parents are both male or both female """
ID = 13
SEVERITY = Rule.WARNING
def broken(self):
""" return boolean indicating whether this rule is violated """
mother = get_mother(self.db, self.obj)
father = get_father(self.db, self.obj)
same_sex = (mother and father and
(mother.get_gender() == father.get_gender()))
unknown_sex = (mother and
(mother.get_gender() == Person.UNKNOWN))
return same_sex and not unknown_sex
def get_message(self):
""" return the rule's error message """
return _("Same sex marriage")
class FemaleHusband(FamilyRule):
""" test if a family's 'husband' is female """
ID = 14
SEVERITY = Rule.WARNING
def broken(self):
""" return boolean indicating whether this rule is violated """
father = get_father(self.db, self.obj)
return father and (father.get_gender() == Person.FEMALE)
def get_message(self):
""" return the rule's error message """
return _("Female husband")
class MaleWife(FamilyRule):
""" test if a family's 'wife' is male """
ID = 15
SEVERITY = Rule.WARNING
def broken(self):
""" return boolean indicating whether this rule is violated """
mother = get_mother(self.db, self.obj)
return mother and (mother.get_gender() == Person.MALE)
def get_message(self):
""" return the rule's error message """
return _("Male wife")
class SameSurnameFamily(FamilyRule):
""" test if a family's parents were born with the same surname """
ID = 16
SEVERITY = Rule.WARNING
def broken(self):
""" return boolean indicating whether this rule is violated """
mother = get_mother(self.db, self.obj)
father = get_father(self.db, self.obj)
_broken = False
# Make sure both mother and father exist.
if mother and father:
mname = mother.get_primary_name()
fname = father.get_primary_name()
# Only compare birth names (not married names).
if (mname.get_type() == NameType.BIRTH
and fname.get_type() == NameType.BIRTH):
# Empty names don't count.
if (len(mname.get_surname()) != 0
and len(fname.get_surname()) != 0):
# Finally, check if the names are the same.
if mname.get_surname() == fname.get_surname():
_broken = True
return _broken
def get_message(self):
""" return the rule's error message """
return _("Husband and wife with the same surname")
class LargeAgeGapFamily(FamilyRule):
""" test if a family's parents were born far apart """
ID = 17
SEVERITY = Rule.WARNING
def __init__(self, db, obj, hw_diff, est):
""" initialize the rule """
FamilyRule.__init__(self, db, obj)
self.hw_diff = hw_diff
self.est = est
def _get_params(self):
""" return the rule's parameters """
return (self.hw_diff, self.est)
def broken(self):
""" return boolean indicating whether this rule is violated """
mother = get_mother(self.db, self.obj)
father = get_father(self.db, self.obj)
mother_birth_date = get_birth_date(self.db, mother, self.est)
father_birth_date = get_birth_date(self.db, father, self.est)
mother_birth_date_ok = mother_birth_date > 0
father_birth_date_ok = father_birth_date > 0
large_diff = abs(
father_birth_date-mother_birth_date) / 365 > self.hw_diff
return mother_birth_date_ok and father_birth_date_ok and large_diff
def get_message(self):
""" return the rule's error message """
return _("Large age difference between spouses")
class MarriageBeforeBirth(FamilyRule):
""" test if each family's parent was born before the marriage """
ID = 18
SEVERITY = Rule.ERROR
def __init__(self, db, obj, est):
""" initialize the rule """
FamilyRule.__init__(self, db, obj)
self.est = est
def _get_params(self):
""" return the rule's parameters """
return (self.est,)
def broken(self):
""" return boolean indicating whether this rule is violated """
marr_date = get_marriage_date(self.db, self.obj)
marr_date_ok = marr_date > 0
mother = get_mother(self.db, self.obj)
father = get_father(self.db, self.obj)
mother_birth_date = get_birth_date(self.db, mother, self.est)
father_birth_date = get_birth_date(self.db, father, self.est)
mother_birth_date_ok = mother_birth_date > 0
father_birth_date_ok = father_birth_date > 0
father_broken = (father_birth_date_ok and marr_date_ok
and (father_birth_date > marr_date))
mother_broken = (mother_birth_date_ok and marr_date_ok
and (mother_birth_date > marr_date))
return father_broken or mother_broken
def get_message(self):
""" return the rule's error message """
return _("Marriage before birth")
class MarriageAfterDeath(FamilyRule):
""" test if each family's parent died before the marriage """
ID = 19
SEVERITY = Rule.ERROR
def __init__(self, db, obj, est):
""" initialize the rule """
FamilyRule.__init__(self, db, obj)
self.est = est
def _get_params(self):
""" return the rule's parameters """
return (self.est,)
def broken(self):
""" return boolean indicating whether this rule is violated """
marr_date = get_marriage_date(self.db, self.obj)
marr_date_ok = marr_date > 0
mother = get_mother(self.db, self.obj)
father = get_father(self.db, self.obj)
mother_death_date = get_death_date(self.db, mother, self.est)
father_death_date = get_death_date(self.db, father, self.est)
mother_death_date_ok = mother_death_date > 0
father_death_date_ok = father_death_date > 0
father_broken = (father_death_date_ok and marr_date_ok
and (father_death_date < marr_date))
mother_broken = (mother_death_date_ok and marr_date_ok
and (mother_death_date < marr_date))
return father_broken or mother_broken
def get_message(self):
""" return the rule's error message """
return _("Marriage after death")
class EarlyMarriage(FamilyRule):
""" test if each family's parent was 'too young' at the marriage """
ID = 20
SEVERITY = Rule.WARNING
def __init__(self, db, obj, yng_mar, est):
""" initialize the rule """
FamilyRule.__init__(self, db, obj)
self.yng_mar = yng_mar
self.est = est
def _get_params(self):
""" return the rule's parameters """
return (self.yng_mar, self.est,)
def broken(self):
""" return boolean indicating whether this rule is violated """
marr_date = get_marriage_date(self.db, self.obj)
marr_date_ok = marr_date > 0
mother = get_mother(self.db, self.obj)
father = get_father(self.db, self.obj)
mother_birth_date = get_birth_date(self.db, mother, self.est)
father_birth_date = get_birth_date(self.db, father, self.est)
mother_birth_date_ok = mother_birth_date > 0
father_birth_date_ok = father_birth_date > 0
father_broken = (
father_birth_date_ok and marr_date_ok and
father_birth_date < marr_date and
((marr_date - father_birth_date) / 365 < self.yng_mar))
mother_broken = (
mother_birth_date_ok and marr_date_ok and
mother_birth_date < marr_date and
((marr_date - mother_birth_date) / 365 < self.yng_mar))
return father_broken or mother_broken
def get_message(self):
""" return the rule's error message """
return _("Early marriage")
class LateMarriage(FamilyRule):
""" test if each family's parent was 'too old' at the marriage """
ID = 21
SEVERITY = Rule.WARNING
def __init__(self, db, obj, old_mar, est):
""" initialize the rule """
FamilyRule.__init__(self, db, obj)
self.old_mar = old_mar
self.est = est
def _get_params(self):
""" return the rule's parameters """
return (self.old_mar, self.est)
def broken(self):
""" return boolean indicating whether this rule is violated """
marr_date = get_marriage_date(self.db, self.obj)
marr_date_ok = marr_date > 0
mother = get_mother(self.db, self.obj)
father = get_father(self.db, self.obj)
mother_birth_date = get_birth_date(self.db, mother, self.est)
father_birth_date = get_birth_date(self.db, father, self.est)
mother_birth_date_ok = mother_birth_date > 0
father_birth_date_ok = father_birth_date > 0
father_broken = (
father_birth_date_ok and marr_date_ok and
((marr_date - father_birth_date) / 365 > self.old_mar))
mother_broken = (
mother_birth_date_ok and marr_date_ok and
((marr_date - mother_birth_date) / 365 > self.old_mar))
return father_broken or mother_broken
def get_message(self):
""" return the rule's error message """
return _("Late marriage")
class OldParent(FamilyRule):
""" test if each family's parent was 'too old' at a child's birth """
ID = 22
SEVERITY = Rule.WARNING
def __init__(self, db, obj, old_mom, old_dad, est):
""" initialize the rule """
FamilyRule.__init__(self, db, obj)
self.old_mom = old_mom
self.old_dad = old_dad
self.est = est
def _get_params(self):
""" return the rule's parameters """
return (self.old_mom, self.old_dad, self.est)
def broken(self):
""" return boolean indicating whether this rule is violated """
mother = get_mother(self.db, self.obj)
father = get_father(self.db, self.obj)
mother_birth_date = get_birth_date(self.db, mother, self.est)
father_birth_date = get_birth_date(self.db, father, self.est)
mother_birth_date_ok = mother_birth_date > 0
father_birth_date_ok = father_birth_date > 0
for child_ref in self.obj.get_child_ref_list():
child = find_person(self.db, child_ref.ref)
child_birth_date = get_birth_date(self.db, child, self.est)
child_birth_date_ok = child_birth_date > 0
if not child_birth_date_ok:
continue
father_broken = (
father_birth_date_ok and
((child_birth_date - father_birth_date) / 365 > self.old_dad))
if father_broken:
self.get_message = self.father_message
return True
mother_broken = (
mother_birth_date_ok and
((child_birth_date - mother_birth_date) / 365 > self.old_mom))
if mother_broken:
self.get_message = self.mother_message
return True
return False
def father_message(self):
""" return the rule's error message """
return _("Old father")
def mother_message(self):
""" return the rule's error message """
return _("Old mother")
class YoungParent(FamilyRule):
""" test if each family's parent was 'too young' at a child's birth """
ID = 23
SEVERITY = Rule.WARNING
def __init__(self, db, obj, yng_mom, yng_dad, est):
""" initialize the rule """
FamilyRule.__init__(self, db, obj)
self.yng_dad = yng_dad
self.yng_mom = yng_mom
self.est = est
def _get_params(self):
""" return the rule's parameters """
return (self.yng_mom, self.yng_dad, self.est)
def broken(self):
""" return boolean indicating whether this rule is violated """
mother = get_mother(self.db, self.obj)
father = get_father(self.db, self.obj)
mother_birth_date = get_birth_date(self.db, mother, self.est)
father_birth_date = get_birth_date(self.db, father, self.est)
mother_birth_date_ok = mother_birth_date > 0
father_birth_date_ok = father_birth_date > 0
for child_ref in self.obj.get_child_ref_list():
child = find_person(self.db, child_ref.ref)
child_birth_date = get_birth_date(self.db, child, self.est)
child_birth_date_ok = child_birth_date > 0
if not child_birth_date_ok:
continue
father_broken = (
father_birth_date_ok and
((child_birth_date - father_birth_date) / 365 < self.yng_dad))
if father_broken:
self.get_message = self.father_message
return True
mother_broken = (
mother_birth_date_ok and
((child_birth_date - mother_birth_date) / 365 < self.yng_mom))
if mother_broken:
self.get_message = self.mother_message
return True
return False
def father_message(self):
""" return the rule's error message """
return _("Young father")
def mother_message(self):
""" return the rule's error message """
return _("Young mother")
class UnbornParent(FamilyRule):
""" test if each family's parent was not yet born at a child's birth """
ID = 24
SEVERITY = Rule.ERROR
def __init__(self, db, obj, est):
""" initialize the rule """
FamilyRule.__init__(self, db, obj)
self.est = est
def _get_params(self):
""" return the rule's parameters """
return (self.est,)
def broken(self):
""" return boolean indicating whether this rule is violated """
mother = get_mother(self.db, self.obj)
father = get_father(self.db, self.obj)
mother_birth_date = get_birth_date(self.db, mother, self.est)
father_birth_date = get_birth_date(self.db, father, self.est)
mother_birth_date_ok = mother_birth_date > 0
father_birth_date_ok = father_birth_date > 0
for child_ref in self.obj.get_child_ref_list():
child = find_person(self.db, child_ref.ref)
child_birth_date = get_birth_date(self.db, child, self.est)
child_birth_date_ok = child_birth_date > 0
if not child_birth_date_ok:
continue
father_broken = (father_birth_date_ok
and (father_birth_date > child_birth_date))
if father_broken:
self.get_message = self.father_message
return True
mother_broken = (mother_birth_date_ok
and (mother_birth_date > child_birth_date))
if mother_broken:
self.get_message = self.mother_message
return True
def father_message(self):
""" return the rule's error message """
return _("Unborn father")
def mother_message(self):
""" return the rule's error message """
return _("Unborn mother")
class DeadParent(FamilyRule):
""" test if each family's parent was dead at a child's birth """
ID = 25
SEVERITY = Rule.ERROR
def __init__(self, db, obj, est):
""" initialize the rule """
FamilyRule.__init__(self, db, obj)
self.est = est
def _get_params(self):
""" return the rule's parameters """
return (self.est,)
def broken(self):
""" return boolean indicating whether this rule is violated """
mother = get_mother(self.db, self.obj)
father = get_father(self.db, self.obj)
mother_death_date = get_death_date(self.db, mother, self.est)
father_death_date = get_death_date(self.db, father, self.est)
mother_death_date_ok = mother_death_date > 0
father_death_date_ok = father_death_date > 0
for child_ref in self.obj.get_child_ref_list():
child = find_person(self.db, child_ref.ref)
child_birth_date = get_birth_date(self.db, child, self.est)
child_birth_date_ok = child_birth_date > 0
if not child_birth_date_ok:
continue
has_birth_rel_to_mother = child_ref.mrel == ChildRefType.BIRTH
has_birth_rel_to_father = child_ref.frel == ChildRefType.BIRTH
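            # the 294-day grace period presumably allows for gestation:
            # a father can die up to ~42 weeks before the child's birth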
father_broken = (
has_birth_rel_to_father
and father_death_date_ok
and ((father_death_date + 294) < child_birth_date))
if father_broken:
self.get_message = self.father_message
return True
mother_broken = (has_birth_rel_to_mother
and mother_death_date_ok
and (mother_death_date < child_birth_date))
if mother_broken:
self.get_message = self.mother_message
return True
def father_message(self):
""" return the rule's error message """
return _("Dead father")
def mother_message(self):
""" return the rule's error message """
return _("Dead mother")
class LargeChildrenSpan(FamilyRule):
""" test if a family's first and last children were born far apart """
ID = 26
SEVERITY = Rule.WARNING
def __init__(self, db, obj, cb_span, est):
""" initialize the rule """
FamilyRule.__init__(self, db, obj)
self.cbs = cb_span
self.est = est
def _get_params(self):
""" return the rule's parameters """
return (self.cbs, self.est)
def broken(self):
""" return boolean indicating whether this rule is violated """
        child_birth_dates = get_child_birth_dates(self.db, self.obj, self.est)
        child_birth_dates.sort()
        return (child_birth_dates and
                ((child_birth_dates[-1] - child_birth_dates[0]) / 365 > self.cbs))
def get_message(self):
""" return the rule's error message """
return _("Large year span for all children")
class LargeChildrenAgeDiff(FamilyRule):
""" test if any of a family's children were born far apart """
ID = 27
SEVERITY = Rule.WARNING
def __init__(self, db, obj, c_space, est):
""" initialize the rule """
FamilyRule.__init__(self, db, obj)
self.c_space = c_space
self.est = est
def _get_params(self):
""" return the rule's parameters """
return (self.c_space, self.est)
def broken(self):
""" return boolean indicating whether this rule is violated """
        child_birth_dates = get_child_birth_dates(self.db, self.obj, self.est)
        child_birth_dates_diff = [child_birth_dates[i+1] - child_birth_dates[i]
                                  for i in range(len(child_birth_dates)-1)]
        return (child_birth_dates_diff and
                max(child_birth_dates_diff) / 365 > self.c_space)
def get_message(self):
""" return the rule's error message """
return _("Large age differences between children")
class Disconnected(PersonRule):
""" test if a person has no children and no parents """
ID = 28
SEVERITY = Rule.WARNING
def broken(self):
""" return boolean indicating whether this rule is violated """
return (len(self.obj.get_parent_family_handle_list())
+ len(self.obj.get_family_handle_list()) == 0)
def get_message(self):
""" return the rule's error message """
return _("Disconnected individual")
class InvalidBirthDate(PersonRule):
""" test if a person has an 'invalid' birth date """
ID = 29
SEVERITY = Rule.ERROR
def __init__(self, db, person, invdate):
""" initialize the rule """
PersonRule.__init__(self, db, person)
self._invdate = invdate
def broken(self):
""" return boolean indicating whether this rule is violated """
if not self._invdate: # should we check?
return False
# if so, let's get the birth date
person = self.obj
birth_ref = person.get_birth_ref()
if birth_ref:
birth_event = self.db.get_event_from_handle(birth_ref.ref)
birth_date = birth_event.get_date_object()
if birth_date and not birth_date.get_valid():
return True
return False
def get_message(self):
""" return the rule's error message """
return _("Invalid birth date")
class InvalidDeathDate(PersonRule):
""" test if a person has an 'invalid' death date """
ID = 30
SEVERITY = Rule.ERROR
def __init__(self, db, person, invdate):
""" initialize the rule """
PersonRule.__init__(self, db, person)
self._invdate = invdate
def broken(self):
""" return boolean indicating whether this rule is violated """
if not self._invdate: # should we check?
return False
# if so, let's get the death date
person = self.obj
death_ref = person.get_death_ref()
if death_ref:
death_event = self.db.get_event_from_handle(death_ref.ref)
death_date = death_event.get_date_object()
if death_date and not death_date.get_valid():
return True
return False
def get_message(self):
""" return the rule's error message """
return _("Invalid death date")
class MarriedRelation(FamilyRule):
""" test if a family has a marriage date but is not marked 'married' """
ID = 31
SEVERITY = Rule.WARNING
def __init__(self, db, obj):
""" initialize the rule """
FamilyRule.__init__(self, db, obj)
def broken(self):
""" return boolean indicating whether this rule is violated """
marr_date = get_marriage_date(self.db, self.obj)
marr_date_ok = marr_date > 0
married = self.obj.get_relationship() == FamilyRelType.MARRIED
        return not married and marr_date_ok
def get_message(self):
""" return the rule's error message """
return _("Marriage date but not married")
class OldAgeButNoDeath(PersonRule):
""" test if a person is 'too old' but is not shown as dead """
ID = 32
SEVERITY = Rule.WARNING
def __init__(self, db, person, old_age, est):
""" initialize the rule """
PersonRule.__init__(self, db, person)
self.old_age = old_age
self.est = est
def _get_params(self):
""" return the rule's parameters """
return (self.old_age, self.est)
def broken(self):
""" return boolean indicating whether this rule is violated """
birth_date = get_birth_date(self.db, self.obj, self.est)
dead = get_death(self.db, self.obj)
death_date = get_death_date(self.db, self.obj, True) # or burial date
        if dead or death_date or not birth_date:
            return False
age = (_today - birth_date) / 365
return age > self.old_age
def get_message(self):
""" return the rule's error message """
return _("Old age but no death")
class BirthEqualsDeath(PersonRule):
""" test if a person's birth date is the same as their death date """
ID = 33
SEVERITY = Rule.ERROR
def broken(self):
""" return boolean indicating whether this rule is violated """
birth_date = get_birth_date(self.db, self.obj)
death_date = get_death_date(self.db, self.obj)
birth_ok = birth_date > 0 if birth_date is not None else False
death_ok = death_date > 0 if death_date is not None else False
return death_ok and birth_ok and birth_date == death_date
def get_message(self):
""" return the rule's error message """
return _("Birth equals death")
class BirthEqualsMarriage(PersonRule):
""" test if a person's birth date is the same as their marriage date """
ID = 34
SEVERITY = Rule.ERROR
def broken(self):
""" return boolean indicating whether this rule is violated """
birth_date = get_birth_date(self.db, self.obj)
birth_ok = birth_date > 0 if birth_date is not None else False
        for fhandle in self.obj.get_family_handle_list():
            family = self.db.get_family_from_handle(fhandle)
            marr_date = get_marriage_date(self.db, family)
            marr_ok = marr_date > 0 if marr_date is not None else False
            if marr_ok and birth_ok and birth_date == marr_date:
                return True
        return False
def get_message(self):
""" return the rule's error message """
return _("Birth equals marriage")
class DeathEqualsMarriage(PersonRule):
""" test if a person's death date is the same as their marriage date """
ID = 35
SEVERITY = Rule.WARNING # it's possible
def broken(self):
""" return boolean indicating whether this rule is violated """
death_date = get_death_date(self.db, self.obj)
death_ok = death_date > 0 if death_date is not None else False
        for fhandle in self.obj.get_family_handle_list():
            family = self.db.get_family_from_handle(fhandle)
            marr_date = get_marriage_date(self.db, family)
            marr_ok = marr_date > 0 if marr_date is not None else False
            if marr_ok and death_ok and death_date == marr_date:
                return True
        return False
def get_message(self):
""" return the rule's error message """
return _("Death equals marriage")
|
jralls/gramps
|
gramps/plugins/tool/verify.py
|
Python
|
gpl-2.0
| 69,208
|
[
"Brian"
] |
73a7eece57660ca472b908368baf9261c3e527fa993df6d602724e2251176995
|
#!/usr/bin/env python
'''
usage: LCSiellipse.py fileprefix
run from OffCenter subdirectory
the program:
- grabs all files with CLUSTER*.fits
- makes sure that masked image mCLUSTER*.fits exists
- will run imedit if mask is not found
- runs ellipse interactively
- saves result in ../Final subdirectory
'''
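# Illustrative driver sketch (an assumption; the actual call site is not
# shown here).  Per the docstring above, a main body would look roughly like:
#
#     files = glob.glob(sys.argv[1] + '*CLUSTER*.fits')
#     runellipse(files, xcenter=50., ycenter=50., minr=2., ipa=0.,
#                initialr=10., maxr=100., iellip=0.1)
#
# where every numeric value above is a hypothetical placeholder.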
import glob
from pyraf import iraf
import os
from pylab import sqrt
import sys
def writeregfile(x,y,majoraxis,ellip,pa):
outfile=open('ds9.reg','w')
outfile.write('global color=green\n')
outfile.write('physical \n')
s='ellipse(%5.1f,%5.1f,%5.1f,%5.1f,%5.1f)'%(x,y,majoraxis,ellip*majoraxis,pa)
outfile.write(s)
outfile.close()
def findellipse(image,x,y):
repeatflag=1
while repeatflag:
ellip=float(raw_input('enter ellip '))
pa=float(raw_input('enter PA (between -90=+x and 90=-x) '))
majoraxis=float(raw_input('enter Major Axis '))
s='echo "image; ellipse %5.1f %5.1f %5.1f %5.1f %5.1f" |xpaset ds9 regions'%(x,y,majoraxis,(1-ellip)*majoraxis,(pa+90))
os.system(s)
flag=str(raw_input('are you happy with the ellipse? y=yes n=no x=quit '))
if flag.find('y') > -1:
return ellip,pa
        elif flag.find('x') > -1:
            return None,None
def runimedit(mfile,outfile1,nframe):
continueWithProgram=1
continueWithObject=1
repeatflag=1
while (repeatflag > 0.1):
iraf.display(mfile,frame=nframe, fill='yes')
print mfile
print 'Running imedit to mask out other sources in the field:'
print 'Enter b to mask out a circular region'
print 'Enter a to mark the corners of a rectangle'
print 'Enter q when finished'
try:
os.remove(outfile1)
except OSError:
print 'everything is ok'
print 'running imedit ',mfile, outfile1
iraf.imedit(mfile,outfile1)
flag=str(raw_input('Are you happy with the editing? n=no x=quit y (or any other key) = yes '))
flag=str(flag)
print 'this is what I think you typed ',flag
if flag.find('n') > -1:
flag2=str(raw_input('What is wrong? r=redo masking, o=nearby object, p=partial image, x=quit '))
if flag2.find('r') > -1:
s='rm '+outfile1
os.system(s)
repeatflag=1
print 'i think repeatflag = 1 ', repeatflag
elif flag2.find('o') > -1:
s='rm '+outfile1
os.system(s)
s='mv '+mfile+' NearbyObjects/'
os.system(s)
continueWithObject=0
return continueWithProgram,continueWithObject
elif flag2.find('p') > -1:
s='rm '+outfile1
os.system(s)
s='mv '+mfile+' PartialImages/'
os.system(s)
continueWithObject=0
return continueWithProgram,continueWithObject
elif flag2.find('x') > -1:
continueWithProgram=0
repeatflag=0
print 'i think repeatflag = 0', repeatflag
return continueWithProgram,continueWithObject
else:
repeatflag=0
elif flag.find('x') > -1:
print 'i think you want to exit'
continueWithProgram=0
repeatflag=0
return continueWithProgram,continueWithObject
else:
repeatflag=0
return continueWithProgram,continueWithObject
def runellipse(files,xcenter,ycenter,minr,ipa,initialr,maxr,iellip,nframe=1,myradius=15):
initialradius=myradius
#print 'got here'
#print files
for i in range(len(files)):
myradius=initialradius
mfile=files[i]
#mask image
outfile1='m'+mfile
if os.path.isfile(outfile1):
print "found masked file ",outfile1
print "skipping imedit and running ellipse interactively"
continueWithProgram=1
continueWithObject=1
else:
print "can't find masked file ",outfile1
print "running imedit"
continueWithProgram,continueWithObject=runimedit(mfile,outfile1,nframe)
if not continueWithProgram:
print "quitting program"
return
if not continueWithObject:
print "going on to next image"
continue
#run ellipse
t=mfile.split('.')
efile=t[0]+'.tab'
imprefix=t[0]
try:
os.remove(efile)
except OSError:
print 'everything is ok'
#continue
print "First pass through ellipse to find center"
iraf.ellipse(input=outfile1,output=efile,x0=xcenter,y0=ycenter,hcenter='no',sma0=initialr,minsma=minr,maxsma=maxr,pa=ipa,hpa='no',ellip=iellip,hellip='no',interactive='no')
#print 'Displaying isophotes from first pass. Hit q in DS9 window to quit'
#iraf.isoexam(table=efile)
os.system('rm junk.txt')
iraf.tprint(table=efile,pwidth='INDEF',showhdr='no', Stdout='junk.txt')
os.system("awk '{print $2, $7, $9, $11, $13}' < junk.txt > junk2.txt")
#run ellipse a second time, keeping PA and ellip fixed
#allow user to adjust the radius where PA and ellip are measured
infile=open('junk2.txt','r')
for line in infile:
t=line.split()
if float(t[0]) > myradius:
newxcenter=float(t[3])
newycenter=float(t[4])
break
s='rm '+efile
os.system(s)
#draw ellipse with ds9
iraf.display(outfile1,1)
        (myellip,mypa)=findellipse(outfile1,newxcenter,newycenter)
        if myellip is None:
            # user chose to quit inside findellipse
            print 'quitting program'
            return
flag2=str(raw_input('Do you want to skip this one? y=skip, any other key to continue '))
if flag2.find('y') > -1:
s='mv *'+imprefix+'* ../PeculiarGalaxies/'
print s
os.system(s)
continue
#run ellipse interactively
#allow user to repeat until satisfied with script
repeatflag=1
while repeatflag:
s='rm '+efile
os.system(s)
iraf.ellipse(input=outfile1,output=efile,x0=newxcenter,y0=newycenter,hcenter='yes',sma0=initialr,minsma=minr,maxsma=maxr,pa0=mypa,hpa='yes',ellip0=myellip,hellip='yes',interactive='no')
print 'Displaying isophotes from second pass using r = ',myradius
print 'Hit q in the DS9 window to quit'
iraf.isoexam(table=efile)
flag=str(raw_input('Are you happy with the fit? y=yes n=no x=quit '))
flag=str(flag)
print 'this is what I think you typed ',flag
if flag.find('n') > -1:
s='rm '+efile
os.system(s)
repeatflag=1
elif flag.find('x') > -1:
repeatflag=0
print 'i think repeatflag = 0', repeatflag
return
else:
s='mv *'+imprefix+'* ../Finished/'
os.system(s)
repeatflag=0
print 'i think repeatflag = 0 ', repeatflag
print 'repeatflag = ',repeatflag
def runellipseold(files,xcenter,ycenter,minr,ipa,initialr,maxr,iellip,nframe=1,myradius=15):
initialradius=myradius
for i in range(len(files)):
myradius=initialradius
mfile=files[i]
#mask image
outfile1='m'+mfile
continueWithProgram,continueWithObject=runimedit(mfile,outfile1,nframe)
if not continueWithProgram:
print "quitting program"
return
if not continueWithObject:
print "going on to next image"
continue
#run ellipse
t=mfile.split('.')
efile=t[0]+'.tab'
imprefix=t[0]
print mfile, imprefix
print 'Running ellipse to fit isophotes to galaxy:'
try:
os.remove(efile)
except OSError:
print 'everything is ok'
print "First pass, letting PA and e vary"
iraf.ellipse(input=outfile1,output=efile,x0=xcenter,y0=ycenter,hcenter='no',sma0=initialr,minsma=minr,maxsma=maxr,pa=ipa,hpa='no',ellip=iellip,hellip='no')
print 'Displaying isophotes from first pass. Hit q in DS9 window to quit'
iraf.isoexam(table=efile)
os.system('rm junk.txt')
iraf.tprint(table=efile,pwidth='INDEF',showhdr='no', Stdout='junk.txt')
os.system("awk '{print $2, $7, $9, $11, $13}' < junk.txt > junk2.txt")
#run ellipse a second time, keeping PA and ellip fixed
#allow user to adjust the radius where PA and ellip are measured
repeatflag=1
while (repeatflag > 0.1):
infile=open('junk2.txt','r')
for line in infile:
t=line.split()
if float(t[0]) > myradius:
newellip=float(t[1])
if newellip < .05:#min value that ellipse can handle
newellip=.05
newPA=float(t[2])
if newPA < -90:
newPA=newPA+180
elif newPA > 90:
newPA = newPA-180
#11 - X0, 13 - Y0
newxcenter=float(t[3])
newycenter=float(t[4])
break
s='rm '+efile
os.system(s)
iraf.ellipse(input=outfile1,output=efile,x0=newxcenter,y0=newycenter,hcenter='yes',sma0=initialr,minsma=minr,maxsma=maxr,pa=newPA,hpa='yes',ellip=newellip,hellip='yes')
print 'Displaying isophotes from second pass using r = ',myradius
print 'Hit q in the DS9 window to quit'
iraf.isoexam(table=efile)
flag=str(raw_input('Are you happy with the fit? y=yes n=no x=quit '))
flag=str(flag)
print 'this is what I think you typed ',flag
if flag.find('n') > -1:
flag2=str(raw_input('What is the problem? c=off-center r=set new radius x=quit '))
flag2=str(flag2)
if flag2.find('r') > -1:
myr=input('Enter new radius to use ')
myradius=float(myr)
s='rm '+efile
os.system(s)
repeatflag=1
elif flag2.find('x') > -1:
repeatflag=0
return
elif flag2.find('c') > -1:
s='mv *'+imprefix+'* OffCenter/'
print s
os.system(s)
repeatflag=0
print "repeatflag = ",repeatflag
elif flag.find('x') > -1:
repeatflag=0
print 'i think repeatflag = 0', repeatflag
return
else:
s='mv *'+imprefix+'* Finished/'
os.system(s)
repeatflag=0
print 'i think repeatflag = 0 ', repeatflag
print 'repeatflag = ',repeatflag
raw_input('Make sure ds9 is open. Hit return when ready.')
iraf.stsdas()
iraf.analysis()
iraf.isophote()
iraf.tables()
iraf.ttools()
#t=os.getcwd()
#s=t.split('cutouts')
#t=s[1].split('/')
#prefix=t[1]
#s=prefix+'*cutout-24.fits'
#mipsfiles=glob.glob(s)
#s=prefix+'*cutout-sdss.fits'
#print s
fileprefix=sys.argv[1]
print fileprefix
sdssrfiles=glob.glob(fileprefix)
print sdssrfiles
ipa=0
xcenter=23.0
ycenter=23.0
minr=2
initialr=6
maxr=20
iellip = .05
evalrad=15#radius to measure PA and ellip at
sdssxcenter=50.5
sdssycenter=50.5
sdssminr=2
sdssinitialr=8
sdssmaxr=49
evalrad=15#radius to measure PA and ellip at
#print 'sdssrfiles',sdssrfiles
os.system('mkdir ../PeculiarGalaxies')
#runellipse(mipsfiles,xcenter,ycenter,minr,ipa,initialr,maxr,iellip)
runellipse(sdssrfiles,sdssxcenter,sdssycenter,sdssminr,ipa,sdssinitialr,sdssmaxr,iellip,nframe=2,myradius=evalrad)
|
rfinn/LCS
|
paper1code/LCSiellipse.py
|
Python
|
gpl-3.0
| 12,042
|
[
"Galaxy"
] |
b914c510074cdc9c7ff276c95ebbd17b2cdad4ce40652b60e5a7fba8313b71ec
|
__author__ = 'Luqman'
import cv2
import numpy as np
import PostProcessing
from BackgroundSubtraction import BackgroundSubtraction
from BackgroundSubtraction import BackgroundSubtractionColor
"""
Author: Luqman A. M.
BackgroundSubtractionImpl.py
Background Subtraction Algorithms Implementation for Object Detection in Video Processing
Frame Difference, Running Average, Median, Online K-Means, 1-G, KDE
"""
# class Frame Difference
class FrameDifference(BackgroundSubtraction):
def __init__(self, filename, threshold):
print "initializing Frame Difference..."
BackgroundSubtraction.__init__(self, filename, False)
self.init_threshold = threshold
self.threshold = None
return
def apply(self, data):
if self.threshold is None:
self.threshold = np.multiply(np.ones_like(data, 'uint8'), self.init_threshold)
diff = np.absolute(np.subtract(data, self.prev_frame))
new_fg = np.multiply(
np.ones_like(data, 'uint8'),
np.where(
np.less(diff, self.threshold),
0,
255
)
)
return new_fg
def run(self):
BackgroundSubtraction.run(self)
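# Hedged standalone sketch (not in the original file) of the rule implemented
# by FrameDifference.apply above: a pixel is foreground when
# |frame_t - frame_{t-1}| >= threshold. cv2.absdiff is used here because
# np.subtract on uint8 inputs wraps around instead of going negative.
def _frame_difference_demo(threshold=30):
    prev = np.array([[10, 10], [10, 10]], dtype='uint8')
    cur = np.array([[10, 80], [10, 10]], dtype='uint8')
    diff = cv2.absdiff(cur, prev)
    fg = np.where(np.less(diff, threshold), 0, 255).astype('uint8')
    return fg  # [[0, 255], [0, 0]]: only the changed pixel is foreground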
# class Running Average
class RunningAverage(BackgroundSubtraction):
def __init__(self, filename, alpha):
print "initializing Running Average..."
BackgroundSubtraction.__init__(self, filename, True)
self.alpha = alpha
self.beta = 0.02
return
def apply(self, data):
rects, fg = PostProcessing.foreground_detection(data, self.bg, False)
new_bg = np.where(
np.equal(fg, 0)
, np.add(((1 - self.alpha) * self.bg), (self.alpha * data))
, np.add(((1 - self.beta) * self.bg), (self.beta * data))
)
return cv2.convertScaleAbs(new_bg)
def run(self):
BackgroundSubtraction.run(self)
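# Hedged scalar sketch (not in the original file) of the selective update in
# RunningAverage.apply above: background pixels track the frame at rate
# alpha, while pixels flagged as foreground adapt at the slower rate beta.
def _running_average_demo(alpha=0.1, beta=0.02):
    bg, frame = 100.0, 150.0
    updated_if_background = (1 - alpha) * bg + alpha * frame  # 105.0
    updated_if_foreground = (1 - beta) * bg + beta * frame    # 101.0
    return updated_if_background, updated_if_foreground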
# class Running Average 2 (with improvement)
class RunningAverageWithThresholdImprovement(BackgroundSubtraction):
def __init__(self, filename, alpha):
print "initializing Running Average..."
BackgroundSubtraction.__init__(self, filename, False)
self.alpha = alpha
self.beta = 0.02
self.gamma = 3.2
self.threshold = None
return
def apply(self, pict):
if self.threshold is None:
self.threshold = np.multiply(np.ones_like(pict, 'float32'), 65)
fg = np.copy(pict)
else:
resultant = cv2.absdiff(pict, self.bg)
fg = np.where(np.greater(resultant, self.threshold), 255, 0)
new_bg = np.where(
np.equal(fg, 0)
, np.add(((1 - self.alpha) * self.bg), (self.alpha * pict))
, np.add(((1 - self.beta) * self.bg), (self.beta * pict))
)
self.threshold = self.threshold_update(fg, pict)
print self.threshold
self.bg = np.uint8(new_bg)
return cv2.convertScaleAbs(fg)
def threshold_update(self, fg, pict):
new_threshold = np.where(
np.equal(fg, 0),
np.add(
np.multiply((1 - self.alpha), self.threshold),
np.multiply(self.alpha, self.gamma * cv2.absdiff(pict, self.bg))
),
self.threshold
)
return new_threshold
def run(self):
BackgroundSubtraction.run(self)
# class Median Recursive
class MedianRecursive(BackgroundSubtraction):
def __init__(self, filename):
print "initializing Median Recursive..."
BackgroundSubtraction.__init__(self, filename, True)
return
def apply(self, cur_frame):
new_bg = np.where(np.less_equal(self.bg, cur_frame), self.bg + 1, self.bg - 1)
return cv2.convertScaleAbs(new_bg)
def run(self):
BackgroundSubtraction.run(self)
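# Hedged sketch (not in the original file): the +/-1 rule in MedianRecursive
# nudges each background pixel toward the temporal median of its intensity.
def _median_recursive_demo():
    bg = np.array([[100]], dtype='int32')
    for frame_value in [120, 120, 90, 120]:
        bg = np.where(np.less_equal(bg, frame_value), bg + 1, bg - 1)
    return bg  # [[102]]: crept toward the dominant value (120)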
# class OnlineKMeans
class OnlineKMeans(BackgroundSubtraction):
def __init__(self, filename, alpha):
print "initializing Online K Means..."
BackgroundSubtraction.__init__(self, filename, False)
self.alpha = alpha
self.K = 0
self.centroids = None
self.w = None
_, frame = self.vid_src.read()
gray_pict = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
self.init_clusters(3, gray_pict)
return
def init_clusters(self, k, pict):
self.K = k
self.centroids = [np.multiply(np.ones_like(pict, 'float64'), ((256. / k) * i)) for i in range(k)]
self.w = [np.multiply(np.ones_like(pict, 'float64'), (1. / k))] * k
return
def apply(self, pict):
# get min diff & centroid assigned
min_diff = np.multiply(np.ones_like(pict, 'float64'), -1)
assigned = np.zeros_like(pict, 'uint8')
new_bg = np.multiply(np.ones_like(pict, 'uint8'), 255)
for i in range(self.K):
# get diff
cur_diff = np.multiply(np.ones_like(pict, 'float64'), ((pict - self.centroids[i]) ** 2))
assigned = np.where(np.logical_or(np.equal(min_diff, -1), np.less(cur_diff, min_diff)), i, assigned)
min_diff = np.where(np.logical_or(np.equal(min_diff, -1), np.less(cur_diff, min_diff)), cur_diff, min_diff)
# update the centroids and weight
for i in range(self.K):
update_centroids = np.multiply(
np.ones_like(pict, 'float64'),
(np.add(self.centroids[i], self.alpha * np.subtract(pict, self.centroids[i])))
)
self.centroids[i] = np.where(np.equal(assigned, i), update_centroids, self.centroids[i])
self.w[i] = np.where(np.equal(assigned, i), np.add(np.multiply((1. - self.alpha), self.w[i]), self.alpha),
np.multiply((1. - self.alpha), self.w[i]))
new_bg = np.where(np.logical_and(np.equal(assigned, i), np.greater(self.w[i], 1. / self.K)), 0, new_bg)
return new_bg
def run(self):
BackgroundSubtraction.run(self)
# class Single Gaussian
# gaussian (mean, variance, weight) is a numpy array
class SingleGaussian(BackgroundSubtraction):
def __init__(self, filename, alpha, th):
print "initializing Single Gaussian..."
BackgroundSubtraction.__init__(self, filename, False)
self.alpha = alpha
self.threshold = th
self.th_array = None
self.variance = None
self.mean = None
return
def apply(self, pict):
if self.mean is None:
self.bg = np.uint8(pict)
self.mean = np.uint8(pict)
self.th_array = np.multiply(np.ones_like(pict, 'float64'), self.threshold)
self.variance = np.multiply(np.ones_like(pict, 'float64'), 20)
pdf = np.multiply(
(1. / (np.sqrt(self.variance * 2 * np.pi))),
np.exp((-((pict - self.mean) ** 2)) / (2 * self.variance))
)
new_pict = np.zeros_like(pict)
new_pict = np.where(np.less(pdf, self.th_array), 255, new_pict)
self.bg = np.copy(self.mean)
self.mean = np.add(((1 - self.alpha) * self.mean), (self.alpha * pict))
# self.variance = np.add(((1-self.alpha) * self.variance), (self.alpha * ((pict - self.bg) ** 2)))
return new_pict
def run(self):
BackgroundSubtraction.run(self)
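# Hedged scalar sketch (not in the original file) of the test performed by
# SingleGaussian.apply above: a pixel is foreground when its likelihood under
# the per-pixel Gaussian background model drops below the threshold.
def _single_gaussian_demo(mean=100.0, variance=20.0, th=1e-4):
    pixels = np.array([102.0, 160.0])
    pdf = (1. / np.sqrt(variance * 2 * np.pi)) * np.exp(
        -((pixels - mean) ** 2) / (2 * variance))
    return np.where(np.less(pdf, th), 255, 0)  # [0, 255]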
# class KDE
# implements KDE with LUT
class KDE(BackgroundSubtraction):
def __init__(self, filename, alpha, th, kernelnum):
print "initializing KDE..."
BackgroundSubtraction.__init__(self, filename, False)
self.alpha = alpha
self.threshold = th
self.th_array = None
self.kernels = None
self.init_kernel(kernelnum)
self.pdf_dict = np.array([self.pdf(i, kernelnum) for i in range(256)])
return
def apply(self, pict):
if self.th_array is None:
self.th_array = np.multiply(np.ones_like(pict, 'float64'), self.threshold)
pdf_kernel = map(
lambda x: np.multiply(
np.ones_like(pict, 'float32'),
self.pdf_dict[np.absolute(np.subtract(pict, x))]
),
self.kernels
)
pdf_combination = reduce(lambda a, b: np.add(a, b), pdf_kernel)
fg = np.multiply(
np.ones_like(pict, 'uint8'),
np.where(
np.greater(pdf_combination, self.threshold),
0,
255
).astype('uint8')
)
return fg
def pdf(self, num, kernel_num):
variance = 30
index = -(float(num) ** 2) / (2 * variance ** 2)
result = np.exp(index) / kernel_num
return result
def init_kernel(self, n):
print "initializing kernels for KDE...."
cvid = cv2.VideoCapture(self.file)
length = int(cvid.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
print length, "frames"
_, frame = cvid.read()
it = 0
nframe = 0
self.kernels = []
# iterating through video, filling kernel with images by 3 frame skip
while frame is not None and it < n:
if nframe % 3 == 0:
gray_pict = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
self.kernels.append(np.copy(gray_pict))
it += 1
_, frame = cvid.read()
nframe += 1
print "done"
return
def run(self):
BackgroundSubtraction.run(self)
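# Hedged sketch (not in the original file) of the lookup-table evaluation in
# KDE.apply above: a pixel's score is the summed Gaussian-kernel response over
# the stored background samples, indexed by absolute intensity difference.
def _kde_demo():
    variance = 30  # same constant as in KDE.pdf above
    n_kernels = 3
    lut = [np.exp(-(float(d) ** 2) / (2 * variance ** 2)) / n_kernels
           for d in range(256)]
    background_samples = [100, 104, 98]
    pixel = 101
    score = sum(lut[abs(pixel - k)] for k in background_samples)
    return score  # a low score relative to the threshold means foreground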
# unfinished classes
# class Gaussian Mixture
class GaussianMixture(BackgroundSubtraction):
def __init__(self, filename, alpha, k):
print "initializing Gaussian Mixture..."
BackgroundSubtraction.__init__(self, filename, False)
self.alpha = alpha
self.models = None
self.K = k
self.means = None
self.variance = None
self.weights = None
return
def apply(self, pict):
return pict
def run(self):
BackgroundSubtraction.run(self)
# class HiddenMarkov
class HiddenMarkov(BackgroundSubtraction):
def __init__(self, filename):
print "initializing Hidden Markov..."
BackgroundSubtraction.__init__(self, filename, False)
return
def apply(self, pict):
pass
def run(self):
BackgroundSubtraction.run(self)
# class Running Average Color
class RunningAverageColor(BackgroundSubtractionColor):
# warping matrix -- for determining neighbor pixels
WARP_MATRIX = [
np.float32([[1, 0, -3], [0, 1, -3]]),
np.float32([[1, 0, 0], [0, 1, -3]]),
np.float32([[1, 0, 3], [0, 1, -3]]),
np.float32([[1, 0, 3], [0, 1, 0]]),
np.float32([[1, 0, 3], [0, 1, 3]]),
np.float32([[1, 0, 0], [0, 1, 3]]),
np.float32([[1, 0, -3], [0, 1, 3]]),
np.float32([[1, 0, -3], [0, 1, 0]])
]
def __init__(self, params):
"""
initialization of Running Average Algorithm
:param params: tuple consists of alpha and beta
"""
BackgroundSubtractionColor.__init__(self)
self.bg = None
self.prev_frame = None
self.prev_prev_frame = None
self.alpha = params[0]
self.beta = params[1]
def apply(self, cur_image, cur_objects):
"""
apply the algorithm for running average in color
:param cur_image: numpy array; a color image (RGB)
:param cur_objects: array consists of object squares
:return new_objects_box: array consists of new object squares
:return new_fg: binary image consists of image (black and white)
"""
cols, rows, depth = cur_image.shape
if self.bg is None:
self.bg = np.copy(cur_image)
if self.prev_frame is None:
self.prev_frame = np.copy(cur_image)
self.prev_prev_frame = np.copy(cur_image)
# get neighbor pixels
neighbor_pixels = map(
lambda x: cv2.warpAffine(cur_image, x, (cols, rows)),
self.WARP_MATRIX
)
# update background
new_bg = np.add(((1 - self.alpha) * self.bg), (self.alpha * cur_image))
# compare neighbor pixel with current background
# neighbor_pixels_diff = map(
# lambda x: np.absolute(np.subtract(new_bg, x)),
# neighbor_pixels
# )
# get difference at this pixel
diff = np.absolute(np.subtract(new_bg, cur_image))
fg_raw = cv2.inRange(cv2.cvtColor(diff.astype('uint8'), cv2.COLOR_BGR2GRAY), 25, 255)
raw_boxes, new_fg = PostProcessing.foreground_process(fg_raw)
new_objects_box = PostProcessing.bounding_box_mask(raw_boxes, new_fg)
cv2.imshow('Background', new_bg.astype('uint8'))
self.bg = np.copy(new_bg)
self.prev_prev_frame = np.copy(self.prev_frame)
self.prev_frame = np.copy(cur_image)
return new_objects_box, new_fg
@staticmethod
def get_difference(cur_image, prev_image, threshold):
threshold_array = np.multiply(np.ones_like(cur_image, 'uint8'), threshold)
diff = np.absolute(np.subtract(cur_image, prev_image))
result = np.multiply(
np.ones_like(cur_image, 'uint8'),
np.where(
np.less(diff, threshold_array),
0,
255
)
)
return result
# class Frame Difference Color
class FrameDifferenceColor(BackgroundSubtractionColor):
def __init__(self, params):
"""
initialization of Running Average Algorithm
:param params: tuple consists of threshold
"""
self.threshold = params[0]
self.prev_image = None
return
def apply(self, cur_image):
"""
apply the algorithm for running average in color
:param cur_image: numpy array; a color image (RGB)
:return new_objects_box: array consists of new object squares
:return new_fg: binary image consists of image (black and white)
"""
cur_image_gray = cv2.cvtColor(cur_image, cv2.COLOR_BGR2GRAY)
if self.prev_image is not None:
threshold_array = np.multiply(np.ones_like(cur_image_gray, 'uint8'), self.threshold)
            diff = np.absolute(np.subtract(cur_image_gray, self.prev_image))
fg_raw = np.multiply(
np.ones_like(cur_image_gray, 'uint8'),
np.where(
np.less(diff, threshold_array),
0,
255
)
)
raw_boxes, new_fg = PostProcessing.foreground_process(fg_raw)
new_objects_box = PostProcessing.bounding_box_mask(raw_boxes, new_fg)
else:
new_fg = np.zeros_like(cur_image_gray)
new_objects_box = []
self.prev_image = np.copy(cur_image_gray)
return new_objects_box, new_fg
|
umanium/trafficmon
|
BackgroundSubtractionImpl.py
|
Python
|
mit
| 14,858
|
[
"Gaussian"
] |
b0787f99dd0b1047048f7fc489e2f9d141e451239fc73628e08397fe7acf810c
|
# Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
import datetime
from unittest import mock
import uuid
from keystoneauth1 import access
from keystoneauth1 import fixture
from osc_lib.cli import format_columns
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit import utils
base_url = 'http://identity:5000/v3/'
domain_id = 'd1'
domain_name = 'oftheking'
domain_description = 'domain description'
DOMAIN = {
'id': domain_id,
'name': domain_name,
'description': domain_description,
'enabled': True,
'tags': [],
'links': base_url + 'domains/' + domain_id,
}
group_id = 'gr-010'
group_name = 'spencer davis'
GROUP = {
'id': group_id,
'name': group_name,
'links': base_url + 'groups/' + group_id,
}
mapping_id = 'test_mapping'
mapping_rules_file_path = '/tmp/path/to/file'
# Copied from
# https://github.com/openstack/keystone/blob/master/keystone/tests/mapping_fixtures.py
EMPLOYEE_GROUP_ID = "0cd5e9"
DEVELOPER_GROUP_ID = "xyz"
MAPPING_RULES = [
{
"local": [
{
"group": {
"id": EMPLOYEE_GROUP_ID
}
}
],
"remote": [
{
"type": "orgPersonType",
"not_any_of": [
"Contractor",
"Guest"
]
}
]
}
]
MAPPING_RULES_2 = [
{
"local": [
{
"group": {
"id": DEVELOPER_GROUP_ID
}
}
],
"remote": [
{
"type": "orgPersonType",
"any_one_of": [
"Contractor"
]
}
]
}
]
MAPPING_RESPONSE = {
"id": mapping_id,
"rules": MAPPING_RULES
}
MAPPING_RESPONSE_2 = {
"id": mapping_id,
"rules": MAPPING_RULES_2
}
mfa_opt1 = 'password,totp'
mfa_opt2 = 'password'
project_id = '8-9-64'
project_name = 'beatles'
project_description = 'Fab Four'
PROJECT = {
'id': project_id,
'name': project_name,
'description': project_description,
'enabled': True,
'domain_id': domain_id,
'tags': [],
'links': base_url + 'projects/' + project_id,
}
PROJECT_2 = {
'id': project_id + '-2222',
'name': project_name + ' reprise',
'description': project_description + 'plus four more',
'enabled': True,
'domain_id': domain_id,
'tags': [],
'links': base_url + 'projects/' + project_id,
}
region_id = 'region_one'
region_parent_region_id = 'region_two'
region_description = 'region one'
REGION = {
'id': region_id,
'description': region_description,
'parent_region_id': region_parent_region_id,
'links': base_url + 'regions/' + region_id,
}
PROJECT_WITH_PARENT = {
'id': project_id + '-with-parent',
'name': project_name + ' and their parents',
'description': project_description + ' plus another four',
'enabled': True,
'domain_id': domain_id,
'parent_id': project_id,
'tags': [],
'links': base_url + 'projects/' + (project_id + '-with-parent'),
}
PROJECT_WITH_GRANDPARENT = {
'id': project_id + '-with-grandparent',
'name': project_name + ', granny and grandpa',
'description': project_description + ' plus another eight?',
'enabled': True,
'domain_id': domain_id,
'parent_id': PROJECT_WITH_PARENT['id'],
'tags': [],
'links': base_url + 'projects/' + (project_id + '-with-grandparent'),
}
parents = [{'project': PROJECT}]
grandparents = [{'project': PROJECT}, {'project': PROJECT_WITH_PARENT}]
ids_for_parents = [PROJECT['id']]
ids_for_parents_and_grandparents = [PROJECT['id'], PROJECT_WITH_PARENT['id']]
children = [{'project': PROJECT_WITH_GRANDPARENT}]
ids_for_children = [PROJECT_WITH_GRANDPARENT['id']]
role_id = 'r1'
role_name = 'roller'
role_description = 'role description'
ROLE = {
'id': role_id,
'name': role_name,
'domain': None,
'links': base_url + 'roles/' + role_id,
}
ROLE_2 = {
'id': 'r2',
'name': 'Rolls Royce',
'domain': domain_id,
'links': base_url + 'roles/' + 'r2',
}
ROLES = [ROLE, ROLE_2]
service_id = 's-123'
service_name = 'Texaco'
service_type = 'gas'
service_description = 'oil brand'
SERVICE = {
'id': service_id,
'name': service_name,
'type': service_type,
'description': service_description,
'enabled': True,
'links': base_url + 'services/' + service_id,
}
SERVICE_WITHOUT_NAME = {
'id': service_id,
'type': service_type,
'description': service_description,
'enabled': True,
'links': base_url + 'services/' + service_id,
}
endpoint_id = 'e-123'
endpoint_url = 'http://127.0.0.1:35357'
endpoint_region = 'RegionOne'
endpoint_interface = 'admin'
ENDPOINT = {
'id': endpoint_id,
'url': endpoint_url,
'region': endpoint_region,
'interface': endpoint_interface,
'service_id': service_id,
'enabled': True,
'links': base_url + 'endpoints/' + endpoint_id,
}
endpoint_group_id = 'eg-123'
endpoint_group_description = 'eg 123 description'
endpoint_group_filters = {
'service_id': service_id,
'region_id': endpoint_region,
}
endpoint_group_filters_2 = {
'region_id': endpoint_region,
}
endpoint_group_file_path = '/tmp/path/to/file'
ENDPOINT_GROUP = {
'id': endpoint_group_id,
'filters': endpoint_group_filters,
'description': endpoint_group_description,
'links': base_url + 'endpoint_groups/' + endpoint_group_id,
}
user_id = 'bbbbbbb-aaaa-aaaa-aaaa-bbbbbbbaaaa'
user_name = 'paul'
user_description = 'Sir Paul'
user_email = 'paul@applecorps.com'
USER = {
'id': user_id,
'name': user_name,
'default_project_id': project_id,
'email': user_email,
'enabled': True,
'domain_id': domain_id,
'links': base_url + 'users/' + user_id,
}
trust_id = 't-456'
trust_expires = None
trust_impersonation = False
trust_roles = {"id": role_id, "name": role_name},
TRUST = {
'expires_at': trust_expires,
'id': trust_id,
'impersonation': trust_impersonation,
'links': base_url + 'trusts/' + trust_id,
'project_id': project_id,
'roles': trust_roles,
'trustee_user_id': user_id,
'trustor_user_id': user_id,
}
token_expires = '2016-09-05T18:04:52+0000'
token_id = 'tttttttt-tttt-tttt-tttt-tttttttttttt'
UNSCOPED_TOKEN = {
'expires': token_expires,
'id': token_id,
'user_id': user_id,
}
TOKEN_WITH_PROJECT_ID = {
'expires': token_expires,
'id': token_id,
'project_id': project_id,
'user_id': user_id,
}
TOKEN_WITH_DOMAIN_ID = {
'expires': token_expires,
'id': token_id,
'domain_id': domain_id,
'user_id': user_id,
}
idp_id = 'test_idp'
idp_description = 'super exciting IdP description'
idp_remote_ids = ['entity1', 'entity2']
formatted_idp_remote_ids = format_columns.ListColumn(idp_remote_ids)
IDENTITY_PROVIDER = {
'id': idp_id,
'remote_ids': idp_remote_ids,
'enabled': True,
'description': idp_description,
'domain_id': domain_id,
}
protocol_id = 'protocol'
mapping_id = 'test_mapping'
mapping_id_updated = 'prod_mapping'
sp_id = 'BETA'
sp_description = 'Service Provider to burst into'
service_provider_url = 'https://beta.example.com/Shibboleth.sso/POST/SAML'
sp_auth_url = ('https://beta.example.com/v3/OS-FEDERATION/identity_providers/'
'idp/protocol/saml2/auth')
SERVICE_PROVIDER = {
'id': sp_id,
'enabled': True,
'description': sp_description,
'sp_url': service_provider_url,
'auth_url': sp_auth_url
}
PROTOCOL_ID_MAPPING = {
'id': protocol_id,
'mapping': mapping_id
}
PROTOCOL_OUTPUT = {
'id': protocol_id,
'mapping_id': mapping_id,
'identity_provider': idp_id
}
PROTOCOL_OUTPUT_UPDATED = {
'id': protocol_id,
'mapping_id': mapping_id_updated,
'identity_provider': idp_id
}
# Assignments
ASSIGNMENT_WITH_PROJECT_ID_AND_USER_ID = {
'scope': {'project': {'id': project_id}},
'user': {'id': user_id},
'role': {'id': role_id},
}
ASSIGNMENT_WITH_PROJECT_ID_AND_USER_ID_INCLUDE_NAMES = {
'scope': {
'project': {
'domain': {'id': domain_id,
'name': domain_name},
'id': project_id,
'name': project_name}},
'user': {
'domain': {'id': domain_id,
'name': domain_name},
'id': user_id,
'name': user_name},
'role': {'id': role_id,
'name': role_name},
}
ASSIGNMENT_WITH_PROJECT_ID_AND_USER_ID_INHERITED = {
'scope': {'project': {'id': project_id},
'OS-INHERIT:inherited_to': 'projects'},
'user': {'id': user_id},
'role': {'id': role_id},
}
ASSIGNMENT_WITH_PROJECT_ID_AND_GROUP_ID = {
'scope': {'project': {'id': project_id}},
'group': {'id': group_id},
'role': {'id': role_id},
}
ASSIGNMENT_WITH_DOMAIN_ID_AND_USER_ID = {
'scope': {'domain': {'id': domain_id}},
'user': {'id': user_id},
'role': {'id': role_id},
}
ASSIGNMENT_WITH_DOMAIN_ROLE = {
'scope': {'domain': {'id': domain_id}},
'user': {'id': user_id},
'role': {'id': ROLE_2['id']},
}
ASSIGNMENT_WITH_DOMAIN_ID_AND_USER_ID_INCLUDE_NAMES = {
'scope': {
'domain': {'id': domain_id,
'name': domain_name}},
'user': {
'domain': {'id': domain_id,
'name': domain_name},
'id': user_id,
'name': user_name},
'role': {'id': role_id,
'name': role_name},
}
ASSIGNMENT_WITH_DOMAIN_ID_AND_USER_ID_INHERITED = {
'scope': {'domain': {'id': domain_id},
'OS-INHERIT:inherited_to': 'projects'},
'user': {'id': user_id},
'role': {'id': role_id},
}
ASSIGNMENT_WITH_DOMAIN_ID_AND_GROUP_ID = {
'scope': {'domain': {'id': domain_id}},
'group': {'id': group_id},
'role': {'id': role_id},
}
consumer_id = 'test consumer id'
consumer_description = 'someone we trust'
consumer_secret = 'test consumer secret'
OAUTH_CONSUMER = {
'id': consumer_id,
'secret': consumer_secret,
'description': consumer_description
}
access_token_id = 'test access token id'
access_token_secret = 'test access token secret'
access_token_expires = '2014-05-18T03:13:18.152071Z'
OAUTH_ACCESS_TOKEN = {
'id': access_token_id,
'expires': access_token_expires,
'key': access_token_id,
'secret': access_token_secret
}
request_token_id = 'test request token id'
request_token_secret = 'test request token secret'
request_token_expires = '2014-05-17T11:10:51.511336Z'
OAUTH_REQUEST_TOKEN = {
'id': request_token_id,
'expires': request_token_expires,
'key': request_token_id,
'secret': request_token_secret
}
oauth_verifier_pin = '6d74XaDS'
OAUTH_VERIFIER = {
'oauth_verifier': oauth_verifier_pin
}
app_cred_id = 'app-cred-id'
app_cred_name = 'testing_app_cred'
app_cred_role = {"id": role_id, "name": role_name, "domain": None},
app_cred_description = 'app credential for testing'
app_cred_expires = datetime.datetime(2022, 1, 1, 0, 0)
app_cred_expires_str = app_cred_expires.strftime('%Y-%m-%dT%H:%M:%S%z')
app_cred_secret = 'moresecuresecret'
app_cred_access_rules = (
'[{"path": "/v2.1/servers", "method": "GET", "service": "compute"}]'
)
app_cred_access_rules_path = '/tmp/access_rules.json'
access_rule_id = 'access-rule-id'
access_rule_service = 'compute'
access_rule_path = '/v2.1/servers'
access_rule_method = 'GET'
APP_CRED_BASIC = {
'id': app_cred_id,
'name': app_cred_name,
'project_id': project_id,
'roles': app_cred_role,
'description': None,
'expires_at': None,
'unrestricted': False,
'secret': app_cred_secret,
'access_rules': None
}
APP_CRED_OPTIONS = {
'id': app_cred_id,
'name': app_cred_name,
'project_id': project_id,
'roles': app_cred_role,
'description': app_cred_description,
'expires_at': app_cred_expires_str,
'unrestricted': False,
'secret': app_cred_secret,
'access_rules': None,
}
ACCESS_RULE = {
'id': access_rule_id,
'service': access_rule_service,
'path': access_rule_path,
'method': access_rule_method,
}
APP_CRED_ACCESS_RULES = {
'id': app_cred_id,
'name': app_cred_name,
'project_id': project_id,
'roles': app_cred_role,
'description': None,
'expires_at': None,
'unrestricted': False,
'secret': app_cred_secret,
'access_rules': app_cred_access_rules
}
registered_limit_id = 'registered-limit-id'
registered_limit_default_limit = 10
registered_limit_description = 'default limit of foobars'
registered_limit_resource_name = 'foobars'
REGISTERED_LIMIT = {
'id': registered_limit_id,
'default_limit': registered_limit_default_limit,
'resource_name': registered_limit_resource_name,
'service_id': service_id,
'description': None,
'region_id': None
}
REGISTERED_LIMIT_OPTIONS = {
'id': registered_limit_id,
'default_limit': registered_limit_default_limit,
'resource_name': registered_limit_resource_name,
'service_id': service_id,
'description': registered_limit_description,
'region_id': region_id
}
limit_id = 'limit-id'
limit_resource_limit = 15
limit_description = 'limit of foobars'
limit_resource_name = 'foobars'
LIMIT = {
'id': limit_id,
'project_id': project_id,
'resource_limit': limit_resource_limit,
'resource_name': limit_resource_name,
'service_id': service_id,
'description': None,
'region_id': None
}
LIMIT_OPTIONS = {
'id': limit_id,
'project_id': project_id,
'resource_limit': limit_resource_limit,
'resource_name': limit_resource_name,
'service_id': service_id,
'description': limit_description,
'region_id': region_id
}
def fake_auth_ref(fake_token, fake_service=None):
"""Create an auth_ref using keystoneauth's fixtures"""
token_copy = copy.deepcopy(fake_token)
token_id = token_copy.pop('id')
token = fixture.V3Token(**token_copy)
# An auth_ref is actually an access info object
auth_ref = access.create(
body=token,
auth_token=token_id,
)
# Create a service catalog
if fake_service:
service = token.add_service(
fake_service['type'],
fake_service['name'],
)
        # TODO(dtroyer): Add an 'id' element to KSA's _Service fixture
service['id'] = fake_service['id']
for e in fake_service['endpoints']:
region = e.get('region_id') or e.get('region', '<none>')
service.add_endpoint(
e['interface'],
e['url'],
region=region,
)
return auth_ref
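# Hedged usage sketch (not part of the original file), showing how the fake
# tokens defined above combine with fake_auth_ref:
#   auth_ref = fake_auth_ref(UNSCOPED_TOKEN)
#   assert auth_ref.auth_token == UNSCOPED_TOKEN['id']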
class FakeAuth(object):
def __init__(self, auth_method_class=None):
self._auth_method_class = auth_method_class
def get_token(self, *args, **kwargs):
return token_id
class FakeSession(object):
def __init__(self, **kwargs):
self.auth = FakeAuth()
class FakeIdentityv3Client(object):
def __init__(self, **kwargs):
self.domains = mock.Mock()
self.domains.resource_class = fakes.FakeResource(None, {})
self.credentials = mock.Mock()
self.credentials.resource_class = fakes.FakeResource(None, {})
self.endpoints = mock.Mock()
self.endpoints.resource_class = fakes.FakeResource(None, {})
self.endpoint_filter = mock.Mock()
self.endpoint_filter.resource_class = fakes.FakeResource(None, {})
self.endpoint_groups = mock.Mock()
self.endpoint_groups.resource_class = fakes.FakeResource(None, {})
self.groups = mock.Mock()
self.groups.resource_class = fakes.FakeResource(None, {})
self.oauth1 = mock.Mock()
self.oauth1.resource_class = fakes.FakeResource(None, {})
self.projects = mock.Mock()
self.projects.resource_class = fakes.FakeResource(None, {})
self.regions = mock.Mock()
self.regions.resource_class = fakes.FakeResource(None, {})
self.roles = mock.Mock()
self.roles.resource_class = fakes.FakeResource(None, {})
self.services = mock.Mock()
self.services.resource_class = fakes.FakeResource(None, {})
self.session = mock.Mock()
self.session.auth.auth_ref.service_catalog.resource_class = \
fakes.FakeResource(None, {})
self.tokens = mock.Mock()
self.tokens.resource_class = fakes.FakeResource(None, {})
self.trusts = mock.Mock()
self.trusts.resource_class = fakes.FakeResource(None, {})
self.users = mock.Mock()
self.users.resource_class = fakes.FakeResource(None, {})
self.role_assignments = mock.Mock()
self.role_assignments.resource_class = fakes.FakeResource(None, {})
self.auth_token = kwargs['token']
self.management_url = kwargs['endpoint']
self.auth = FakeAuth()
self.auth.client = mock.Mock()
self.auth.client.resource_class = fakes.FakeResource(None, {})
self.application_credentials = mock.Mock()
self.application_credentials.resource_class = fakes.FakeResource(None,
{})
self.access_rules = mock.Mock()
self.access_rules.resource_class = fakes.FakeResource(None, {})
self.inference_rules = mock.Mock()
self.inference_rules.resource_class = fakes.FakeResource(None, {})
self.registered_limits = mock.Mock()
self.registered_limits.resource_class = fakes.FakeResource(None, {})
self.limits = mock.Mock()
self.limits.resource_class = fakes.FakeResource(None, {})
class FakeFederationManager(object):
def __init__(self, **kwargs):
self.identity_providers = mock.Mock()
self.identity_providers.resource_class = fakes.FakeResource(None, {})
self.mappings = mock.Mock()
self.mappings.resource_class = fakes.FakeResource(None, {})
self.protocols = mock.Mock()
self.protocols.resource_class = fakes.FakeResource(None, {})
self.projects = mock.Mock()
self.projects.resource_class = fakes.FakeResource(None, {})
self.domains = mock.Mock()
self.domains.resource_class = fakes.FakeResource(None, {})
self.service_providers = mock.Mock()
self.service_providers.resource_class = fakes.FakeResource(None, {})
class FakeFederatedClient(FakeIdentityv3Client):
def __init__(self, **kwargs):
super(FakeFederatedClient, self).__init__(**kwargs)
self.federation = FakeFederationManager()
class FakeOAuth1Client(FakeIdentityv3Client):
def __init__(self, **kwargs):
super(FakeOAuth1Client, self).__init__(**kwargs)
self.access_tokens = mock.Mock()
self.access_tokens.resource_class = fakes.FakeResource(None, {})
self.consumers = mock.Mock()
self.consumers.resource_class = fakes.FakeResource(None, {})
self.request_tokens = mock.Mock()
self.request_tokens.resource_class = fakes.FakeResource(None, {})
class TestIdentityv3(utils.TestCommand):
def setUp(self):
super(TestIdentityv3, self).setUp()
self.app.client_manager.identity = FakeIdentityv3Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN,
)
class TestFederatedIdentity(utils.TestCommand):
def setUp(self):
super(TestFederatedIdentity, self).setUp()
self.app.client_manager.identity = FakeFederatedClient(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN
)
class TestOAuth1(utils.TestCommand):
def setUp(self):
super(TestOAuth1, self).setUp()
self.app.client_manager.identity = FakeOAuth1Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN
)
class FakeProject(object):
"""Fake one or more project."""
@staticmethod
def create_one_project(attrs=None):
"""Create a fake project.
:param Dictionary attrs:
A dictionary with all attributes
:return:
A FakeResource object, with id, name, and so on
"""
attrs = attrs or {}
# set default attributes.
project_info = {
'id': 'project-id-' + uuid.uuid4().hex,
'name': 'project-name-' + uuid.uuid4().hex,
'description': 'project-description-' + uuid.uuid4().hex,
'enabled': True,
'is_domain': False,
'domain_id': 'domain-id-' + uuid.uuid4().hex,
'parent_id': 'parent-id-' + uuid.uuid4().hex,
'tags': [],
'links': 'links-' + uuid.uuid4().hex,
}
project_info.update(attrs)
project = fakes.FakeResource(info=copy.deepcopy(project_info),
loaded=True)
return project
@staticmethod
def create_projects(attrs=None, count=2):
"""Create multiple fake projects.
:param Dictionary attrs:
A dictionary with all attributes
:param int count:
The number of projects to fake
:return:
A list of FakeResource objects faking the projects
"""
projects = []
for i in range(0, count):
projects.append(FakeProject.create_one_project(attrs))
return projects
class FakeDomain(object):
"""Fake one or more domain."""
@staticmethod
def create_one_domain(attrs=None):
"""Create a fake domain.
:param Dictionary attrs:
A dictionary with all attributes
:return:
A FakeResource object, with id, name, and so on
"""
attrs = attrs or {}
# set default attributes.
domain_info = {
'id': 'domain-id-' + uuid.uuid4().hex,
'name': 'domain-name-' + uuid.uuid4().hex,
'description': 'domain-description-' + uuid.uuid4().hex,
'enabled': True,
'tags': [],
'links': 'links-' + uuid.uuid4().hex,
}
domain_info.update(attrs)
domain = fakes.FakeResource(info=copy.deepcopy(domain_info),
loaded=True)
return domain
class FakeCredential(object):
"""Fake one or more credential."""
@staticmethod
def create_one_credential(attrs=None):
"""Create a fake credential.
:param Dictionary attrs:
A dictionary with all attributes
:return:
A FakeResource object, with id, type, and so on
"""
attrs = attrs or {}
# set default attributes.
credential_info = {
'id': 'credential-id-' + uuid.uuid4().hex,
'type': 'cert',
'user_id': 'user-id-' + uuid.uuid4().hex,
'blob': 'credential-data-' + uuid.uuid4().hex,
'project_id': 'project-id-' + uuid.uuid4().hex,
'links': 'links-' + uuid.uuid4().hex,
}
credential_info.update(attrs)
credential = fakes.FakeResource(
info=copy.deepcopy(credential_info), loaded=True)
return credential
@staticmethod
def create_credentials(attrs=None, count=2):
"""Create multiple fake credentials.
:param Dictionary attrs:
A dictionary with all attributes
:param int count:
The number of credentials to fake
:return:
A list of FakeResource objects faking the credentials
"""
credentials = []
for i in range(0, count):
credential = FakeCredential.create_one_credential(attrs)
credentials.append(credential)
return credentials
@staticmethod
def get_credentials(credentials=None, count=2):
"""Get an iterable MagicMock object with a list of faked credentials.
If credentials list is provided, then initialize the Mock object with
the list. Otherwise create one.
:param List credentials:
A list of FakeResource objects faking credentials
:param Integer count:
The number of credentials to be faked
:return:
An iterable Mock object with side_effect set to a list of faked
credentials
"""
if credentials is None:
credentials = FakeCredential.create_credentials(count)
return mock.Mock(side_effect=credentials)
class FakeUser(object):
"""Fake one or more user."""
@staticmethod
def create_one_user(attrs=None):
"""Create a fake user.
:param Dictionary attrs:
A dictionary with all attributes
:return:
A FakeResource object, with id, name, and so on
"""
attrs = attrs or {}
# set default attributes.
user_info = {
'id': 'user-id-' + uuid.uuid4().hex,
'name': 'user-name-' + uuid.uuid4().hex,
'default_project_id': 'project-' + uuid.uuid4().hex,
'email': 'user-email-' + uuid.uuid4().hex,
'enabled': True,
'domain_id': 'domain-id-' + uuid.uuid4().hex,
'links': 'links-' + uuid.uuid4().hex,
}
user_info.update(attrs)
user = fakes.FakeResource(info=copy.deepcopy(user_info),
loaded=True)
return user
@staticmethod
def create_users(attrs=None, count=2):
"""Create multiple fake users.
:param Dictionary attrs:
A dictionary with all attributes
:param int count:
The number of users to fake
:return:
A list of FakeResource objects faking the users
"""
users = []
for i in range(0, count):
user = FakeUser.create_one_user(attrs)
users.append(user)
return users
@staticmethod
def get_users(users=None, count=2):
"""Get an iterable MagicMock object with a list of faked users.
If users list is provided, then initialize the Mock object with
the list. Otherwise create one.
:param List users:
A list of FakeResource objects faking users
:param Integer count:
The number of users to be faked
    :return:
An iterable Mock object with side_effect set to a list of faked
users
"""
if users is None:
users = FakeUser.create_users(count)
return mock.Mock(side_effect=users)
class FakeGroup(object):
"""Fake one or more group."""
@staticmethod
def create_one_group(attrs=None):
"""Create a fake group.
:param Dictionary attrs:
A dictionary with all attributes
:return:
A FakeResource object, with id, name, and so on
"""
attrs = attrs or {}
# set default attributes.
group_info = {
'id': 'group-id-' + uuid.uuid4().hex,
'name': 'group-name-' + uuid.uuid4().hex,
'links': 'links-' + uuid.uuid4().hex,
'domain_id': 'domain-id-' + uuid.uuid4().hex,
'description': 'group-description-' + uuid.uuid4().hex,
}
group_info.update(attrs)
group = fakes.FakeResource(info=copy.deepcopy(group_info),
loaded=True)
return group
@staticmethod
def create_groups(attrs=None, count=2):
"""Create multiple fake groups.
:param Dictionary attrs:
A dictionary with all attributes
:param int count:
The number of groups to fake
:return:
A list of FakeResource objects faking the groups
"""
groups = []
for i in range(0, count):
group = FakeGroup.create_one_group(attrs)
groups.append(group)
return groups
@staticmethod
def get_groups(groups=None, count=2):
"""Get an iterable MagicMock object with a list of faked groups.
If groups list is provided, then initialize the Mock object with
the list. Otherwise create one.
:param List groups:
A list of FakeResource objects faking groups
:param Integer count:
The number of groups to be faked
:return:
An iterable Mock object with side_effect set to a list of faked
groups
"""
if groups is None:
groups = FakeGroup.create_groups(count)
return mock.Mock(side_effect=groups)
class FakeEndpoint(object):
"""Fake one or more endpoint."""
@staticmethod
def create_one_endpoint(attrs=None):
"""Create a fake endpoint.
:param Dictionary attrs:
A dictionary with all attributes
:return:
A FakeResource object, with id, url, and so on
"""
attrs = attrs or {}
# set default attributes.
endpoint_info = {
'id': 'endpoint-id-' + uuid.uuid4().hex,
'url': 'url-' + uuid.uuid4().hex,
'region': 'endpoint-region-' + uuid.uuid4().hex,
'interface': 'admin',
'service_id': 'service-id-' + uuid.uuid4().hex,
'enabled': True,
'links': 'links-' + uuid.uuid4().hex,
}
endpoint_info.update(attrs)
endpoint = fakes.FakeResource(info=copy.deepcopy(endpoint_info),
loaded=True)
return endpoint
@staticmethod
def create_one_endpoint_filter(attrs=None):
"""Create a fake endpoint project relationship.
:param Dictionary attrs:
A dictionary with all attributes of endpoint filter
:return:
A FakeResource object with project, endpoint and so on
"""
attrs = attrs or {}
# Set default attribute
endpoint_filter_info = {
'project': 'project-id-' + uuid.uuid4().hex,
'endpoint': 'endpoint-id-' + uuid.uuid4().hex,
}
# Overwrite default attributes if there are some attributes set
endpoint_filter_info.update(attrs)
endpoint_filter = fakes.FakeModel(
copy.deepcopy(endpoint_filter_info))
return endpoint_filter
class FakeEndpointGroup(object):
"""Fake one or more endpoint group."""
@staticmethod
def create_one_endpointgroup(attrs=None):
"""Create a fake endpoint group.
:param Dictionary attrs:
A dictionary with all attributes
:return:
A FakeResource object, with id, url, and so on
"""
attrs = attrs or {}
# set default attributes.
endpointgroup_info = {
'id': 'endpoint-group-id-' + uuid.uuid4().hex,
'name': 'endpoint-group-name-' + uuid.uuid4().hex,
'filters': {
'region': 'region-' + uuid.uuid4().hex,
'service_id': 'service-id-' + uuid.uuid4().hex,
},
'description': 'endpoint-group-description-' + uuid.uuid4().hex,
'links': 'links-' + uuid.uuid4().hex,
}
endpointgroup_info.update(attrs)
endpoint = fakes.FakeResource(info=copy.deepcopy(endpointgroup_info),
loaded=True)
return endpoint
@staticmethod
def create_one_endpointgroup_filter(attrs=None):
"""Create a fake endpoint project relationship.
:param Dictionary attrs:
A dictionary with all attributes of endpointgroup filter
:return:
A FakeResource object with project, endpointgroup and so on
"""
attrs = attrs or {}
# Set default attribute
endpointgroup_filter_info = {
'project': 'project-id-' + uuid.uuid4().hex,
'endpointgroup': 'endpointgroup-id-' + uuid.uuid4().hex,
}
# Overwrite default attributes if there are some attributes set
endpointgroup_filter_info.update(attrs)
endpointgroup_filter = fakes.FakeModel(
copy.deepcopy(endpointgroup_filter_info))
return endpointgroup_filter
class FakeService(object):
"""Fake one or more service."""
@staticmethod
def create_one_service(attrs=None):
"""Create a fake service.
:param Dictionary attrs:
A dictionary with all attributes
:return:
A FakeResource object, with id, name, and so on
"""
attrs = attrs or {}
# set default attributes.
service_info = {
'id': 'service-id-' + uuid.uuid4().hex,
'name': 'service-name-' + uuid.uuid4().hex,
'type': 'service-type-' + uuid.uuid4().hex,
'description': 'service-description-' + uuid.uuid4().hex,
'enabled': True,
'links': 'links-' + uuid.uuid4().hex,
}
service_info.update(attrs)
service = fakes.FakeResource(info=copy.deepcopy(service_info),
loaded=True)
return service
class FakeRoleAssignment(object):
"""Fake one or more role assignment."""
@staticmethod
def create_one_role_assignment(attrs=None):
"""Create a fake role assignment.
:param Dictionary attrs:
A dictionary with all attributes
:return:
A FakeResource object, with scope, user, and so on
"""
attrs = attrs or {}
# set default attributes.
role_assignment_info = {
'scope': {'project': {'id': 'project-id-' + uuid.uuid4().hex}},
'user': {'id': 'user-id-' + uuid.uuid4().hex},
'role': {'id': 'role-id-' + uuid.uuid4().hex},
}
role_assignment_info.update(attrs)
role_assignment = fakes.FakeResource(
info=copy.deepcopy(role_assignment_info), loaded=True)
return role_assignment
class FakeImpliedRoleResponse(object):
"""Fake one or more role assignment."""
def __init__(self, prior_role, implied_roles):
self.prior_role = prior_role
self.implies = [role for role in implied_roles]
@staticmethod
def create_list():
"""Create a fake implied role list response.
:return:
A list of FakeImpliedRoleResponse objects
"""
# set default attributes.
implied_roles = [
FakeImpliedRoleResponse(ROLES[0], [ROLES[1]])
]
return implied_roles
|
openstack/python-openstackclient
|
openstackclient/tests/unit/identity/v3/fakes.py
|
Python
|
apache-2.0
| 34,955
|
[
"exciting"
] |
bb1739d63252aebd3c97b733af499b5e6641fd024e6fc8cd2964da5aac4baf22
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from espressopp.standard_system.LennardJones import *
from espressopp.standard_system.PolymerMelt import *
from espressopp.standard_system.Minimal import *
from espressopp.standard_system.Default import *
from espressopp.standard_system.KGMelt import *
|
govarguz/espressopp
|
src/standard_system/__init__.py
|
Python
|
gpl-3.0
| 1,125
|
[
"ESPResSo"
] |
729318ddb15bb17cbdf13188bdefb0b9aa2f99fabf688ea507f860c0e329115a
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Actions sampler that supports sampling the following types of actions.
1. The structure of the action must be a dict, not a nested structure.
2. It contains one (and only one) categorical (one_hot) action.
The category action should define a spec whose spec.dtype.is_integer==True.
The size of this category action indicates the total number of mutually
exclusive actions.
3. Each category is a 0/1 action by default, but it can also extend to stand
for a continuous action. In order to do that, sub_actions_fields must be set.
4. The sampler samples one 0/1 action for each categorical choice and spreads
the continuous samples evenly (per continuous category) among the rest of the
samples.
For example,
```
action_spec = {
'continuous1': tensor_spec.BoundedTensorSpec(
[2], tf.float32, 0.0, 1.0),
'continuous2': tensor_spec.BoundedTensorSpec(
[2], tf.float32, 0.0, 1.0),
'continuous3': tensor_spec.BoundedTensorSpec(
[2], tf.float32, 0.0, 1.0),
'categorical': tensor_spec.BoundedTensorSpec(
[4], tf.int32, 0, 1)}
sampler = cem_actions_sampler_continuous_and_one_hot.GaussianActionsSampler(
action_spec=action_spec, sample_clippers=[[], [], []],
sub_actions_fields=[
['categorical'], ['continuous1', 'continuous3'], ['continuous2']])
```
In this case, the action has a 'categorical' field that is a categorical
action. There are 4 possible actions:
[1, 0, 0, 0] -- choosing 1st 0/1 action
[0, 1, 0, 0] -- choosing 2nd 0/1 action
[0, 0, 1, 0] -- choosing continuous1 & continuous3
[0, 0, 0, 1] -- choosing continuous2
when 1st or 2nd action is chosen, all continuous fields will be all 0s
when 3rd action is chosen, 'continuous2' will be all 0s
when 4th action is chosen, 'continuous1', 'continuous3' will be all 0s
Therefore making the 4 actions mutually exclusive.
For example, if batch_size == 1 and you call
```
sampler.sample_batch_and_clip(4)
```
The result will be
{
'continuous1':
[[[0.0, 0.0],
[0.0, 0.0],
[0.3, 0.5],
[0.0, 0.0]]],
'continuous2':
[[[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.2, 0.1]]],
'continuous3':
[[[0.0, 0.0],
[0.0, 0.0],
[0.4, 0.6],
[0.0, 0.0]]],
'categorical':
[[[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]]]
}
Changing the order of the sub_actions_fields will change what each
one_hot vector stands for.
For example, if you call
```
sampler = cem_actions_sampler_continuous_and_one_hot.GaussianActionsSampler(
action_spec=action_spec, sample_clippers=[[], [], []],
sub_actions_fields=[
['continuous1', 'continuous3'], ['categorical'], ['continuous2']])
```
[1, 0, 0, 0] -- choosing continuous1 & continuous3
[0, 1, 0, 0] -- choosing 1st categorical action
[0, 0, 1, 0] -- choosing 2nd categorical action
[0, 0, 0, 1] -- choosing continuous2
Some notations used in the comments below are:
B: batch_size
A: action_size
N: num_samples
S: num_mutually_exclusive_actions
K: num_sub_continuous_actions
S-K: num_sub_categorical_actions
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.policies.samplers import cem_actions_sampler
from tf_agents.utils import common
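# A minimal numpy sketch (not part of the original module) of the mutually
# exclusive layout described in the module docstring: one one-hot row per
# category, with each continuous field zeroed for categories it does not own.
def _one_hot_layout_sketch():
  import numpy as np  # used only by this illustrative sketch
  num_categories = 4
  one_hot = np.eye(num_categories, dtype=np.int32)  # the 'categorical' rows
  # 'continuous2' belongs only to the 4th category, so it is masked elsewhere.
  continuous2_mask = np.array([0., 0., 0., 1.])[:, None]            # [S, 1]
  continuous2 = continuous2_mask * np.array([[0.2, 0.1]])           # [S, A]
  return one_hot, continuous2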
@gin.configurable
class GaussianActionsSampler(cem_actions_sampler.ActionsSampler):
"""Action sampler that samples continuous actions using Gaussian distribution and one_hot actions.
Supports dict action_spec with arbitrary 1d continuous actions and 1 one_hot
action.
  Given a batch of distribution params (including mean and var) for K (K >= 1)
  mutually exclusive continuous actions, sample [B, N-S+K, A] continuous actions
  and [B, S-K, S] one_hot actions, where 'N' means num_samples,
  'B' means batch_size, 'A' means action_size_continuous,
  'S' means num_mutually_exclusive_actions. S-K is the possible number of
  categorical actions. If K > 1, the N-S+K samples will be divided evenly
  among these mutually exclusive continuous actions.
"""
def __init__(self, action_spec, sample_clippers=None,
sub_actions_fields=None, sample_rejecters=None,
max_rejection_iterations=10):
"""Builds a GaussianActionsSampler.
Args:
action_spec: A dict of BoundedTensorSpec representing the actions.
      sample_clippers: A list of lists of sample clipper functions. Each
        function takes a dict of Tensors of actions and a dict of Tensors of
        the state, and outputs a dict of Tensors of clipped actions.
      sub_actions_fields: A list of lists of action keys used to group
        fields into sub_actions.
      sample_rejecters: A list of callables that will reject samples and return
        a mask tensor.
      max_rejection_iterations: Maximum number of iterations to retry when
        samples are rejected.
"""
super(GaussianActionsSampler, self).__init__(
action_spec, sample_clippers, sample_rejecters)
num_one_hot_action = 0
for flat_action_spec in tf.nest.flatten(action_spec):
if flat_action_spec.shape.rank != 1:
        raise ValueError('Only 1d action is supported by this sampler. '
                         'The action_spec: \n{}\n contains an action whose '
                         'rank is not 1. Consider converting it into multiple '
                         '1d actions.'.format(action_spec))
if flat_action_spec.dtype.is_integer:
num_one_hot_action = num_one_hot_action + 1
# S
self._num_mutually_exclusive_actions = (
flat_action_spec.shape.as_list()[0])
if num_one_hot_action != 1:
raise ValueError('Only continuous action + 1 one_hot action is supported'
' by this sampler. The action_spec: \n{}\n contains '
'either multiple one_hot actions or no one_hot '
'action'.format(action_spec))
if sample_clippers is None:
raise ValueError('Sampler clippers must be set!')
if sub_actions_fields is None:
raise ValueError('sub_actions_fields must be set!')
if len(sample_clippers) != len(sub_actions_fields):
raise ValueError('Number of sample_clippers must be the same as number of'
' sub_actions_fields! sample_clippers: {}, '
'sub_actions_fields: {}'.format(
sample_clippers, sub_actions_fields))
if self._sample_rejecters is None:
self._sample_rejecters = [None] * len(sub_actions_fields)
self._max_rejection_iterations = tf.constant(max_rejection_iterations)
self._num_sub_actions = len(sample_clippers)
self._sub_actions_fields = sub_actions_fields
action_spec_keys = list(sorted(self._action_spec.keys()))
sub_actions_fields_keys = [
item for sublist in self._sub_actions_fields for item in sublist # pylint: disable=g-complex-comprehension
]
sub_actions_fields_keys.sort()
if action_spec_keys != sub_actions_fields_keys:
raise ValueError('sub_actions_fields must cover all keys in action_spec!'
'action_spec_keys: {}, sub_actions_fields_keys:'
' {}'.format(action_spec_keys, sub_actions_fields_keys))
self._categorical_index = -1
for i in range(self._num_sub_actions):
if (len(self._sub_actions_fields[i]) == 1 and
self._action_spec[self._sub_actions_fields[i][0]].dtype.is_integer):
self._categorical_index = i
break
if self._categorical_index == -1:
raise ValueError('Categorical action cannot be grouped together w/ '
'continuous action into a sub_action.')
self._categorical_key = self._sub_actions_fields[self._categorical_index][0]
# K
self._num_sub_continuous_actions = self._num_sub_actions - 1
# S-K
self._num_sub_categorical_actions = (
self._num_mutually_exclusive_actions -
self._num_sub_continuous_actions)
# Because the sampler samples all fields but some actions are mutually
# exclusive, masks are needed to zero out the fields that do not
# belong to the current sub_action.
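# Illustrative masks (hypothetical keys): with action_spec keys
# {'xy', 'gripper', 'mode'} and sub_actions_fields
# [['xy'], ['mode'], ['gripper']], the mask for sub_action 0 is
# {'xy': ones([1]), 'gripper': zeros([1]), 'mode': zeros([1])}, so the
# sampled values of the fields outside the sub_action are zeroed out.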
self._masks = []
for i in range(self._num_sub_actions):
mask = {}
for k in self._action_spec.keys():
if k in self._sub_actions_fields[i]:
mask[k] = tf.ones([1])
else:
mask[k] = tf.zeros([1])
self._masks.append(mask)
self._index_range_min = {}
self._index_range_max = {}
def refit_distribution_to(self, target_sample_indices, samples):
"""Refits distribution according to actions with index of ind.
Args:
target_sample_indices: A [B, M] sized tensor holding the indices of the
M elite samples.
samples: A dict corresponding to action_spec. Each action is
a [B, N, A] sized tensor.
Returns:
mean: A dict containing [B, A] sized tensors where each row
is the refitted mean.
var: A dict containing [B, A] sized tensors where each row
is the refitted var.
"""
def get_mean(best_samples, spec, index_range_min, index_range_max):
if spec.dtype.is_integer:
return tf.zeros([tf.shape(target_sample_indices)[0], spec.shape[0]])
else:
# In the following we use a customized way to calculate mean and var
# from best_samples. The reason why we don't use the standard
# tf.nn.moments is because:
# 1. We only want to calculate mean and var for continuous samples.
# 2. The M elites may contain both continuous and categorical samples.
# 3. In a batch (B) of data, the number of continuous elite samples may
#    differ between batch elements.
# Also, because the sampled values of categorical actions are all 0.0,
# we calculate mean and var in the following way:
# mean = sum_elites_continuous / num_elites_continuous_expanded
# var = (sum((best_samples - mean)^2) - mean^2 * num_elites_categorical)
#       / num_elites_continuous
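# Worked example (assumed numbers): for one batch element with M=4
# elites, of which 3 are continuous samples [2.0], [4.0], [6.0] and 1 is
# a categorical sample [0.0]:
#   mean = (2 + 4 + 6 + 0) / 3 = 4.0
#   var = ((2-4)^2 + (4-4)^2 + (6-4)^2 + (0-4)^2 - 4.0^2 * 1) / 3 = 8/3
# which equals the variance of the 3 continuous samples alone.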
sum_elites_continuous = tf.reduce_sum(best_samples, axis=1) # [B, A]
# num_elites_continuous: [B]
num_elites_continuous = tf.reduce_sum(tf.cast(
tf.logical_and(tf.greater_equal(
target_sample_indices, index_range_min), tf.less(
target_sample_indices, index_range_max)),
tf.float32), axis=1)
# num_elites_continuous_expanded: [B, A]
num_elites_continuous_expanded = tf.tile(tf.expand_dims(
num_elites_continuous, 1), [1, spec.shape.as_list()[0]])
# mean: [B, A]
mean = tf.math.divide_no_nan(
sum_elites_continuous, num_elites_continuous_expanded)
return mean
def get_var(best_samples, mean, spec, index_range_min, index_range_max):
if spec.dtype.is_integer:
return tf.zeros([tf.shape(target_sample_indices)[0], spec.shape[0]])
else:
# num_elites_continuous: [B]
num_elites_continuous = tf.reduce_sum(tf.cast(
tf.logical_and(tf.greater_equal(
target_sample_indices, index_range_min), tf.less(
target_sample_indices, index_range_max)),
tf.float32), axis=1)
# num_elites_continuous_expanded: [B, A]
num_elites_continuous_expanded = tf.tile(tf.expand_dims(
num_elites_continuous, 1), [1, spec.shape.as_list()[0]])
num_elites = tf.cast(tf.shape(target_sample_indices)[1], tf.float32)
# num_elites_categorical_expanded: [B, A]
num_elites_categorical_expanded = (num_elites -
num_elites_continuous_expanded)
# mean_expanded: [M, B, A]
mean_expanded = mean * tf.ones(
[tf.shape(target_sample_indices)[1], 1, 1])
# mean_expanded: [B, M, A]
mean_expanded = tf.transpose(mean_expanded, [1, 0, 2])
var = tf.math.divide_no_nan(
tf.reduce_sum(tf.square(best_samples - mean_expanded), axis=1) -
tf.multiply(tf.square(mean), num_elites_categorical_expanded),
num_elites_continuous_expanded)
return var
best_samples = tf.nest.map_structure(
lambda s: tf.gather(s, target_sample_indices, batch_dims=1), samples)
if not self._index_range_min or not self._index_range_max:
raise ValueError('sample_batch_and_clip must be called before '
'refit_distribution_to!')
mean = tf.nest.map_structure(
get_mean, best_samples, self._action_spec,
self._index_range_min, self._index_range_max)
var = tf.nest.map_structure(
get_var, best_samples, mean, self._action_spec,
self._index_range_min, self._index_range_max)
return mean, var
def _sample_continuous_and_transpose(
self, mean, var, state, i, one_hot_index):
num_samples = self._number_samples_all[i]
def sample_and_transpose(mean, var, spec, mask):
if spec.dtype.is_integer:
sample = tf.one_hot(
one_hot_index, self._num_mutually_exclusive_actions)
sample = tf.broadcast_to(
sample,
[tf.shape(mean)[0],
tf.constant(num_samples), # pylint: disable=cell-var-from-loop
tf.shape(mean)[1]])
else:
dist = tfp.distributions.Normal(loc=mean, scale=tf.sqrt(var))
# Transpose to [B, N, A]
sample = tf.transpose(
dist.sample(num_samples), [1, 0, 2]) # pylint: disable=cell-var-from-loop
sample = sample * mask
return tf.cast(sample, spec.dtype)
batch_size = tf.shape(tf.nest.flatten(mean)[0])[0]
def sample_fn(mean_sample, var_sample, state_sample):
# [B, N, A]
samples_continuous = tf.nest.map_structure(sample_and_transpose,
mean_sample, var_sample,
self._action_spec,
self._masks[i])
if self._sample_clippers[i]:
for sample_clipper in self._sample_clippers[i]:
samples_continuous = sample_clipper(samples_continuous, state_sample)
samples_continuous = tf.nest.map_structure(
common.clip_to_spec, samples_continuous, self._action_spec)
return samples_continuous
@tf.function
def rejection_sampling(sample_rejector):
valid_batch_samples = tf.nest.map_structure(
lambda spec: tf.TensorArray(spec.dtype, size=batch_size),
self._action_spec)
for b_indx in tf.range(batch_size):
k = tf.constant(0)
# pylint: disable=cell-var-from-loop
valid_samples = tf.nest.map_structure(
lambda spec: tf.TensorArray(spec.dtype, size=num_samples),
self._action_spec)
count = tf.constant(0)
while count < self._max_rejection_iterations:
count += 1
mean_sample = tf.nest.map_structure(
lambda t: tf.expand_dims(tf.gather(t, b_indx), axis=0), mean)
var_sample = tf.nest.map_structure(
lambda t: tf.expand_dims(tf.gather(t, b_indx), axis=0), var)
if state is not None:
state_sample = tf.nest.map_structure(
lambda t: tf.expand_dims(tf.gather(t, b_indx), axis=0), state)
else:
state_sample = None
samples = sample_fn(mean_sample, var_sample, state_sample) # n, a
mask = sample_rejector(samples, state_sample)
mask = mask[0, ...]
mask_index = tf.where(mask)[:, 0]
num_mask = tf.shape(mask_index)[0]
if num_mask == 0:
continue
good_samples = tf.nest.map_structure(
lambda t: tf.gather(t, mask_index, axis=1)[0, ...], samples)
for sample_idx in range(num_mask):
if k >= num_samples:
break
valid_samples = tf.nest.map_structure(
lambda gs, vs: vs.write(k, gs[sample_idx:sample_idx+1, ...]),
good_samples, valid_samples)
k += 1
if k < num_samples:
def sample_zero_and_one_hot(spec):
if spec.dtype.is_integer:
sample = tf.one_hot(
one_hot_index, self._num_mutually_exclusive_actions)
else:
sample = tf.zeros(spec.shape, spec.dtype)
sample = tf.broadcast_to(
sample,
tf.TensorShape([num_samples] + sample.shape.dims))
return tf.cast(sample, spec.dtype)
zero_samples = tf.nest.map_structure(
sample_zero_and_one_hot, self._action_spec)
# Pad the remaining slots with zero/one-hot samples.
for sample_idx in range(num_samples - k):
valid_samples = tf.nest.map_structure(
lambda gs, vs: vs.write(k + sample_idx, gs[sample_idx:sample_idx+1, ...]),  # pylint: disable=cell-var-from-loop
zero_samples, valid_samples)
valid_samples = tf.nest.map_structure(lambda vs: vs.concat(),
valid_samples)
valid_batch_samples = tf.nest.map_structure(
lambda vbs, vs: vbs.write(b_indx, vs), valid_batch_samples,
valid_samples)
samples_continuous = tf.nest.map_structure(
lambda a: a.stack(), valid_batch_samples)
return samples_continuous
if self._sample_rejecters[i]:
samples_continuous = rejection_sampling(self._sample_rejecters[i])
def set_b_n_shape(t):
t.set_shape(tf.TensorShape([None, num_samples] + t.shape[2:].dims))
tf.nest.map_structure(set_b_n_shape, samples_continuous)
else:
samples_continuous = sample_fn(mean, var, state)
return samples_continuous
@gin.configurable
def sample_batch_and_clip(self, num_samples, mean, var, state=None):
"""Samples and clips a batch of actions [B, N, A] with mean and var.
Args:
num_samples: Number of actions to sample each round.
mean: A dict containing [B, A] shaped tensor representing the
mean of the actions to be sampled.
var: A dict containing [B, A] shaped tensor representing the
variance of the actions to be sampled.
state: A dict of state tensors constructed according to the
observation_spec of the task.
Returns:
actions: A dict containing tensors of sampled actions with
shape [B, N, A].
"""
# At least one sample for each kind of one hot action is generated.
assert num_samples >= self._num_mutually_exclusive_actions
num_samples_continuous_total = (
num_samples -
self._num_mutually_exclusive_actions +
self._num_sub_actions - 1)
num_samples_continuous_each = (
num_samples_continuous_total // self._num_sub_continuous_actions)
# When sampling N samples, we use min_index and max_index to cut the N
# samples into segments, one segment per sub_action.
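# Illustrative segmentation (assumed numbers): with num_samples=10, S=4
# mutually exclusive actions, K=2 continuous sub_actions, and the
# categorical sub_action at index 1, the segments are [0, 4) for the
# first continuous sub_action, [4, 6) for the S-K=2 categorical one_hot
# samples, and [6, 10) for the last continuous sub_action (which absorbs
# any rounding remainder).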
min_index = 0
max_index = 0
self._number_samples_all = []
for i in range(self._num_sub_actions):
min_index = max_index
if i == self._categorical_index:
max_index += self._num_sub_categorical_actions
elif i == self._num_sub_actions - 1:
max_index = num_samples
else:
max_index += num_samples_continuous_each
for k in self._action_spec.keys():
if k in self._sub_actions_fields[i]:
self._index_range_min[k] = min_index
self._index_range_max[k] = max_index
self._number_samples_all.append(max_index - min_index)
samples_all = []
one_hot_index = 0
for i in range(self._num_sub_actions):
if i == self._categorical_index:
# Samples one_hot actions.
def sample_one_hot(mean, spec):
if spec.dtype.is_integer:
full_one_hot = tf.eye(
self._num_mutually_exclusive_actions,
dtype=tf.int32) # [S, S]
categorical_one_hot = tf.gather(
full_one_hot,
tf.range(one_hot_index,
one_hot_index+self._num_sub_categorical_actions))
return tf.broadcast_to(
categorical_one_hot,
[tf.shape(mean)[0],
self._num_sub_categorical_actions,
spec.shape[0]])
else:
return tf.zeros([
tf.shape(mean)[0],
self._num_sub_categorical_actions,
spec.shape[0]])
samples_one_hot = tf.nest.map_structure(
sample_one_hot, mean, self._action_spec)
samples_one_hot = tf.nest.map_structure(
common.clip_to_spec, samples_one_hot, self._action_spec)
samples_all.append(samples_one_hot)
one_hot_index += self._num_sub_categorical_actions
else:
samples_continuous = self._sample_continuous_and_transpose(
mean, var, state, i, one_hot_index)
samples_all.append(samples_continuous)
one_hot_index += 1
samples_all = tf.nest.map_structure(
lambda *tensors: tf.concat(tensors, axis=1),
*samples_all)
return samples_all
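# Minimal usage sketch (hypothetical specs, keys and inputs; assumes
# `from tf_agents.specs import tensor_spec` and a clipper such as the
# clip_xy sketch above; not part of the original file):
#
#   action_spec = {
#       'xy': tensor_spec.BoundedTensorSpec([2], tf.float32, -1.0, 1.0),
#       'mode': tensor_spec.BoundedTensorSpec([3], tf.int32, 0, 1),
#   }
#   sampler = GaussianActionsSampler(
#       action_spec,
#       sample_clippers=[[clip_xy], []],
#       sub_actions_fields=[['xy'], ['mode']])
#   # mean and var are dicts of [B, A] tensors (all zeros for 'mode').
#   actions = sampler.sample_batch_and_clip(
#       num_samples=8, mean=mean, var=var, state=None)
#
# Here 'mode' is the single one_hot action (S=3) and 'xy' the only
# continuous sub_action (K=1), so 'mode' receives S-K=2 one_hot samples
# and 'xy' the remaining 6.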
|
tensorflow/agents
|
tf_agents/policies/samplers/cem_actions_sampler_continuous_and_one_hot.py
|
Python
|
apache-2.0
| 21,624
|
[
"Gaussian"
] |
5743f3d0ccb1d2b8356c1e30373418857f7ff85a811860f01b41cfbc9deef5d1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import os
from django.contrib.auth.models import Permission
from django.conf import settings
from django.core import mail
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.test import TestCase, skipUnlessDBFeature
from django.test.client import Client
from django.test.utils import override_settings
from django.utils import timezone
from pybb import permissions, views as pybb_views
from pybb.templatetags.pybb_tags import pybb_is_topic_unread, pybb_topic_unread, pybb_forum_unread, \
pybb_get_latest_topics, pybb_get_latest_posts
from pybb import compat, util
User = compat.get_user_model()
username_field = compat.get_username_field()
try:
from lxml import html
except ImportError:
raise Exception('PyBB requires lxml for self-testing')
from pybb import defaults
from pybb.models import Topic, TopicReadTracker, Forum, ForumReadTracker, Post, Category, PollAnswer
Profile = util.get_pybb_profile_model()
__author__ = 'zeus'
class SharedTestModule(object):
def create_user(self):
self.user = User.objects.create_user('zeus', 'zeus@localhost', 'zeus')
def login_client(self, username='zeus', password='zeus'):
self.client.login(username=username, password=password)
def create_initial(self, post=True):
self.category = Category.objects.create(name='foo')
self.forum = Forum.objects.create(name='xfoo', description='bar', category=self.category)
self.topic = Topic.objects.create(name='etopic', forum=self.forum, user=self.user)
if post:
self.post = Post.objects.create(topic=self.topic, user=self.user, body='bbcode [b]test[/b]')
def get_form_values(self, response, form="post-form"):
return dict(html.fromstring(response.content).xpath('//form[@class="%s"]' % form)[0].form_values())
def get_with_user(self, url, username=None, password=None):
if username:
self.client.login(username=username, password=password)
r = self.client.get(url)
self.client.logout()
return r
class FeaturesTest(TestCase, SharedTestModule):
def setUp(self):
self.ORIG_PYBB_ENABLE_ANONYMOUS_POST = defaults.PYBB_ENABLE_ANONYMOUS_POST
self.ORIG_PYBB_PREMODERATION = defaults.PYBB_PREMODERATION
defaults.PYBB_PREMODERATION = False
defaults.PYBB_ENABLE_ANONYMOUS_POST = False
self.create_user()
self.create_initial()
mail.outbox = []
def test_base(self):
# Check index page
Forum.objects.create(name='xfoo1', description='bar1', category=self.category, parent=self.forum)
url = reverse('pybb:index')
response = self.client.get(url)
parser = html.HTMLParser(encoding='utf8')
tree = html.fromstring(response.content, parser=parser)
self.assertContains(response, 'foo')
self.assertContains(response, self.forum.get_absolute_url())
self.assertTrue(defaults.PYBB_DEFAULT_TITLE in tree.xpath('//title')[0].text_content())
self.assertEqual(len(response.context['categories']), 1)
self.assertEqual(len(response.context['categories'][0].forums_accessed), 1)
def test_forum_page(self):
# Check forum page
response = self.client.get(self.forum.get_absolute_url())
self.assertEqual(response.context['forum'], self.forum)
tree = html.fromstring(response.content)
self.assertTrue(tree.xpath('//a[@href="%s"]' % self.topic.get_absolute_url()))
self.assertTrue(tree.xpath('//title[contains(text(),"%s")]' % self.forum.name))
self.assertFalse(tree.xpath('//a[contains(@href,"?page=")]'))
self.assertFalse(response.context['is_paginated'])
def test_category_page(self):
Forum.objects.create(name='xfoo1', description='bar1', category=self.category, parent=self.forum)
response = self.client.get(self.category.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.forum.get_absolute_url())
self.assertEqual(len(response.context['object'].forums_accessed), 1)
def test_profile_language_default(self):
user = User.objects.create_user(username='user2', password='user2', email='user2@example.com')
self.assertEqual(util.get_pybb_profile(user).language, settings.LANGUAGE_CODE)
def test_profile_edit(self):
# Self profile edit
self.login_client()
response = self.client.get(reverse('pybb:edit_profile'))
self.assertEqual(response.status_code, 200)
values = self.get_form_values(response, 'profile-edit')
values['signature'] = 'test signature'
response = self.client.post(reverse('pybb:edit_profile'), data=values, follow=True)
self.assertEqual(response.status_code, 200)
self.client.get(self.post.get_absolute_url(), follow=True)
self.assertContains(response, 'test signature')
# Test empty signature
values['signature'] = ''
response = self.client.post(reverse('pybb:edit_profile'), data=values, follow=True)
self.assertEqual(len(response.context['form'].errors), 0)
def test_pagination_and_topic_addition(self):
for i in range(0, defaults.PYBB_FORUM_PAGE_SIZE + 3):
topic = Topic(name='topic_%s_' % i, forum=self.forum, user=self.user)
topic.save()
url = self.forum.get_absolute_url()
response = self.client.get(url)
self.assertEqual(len(response.context['topic_list']), defaults.PYBB_FORUM_PAGE_SIZE)
self.assertTrue(response.context['is_paginated'])
self.assertEqual(response.context['paginator'].num_pages,
int((defaults.PYBB_FORUM_PAGE_SIZE + 3) / defaults.PYBB_FORUM_PAGE_SIZE) + 1)
def test_bbcode_and_topic_title(self):
response = self.client.get(self.topic.get_absolute_url())
tree = html.fromstring(response.content)
self.assertTrue(self.topic.name in tree.xpath('//title')[0].text_content())
self.assertContains(response, self.post.body_html)
self.assertContains(response, 'bbcode <strong>test</strong>')
def test_topic_addition(self):
self.login_client()
add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
response = self.client.get(add_topic_url)
values = self.get_form_values(response)
values['body'] = 'new topic test'
values['name'] = 'new topic name'
values['poll_type'] = 0
response = self.client.post(add_topic_url, data=values, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(Topic.objects.filter(name='new topic name').exists())
def test_topic_read_before_post_addition(self):
"""
Test if everything is okay when :
- user A create the topic
- but before associated post is created, user B display the forum
"""
topic = Topic(name='xtopic', forum=self.forum, user=self.user)
topic.save()
# topic is saved, but the post is not yet created at this time
# another user is displaying the forum before the post creation
user_ann = User.objects.create_user('ann', 'ann@localhost', 'ann')
client = Client()
client.login(username='ann', password='ann')
self.assertEqual(client.get(topic.get_absolute_url()).status_code, 404)
self.assertEqual(topic.forum.post_count, 1)
self.assertEqual(topic.forum.topic_count, 1)
# do we need to correct this?
# self.assertEqual(topic.forum.topics.count(), 1)
self.assertEqual(topic.post_count, 0)
# Now, TopicReadTracker is not created because the topic detail view
# raises a 404 if the topic's creation is not finished. So we create it
# manually to add a test, just in case there is another way a
# TopicReadTracker could be set for an incomplete topic.
TopicReadTracker.objects.create(user=user_ann, topic=topic, time_stamp=topic.created)
# before the correction, this raised TypeError: can't compare
# datetime.datetime to NoneType
pybb_topic_unread([topic,], user_ann)
# before the correction, this raised IndexError: list index out of range
last_post = topic.last_post
# now create the post
Post(topic=topic, user=self.user, body='one').save()
self.assertEqual(client.get(topic.get_absolute_url()).status_code, 200)
self.assertEqual(topic.forum.post_count, 2)
self.assertEqual(topic.forum.topic_count, 2)
self.assertEqual(topic.forum.topics.count(), 2)
self.assertEqual(topic.post_count, 1)
def test_post_deletion(self):
post = Post(topic=self.topic, user=self.user, body='bbcode [b]test[/b]')
post.save()
post.delete()
Topic.objects.get(id=self.topic.id)
Forum.objects.get(id=self.forum.id)
def test_topic_deletion(self):
topic = Topic(name='xtopic', forum=self.forum, user=self.user)
topic.save()
post = Post(topic=topic, user=self.user, body='one')
post.save()
post = Post(topic=topic, user=self.user, body='two')
post.save()
post.delete()
Topic.objects.get(id=topic.id)
Forum.objects.get(id=self.forum.id)
topic.delete()
Forum.objects.get(id=self.forum.id)
def test_forum_updated(self):
topic = Topic(name='xtopic', forum=self.forum, user=self.user)
topic.save()
post = Post(topic=topic, user=self.user, body='one')
post.save()
post = Post.objects.get(id=post.id)
self.assertTrue(self.forum.updated == post.created)
def test_read_tracking(self):
topic = Topic(name='xtopic', forum=self.forum, user=self.user)
topic.save()
post = Post(topic=topic, user=self.user, body='one')
post.save()
client = Client()
client.login(username='zeus', password='zeus')
# Topic status
tree = html.fromstring(client.get(topic.forum.get_absolute_url()).content)
self.assertTrue(tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % topic.get_absolute_url()))
# Forum status
tree = html.fromstring(client.get(reverse('pybb:index')).content)
self.assertTrue(
tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % topic.forum.get_absolute_url()))
# Visit it
client.get(topic.get_absolute_url())
# Topic status - read
tree = html.fromstring(client.get(topic.forum.get_absolute_url()).content)
# Visit others
for t in topic.forum.topics.all():
client.get(t.get_absolute_url())
self.assertFalse(tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % topic.get_absolute_url()))
# Forum status - read
tree = html.fromstring(client.get(reverse('pybb:index')).content)
self.assertFalse(
tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % topic.forum.get_absolute_url()))
# Post message
add_post_url = reverse('pybb:add_post', kwargs={'topic_id': topic.id})
response = client.get(add_post_url)
values = self.get_form_values(response)
values['body'] = 'test tracking'
response = client.post(add_post_url, values, follow=True)
self.assertContains(response, 'test tracking')
# Topic status - read
tree = html.fromstring(client.get(topic.forum.get_absolute_url()).content)
self.assertFalse(tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % topic.get_absolute_url()))
# Forum status - read
tree = html.fromstring(client.get(reverse('pybb:index')).content)
self.assertFalse(
tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % topic.forum.get_absolute_url()))
post = Post(topic=topic, user=self.user, body='one')
post.save()
client.get(reverse('pybb:mark_all_as_read'))
tree = html.fromstring(client.get(reverse('pybb:index')).content)
self.assertFalse(
tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % topic.forum.get_absolute_url()))
# Empty forum - read
f = Forum(name='empty', category=self.category)
f.save()
tree = html.fromstring(client.get(reverse('pybb:index')).content)
self.assertFalse(tree.xpath('//a[@href="%s"]/parent::td[contains(@class,"unread")]' % f.get_absolute_url()))
@skipUnlessDBFeature('supports_microsecond_precision')
def test_read_tracking_multi_user(self):
topic_1 = self.topic
topic_2 = Topic(name='topic_2', forum=self.forum, user=self.user)
topic_2.save()
Post(topic=topic_2, user=self.user, body='one').save()
user_ann = User.objects.create_user('ann', 'ann@localhost', 'ann')
client_ann = Client()
client_ann.login(username='ann', password='ann')
user_bob = User.objects.create_user('bob', 'bob@localhost', 'bob')
client_bob = Client()
client_bob.login(username='bob', password='bob')
# Two topics, each with one post. Everything is unread, so the db should reflect that:
self.assertEqual(TopicReadTracker.objects.all().count(), 0)
self.assertEqual(ForumReadTracker.objects.all().count(), 0)
# user_ann reads topic_1, she should get one topic read tracker, there should be no forum read trackers
client_ann.get(topic_1.get_absolute_url())
self.assertEqual(TopicReadTracker.objects.all().count(), 1)
self.assertEqual(TopicReadTracker.objects.filter(user=user_ann).count(), 1)
self.assertEqual(TopicReadTracker.objects.filter(user=user_ann, topic=topic_1).count(), 1)
self.assertEqual(ForumReadTracker.objects.all().count(), 0)
# user_bob reads topic_1, he should get one topic read tracker, there should be no forum read trackers
client_bob.get(topic_1.get_absolute_url())
self.assertEqual(TopicReadTracker.objects.all().count(), 2)
self.assertEqual(TopicReadTracker.objects.filter(user=user_bob).count(), 1)
self.assertEqual(TopicReadTracker.objects.filter(user=user_bob, topic=topic_1).count(), 1)
# user_bob reads topic_2, he should get a forum read tracker,
# there should be no topic read trackers for user_bob
client_bob.get(topic_2.get_absolute_url())
self.assertEqual(TopicReadTracker.objects.all().count(), 1)
self.assertEqual(ForumReadTracker.objects.all().count(), 1)
self.assertEqual(ForumReadTracker.objects.filter(user=user_bob).count(), 1)
self.assertEqual(ForumReadTracker.objects.filter(user=user_bob, forum=self.forum).count(), 1)
self.assertEqual(TopicReadTracker.objects.filter(user=user_bob).count(), 0)
self.assertListEqual([t.unread for t in pybb_topic_unread([topic_1, topic_2], user_bob)], [False, False])
# user_ann creates topic_3; she should get a new topic read tracker in the db
add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
response = client_ann.get(add_topic_url)
values = self.get_form_values(response)
values['body'] = 'topic_3'
values['name'] = 'topic_3'
values['poll_type'] = 0
response = client_ann.post(add_topic_url, data=values, follow=True)
self.assertEqual(TopicReadTracker.objects.all().count(), 2)
self.assertEqual(TopicReadTracker.objects.filter(user=user_ann).count(), 2)
self.assertEqual(ForumReadTracker.objects.all().count(), 1)
topic_3 = Topic.objects.order_by('-updated', '-id')[0]
self.assertEqual(topic_3.name, 'topic_3')
# user_ann posts to topic_1, a topic she has already read; no new trackers should be created
add_post_url = reverse('pybb:add_post', kwargs={'topic_id': topic_1.id})
response = client_ann.get(add_post_url)
values = self.get_form_values(response)
values['body'] = 'test tracking'
response = client_ann.post(add_post_url, values, follow=True)
self.assertEqual(TopicReadTracker.objects.all().count(), 2)
self.assertEqual(TopicReadTracker.objects.filter(user=user_ann).count(), 2)
self.assertEqual(ForumReadTracker.objects.all().count(), 1)
# user_bob has two unread topics, 'topic_1' and 'topic_3'.
# This is because user_ann created a new topic and posted to an existing topic,
# after user_bob got his forum read tracker.
# user_bob reads 'topic_1'
# user_bob gets a new topic read tracker, and the existing forum read tracker stays the same.
# 'topic_3' appears unread for user_bob
#
previous_time = ForumReadTracker.objects.all()[0].time_stamp
client_bob.get(topic_1.get_absolute_url())
self.assertEqual(ForumReadTracker.objects.all().count(), 1)
self.assertEqual(ForumReadTracker.objects.all()[0].time_stamp, previous_time)
self.assertEqual(TopicReadTracker.objects.filter(user=user_bob).count(), 1)
self.assertEqual(TopicReadTracker.objects.filter(user=user_ann).count(), 2)
self.assertEqual(TopicReadTracker.objects.all().count(), 3)
# user_bob reads the last unread topic, 'topic_3'.
# user_bob's existing forum read tracker updates and his topic read tracker disappears
#
previous_time = ForumReadTracker.objects.all()[0].time_stamp
client_bob.get(topic_3.get_absolute_url())
self.assertEqual(ForumReadTracker.objects.all().count(), 1)
self.assertGreater(ForumReadTracker.objects.all()[0].time_stamp, previous_time)
self.assertEqual(TopicReadTracker.objects.all().count(), 2)
self.assertEqual(TopicReadTracker.objects.filter(user=user_bob).count(), 0)
def test_read_tracking_multi_forum(self):
topic_1 = self.topic
topic_2 = Topic(name='topic_2', forum=self.forum, user=self.user)
topic_2.save()
Post(topic=topic_2, user=self.user, body='one').save()
forum_1 = self.forum
forum_2 = Forum(name='forum_2', description='bar', category=self.category)
forum_2.save()
Topic(name='garbage', forum=forum_2, user=self.user).save()
client = Client()
client.login(username='zeus', password='zeus')
# everything starts unread
self.assertEqual(ForumReadTracker.objects.all().count(), 0)
self.assertEqual(TopicReadTracker.objects.all().count(), 0)
# user reads topic_1, they should get one topic read tracker, there should be no forum read trackers
client.get(topic_1.get_absolute_url())
self.assertEqual(TopicReadTracker.objects.all().count(), 1)
self.assertEqual(TopicReadTracker.objects.filter(user=self.user).count(), 1)
self.assertEqual(TopicReadTracker.objects.filter(user=self.user, topic=topic_1).count(), 1)
# user reads topic_2, they should get a forum read tracker,
# there should be no topic read trackers for the user
client.get(topic_2.get_absolute_url())
self.assertEqual(TopicReadTracker.objects.all().count(), 0)
self.assertEqual(ForumReadTracker.objects.all().count(), 1)
self.assertEqual(ForumReadTracker.objects.filter(user=self.user).count(), 1)
self.assertEqual(ForumReadTracker.objects.filter(user=self.user, forum=self.forum).count(), 1)
def test_read_tracker_after_posting(self):
client = Client()
client.login(username='zeus', password='zeus')
add_post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
response = client.get(add_post_url)
values = self.get_form_values(response)
values['body'] = 'test tracking'
response = client.post(add_post_url, values, follow=True)
# after posting in the topic, it should be marked as read;
# because there is only one topic, the whole forum should be marked as read
self.assertEqual(TopicReadTracker.objects.filter(user=self.user, topic=self.topic).count(), 0)
self.assertEqual(ForumReadTracker.objects.filter(user=self.user, forum=self.forum).count(), 1)
def test_pybb_is_topic_unread_filter(self):
forum_1 = self.forum
topic_1 = self.topic
topic_2 = Topic.objects.create(name='topic_2', forum=forum_1, user=self.user)
forum_2 = Forum.objects.create(name='forum_2', description='forum2', category=self.category)
topic_3 = Topic.objects.create(name='topic_2', forum=forum_2, user=self.user)
Post(topic=topic_1, user=self.user, body='one').save()
Post(topic=topic_2, user=self.user, body='two').save()
Post(topic=topic_3, user=self.user, body='three').save()
user_ann = User.objects.create_user('ann', 'ann@localhost', 'ann')
client_ann = Client()
client_ann.login(username='ann', password='ann')
# Three topics, each with one post. Everything is unread, so the db should reflect that:
self.assertTrue(pybb_is_topic_unread(topic_1, user_ann))
self.assertTrue(pybb_is_topic_unread(topic_2, user_ann))
self.assertTrue(pybb_is_topic_unread(topic_3, user_ann))
self.assertListEqual(
[t.unread for t in pybb_topic_unread([topic_1, topic_2, topic_3], user_ann)],
[True, True, True])
client_ann.get(topic_1.get_absolute_url())
topic_1 = Topic.objects.get(id=topic_1.id)
topic_2 = Topic.objects.get(id=topic_2.id)
topic_3 = Topic.objects.get(id=topic_3.id)
self.assertFalse(pybb_is_topic_unread(topic_1, user_ann))
self.assertTrue(pybb_is_topic_unread(topic_2, user_ann))
self.assertTrue(pybb_is_topic_unread(topic_3, user_ann))
self.assertListEqual(
[t.unread for t in pybb_topic_unread([topic_1, topic_2, topic_3], user_ann)],
[False, True, True])
client_ann.get(topic_2.get_absolute_url())
topic_1 = Topic.objects.get(id=topic_1.id)
topic_2 = Topic.objects.get(id=topic_2.id)
topic_3 = Topic.objects.get(id=topic_3.id)
self.assertFalse(pybb_is_topic_unread(topic_1, user_ann))
self.assertFalse(pybb_is_topic_unread(topic_2, user_ann))
self.assertTrue(pybb_is_topic_unread(topic_3, user_ann))
self.assertListEqual(
[t.unread for t in pybb_topic_unread([topic_1, topic_2, topic_3], user_ann)],
[False, False, True])
client_ann.get(topic_3.get_absolute_url())
topic_1 = Topic.objects.get(id=topic_1.id)
topic_2 = Topic.objects.get(id=topic_2.id)
topic_3 = Topic.objects.get(id=topic_3.id)
self.assertFalse(pybb_is_topic_unread(topic_1, user_ann))
self.assertFalse(pybb_is_topic_unread(topic_2, user_ann))
self.assertFalse(pybb_is_topic_unread(topic_3, user_ann))
self.assertListEqual(
[t.unread for t in pybb_topic_unread([topic_1, topic_2, topic_3], user_ann)],
[False, False, False])
def test_is_forum_unread_filter(self):
Forum.objects.all().delete()
forum_parent = Forum.objects.create(name='f1', category=self.category)
forum_child1 = Forum.objects.create(name='f2', category=self.category, parent=forum_parent)
forum_child2 = Forum.objects.create(name='f3', category=self.category, parent=forum_parent)
topic_1 = Topic.objects.create(name='topic_1', forum=forum_parent, user=self.user)
topic_2 = Topic.objects.create(name='topic_2', forum=forum_child1, user=self.user)
topic_3 = Topic.objects.create(name='topic_3', forum=forum_child2, user=self.user)
Post(topic=topic_1, user=self.user, body='one').save()
Post(topic=topic_2, user=self.user, body='two').save()
Post(topic=topic_3, user=self.user, body='three').save()
user_ann = User.objects.create_user('ann', 'ann@localhost', 'ann')
client_ann = Client()
client_ann.login(username='ann', password='ann')
forum_parent = Forum.objects.get(id=forum_parent.id)
forum_child1 = Forum.objects.get(id=forum_child1.id)
forum_child2 = Forum.objects.get(id=forum_child2.id)
self.assertListEqual([f.unread for f in pybb_forum_unread([forum_parent, forum_child1, forum_child2], user_ann)],
[True, True, True])
# even after reading the topic in the parent forum, there are still unread topics in the child forums
client_ann.get(topic_1.get_absolute_url())
forum_parent = Forum.objects.get(id=forum_parent.id)
forum_child1 = Forum.objects.get(id=forum_child1.id)
forum_child2 = Forum.objects.get(id=forum_child2.id)
self.assertListEqual([f.unread for f in pybb_forum_unread([forum_parent, forum_child1, forum_child2], user_ann)],
[True, True, True])
# still an unread topic in one of the child forums
client_ann.get(topic_2.get_absolute_url())
forum_parent = Forum.objects.get(id=forum_parent.id)
forum_child1 = Forum.objects.get(id=forum_child1.id)
forum_child2 = Forum.objects.get(id=forum_child2.id)
self.assertListEqual([f.unread for f in pybb_forum_unread([forum_parent, forum_child1, forum_child2], user_ann)],
[True, False, True])
# all topics read
client_ann.get(topic_3.get_absolute_url())
forum_parent = Forum.objects.get(id=forum_parent.id)
forum_child1 = Forum.objects.get(id=forum_child1.id)
forum_child2 = Forum.objects.get(id=forum_child2.id)
self.assertListEqual([f.unread for f in pybb_forum_unread([forum_parent, forum_child1, forum_child2], user_ann)],
[False, False, False])
@skipUnlessDBFeature('supports_microsecond_precision')
def test_read_tracker_when_topics_forum_changed(self):
forum_1 = Forum.objects.create(name='f1', description='bar', category=self.category)
forum_2 = Forum.objects.create(name='f2', description='bar', category=self.category)
topic_1 = Topic.objects.create(name='t1', forum=forum_1, user=self.user)
topic_2 = Topic.objects.create(name='t2', forum=forum_2, user=self.user)
Post.objects.create(topic=topic_1, user=self.user, body='one')
Post.objects.create(topic=topic_2, user=self.user, body='two')
user_ann = User.objects.create_user('ann', 'ann@localhost', 'ann')
client_ann = Client()
client_ann.login(username='ann', password='ann')
# Everything is unread
self.assertListEqual([t.unread for t in pybb_topic_unread([topic_1, topic_2], user_ann)], [True, True])
self.assertListEqual([t.unread for t in pybb_forum_unread([forum_1, forum_2], user_ann)], [True, True])
# read all
client_ann.get(reverse('pybb:mark_all_as_read'))
self.assertListEqual([t.unread for t in pybb_topic_unread([topic_1, topic_2], user_ann)], [False, False])
self.assertListEqual([t.unread for t in pybb_forum_unread([forum_1, forum_2], user_ann)], [False, False])
post = Post.objects.create(topic=topic_1, user=self.user, body='three')
post = Post.objects.get(id=post.id) # get post with timestamp from DB
topic_1 = Topic.objects.get(id=topic_1.id)
topic_2 = Topic.objects.get(id=topic_2.id)
self.assertEqual(topic_1.updated, post.updated or post.created)
self.assertEqual(forum_1.updated, post.updated or post.created)
self.assertListEqual([t.unread for t in pybb_topic_unread([topic_1, topic_2], user_ann)], [True, False])
self.assertListEqual([t.unread for t in pybb_forum_unread([forum_1, forum_2], user_ann)], [True, False])
post.topic = topic_2
post.save()
topic_1 = Topic.objects.get(id=topic_1.id)
topic_2 = Topic.objects.get(id=topic_2.id)
forum_1 = Forum.objects.get(id=forum_1.id)
forum_2 = Forum.objects.get(id=forum_2.id)
self.assertEqual(topic_2.updated, post.updated or post.created)
self.assertEqual(forum_2.updated, post.updated or post.created)
self.assertListEqual([t.unread for t in pybb_topic_unread([topic_1, topic_2], user_ann)], [False, True])
self.assertListEqual([t.unread for t in pybb_forum_unread([forum_1, forum_2], user_ann)], [False, True])
topic_2.forum = forum_1
topic_2.save()
topic_1 = Topic.objects.get(id=topic_1.id)
topic_2 = Topic.objects.get(id=topic_2.id)
forum_1 = Forum.objects.get(id=forum_1.id)
forum_2 = Forum.objects.get(id=forum_2.id)
self.assertEqual(forum_1.updated, post.updated or post.created)
self.assertListEqual([t.unread for t in pybb_topic_unread([topic_1, topic_2], user_ann)], [False, True])
self.assertListEqual([t.unread for t in pybb_forum_unread([forum_1, forum_2], user_ann)], [True, False])
@skipUnlessDBFeature('supports_microsecond_precision')
def test_open_first_unread_post(self):
forum_1 = self.forum
topic_1 = Topic.objects.create(name='topic_1', forum=forum_1, user=self.user)
topic_2 = Topic.objects.create(name='topic_2', forum=forum_1, user=self.user)
post_1_1 = Post.objects.create(topic=topic_1, user=self.user, body='1_1')
post_1_2 = Post.objects.create(topic=topic_1, user=self.user, body='1_2')
post_2_1 = Post.objects.create(topic=topic_2, user=self.user, body='2_1')
user_ann = User.objects.create_user('ann', 'ann@localhost', 'ann')
client_ann = Client()
client_ann.login(username='ann', password='ann')
response = client_ann.get(topic_1.get_absolute_url(), data={'first-unread': 1}, follow=True)
self.assertRedirects(response, '%s?page=%d#post-%d' % (topic_1.get_absolute_url(), 1, post_1_1.id))
response = client_ann.get(topic_1.get_absolute_url(), data={'first-unread': 1}, follow=True)
self.assertRedirects(response, '%s?page=%d#post-%d' % (topic_1.get_absolute_url(), 1, post_1_2.id))
response = client_ann.get(topic_2.get_absolute_url(), data={'first-unread': 1}, follow=True)
self.assertRedirects(response, '%s?page=%d#post-%d' % (topic_2.get_absolute_url(), 1, post_2_1.id))
post_1_3 = Post.objects.create(topic=topic_1, user=self.user, body='1_3')
post_1_4 = Post.objects.create(topic=topic_1, user=self.user, body='1_4')
response = client_ann.get(topic_1.get_absolute_url(), data={'first-unread': 1}, follow=True)
self.assertRedirects(response, '%s?page=%d#post-%d' % (topic_1.get_absolute_url(), 1, post_1_3.id))
def test_latest_topics(self):
topic_1 = self.topic
topic_1.updated = timezone.now()
topic_1.save()
topic_2 = Topic.objects.create(name='topic_2', forum=self.forum, user=self.user)
topic_2.updated = timezone.now() + datetime.timedelta(days=-1)
topic_2.save()
category_2 = Category.objects.create(name='cat2')
forum_2 = Forum.objects.create(name='forum_2', category=category_2)
topic_3 = Topic.objects.create(name='topic_3', forum=forum_2, user=self.user)
topic_3.updated = timezone.now() + datetime.timedelta(days=-2)
topic_3.save()
self.login_client()
response = self.client.get(reverse('pybb:topic_latest'))
self.assertEqual(response.status_code, 200)
self.assertListEqual(list(response.context['topic_list']), [topic_1, topic_2, topic_3])
topic_2.forum.hidden = True
topic_2.forum.save()
response = self.client.get(reverse('pybb:topic_latest'))
self.assertListEqual(list(response.context['topic_list']), [topic_3])
topic_2.forum.hidden = False
topic_2.forum.save()
category_2.hidden = True
category_2.save()
response = self.client.get(reverse('pybb:topic_latest'))
self.assertListEqual(list(response.context['topic_list']), [topic_1, topic_2])
topic_2.forum.hidden = False
topic_2.forum.save()
category_2.hidden = False
category_2.save()
topic_1.on_moderation = True
topic_1.save()
response = self.client.get(reverse('pybb:topic_latest'))
self.assertListEqual(list(response.context['topic_list']), [topic_1, topic_2, topic_3])
topic_1.user = User.objects.create_user('another', 'another@localhost', 'another')
topic_1.save()
response = self.client.get(reverse('pybb:topic_latest'))
self.assertListEqual(list(response.context['topic_list']), [topic_2, topic_3])
topic_1.forum.moderators.add(self.user)
response = self.client.get(reverse('pybb:topic_latest'))
self.assertListEqual(list(response.context['topic_list']), [topic_1, topic_2, topic_3])
topic_1.forum.moderators.remove(self.user)
self.user.is_superuser = True
self.user.save()
response = self.client.get(reverse('pybb:topic_latest'))
self.assertListEqual(list(response.context['topic_list']), [topic_1, topic_2, topic_3])
self.client.logout()
response = self.client.get(reverse('pybb:topic_latest'))
self.assertListEqual(list(response.context['topic_list']), [topic_2, topic_3])
def test_hidden(self):
client = Client()
category = Category(name='hcat', hidden=True)
category.save()
forum_in_hidden = Forum(name='in_hidden', category=category)
forum_in_hidden.save()
topic_in_hidden = Topic(forum=forum_in_hidden, name='in_hidden', user=self.user)
topic_in_hidden.save()
forum_hidden = Forum(name='hidden', category=self.category, hidden=True)
forum_hidden.save()
topic_hidden = Topic(forum=forum_hidden, name='hidden', user=self.user)
topic_hidden.save()
post_hidden = Post(topic=topic_hidden, user=self.user, body='hidden')
post_hidden.save()
post_in_hidden = Post(topic=topic_in_hidden, user=self.user, body='hidden')
post_in_hidden.save()
self.assertFalse(category.id in [c.id for c in client.get(reverse('pybb:index')).context['categories']])
self.assertEqual(client.get(category.get_absolute_url()).status_code, 302)
self.assertEqual(client.get(forum_in_hidden.get_absolute_url()).status_code, 302)
self.assertEqual(client.get(topic_in_hidden.get_absolute_url()).status_code, 302)
self.assertNotContains(client.get(reverse('pybb:index')), forum_hidden.get_absolute_url())
self.assertNotContains(client.get(reverse('pybb:feed_topics')), topic_hidden.get_absolute_url())
self.assertNotContains(client.get(reverse('pybb:feed_topics')), topic_in_hidden.get_absolute_url())
self.assertNotContains(client.get(reverse('pybb:feed_posts')), post_hidden.get_absolute_url())
self.assertNotContains(client.get(reverse('pybb:feed_posts')), post_in_hidden.get_absolute_url())
self.assertEqual(client.get(forum_hidden.get_absolute_url()).status_code, 302)
self.assertEqual(client.get(topic_hidden.get_absolute_url()).status_code, 302)
user = User.objects.create_user('someguy', 'email@abc.xyz', 'password')
client.login(username='someguy', password='password')
response = client.get(reverse('pybb:add_post', kwargs={'topic_id': self.topic.id}))
self.assertEqual(response.status_code, 200, response)
response = client.get(reverse('pybb:add_post', kwargs={'topic_id': self.topic.id}), data={'quote_id': post_hidden.id})
self.assertEqual(response.status_code, 403, response)
client.login(username='zeus', password='zeus')
self.assertFalse(category.id in [c.id for c in client.get(reverse('pybb:index')).context['categories']])
self.assertNotContains(client.get(reverse('pybb:index')), forum_hidden.get_absolute_url())
self.assertEqual(client.get(category.get_absolute_url()).status_code, 403)
self.assertEqual(client.get(forum_in_hidden.get_absolute_url()).status_code, 403)
self.assertEqual(client.get(topic_in_hidden.get_absolute_url()).status_code, 403)
self.assertEqual(client.get(forum_hidden.get_absolute_url()).status_code, 403)
self.assertEqual(client.get(topic_hidden.get_absolute_url()).status_code, 403)
self.user.is_staff = True
self.user.save()
self.assertTrue(category.id in [c.id for c in client.get(reverse('pybb:index')).context['categories']])
self.assertContains(client.get(reverse('pybb:index')), forum_hidden.get_absolute_url())
self.assertEqual(client.get(category.get_absolute_url()).status_code, 200)
self.assertEqual(client.get(forum_in_hidden.get_absolute_url()).status_code, 200)
self.assertEqual(client.get(topic_in_hidden.get_absolute_url()).status_code, 200)
self.assertEqual(client.get(forum_hidden.get_absolute_url()).status_code, 200)
self.assertEqual(client.get(topic_hidden.get_absolute_url()).status_code, 200)
def test_inactive(self):
self.login_client()
url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
response = self.client.get(url)
values = self.get_form_values(response)
values['body'] = 'test ban'
response = self.client.post(url, values, follow=True)
self.assertEqual(len(Post.objects.filter(body='test ban')), 1)
self.user.is_active = False
self.user.save()
values['body'] = 'test ban 2'
self.client.post(url, values, follow=True)
self.assertEqual(len(Post.objects.filter(body='test ban 2')), 0)
def get_csrf(self, form):
return form.xpath('//input[@name="csrfmiddlewaretoken"]/@value')[0]
def test_csrf(self):
client = Client(enforce_csrf_checks=True)
client.login(username='zeus', password='zeus')
post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
response = client.get(post_url)
values = self.get_form_values(response)
del values['csrfmiddlewaretoken']
response = client.post(post_url, values, follow=True)
self.assertNotEqual(response.status_code, 200)
response = client.get(self.topic.get_absolute_url())
values = self.get_form_values(response)
response = client.post(reverse('pybb:add_post', kwargs={'topic_id': self.topic.id}), values, follow=True)
self.assertEqual(response.status_code, 200)
def test_user_blocking(self):
user = User.objects.create_user('test', 'test@localhost', 'test')
topic = Topic.objects.create(name='topic', forum=self.forum, user=user)
p1 = Post.objects.create(topic=topic, user=user, body='bbcode [b]test[/b]')
p2 = Post.objects.create(topic=topic, user=user, body='bbcode [b]test[/b]')
self.user.is_superuser = True
self.user.save()
self.login_client()
response = self.client.get(reverse('pybb:block_user', args=[user.username]), follow=True)
self.assertEqual(response.status_code, 405)
response = self.client.post(reverse('pybb:block_user', args=[user.username]), follow=True)
self.assertEqual(response.status_code, 200)
user = User.objects.get(username=user.username)
self.assertFalse(user.is_active)
self.assertEqual(Topic.objects.filter().count(), 2)
self.assertEqual(Post.objects.filter(user=user).count(), 2)
user.is_active = True
user.save()
self.assertEqual(Topic.objects.count(), 2)
response = self.client.post(reverse('pybb:block_user', args=[user.username]),
data={'block_and_delete_messages': 'block_and_delete_messages'}, follow=True)
self.assertEqual(response.status_code, 200)
user = User.objects.get(username=user.username)
self.assertFalse(user.is_active)
self.assertEqual(Topic.objects.count(), 1)
self.assertEqual(Post.objects.filter(user=user).count(), 0)
def test_user_unblocking(self):
user = User.objects.create_user('test', 'test@localhost', 'test')
user.is_active = False
user.save()
self.user.is_superuser = True
self.user.save()
self.login_client()
response = self.client.get(reverse('pybb:unblock_user', args=[user.username]), follow=True)
self.assertEqual(response.status_code, 405)
response = self.client.post(reverse('pybb:unblock_user', args=[user.username]), follow=True)
self.assertEqual(response.status_code, 200)
user = User.objects.get(username=user.username)
self.assertTrue(user.is_active)
def test_ajax_preview(self):
self.login_client()
response = self.client.post(reverse('pybb:post_ajax_preview'), data={'data': '[b]test bbcode ajax preview[/b]'})
self.assertContains(response, '<strong>test bbcode ajax preview</strong>')
def test_headline(self):
self.forum.headline = 'test <b>headline</b>'
self.forum.save()
client = Client()
self.assertContains(client.get(self.forum.get_absolute_url()), 'test <b>headline</b>')
def test_quote(self):
self.login_client()
response = self.client.get(reverse('pybb:add_post', kwargs={'topic_id': self.topic.id}),
data={'quote_id': self.post.id, 'body': 'test tracking'}, follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.post.body)
def test_edit_post(self):
self.login_client()
edit_post_url = reverse('pybb:edit_post', kwargs={'pk': self.post.id})
response = self.client.get(edit_post_url)
self.assertEqual(response.status_code, 200)
self.assertIsNone(Post.objects.get(id=self.post.id).updated)
tree = html.fromstring(response.content)
values = dict(tree.xpath('//form[@method="post"]')[0].form_values())
values['body'] = 'test edit'
response = self.client.post(edit_post_url, data=values, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(Post.objects.get(pk=self.post.id).body, 'test edit')
response = self.client.get(self.post.get_absolute_url(), follow=True)
self.assertContains(response, 'test edit')
self.assertIsNotNone(Post.objects.get(id=self.post.id).updated)
# Check admin form
self.user.is_staff = True
self.user.save()
response = self.client.get(edit_post_url)
self.assertEqual(response.status_code, 200)
tree = html.fromstring(response.content)
values = dict(tree.xpath('//form[@method="post"]')[0].form_values())
values['body'] = 'test edit'
values['login'] = 'new_login'
response = self.client.post(edit_post_url, data=values, follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'test edit')
def test_admin_post_add(self):
self.user.is_staff = True
self.user.save()
self.login_client()
response = self.client.post(reverse('pybb:add_post', kwargs={'topic_id': self.topic.id}),
data={'quote_id': self.post.id, 'body': 'test admin post', 'user': 'zeus'},
follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'test admin post')
def test_stick(self):
self.user.is_superuser = True
self.user.save()
self.login_client()
self.assertEqual(
self.client.get(reverse('pybb:stick_topic', kwargs={'pk': self.topic.id}), follow=True).status_code, 200)
self.assertEqual(
self.client.get(reverse('pybb:unstick_topic', kwargs={'pk': self.topic.id}), follow=True).status_code, 200)
def test_delete_view(self):
post = Post(topic=self.topic, user=self.user, body='test to delete')
post.save()
self.user.is_superuser = True
self.user.save()
self.login_client()
response = self.client.post(reverse('pybb:delete_post', args=[post.id]), follow=True)
self.assertEqual(response.status_code, 200)
# Check that topic and forum still exist ;)
self.assertEqual(Topic.objects.filter(id=self.topic.id).count(), 1)
self.assertEqual(Forum.objects.filter(id=self.forum.id).count(), 1)
# Delete topic
response = self.client.post(reverse('pybb:delete_post', args=[self.post.id]), follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(Post.objects.filter(id=self.post.id).count(), 0)
self.assertEqual(Topic.objects.filter(id=self.topic.id).count(), 0)
self.assertEqual(Forum.objects.filter(id=self.forum.id).count(), 1)
def test_open_close(self):
self.user.is_superuser = True
self.user.save()
self.login_client()
add_post_url = reverse('pybb:add_post', args=[self.topic.id])
response = self.client.get(add_post_url)
values = self.get_form_values(response)
values['body'] = 'test closed'
response = self.client.get(reverse('pybb:close_topic', args=[self.topic.id]), follow=True)
self.assertEqual(response.status_code, 200)
response = self.client.post(add_post_url, values, follow=True)
self.assertEqual(response.status_code, 403)
response = self.client.get(reverse('pybb:open_topic', args=[self.topic.id]), follow=True)
self.assertEqual(response.status_code, 200)
response = self.client.post(add_post_url, values, follow=True)
self.assertEqual(response.status_code, 200)
def test_subscription(self):
user2 = User.objects.create_user(username='user2', password='user2', email='user2@someserver.com')
user3 = User.objects.create_user(username='user3', password='user3', email='user3@example.com')
client = Client()
client.login(username='user2', password='user2')
subscribe_url = reverse('pybb:add_subscription', args=[self.topic.id])
response = client.get(self.topic.get_absolute_url())
subscribe_links = html.fromstring(response.content).xpath('//a[@href="%s"]' % subscribe_url)
self.assertEqual(len(subscribe_links), 1)
response = client.get(subscribe_url, follow=True)
self.assertEqual(response.status_code, 200)
self.assertIn(user2, self.topic.subscribers.all())
self.topic.subscribers.add(user3)
# create a new reply (with another user)
self.client.login(username='zeus', password='zeus')
add_post_url = reverse('pybb:add_post', args=[self.topic.id])
response = self.client.get(add_post_url)
values = self.get_form_values(response)
values['body'] = 'test subscription юникод'
response = self.client.post(add_post_url, values, follow=True)
self.assertEqual(response.status_code, 200)
new_post = Post.objects.order_by('-id')[0]
# there should only be one email in the outbox (to user2) because @example.com addresses are ignored
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to[0], user2.email)
self.assertTrue([msg for msg in mail.outbox if new_post.get_absolute_url() in msg.body])
# unsubscribe
client.login(username='user2', password='user2')
self.assertTrue([msg for msg in mail.outbox if new_post.get_absolute_url() in msg.body])
response = client.get(reverse('pybb:delete_subscription', args=[self.topic.id]), follow=True)
self.assertEqual(response.status_code, 200)
self.assertNotIn(user2, self.topic.subscribers.all())
def test_subscription_disabled(self):
orig_conf = defaults.PYBB_DISABLE_SUBSCRIPTIONS
defaults.PYBB_DISABLE_SUBSCRIPTIONS = True
user2 = User.objects.create_user(username='user2', password='user2', email='user2@someserver.com')
user3 = User.objects.create_user(username='user3', password='user3', email='user3@someserver.com')
client = Client()
client.login(username='user2', password='user2')
subscribe_url = reverse('pybb:add_subscription', args=[self.topic.id])
response = client.get(self.topic.get_absolute_url())
subscribe_links = html.fromstring(response.content).xpath('//a[@href="%s"]' % subscribe_url)
self.assertEqual(len(subscribe_links), 0)
response = client.get(subscribe_url, follow=True)
self.assertEqual(response.status_code, 403)
self.topic.subscribers.add(user3)
# create a new reply (with another user)
self.client.login(username='zeus', password='zeus')
add_post_url = reverse('pybb:add_post', args=[self.topic.id])
response = self.client.get(add_post_url)
values = self.get_form_values(response)
values['body'] = 'test subscription юникод'
response = self.client.post(add_post_url, values, follow=True)
self.assertEqual(response.status_code, 200)
new_post = Post.objects.order_by('-id')[0]
# there should be one email in the outbox (to user3)
# because already-subscribed users still receive notifications
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to[0], user3.email)
defaults.PYBB_DISABLE_SUBSCRIPTIONS = orig_conf
def test_notifications_disabled(self):
orig_conf = defaults.PYBB_DISABLE_NOTIFICATIONS
defaults.PYBB_DISABLE_NOTIFICATIONS = True
user2 = User.objects.create_user(username='user2', password='user2', email='user2@someserver.com')
user3 = User.objects.create_user(username='user3', password='user3', email='user3@someserver.com')
client = Client()
client.login(username='user2', password='user2')
subscribe_url = reverse('pybb:add_subscription', args=[self.topic.id])
response = client.get(self.topic.get_absolute_url())
subscribe_links = html.fromstring(response.content).xpath('//a[@href="%s"]' % subscribe_url)
self.assertEqual(len(subscribe_links), 1)
response = client.get(subscribe_url, follow=True)
self.assertEqual(response.status_code, 200)
self.topic.subscribers.add(user3)
# create a new reply (with another user)
self.client.login(username='zeus', password='zeus')
add_post_url = reverse('pybb:add_post', args=[self.topic.id])
response = self.client.get(add_post_url)
values = self.get_form_values(response)
values['body'] = 'test subscription юникод'
response = self.client.post(add_post_url, values, follow=True)
self.assertEqual(response.status_code, 200)
new_post = Post.objects.order_by('-id')[0]
# there should be no email in the outbox
self.assertEqual(len(mail.outbox), 0)
defaults.PYBB_DISABLE_NOTIFICATIONS = orig_conf
@skipUnlessDBFeature('supports_microsecond_precision')
def test_topic_updated(self):
topic = Topic(name='new topic', forum=self.forum, user=self.user)
topic.save()
post = Post(topic=topic, user=self.user, body='bbcode [b]test[/b]')
post.save()
client = Client()
response = client.get(self.forum.get_absolute_url())
self.assertEqual(response.context['topic_list'][0], topic)
post = Post(topic=self.topic, user=self.user, body='bbcode [b]test[/b]')
post.save()
client = Client()
response = client.get(self.forum.get_absolute_url())
self.assertEqual(response.context['topic_list'][0], self.topic)
def test_topic_deleted(self):
forum_1 = Forum.objects.create(name='new forum', category=self.category)
topic_1 = Topic.objects.create(name='new topic', forum=forum_1, user=self.user)
post_1 = Post.objects.create(topic=topic_1, user=self.user, body='test')
post_1 = Post.objects.get(id=post_1.id)
self.assertEqual(topic_1.updated, post_1.created)
self.assertEqual(forum_1.updated, post_1.created)
topic_2 = Topic.objects.create(name='another topic', forum=forum_1, user=self.user)
post_2 = Post.objects.create(topic=topic_2, user=self.user, body='another test')
post_2 = Post.objects.get(id=post_2.id)
self.assertEqual(topic_2.updated, post_2.created)
self.assertEqual(forum_1.updated, post_2.created)
topic_2.delete()
forum_1 = Forum.objects.get(id=forum_1.id)
self.assertEqual(forum_1.updated, post_1.created)
self.assertEqual(forum_1.topic_count, 1)
self.assertEqual(forum_1.post_count, 1)
post_1.delete()
forum_1 = Forum.objects.get(id=forum_1.id)
self.assertEqual(forum_1.topic_count, 0)
self.assertEqual(forum_1.post_count, 0)
def test_user_views(self):
response = self.client.get(reverse('pybb:user', kwargs={'username': self.user.username}))
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse('pybb:user_posts', kwargs={'username': self.user.username}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['object_list'].count(), 1)
response = self.client.get(reverse('pybb:user_topics', kwargs={'username': self.user.username}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['object_list'].count(), 1)
self.topic.forum.hidden = True
self.topic.forum.save()
self.client.logout()
response = self.client.get(reverse('pybb:user_posts', kwargs={'username': self.user.username}))
self.assertEqual(response.context['object_list'].count(), 0)
response = self.client.get(reverse('pybb:user_topics', kwargs={'username': self.user.username}))
self.assertEqual(response.context['object_list'].count(), 0)
def test_post_count(self):
topic = Topic(name='etopic', forum=self.forum, user=self.user)
topic.save()
post = Post(topic=topic, user=self.user, body='test') # another post
post.save()
self.assertEqual(util.get_pybb_profile(self.user).post_count, 2)
post.body = 'test2'
post.save()
self.assertEqual(Profile.objects.get(pk=util.get_pybb_profile(self.user).pk).post_count, 2)
post.delete()
self.assertEqual(Profile.objects.get(pk=util.get_pybb_profile(self.user).pk).post_count, 1)
def test_latest_topics_tag(self):
Topic.objects.all().delete()
for i in range(10):
Topic.objects.create(name='topic%s' % i, user=self.user, forum=self.forum)
latest_topics = pybb_get_latest_topics(context=None, user=self.user)
self.assertEqual(len(latest_topics), 5)
self.assertEqual(latest_topics[0].name, 'topic9')
self.assertEqual(latest_topics[4].name, 'topic5')
def test_latest_posts_tag(self):
Post.objects.all().delete()
for i in range(10):
Post.objects.create(body='post%s' % i, user=self.user, topic=self.topic)
latest_topics = pybb_get_latest_posts(context=None, user=self.user)
self.assertEqual(len(latest_topics), 5)
self.assertEqual(latest_topics[0].body, 'post9')
self.assertEqual(latest_topics[4].body, 'post5')
def test_multiple_objects_returned(self):
"""
see issue #87: https://github.com/hovel/pybbm/issues/87
"""
self.assertFalse(self.user.is_superuser)
self.assertFalse(self.user.is_staff)
self.assertFalse(self.topic.on_moderation)
self.assertEqual(self.topic.user, self.user)
user1 = User.objects.create_user('geyser', 'geyser@localhost', 'geyser')
self.topic.forum.moderators.add(self.user)
self.topic.forum.moderators.add(user1)
self.login_client()
response = self.client.get(reverse('pybb:add_post', kwargs={'topic_id': self.topic.id}))
self.assertEqual(response.status_code, 200)
def tearDown(self):
defaults.PYBB_ENABLE_ANONYMOUS_POST = self.ORIG_PYBB_ENABLE_ANONYMOUS_POST
defaults.PYBB_PREMODERATION = self.ORIG_PYBB_PREMODERATION
class AnonymousTest(TestCase, SharedTestModule):
def setUp(self):
self.ORIG_PYBB_ENABLE_ANONYMOUS_POST = defaults.PYBB_ENABLE_ANONYMOUS_POST
self.ORIG_PYBB_ANONYMOUS_USERNAME = defaults.PYBB_ANONYMOUS_USERNAME
self.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER = defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER
defaults.PYBB_ENABLE_ANONYMOUS_POST = True
defaults.PYBB_ANONYMOUS_USERNAME = 'Anonymous'
self.user = User.objects.create_user('Anonymous', 'Anonymous@localhost', 'Anonymous')
self.category = Category.objects.create(name='foo')
self.forum = Forum.objects.create(name='xfoo', description='bar', category=self.category)
self.topic = Topic.objects.create(name='etopic', forum=self.forum, user=self.user)
self.post = Post.objects.create(body='body post', topic=self.topic, user=self.user)
add_post_permission = Permission.objects.get_by_natural_key('add_post', 'pybb', 'post')
self.user.user_permissions.add(add_post_permission)
def tearDown(self):
defaults.PYBB_ENABLE_ANONYMOUS_POST = self.ORIG_PYBB_ENABLE_ANONYMOUS_POST
defaults.PYBB_ANONYMOUS_USERNAME = self.ORIG_PYBB_ANONYMOUS_USERNAME
defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER = self.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER
def test_anonymous_posting(self):
post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
response = self.client.get(post_url)
values = self.get_form_values(response)
values['body'] = 'test anonymous'
response = self.client.post(post_url, values, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(Post.objects.filter(body='test anonymous')), 1)
self.assertEqual(Post.objects.get(body='test anonymous').user, self.user)
def test_anonymous_cache_topic_views(self):
self.assertNotIn(util.build_cache_key('anonymous_topic_views', topic_id=self.topic.id), cache)
url = self.topic.get_absolute_url()
self.client.get(url)
self.assertEqual(cache.get(util.build_cache_key('anonymous_topic_views', topic_id=self.topic.id)), 1)
for _ in range(defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER - 2):
self.client.get(url)
self.assertEqual(Topic.objects.get(id=self.topic.id).views, 0)
self.assertEqual(cache.get(util.build_cache_key('anonymous_topic_views', topic_id=self.topic.id)),
defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER - 1)
self.client.get(url)
self.assertEqual(Topic.objects.get(id=self.topic.id).views, defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER)
self.assertEqual(cache.get(util.build_cache_key('anonymous_topic_views', topic_id=self.topic.id)), 0)
views = Topic.objects.get(id=self.topic.id).views
defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER = None
self.client.get(url)
self.assertEqual(Topic.objects.get(id=self.topic.id).views, views + 1)
self.assertEqual(cache.get(util.build_cache_key('anonymous_topic_views', topic_id=self.topic.id)), 0)
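# The test above exercises PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER: anonymous page
# views are counted in the cache and flushed to Topic.views only when the
# buffer fills, while a buffer of None makes every anonymous view hit the
# database directly.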
def premoderate_test(user, post):
"""
Premoderation hook used by PreModerationTest:
allow posts to skip moderation only for users whose username starts with 'allowed'.
"""
if user.username.startswith('allowed'):
return True
return False
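# As used here, PYBB_PREMODERATION may be set to a callable taking
# (user, post) and returning True when the post may bypass moderation;
# PreModerationTest below installs premoderate_test as that hook.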
class PreModerationTest(TestCase, SharedTestModule):
def setUp(self):
self.ORIG_PYBB_PREMODERATION = defaults.PYBB_PREMODERATION
defaults.PYBB_PREMODERATION = premoderate_test
self.create_user()
self.create_initial()
mail.outbox = []
def test_premoderation(self):
self.client.login(username='zeus', password='zeus')
add_post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
response = self.client.get(add_post_url)
values = self.get_form_values(response)
values['body'] = 'test premoderation'
response = self.client.post(add_post_url, values, follow=True)
self.assertEqual(response.status_code, 200)
post = Post.objects.get(body='test premoderation')
self.assertEqual(post.on_moderation, True)
# Post is visible by author
response = self.client.get(post.get_absolute_url(), follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'test premoderation')
# Post is not visible by anonymous user
client = Client()
response = client.get(post.get_absolute_url(), follow=True)
self.assertRedirects(response, settings.LOGIN_URL + '?next=%s' % post.get_absolute_url())
response = client.get(self.topic.get_absolute_url(), follow=True)
self.assertNotContains(response, 'test premoderation')
# But visible by superuser (with permissions)
user = User.objects.create_user('admin', 'admin@localhost', 'admin')
user.is_superuser = True
user.save()
client.login(username='admin', password='admin')
response = client.get(post.get_absolute_url(), follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'test premoderation')
# users whose names start with 'allowed' can post without premoderation
user = User.objects.create_user('allowed_zeus', 'allowed_zeus@localhost', 'allowed_zeus')
client.login(username='allowed_zeus', password='allowed_zeus')
response = client.get(add_post_url)
values = self.get_form_values(response)
values['body'] = 'test premoderation staff'
response = client.post(add_post_url, values, follow=True)
self.assertEqual(response.status_code, 200)
post = Post.objects.get(body='test premoderation staff')
client = Client()
response = client.get(post.get_absolute_url(), follow=True)
self.assertContains(response, 'test premoderation staff')
# Superuser can moderate
user.is_superuser = True
user.save()
admin_client = Client()
admin_client.login(username='admin', password='admin')
post = Post.objects.get(body='test premoderation')
response = admin_client.get(reverse('pybb:moderate_post', kwargs={'pk': post.id}), follow=True)
self.assertEqual(response.status_code, 200)
# Now all can see this post:
client = Client()
response = client.get(post.get_absolute_url(), follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'test premoderation')
# Other users can't moderate
post.on_moderation = True
post.save()
client.login(username='zeus', password='zeus')
response = client.get(reverse('pybb:moderate_post', kwargs={'pk': post.id}), follow=True)
self.assertEqual(response.status_code, 403)
# If user create new topic it goes to moderation if MODERATION_ENABLE
# When first post is moderated, topic becomes moderated too
self.client.login(username='zeus', password='zeus')
add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
response = self.client.get(add_topic_url)
values = self.get_form_values(response)
values['body'] = 'new topic test'
values['name'] = 'new topic name'
values['poll_type'] = 0
response = self.client.post(add_topic_url, values, follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'new topic test')
client = Client()
response = client.get(self.forum.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'new topic name')
response = client.get(Topic.objects.get(name='new topic name').get_absolute_url())
self.assertEqual(response.status_code, 302)
response = admin_client.get(reverse('pybb:moderate_post',
kwargs={'pk': Post.objects.get(body='new topic test').id}),
follow=True)
self.assertEqual(response.status_code, 200)
response = client.get(self.forum.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'new topic name')
response = client.get(Topic.objects.get(name='new topic name').get_absolute_url())
self.assertEqual(response.status_code, 200)
def tearDown(self):
defaults.PYBB_PREMODERATION = self.ORIG_PYBB_PREMODERATION
class AttachmentTest(TestCase, SharedTestModule):
def setUp(self):
self.PYBB_ATTACHMENT_ENABLE = defaults.PYBB_ATTACHMENT_ENABLE
defaults.PYBB_ATTACHMENT_ENABLE = True
self.ORIG_PYBB_PREMODERATION = defaults.PYBB_PREMODERATION
defaults.PYBB_PREMODERATION = False
self.file_name = os.path.join(os.path.dirname(__file__), 'static', 'pybb', 'img', 'attachment.png')
self.create_user()
self.create_initial()
def test_attachment_one(self):
add_post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
self.login_client()
response = self.client.get(add_post_url)
with open(self.file_name, 'rb') as fp:
values = self.get_form_values(response)
values['body'] = 'test attachment'
values['attachments-0-file'] = fp
response = self.client.post(add_post_url, values, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(Post.objects.filter(body='test attachment').exists())
def test_attachment_two(self):
add_post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
self.login_client()
response = self.client.get(add_post_url)
with open(self.file_name, 'rb') as fp:
values = self.get_form_values(response)
values['body'] = 'test attachment'
values['attachments-0-file'] = fp
del values['attachments-INITIAL_FORMS']
del values['attachments-TOTAL_FORMS']
with self.assertRaises(ValidationError):
self.client.post(add_post_url, values, follow=True)
def tearDown(self):
defaults.PYBB_ATTACHMENT_ENABLE = self.PYBB_ATTACHMENT_ENABLE
defaults.PYBB_PREMODERATION = self.ORIG_PYBB_PREMODERATION
class PollTest(TestCase, SharedTestModule):
def setUp(self):
self.create_user()
self.create_initial()
self.PYBB_POLL_MAX_ANSWERS = defaults.PYBB_POLL_MAX_ANSWERS
defaults.PYBB_POLL_MAX_ANSWERS = 2
def test_poll_add(self):
add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
self.login_client()
response = self.client.get(add_topic_url)
values = self.get_form_values(response)
values['body'] = 'test poll body'
values['name'] = 'test poll name'
values['poll_type'] = 0 # poll_type = None, create topic without poll answers
values['poll_question'] = 'q1'
values['poll_answers-0-text'] = 'answer1'
values['poll_answers-1-text'] = 'answer2'
values['poll_answers-TOTAL_FORMS'] = 2
response = self.client.post(add_topic_url, values, follow=True)
self.assertEqual(response.status_code, 200)
new_topic = Topic.objects.get(name='test poll name')
self.assertIsNone(new_topic.poll_question)
self.assertFalse(PollAnswer.objects.filter(topic=new_topic).exists()) # no answers here
values['name'] = 'test poll name 1'
values['poll_type'] = 1
values['poll_answers-0-text'] = 'answer1' # not enough answers
values['poll_answers-TOTAL_FORMS'] = 1
response = self.client.post(add_topic_url, values, follow=True)
self.assertFalse(Topic.objects.filter(name='test poll name 1').exists())
values['name'] = 'test poll name 1'
values['poll_type'] = 1
values['poll_answers-0-text'] = 'answer1' # too many answers
values['poll_answers-1-text'] = 'answer2'
values['poll_answers-2-text'] = 'answer3'
values['poll_answers-TOTAL_FORMS'] = 3
response = self.client.post(add_topic_url, values, follow=True)
self.assertFalse(Topic.objects.filter(name='test poll name 1').exists())
values['name'] = 'test poll name 1'
values['poll_type'] = 1 # poll type = single choice, create answers
values['poll_question'] = 'q1'
values['poll_answers-0-text'] = 'answer1' # two answers - what do we need to create poll
values['poll_answers-1-text'] = 'answer2'
values['poll_answers-TOTAL_FORMS'] = 2
response = self.client.post(add_topic_url, values, follow=True)
self.assertEqual(response.status_code, 200)
new_topic = Topic.objects.get(name='test poll name 1')
self.assertEqual(new_topic.poll_question, 'q1')
self.assertEqual(PollAnswer.objects.filter(topic=new_topic).count(), 2)
def test_regression_adding_poll_with_removed_answers(self):
add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
self.login_client()
response = self.client.get(add_topic_url)
values = self.get_form_values(response)
values['body'] = 'test poll body'
values['name'] = 'test poll name'
values['poll_type'] = 1
values['poll_question'] = 'q1'
values['poll_answers-0-text'] = ''
values['poll_answers-0-DELETE'] = 'on'
values['poll_answers-1-text'] = ''
values['poll_answers-1-DELETE'] = 'on'
values['poll_answers-TOTAL_FORMS'] = 2
response = self.client.post(add_topic_url, values, follow=True)
self.assertEqual(response.status_code, 200)
self.assertFalse(Topic.objects.filter(name='test poll name').exists())
def test_regression_poll_deletion_after_second_post(self):
self.login_client()
add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
response = self.client.get(add_topic_url)
values = self.get_form_values(response)
values['body'] = 'test poll body'
values['name'] = 'test poll name'
values['poll_type'] = 1 # poll type = single choice, create answers
values['poll_question'] = 'q1'
values['poll_answers-0-text'] = 'answer1' # two answers - what do we need to create poll
values['poll_answers-1-text'] = 'answer2'
values['poll_answers-TOTAL_FORMS'] = 2
response = self.client.post(add_topic_url, values, follow=True)
self.assertEqual(response.status_code, 200)
new_topic = Topic.objects.get(name='test poll name')
self.assertEqual(new_topic.poll_question, 'q1')
self.assertEqual(PollAnswer.objects.filter(topic=new_topic).count(), 2)
add_post_url = reverse('pybb:add_post', kwargs={'topic_id': new_topic.id})
response = self.client.get(add_post_url)
values = self.get_form_values(response)
values['body'] = 'test answer body'
response = self.client.post(add_post_url, values, follow=True)
self.assertEqual(PollAnswer.objects.filter(topic=new_topic).count(), 2)
def test_poll_edit(self):
edit_topic_url = reverse('pybb:edit_post', kwargs={'pk': self.post.id})
self.login_client()
response = self.client.get(edit_topic_url)
values = self.get_form_values(response)
values['poll_type'] = 1 # add_poll
values['poll_question'] = 'q1'
values['poll_answers-0-text'] = 'answer1'
values['poll_answers-1-text'] = 'answer2'
values['poll_answers-TOTAL_FORMS'] = 2
response = self.client.post(edit_topic_url, values, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(Topic.objects.get(id=self.topic.id).poll_type, 1)
self.assertEqual(Topic.objects.get(id=self.topic.id).poll_question, 'q1')
self.assertEqual(PollAnswer.objects.filter(topic=self.topic).count(), 2)
values = self.get_form_values(self.client.get(edit_topic_url))
values['poll_type'] = 2 # change_poll type
values['poll_question'] = 'q100' # change poll question
values['poll_answers-0-text'] = 'answer100' # change poll answers
values['poll_answers-1-text'] = 'answer200'
values['poll_answers-TOTAL_FORMS'] = 2
response = self.client.post(edit_topic_url, values, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(Topic.objects.get(id=self.topic.id).poll_type, 2)
self.assertEqual(Topic.objects.get(id=self.topic.id).poll_question, 'q100')
self.assertEqual(PollAnswer.objects.filter(topic=self.topic).count(), 2)
self.assertTrue(PollAnswer.objects.filter(text='answer100').exists())
self.assertTrue(PollAnswer.objects.filter(text='answer200').exists())
self.assertFalse(PollAnswer.objects.filter(text='answer1').exists())
self.assertFalse(PollAnswer.objects.filter(text='answer2').exists())
values['poll_type'] = 0 # remove poll
values['poll_answers-0-text'] = 'answer100' # no matter how many answers we provide
values['poll_answers-TOTAL_FORMS'] = 1
response = self.client.post(edit_topic_url, values, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(Topic.objects.get(id=self.topic.id).poll_type, 0)
self.assertIsNone(Topic.objects.get(id=self.topic.id).poll_question)
self.assertEqual(PollAnswer.objects.filter(topic=self.topic).count(), 0)
def test_poll_voting(self):
def recreate_poll(poll_type):
self.topic.poll_type = poll_type
self.topic.save()
PollAnswer.objects.filter(topic=self.topic).delete()
PollAnswer.objects.create(topic=self.topic, text='answer1')
PollAnswer.objects.create(topic=self.topic, text='answer2')
self.login_client()
recreate_poll(poll_type=Topic.POLL_TYPE_SINGLE)
vote_url = reverse('pybb:topic_poll_vote', kwargs={'pk': self.topic.id})
my_answer = PollAnswer.objects.all()[0]
values = {'answers': my_answer.id}
response = self.client.post(vote_url, data=values, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(Topic.objects.get(id=self.topic.id).poll_votes(), 1)
self.assertEqual(PollAnswer.objects.get(id=my_answer.id).votes(), 1)
self.assertEqual(PollAnswer.objects.get(id=my_answer.id).votes_percent(), 100.0)
# already voted
response = self.client.post(vote_url, data=values, follow=True)
self.assertEqual(response.status_code, 403) # bad request status
recreate_poll(poll_type=Topic.POLL_TYPE_MULTIPLE)
values = {'answers': [a.id for a in PollAnswer.objects.all()]}
response = self.client.post(vote_url, data=values, follow=True)
self.assertEqual(response.status_code, 200)
self.assertListEqual([a.votes() for a in PollAnswer.objects.all()], [1, 1])
self.assertListEqual([a.votes_percent() for a in PollAnswer.objects.all()], [50.0, 50.0])
response = self.client.post(vote_url, data=values, follow=True)
self.assertEqual(response.status_code, 403) # already voted
cancel_vote_url = reverse('pybb:topic_cancel_poll_vote', kwargs={'pk': self.topic.id})
response = self.client.post(cancel_vote_url, data=values, follow=True)
self.assertEqual(response.status_code, 200)
self.assertListEqual([a.votes() for a in PollAnswer.objects.all()], [0, 0])
self.assertListEqual([a.votes_percent() for a in PollAnswer.objects.all()], [0, 0])
response = self.client.post(vote_url, data=values, follow=True)
self.assertEqual(response.status_code, 200)
self.assertListEqual([a.votes() for a in PollAnswer.objects.all()], [1, 1])
self.assertListEqual([a.votes_percent() for a in PollAnswer.objects.all()], [50.0, 50.0])
def test_poll_voting_on_closed_topic(self):
self.login_client()
self.topic.poll_type = Topic.POLL_TYPE_SINGLE
self.topic.save()
PollAnswer.objects.create(topic=self.topic, text='answer1')
PollAnswer.objects.create(topic=self.topic, text='answer2')
self.topic.closed = True
self.topic.save()
vote_url = reverse('pybb:topic_poll_vote', kwargs={'pk': self.topic.id})
my_answer = PollAnswer.objects.all()[0]
values = {'answers': my_answer.id}
response = self.client.post(vote_url, data=values, follow=True)
self.assertEqual(response.status_code, 403)
def tearDown(self):
defaults.PYBB_POLL_MAX_ANSWERS = self.PYBB_POLL_MAX_ANSWERS
class FiltersTest(TestCase, SharedTestModule):
def setUp(self):
self.create_user()
self.create_initial(post=False)
def test_filters(self):
add_post_url = reverse('pybb:add_post', kwargs={'topic_id': self.topic.id})
self.login_client()
response = self.client.get(add_post_url)
values = self.get_form_values(response)
values['body'] = 'test\n \n \n\nmultiple empty lines\n'
response = self.client.post(add_post_url, values, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(Post.objects.all()[0].body, 'test\nmultiple empty lines')
class CustomPermissionHandler(permissions.DefaultPermissionHandler):
"""
a custom permission handler which changes the meaning of "hidden" forum:
"hidden" forum or category is visible for all logged on users, not only staff
"""
def filter_categories(self, user, qs):
return qs.filter(hidden=False) if user.is_anonymous() else qs
def may_view_category(self, user, category):
return user.is_authenticated() if category.hidden else True
def filter_forums(self, user, qs):
if user.is_anonymous():
qs = qs.filter(Q(hidden=False) & Q(category__hidden=False))
return qs
def may_view_forum(self, user, forum):
return user.is_authenticated() if forum.hidden or forum.category.hidden else True
def filter_topics(self, user, qs):
if user.is_anonymous():
qs = qs.filter(Q(forum__hidden=False) & Q(forum__category__hidden=False))
qs = qs.filter(closed=False) # filter out closed topics for test
return qs
def may_view_topic(self, user, topic):
return self.may_view_forum(user, topic.forum)
def filter_posts(self, user, qs):
if user.is_anonymous():
qs = qs.filter(Q(topic__forum__hidden=False) & Q(topic__forum__category__hidden=False))
return qs
def may_view_post(self, user, post):
return self.may_view_forum(user, post.topic.forum)
def may_create_poll(self, user):
return False
def may_edit_topic_slug(self, user):
return True
class MarkupParserTest(TestCase, SharedTestModule):
def setUp(self):
# Reinit Engines because they are stored in memory and the current bbcode engine stored
# may be the old one, depending the test order exec.
self.ORIG_PYBB_MARKUP_ENGINES = util.PYBB_MARKUP_ENGINES
self.ORIG_PYBB_QUOTE_ENGINES = util.PYBB_QUOTE_ENGINES
util.PYBB_MARKUP_ENGINES = {
'bbcode': 'pybb.markup.bbcode.BBCodeParser', # default parser
'bbcode_custom': 'test_project.markup_parsers.CustomBBCodeParser', # overrided default parser
'liberator': 'test_project.markup_parsers.LiberatorParser', # completely new parser
'fake': 'pybb.markup.base.BaseParser', # base parser
'markdown': defaults.markdown,  # old-style callable parser
}
util.PYBB_QUOTE_ENGINES = {
'bbcode': 'pybb.markup.bbcode.BBCodeParser', # default parser
'bbcode_custom': 'test_project.markup_parsers.CustomBBCodeParser', # overrided default parser
'liberator': 'test_project.markup_parsers.LiberatorParser', # completely new parser
'fake': 'pybb.markup.base.BaseParser', # base parser
'markdown': lambda text, username="": '>' + text.replace('\n', '\n>').replace('\r', '\n>') + '\n' # old-style callable parser
}
def tearDown(self):
util._MARKUP_ENGINES = {}
util._QUOTE_ENGINES = {}
util.PYBB_MARKUP_ENGINES = self.ORIG_PYBB_MARKUP_ENGINES
util.PYBB_QUOTE_ENGINES = self.ORIG_PYBB_QUOTE_ENGINES
def test_markup_engines(self):
def _test_engine(parser_name, text_to_html_map):
for item in text_to_html_map:
self.assertIn(util._get_markup_formatter(parser_name)(item[0]), item[1:])
text_to_html_map = [
['[b]bold[/b]', '<strong>bold</strong>'],
['[i]italic[/i]', '<em>italic</em>'],
['[u]underline[/u]', '<u>underline</u>'],
['[s]striked[/s]', '<strike>striked</strike>'],
[
'[img]http://domain.com/image.png[/img]',
'<img src="http://domain.com/image.png"></img>',
'<img src="http://domain.com/image.png">'
],
['[url=google.com]search in google[/url]', '<a href="http://google.com">search in google</a>'],
['http://google.com', '<a href="http://google.com">http://google.com</a>'],
['[list][*]1[*]2[/list]', '<ul><li>1</li><li>2</li></ul>'],
[
'[list=1][*]1[*]2[/list]',
'<ol><li>1</li><li>2</li></ol>',
'<ol style="list-style-type:decimal;"><li>1</li><li>2</li></ol>'
],
['[quote="post author"]quote[/quote]', '<blockquote><em>post author</em><br>quote</blockquote>'],
[
'[code]code[/code]',
'<div class="code"><pre>code</pre></div>',
'<pre><code>code</code></pre>']
,
]
_test_engine('bbcode', text_to_html_map)
text_to_html_map = text_to_html_map + [
['[ul][li]1[/li][li]2[/li][/ul]', '<ul><li>1</li><li>2</li></ul>'],
[
'[youtube]video_id[/youtube]',
(
'<iframe src="http://www.youtube.com/embed/video_id?wmode=opaque" '
'data-youtube-id="video_id" allowfullscreen="" frameborder="0" '
'height="315" width="560"></iframe>'
)
],
]
_test_engine('bbcode_custom', text_to_html_map)
text_to_html_map = [
['Windows and Mac OS are wonderfull OS !', 'GNU Linux and FreeBSD are wonderfull OS !'],
['I love PHP', 'I love Python'],
]
_test_engine('liberator', text_to_html_map)
text_to_html_map = [
['[b]bold[/b]', '[b]bold[/b]'],
['*italic*', '*italic*'],
]
_test_engine('fake', text_to_html_map)
_test_engine('not_existent', text_to_html_map)
text_to_html_map = [
['**bold**', '<p><strong>bold</strong></p>'],
['*italic*', '<p><em>italic</em></p>'],
[
'',
'<p><img alt="alt text" src="http://domain.com/image.png" title="title" /></p>'
],
[
'[search in google](https://www.google.com)',
'<p><a href="https://www.google.com">search in google</a></p>'
],
[
'[google] some text\n[google]: https://www.google.com',
'<p><a href="https://www.google.com">google</a> some text</p>'
],
['* 1\n* 2', '<ul>\n<li>1</li>\n<li>2</li>\n</ul>'],
['1. 1\n2. 2', '<ol>\n<li>1</li>\n<li>2</li>\n</ol>'],
['> quote', '<blockquote>\n<p>quote</p>\n</blockquote>'],
['```\ncode\n```', '<p><code>code</code></p>'],
]
_test_engine('markdown', text_to_html_map)
def test_quote_engines(self):
def _test_engine(parser_name, text_to_quote_map):
for item in text_to_quote_map:
self.assertEqual(util._get_markup_quoter(parser_name)(item[0]), item[1])
self.assertEqual(util._get_markup_quoter(parser_name)(item[0], 'username'), item[2])
text_to_quote_map = [
['quote text', '[quote=""]quote text[/quote]\n', '[quote="username"]quote text[/quote]\n']
]
_test_engine('bbcode', text_to_quote_map)
_test_engine('bbcode_custom', text_to_quote_map)
text_to_quote_map = [
['quote text', 'quote text', 'posted by: username\nquote text']
]
_test_engine('liberator', text_to_quote_map)
text_to_quote_map = [
['quote text', 'quote text', 'quote text']
]
_test_engine('fake', text_to_quote_map)
_test_engine('not_existent', text_to_quote_map)
text_to_quote_map = [
['quote\r\ntext', '>quote\n>\n>text\n', '>quote\n>\n>text\n']
]
_test_engine('markdown', text_to_quote_map)
def test_body_cleaners(self):
user = User.objects.create_user('zeus', 'zeus@localhost', 'zeus')
staff = User.objects.create_user('staff', 'staff@localhost', 'staff')
staff.is_staff = True
staff.save()
from pybb.markup.base import rstrip_str
cleaners_map = [
['pybb.markup.base.filter_blanks', 'some\n\n\n\ntext\n\nwith\nnew\nlines', 'some\ntext\n\nwith\nnew\nlines'],
[rstrip_str, 'text \n \nwith whitespaces ', 'text\n\nwith whitespaces'],
]
for cleaner, source, dest in cleaners_map:
self.assertEqual(util.get_body_cleaner(cleaner)(user, source), dest)
self.assertEqual(util.get_body_cleaner(cleaner)(staff, source), source)
def _attach_perms_class(class_name):
"""
override the permission handler. this cannot be done with @override_settings as
permissions.perms is already imported at import point, instead we got to monkeypatch
the modules (not really nice, but only an issue in tests)
"""
pybb_views.perms = permissions.perms = util.resolve_class(class_name)
def _detach_perms_class():
"""
reset permission handler (otherwise other tests may fail)
"""
pybb_views.perms = permissions.perms = util.resolve_class('pybb.permissions.DefaultPermissionHandler')
class CustomPermissionHandlerTest(TestCase, SharedTestModule):
""" test custom permission handler """
def setUp(self):
self.create_user()
# create public and hidden categories, forums, posts
c_pub = Category(name='public')
c_pub.save()
c_hid = Category(name='private', hidden=True)
c_hid.save()
self.forum = Forum.objects.create(name='pub1', category=c_pub)
Forum.objects.create(name='priv1', category=c_hid)
Forum.objects.create(name='private_in_public_cat', hidden=True, category=c_pub)
for f in Forum.objects.all():
t = Topic.objects.create(name='a topic', forum=f, user=self.user)
Post.objects.create(topic=t, user=self.user, body='test')
# make some topics closed => hidden
for t in Topic.objects.all()[0:2]:
t.closed = True
t.save()
_attach_perms_class('pybb.tests.CustomPermissionHandler')
def tearDown(self):
_detach_perms_class()
def test_category_permission(self):
for c in Category.objects.all():
# anon user may not see category
r = self.get_with_user(c.get_absolute_url())
if c.hidden:
self.assertEqual(r.status_code, 302)
else:
self.assertEqual(r.status_code, 200)
# logged on user may see all categories
r = self.get_with_user(c.get_absolute_url(), 'zeus', 'zeus')
self.assertEqual(r.status_code, 200)
def test_forum_permission(self):
for f in Forum.objects.all():
r = self.get_with_user(f.get_absolute_url())
self.assertEqual(r.status_code, 302 if f.hidden or f.category.hidden else 200)
r = self.get_with_user(f.get_absolute_url(), 'zeus', 'zeus')
self.assertEqual(r.status_code, 200)
self.assertEqual(r.context['object_list'].count(), f.topics.filter(closed=False).count())
def test_topic_permission(self):
for t in Topic.objects.all():
r = self.get_with_user(t.get_absolute_url())
self.assertEqual(r.status_code, 302 if t.forum.hidden or t.forum.category.hidden else 200)
r = self.get_with_user(t.get_absolute_url(), 'zeus', 'zeus')
self.assertEqual(r.status_code, 200)
def test_post_permission(self):
for p in Post.objects.all():
r = self.get_with_user(p.get_absolute_url())
self.assertEqual(r.status_code, 302)
r = self.get_with_user(p.get_absolute_url(), 'zeus', 'zeus')
self.assertEqual(r.status_code, 302)
def test_poll_add(self):
add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
self.login_client()
response = self.client.get(add_topic_url)
values = self.get_form_values(response)
values['body'] = 'test poll body'
values['name'] = 'test poll name'
values['poll_type'] = 1 # poll_type = 1, create topic with poll
values['poll_question'] = 'q1'
values['poll_answers-0-text'] = 'answer1'
values['poll_answers-1-text'] = 'answer2'
values['poll_answers-TOTAL_FORMS'] = 2
response = self.client.post(add_topic_url, values, follow=True)
self.assertEqual(response.status_code, 200)
new_topic = Topic.objects.get(name='test poll name')
self.assertIsNone(new_topic.poll_question)
self.assertFalse(PollAnswer.objects.filter(topic=new_topic).exists()) # no answers here
class RestrictEditingHandler(permissions.DefaultPermissionHandler):
def may_create_topic(self, user, forum):
return False
def may_create_post(self, user, topic):
return False
def may_edit_post(self, user, post):
return False
class LogonRedirectTest(TestCase, SharedTestModule):
""" test whether anonymous user gets redirected, whereas unauthorized user gets PermissionDenied """
def setUp(self):
# create users
staff = User.objects.create_user('staff', 'staff@localhost', 'staff')
staff.is_staff = True
staff.save()
nostaff = User.objects.create_user('nostaff', 'nostaff@localhost', 'nostaff')
nostaff.is_staff = False
nostaff.save()
# create topic, post in hidden category
self.category = Category(name='private', hidden=True)
self.category.save()
self.forum = Forum(name='priv1', category=self.category)
self.forum.save()
self.topic = Topic(name='a topic', forum=self.forum, user=staff)
self.topic.save()
self.post = Post(body='body post', topic=self.topic, user=staff, on_moderation=True)
self.post.save()
def test_redirect_category(self):
# access without user should be redirected
r = self.get_with_user(self.category.get_absolute_url())
self.assertRedirects(r, settings.LOGIN_URL + '?next=%s' % self.category.get_absolute_url())
# access with (unauthorized) user should get 403 (forbidden)
r = self.get_with_user(self.category.get_absolute_url(), 'nostaff', 'nostaff')
self.assertEquals(r.status_code, 403)
# allowed user is allowed
r = self.get_with_user(self.category.get_absolute_url(), 'staff', 'staff')
self.assertEquals(r.status_code, 200)
def test_redirect_forum(self):
# access without user should be redirected
r = self.get_with_user(self.forum.get_absolute_url())
self.assertRedirects(r, settings.LOGIN_URL + '?next=%s' % self.forum.get_absolute_url())
# access with (unauthorized) user should get 403 (forbidden)
r = self.get_with_user(self.forum.get_absolute_url(), 'nostaff', 'nostaff')
self.assertEquals(r.status_code, 403)
# allowed user is allowed
r = self.get_with_user(self.forum.get_absolute_url(), 'staff', 'staff')
self.assertEquals(r.status_code, 200)
def test_redirect_topic(self):
# access without user should be redirected
r = self.get_with_user(self.topic.get_absolute_url())
self.assertRedirects(r, settings.LOGIN_URL + '?next=%s' % self.topic.get_absolute_url())
# access with (unauthorized) user should get 403 (forbidden)
r = self.get_with_user(self.topic.get_absolute_url(), 'nostaff', 'nostaff')
self.assertEquals(r.status_code, 403)
# allowed user is allowed
r = self.get_with_user(self.topic.get_absolute_url(), 'staff', 'staff')
self.assertEquals(r.status_code, 200)
def test_redirect_post(self):
# access without user should be redirected
r = self.get_with_user(self.post.get_absolute_url())
self.assertRedirects(r, settings.LOGIN_URL + '?next=%s' % self.post.get_absolute_url())
# access with (unauthorized) user should get 403 (forbidden)
r = self.get_with_user(self.post.get_absolute_url(), 'nostaff', 'nostaff')
self.assertEquals(r.status_code, 403)
# allowed user is allowed
r = self.get_with_user(self.post.get_absolute_url(), 'staff', 'staff')
self.assertEquals(r.status_code, 302)
@override_settings(PYBB_ENABLE_ANONYMOUS_POST=False)
def test_redirect_topic_add(self):
_attach_perms_class('pybb.tests.RestrictEditingHandler')
# access without user should be redirected
add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.id})
r = self.get_with_user(add_topic_url)
self.assertRedirects(r, settings.LOGIN_URL + '?next=%s' % add_topic_url)
# access with (unauthorized) user should get 403 (forbidden)
r = self.get_with_user(add_topic_url, 'staff', 'staff')
self.assertEquals(r.status_code, 403)
_detach_perms_class()
# allowed user is allowed
r = self.get_with_user(add_topic_url, 'staff', 'staff')
self.assertEquals(r.status_code, 200)
def test_redirect_post_edit(self):
_attach_perms_class('pybb.tests.RestrictEditingHandler')
# access without user should be redirected
edit_post_url = reverse('pybb:edit_post', kwargs={'pk': self.post.id})
r = self.get_with_user(edit_post_url)
self.assertRedirects(r, settings.LOGIN_URL + '?next=%s' % edit_post_url)
# access with (unauthorized) user should get 403 (forbidden)
r = self.get_with_user(edit_post_url, 'staff', 'staff')
self.assertEquals(r.status_code, 403)
_detach_perms_class()
# allowed user is allowed
r = self.get_with_user(edit_post_url, 'staff', 'staff')
self.assertEquals(r.status_code, 200)
def test_profile_autocreation_signal_on(self):
user = User.objects.create_user('cronos', 'cronos@localhost', 'cronos')
profile = getattr(user, defaults.PYBB_PROFILE_RELATED_NAME, None)
self.assertIsNotNone(profile)
self.assertEqual(type(profile), util.get_pybb_profile_model())
user.delete()
def test_profile_autocreation_middleware(self):
user = User.objects.create_user('cronos', 'cronos@localhost', 'cronos')
getattr(user, defaults.PYBB_PROFILE_RELATED_NAME).delete()
#just display a page : the middleware should create the profile
self.get_with_user('/', 'cronos', 'cronos')
user = User.objects.get(username='cronos')
profile = getattr(user, defaults.PYBB_PROFILE_RELATED_NAME, None)
self.assertIsNotNone(profile)
self.assertEqual(type(profile), util.get_pybb_profile_model())
user.delete()
def test_user_delete_cascade(self):
user = User.objects.create_user('cronos', 'cronos@localhost', 'cronos')
profile = getattr(user, defaults.PYBB_PROFILE_RELATED_NAME, None)
self.assertIsNotNone(profile)
post = Post(topic=self.topic, user=user, body='I \'ll be back')
post.save()
user_pk = user.pk
profile_pk = profile.pk
post_pk = post.pk
user.delete()
self.assertFalse(User.objects.filter(pk=user_pk).exists())
self.assertFalse(Profile.objects.filter(pk=profile_pk).exists())
self.assertFalse(Post.objects.filter(pk=post_pk).exists())
class NiceUrlsTest(TestCase, SharedTestModule):
def __init__(self, *args, **kwargs):
super(NiceUrlsTest, self).__init__(*args, **kwargs)
self.ORIGINAL_PYBB_NICE_URL = defaults.PYBB_NICE_URL
defaults.PYBB_NICE_URL = True
self.urls = settings.ROOT_URLCONF
def setUp(self):
self.create_user()
self.login_client()
self.create_initial()
self.ORIGINAL_PYBB_NICE_URL = defaults.PYBB_NICE_URL
defaults.PYBB_NICE_URL = True
def test_unicode_slugify(self):
self.assertEqual(compat.slugify('北京 (China), Москва (Russia), é_è (a sad smiley !)'),
'bei-jing-china-moskva-russia-e_e-a-sad-smiley')
def test_automatique_slug(self):
self.assertEqual(compat.slugify(self.category.name), self.category.slug)
self.assertEqual(compat.slugify(self.forum.name), self.forum.slug)
self.assertEqual(compat.slugify(self.topic.name), self.topic.slug)
def test_no_duplicate_slug(self):
category_name = self.category.name
forum_name = self.forum.name
topic_name = self.topic.name
# objects created without slug but the same name
category = Category.objects.create(name=category_name)
forum = Forum.objects.create(name=forum_name, description='bar', category=self.category)
topic = Topic.objects.create(name=topic_name, forum=self.forum, user=self.user)
slug_nb = len(Category.objects.filter(slug__startswith=category_name)) - 1
self.assertEqual('%s-%d' % (compat.slugify(category_name), slug_nb), category.slug)
slug_nb = len(Forum.objects.filter(slug__startswith=forum_name)) - 1
self.assertEqual('%s-%d' % (compat.slugify(forum_name), slug_nb), forum.slug)
slug_nb = len(Topic.objects.filter(slug__startswith=topic_name)) - 1
self.assertEqual('%s-%d' % (compat.slugify(topic_name), slug_nb), topic.slug)
# objects created with a duplicate slug but a different name
category = Category.objects.create(name='test_slug_category', slug=compat.slugify(category_name))
forum = Forum.objects.create(name='test_slug_forum', description='bar',
category=self.category, slug=compat.slugify(forum_name))
topic = Topic.objects.create(name='test_topic_slug', forum=self.forum,
user=self.user, slug=compat.slugify(topic_name))
slug_nb = len(Category.objects.filter(slug__startswith=category_name)) - 1
self.assertEqual('%s-%d' % (compat.slugify(category_name), slug_nb), category.slug)
slug_nb = len(Forum.objects.filter(slug__startswith=forum_name)) - 1
self.assertEqual('%s-%d' % (compat.slugify(forum_name), slug_nb), forum.slug)
slug_nb = len(Topic.objects.filter(slug__startswith=self.topic.name)) - 1
self.assertEqual('%s-%d' % (compat.slugify(topic_name), slug_nb), topic.slug)
def test_fail_on_too_many_duplicate_slug(self):
original_duplicate_limit = defaults.PYBB_NICE_URL_SLUG_DUPLICATE_LIMIT
defaults.PYBB_NICE_URL_SLUG_DUPLICATE_LIMIT = 200
try:
for _ in range(200):
Topic.objects.create(name='dolly', forum=self.forum, user=self.user)
except ValidationError:
self.fail('Should be able to create "dolly", "dolly-1", ..., "dolly-199".')
with self.assertRaises(ValidationError):
Topic.objects.create(name='dolly', forum=self.forum, user=self.user)
defaults.PYBB_NICE_URL_SLUG_DUPLICATE_LIMIT = original_duplicate_limit
def test_long_duplicate_slug(self):
long_name = 'abcde' * 51 # 255 symbols
topic1 = Topic.objects.create(name=long_name, forum=self.forum, user=self.user)
self.assertEqual(topic1.slug, long_name)
topic2 = Topic.objects.create(name=long_name, forum=self.forum, user=self.user)
self.assertEqual(topic2.slug, '%s-1' % long_name[:253])
topic3 = Topic.objects.create(name=long_name, forum=self.forum, user=self.user)
self.assertEqual(topic3.slug, '%s-2' % long_name[:253])
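# Slugs are capped at 255 characters, so for over-long names the numeric
# suffix replaces the tail of the name: 253 characters plus '-1' keeps the
# total at the 255-character limit.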
def test_absolute_url(self):
response = self.client.get(self.category.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['category'], self.category)
self.assertEqual('/c/%s/' % (self.category.slug), self.category.get_absolute_url())
response = self.client.get(self.forum.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['forum'], self.forum)
self.assertEqual(
'/c/%s/%s/' % (self.category.slug, self.forum.slug),
self.forum.get_absolute_url()
)
response = self.client.get(self.topic.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['topic'], self.topic)
self.assertEqual(
'/c/%s/%s/%s/' % (self.category.slug, self.forum.slug, self.topic.slug),
self.topic.get_absolute_url()
)
def test_add_topic(self):
add_topic_url = reverse('pybb:add_topic', kwargs={'forum_id': self.forum.pk})
response = self.client.get(add_topic_url)
inputs = dict(html.fromstring(response.content).xpath('//form[@class="%s"]' % "post-form")[0].inputs)
self.assertNotIn('slug', inputs)
values = self.get_form_values(response)
values.update({'name': self.topic.name, 'body': '[b]Test slug body[/b]', 'poll_type': 0})
response = self.client.post(add_topic_url, data=values, follow=True)
slug_nb = len(Topic.objects.filter(slug__startswith=compat.slugify(self.topic.name))) - 1
self.assertIsNotNone(Topic.objects.get(slug='%s-%d' % (compat.slugify(self.topic.name), slug_nb)))
_attach_perms_class('pybb.tests.CustomPermissionHandler')
response = self.client.get(add_topic_url)
inputs = dict(html.fromstring(response.content).xpath('//form[@class="%s"]' % "post-form")[0].inputs)
self.assertIn('slug', inputs)
values = self.get_form_values(response)
values.update({'name': self.topic.name, 'body': '[b]Test slug body[/b]',
'poll_type': 0, 'slug': 'test_slug'})
response = self.client.post(add_topic_url, data=values, follow=True)
self.assertIsNotNone(Topic.objects.get(slug='test_slug'))
_detach_perms_class()
def test_old_url_redirection(self):
original_perm_redirect = defaults.PYBB_NICE_URL_PERMANENT_REDIRECT
for redirect_status in [301, 302]:
defaults.PYBB_NICE_URL_PERMANENT_REDIRECT = redirect_status == 301
response = self.client.get(reverse("pybb:category", kwargs={"pk": self.category.pk}))
self.assertRedirects(response, self.category.get_absolute_url(), status_code=redirect_status)
response = self.client.get(reverse("pybb:forum", kwargs={"pk": self.forum.pk}))
self.assertRedirects(response, self.forum.get_absolute_url(), status_code=redirect_status)
response = self.client.get(reverse("pybb:topic", kwargs={"pk": self.topic.pk}))
self.assertRedirects(response, self.topic.get_absolute_url(), status_code=redirect_status)
defaults.PYBB_NICE_URL_PERMANENT_REDIRECT = original_perm_redirect
def tearDown(self):
defaults.PYBB_NICE_URL = self.ORIGINAL_PYBB_NICE_URL
|
skolsuper/pybbm
|
pybb/tests.py
|
Python
|
bsd-2-clause
| 104,993
|
[
"VisIt"
] |
0e47a1977f2734e2270c506975dbad3dfc0693addafc0c5fc7a4dfb6cf27610a
|
#!/usr/bin/env python
"""moose_sim.py:
A cable with 1000 compartments with HH-type channels in it.
Last modified: Wed May 21, 2014 09:51AM
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, NCBS Bangalore"
__credits__ = ["NCBS Bangalore", "Bhalla Lab"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import numpy as np
import moose
from moose import utils
import time
EREST_ACT = -65e-3
per_ms = 1e3
dt = 5e-5
#dt = 1e-6
cable = []
class MooseCompartment():
"""A simple class for making MooseCompartment in moose"""
def __init__(self, path, length, diameter, args):
""" Initialize moose-compartment """
self.mc_ = None
self.path = path
# Following values are taken from Upi's chapter on Rallpacks
self.RM = args.get('RM', 4.0)
self.RA = args.get('RA', 1.0)
self.CM = args.get('CM', 0.01)
self.Em = args.get('Em', -0.065)
self.diameter = diameter
self.compLength = length
self.computeParams( )
try:
self.mc_ = moose.Compartment(self.path)
self.mc_.length = self.compLength
self.mc_.diameter = self.diameter
self.mc_.Ra = self.Ra
self.mc_.Rm = self.Rm
self.mc_.Cm = self.Cm
self.mc_.Em = self.Em
self.mc_.initVm = self.Em
except Exception as e:
utils.dump("ERROR"
, [ "Can't create compartment with path %s " % path
, "Failed with error %s " % e
]
)
raise
#utils.dump('DEBUG', [ 'Compartment: {}'.format( self ) ] )
def __repr__( self ):
msg = '{}: '.format( self.mc_.path )
msg += '\n\t|- Length: {:1.4e}, Diameter: {:1.4e}'.format(
self.mc_.length, self.mc_.diameter
)
# msg += '\n\t|- Cross-section: {:1.4e}, SurfaceArea: {:1.4e}'.format(
# self.crossSection, self.surfaceArea
# )
msg += '\n\t|- Ra: {:1.3e}, Rm: {:1.3e}, Cm: {:1.3e}, Em: {:1.3e}'.format(
self.mc_.Ra, self.mc_.Rm, self.mc_.Cm, self.mc_.Em
)
return msg
def __str__( self ):
return self.__repr__( )
def computeParams( self ):
'''Compute essential passive parameters for the compartment.'''
self.surfaceArea = np.pi * self.compLength * self.diameter
self.crossSection = ( np.pi * self.diameter * self.diameter ) / 4.0
self.Ra = ( self.RA * self.compLength ) / self.crossSection
self.Rm = ( self.RM / self.surfaceArea )
self.Cm = ( self.CM * self.surfaceArea )
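# Worked example (hypothetical values): with the Rallpack defaults above and
# length = diameter = 1e-6 m, surfaceArea = pi*1e-12 ~ 3.14e-12 m^2, so
# Rm = 4.0/3.14e-12 ~ 1.27e12 ohm, Cm = 0.01*3.14e-12 ~ 3.14e-14 F, and
# Ra = 1.0*1e-6/(pi*(1e-6)**2/4) ~ 1.27e6 ohm.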
def alphaM(A, B, V0, v):
'''Compute alpha_m at point v
alpha_m = A*(v - V0) / (exp((v - V0)/B) - 1)
'''
return (A*(v-V0) / (np.exp((v - V0)/B) -1 ))
def alphaN(A, B, V0, v):
'''Compute alpha_n at point v
alpha_n = A*(v - V0) / (exp((v - V0)/B) - 1)
'''
return alphaM(A, B, V0, v)
def betaM(A, B, V0, v):
'''Compute beta_m at point v
'''
return (A * np.exp((v-V0)/B))
def betaN(A, B, V0, v):
return betaM(A, B, V0, v)
def alphaH(A, B, V0, v):
'''Compute alpha_h at point v
'''
return (A * np.exp(( v - V0) / B))
def betaH(A, B, V0, v):
'''Compute beta_h at point v:
beta_h = A / (exp((V0 - v)/B) + 1)
'''
return A / (np.exp((V0 - v) / B) + 1)
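# The helpers above are generic Hodgkin-Huxley rate templates: a linoid form
# A*(v - V0)/(exp((v - V0)/B) - 1) and exponential/sigmoid forms; the channel
# builders below use the same families of rate functions with concrete
# constants.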
def createChannel(species, path, **kwargs):
"""Create a channel of the given species under `path`."""
if species == 'na':
return create_na_chan(parent=path, **kwargs)
elif species == 'k':
return create_k_chan(parent=path, **kwargs)
else:
utils.dump("FATAL", "Unsupported channel type: {}".format(species))
raise RuntimeError("Unsupported species of channel")
def create_na_chan(parent='/library', name='na', vmin=-110e-3, vmax=50e-3, vdivs=3000):
"""Create a Hodhkin-Huxley Na channel under `parent`.
vmin, vmax, vdivs: voltage range and number of divisions for gate tables
"""
na = moose.HHChannel('%s/%s' % (parent, name))
na.Xpower = 3
na.Ypower = 1
v = np.linspace(vmin, vmax, vdivs+1) - EREST_ACT
m_alpha = per_ms * (25 - v * 1e3) / (10 * (np.exp((25 - v * 1e3) / 10) - 1))
m_beta = per_ms * 4 * np.exp(- v * 1e3/ 18)
m_gate = moose.element('%s/gateX' % (na.path))
m_gate.min = vmin
m_gate.max = vmax
m_gate.divs = vdivs
m_gate.tableA = m_alpha
m_gate.tableB = m_alpha + m_beta
h_alpha = per_ms * 0.07 * np.exp(-v / 20e-3)
h_beta = per_ms * 1/(np.exp((30e-3 - v) / 10e-3) + 1)
h_gate = moose.element('%s/gateY' % (na.path))
h_gate.min = vmin
h_gate.max = vmax
h_gate.divs = vdivs
h_gate.tableA = h_alpha
h_gate.tableB = h_alpha + h_beta
return na
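# In the gate tables above, tableA holds alpha(v) and tableB holds
# alpha(v) + beta(v); since m_inf = alpha/(alpha + beta) and
# tau_m = 1/(alpha + beta), each entry encodes the steady state as A/B and
# the time constant as 1/B.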
def create_k_chan(parent='/library', name='k', vmin=-120e-3, vmax=40e-3, vdivs=3000):
"""Create a Hodhkin-Huxley K channel under `parent`.
vmin, vmax, vdivs: voltage range and number of divisions for gate tables
"""
k = moose.HHChannel('%s/%s' % (parent, name))
k.Xpower = 4
v = np.linspace(vmin, vmax, vdivs+1) - EREST_ACT
n_alpha = per_ms * (10 - v * 1e3)/(100 * (np.exp((10 - v * 1e3)/10) - 1))
n_beta = per_ms * 0.125 * np.exp(- v * 1e3 / 80)
n_gate = moose.element('%s/gateX' % (k.path))
n_gate.min = vmin
n_gate.max = vmax
n_gate.divs = vdivs
n_gate.tableA = n_alpha
n_gate.tableB = n_alpha + n_beta
return k
def createHHComp(parent='/library', name='hhcomp', diameter=1e-6, length=1e-6):
"""Create a compartment with Hodgkin-Huxley type ion channels (Na and
K).
Returns a 3-tuple: (compartment, nachannel, kchannel)
"""
compPath = '{}/{}'.format(parent, name)
mc = MooseCompartment( compPath, length, diameter, {})
c = mc.mc_
sarea = mc.surfaceArea
if moose.exists('/library/na'):
moose.copy('/library/na', c.path, 'na')
else:
create_na_chan(parent = c.path)
na = moose.element('%s/na' % (c.path))
# Na-conductance 120 mS/cm^2
na.Gbar = 120e-3 * sarea * 1e4
na.Ek = 115e-3 + EREST_ACT
moose.connect(c, 'channel', na, 'channel')
if moose.exists('/library/k'):
moose.copy('/library/k', c.path, 'k')
else:
create_k_chan(parent = c.path)
k = moose.element('%s/k' % (c.path))
# K-conductance 36 mS/cm^2
k.Gbar = 36e-3 * sarea * 1e4
k.Ek = -12e-3 + EREST_ACT
moose.connect(c, 'channel', k, 'channel')
return (c, na, k)
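# Note the unit conversion above: sarea is in m^2 while the specific
# conductances are quoted in S/cm^2, and 1 m^2 = 1e4 cm^2, hence the factor
# of 1e4 in Gbar. Minimal usage sketch (paths are illustrative):
#   moose.Neutral('/cable')
#   comp, na, k = createHHComp('/cable', 'hhcomp0')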
def makeCable(args):
global cable
ncomp = args['ncomp']
moose.Neutral('/cable')
for i in range( ncomp ):
compName = 'hhcomp{}'.format(i)
hhComp = createHHComp('/cable', compName)
cable.append( hhComp[0] )
# connect the cable.
for i, hhc in enumerate(cable[0:-1]):
hhc.connect('axial', cable[i+1], 'raxial')
def setupDUT( dt ):
global cable
comp = cable[0]
data = moose.Neutral('/data')
pg = moose.PulseGen('/data/pg')
pg.firstWidth = 25e-3
pg.firstLevel = 1e-10
moose.connect(pg, 'output', comp, 'injectMsg')
setupClocks( dt )
def setupClocks( dt ):
moose.setClock(0, dt)
moose.setClock(1, dt)
def setupSolver( hsolveDt ):
hsolvePath = '/hsolve'
hsolve = moose.HSolve( hsolvePath )
hsolve.dt = hsolveDt
hsolve.target = '/cable'
moose.useClock(1, hsolvePath, 'process')
def simulate( runTime, dt):
""" Simulate the cable """
moose.useClock(0, '/cable/##', 'process')
moose.useClock(0, '/cable/##', 'init')
moose.useClock(1, '/##', 'process')
moose.reinit()
setupSolver( hsolveDt = dt )
t = time.time( )
moose.start( runTime )
print( 'Time taken to simulate %f = %f' % ( runTime, time.time() - t ) )
def main(args):
global cable
dt = args['dt']
makeCable(args)
setupDUT( dt )
simulate( args['run_time'], dt )
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description = 'Rallpacks3: a cable of n compartments with HH-type channels'
)
parser.add_argument( '--tau'
, default = 0.04
, type = float
, help = 'Time constant of membrane'
)
parser.add_argument( '--run_time'
, default = 0.25
, type = float
, help = 'Simulation run time'
)
parser.add_argument( '--dt'
, default = 5e-5
, type = float
, help = 'Step time during simulation'
)
parser.add_argument( '--Em'
, default = -65e-3
, type = float
, help = 'Resting potential of membrane'
)
parser.add_argument( '--RA'
, default = 1.0
, type = float
, help = 'Axial resistivity'
)
parser.add_argument( '--lambda'
, default = 1e-3
, type = float
, help = 'Space constant (lambda) of the cable'
)
parser.add_argument( '--x'
, default = 1e-3
, type = float
, help = 'Position along the cable at which to record membrane potential'
)
parser.add_argument( '--length'
, default = 1e-3
, type = float
, help = 'Length of the cable'
)
parser.add_argument( '--diameter'
, default = 1e-6
, type = float
, help = 'Diameter of cable'
)
parser.add_argument( '--inj'
, default = 1e-10
, type = float
, help = 'Current injected at one end of the cable'
)
parser.add_argument( '--ncomp'
, default = 1000
, type = int
, help = 'Number of compartments in the cable'
)
parser.add_argument( '--output'
, default = None
, type = str
, help = 'Store simulation results to this file'
)
args = parser.parse_args()
main( vars(args) )
|
subhacom/moose-core
|
tests/python/Rallpacks/moose_vs_neuron/rallpack3/moose_sim.py
|
Python
|
gpl-3.0
| 10,110
|
[
"MOOSE"
] |
452f996e5620911943fd76be51787de935832379cce55bd44ad697831ba67648
|
#!/usr/bin/env python
'''
Quantile regression model
Model parameters are estimated using iterated reweighted least squares. The
asymptotic covariance matrix estimated using kernel density estimation.
Author: Vincent Arel-Bundock
License: BSD-3
Created: 2013-03-19
The original IRLS function was written for Matlab by Shapour Mohammadi,
University of Tehran, 2008 (shmohammadi@gmail.com), with some lines based on
code written by James P. Lesage in Applied Econometrics Using MATLAB(1999).PP.
73-4. Translated to python with permission from original author by Christian
Prinoth (christian at prinoth dot name).
'''
import numpy as np
import warnings
import scipy.stats as stats
from numpy.linalg import pinv
from scipy.stats import norm
from statsmodels.tools.decorators import cache_readonly
from statsmodels.regression.linear_model import (RegressionModel,
RegressionResults,
RegressionResultsWrapper)
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
IterationLimitWarning)
class QuantReg(RegressionModel):
'''Quantile Regression
Estimate a quantile regression model using iterative reweighted least
squares.
Parameters
----------
endog : array or dataframe
endogenous/response variable
exog : array or dataframe
exogenous/explanatory variable(s)
Notes
-----
The Least Absolute Deviation (LAD) estimator is a special case where
quantile is set to 0.5 (q argument of the fit method).
The asymptotic covariance matrix is estimated following the procedure in
Greene (2008, p.407-408), using either the logistic or gaussian kernels
(kernel argument of the fit method).
References
----------
General:
* Birkes, D. and Y. Dodge(1993). Alternative Methods of Regression, John Wiley and Sons.
* Greene, W. H. (2008). Econometric Analysis. Sixth Edition. International Student Edition.
* Koenker, R. (2005). Quantile Regression. New York: Cambridge University Press.
* LeSage, J. P.(1999). Applied Econometrics Using MATLAB,
Kernels (used by the fit method):
* Greene (2008), Table 14.2
Bandwidth selection (used by the fit method):
* Bofinger, E. (1975). Estimation of a density function using order statistics. Australian Journal of Statistics 17: 1-17.
* Chamberlain, G. (1994). Quantile regression, censoring, and the structure of wages. In Advances in Econometrics, Vol. 1: Sixth World Congress, ed. C. A. Sims, 171-209. Cambridge: Cambridge University Press.
* Hall, P., and S. Sheather. (1988). On the distribution of the Studentized quantile. Journal of the Royal Statistical Society, Series B 50: 381-391.
Keywords: Least Absolute Deviation(LAD) Regression, Quantile Regression,
Regression, Robust Estimation.
'''
def __init__(self, endog, exog, **kwargs):
self._check_kwargs(kwargs)
super(QuantReg, self).__init__(endog, exog, **kwargs)
def whiten(self, data):
"""
QuantReg model whitener does nothing: returns data.
"""
return data
def fit(self, q=.5, vcov='robust', kernel='epa', bandwidth='hsheather',
max_iter=1000, p_tol=1e-6, **kwargs):
"""
Solve by Iterative Weighted Least Squares
Parameters
----------
q : float
Quantile must be strictly between 0 and 1
vcov : str, method used to calculate the variance-covariance matrix
of the parameters. Default is ``robust``:
- robust : heteroskedasticity robust standard errors (as suggested
in Greene 6th edition)
- iid : iid errors (as in Stata 12)
kernel : str, kernel to use in the kernel density estimation for the
asymptotic covariance matrix:
- epa: Epanechnikov
- cos: Cosine
- gau: Gaussian
- par: Parzen
- biw: Biweight
bandwidth : str, Bandwidth selection method in kernel density
estimation for asymptotic covariance estimate (full
references in QuantReg docstring):
- hsheather: Hall-Sheather (1988)
- bofinger: Bofinger (1975)
- chamberlain: Chamberlain (1994)
"""
if q <= 0 or q >= 1:
raise Exception('q must be strictly between 0 and 1')
kern_names = ['biw', 'cos', 'epa', 'gau', 'par']
if kernel not in kern_names:
raise Exception("kernel must be one of " + ', '.join(kern_names))
else:
kernel = kernels[kernel]
if bandwidth == 'hsheather':
bandwidth = hall_sheather
elif bandwidth == 'bofinger':
bandwidth = bofinger
elif bandwidth == 'chamberlain':
bandwidth = chamberlain
else:
raise Exception("bandwidth must be in 'hsheather', 'bofinger', 'chamberlain'")
endog = self.endog
exog = self.exog
nobs = self.nobs
exog_rank = np.linalg.matrix_rank(self.exog)
self.rank = exog_rank
self.df_model = float(self.rank - self.k_constant)
self.df_resid = self.nobs - self.rank
n_iter = 0
xstar = exog
beta = np.ones(exog.shape[1])
# TODO: better start, initial beta is used only for convergence check
# Note the following does not work yet,
# the iteration loop always starts with OLS as initial beta
# if start_params is not None:
# if len(start_params) != rank:
# raise ValueError('start_params has wrong length')
# beta = start_params
# else:
# # start with OLS
# beta = np.dot(np.linalg.pinv(exog), endog)
diff = 10
cycle = False
history = dict(params = [], mse=[])
while n_iter < max_iter and diff > p_tol and not cycle:
n_iter += 1
beta0 = beta
xtx = np.dot(xstar.T, exog)
xty = np.dot(xstar.T, endog)
beta = np.dot(pinv(xtx), xty)
resid = endog - np.dot(exog, beta)
mask = np.abs(resid) < .000001
resid[mask] = ((resid[mask] >= 0) * 2 - 1) * .000001
resid = np.where(resid < 0, q * resid, (1-q) * resid)
resid = np.abs(resid)
xstar = exog / resid[:, np.newaxis]
diff = np.max(np.abs(beta - beta0))
history['params'].append(beta)
history['mse'].append(np.mean(resid*resid))
if (n_iter >= 300) and (n_iter % 100 == 0):
                # check for a convergence cycle; should not happen
for ii in range(2, 10):
if np.all(beta == history['params'][-ii]):
cycle = True
warnings.warn("Convergence cycle detected", ConvergenceWarning)
break
if n_iter == max_iter:
warnings.warn("Maximum number of iterations (" + str(max_iter) +
") reached.", IterationLimitWarning)
e = endog - np.dot(exog, beta)
# Greene (2008, p.407) writes that Stata 6 uses this bandwidth:
# h = 0.9 * np.std(e) / (nobs**0.2)
# Instead, we calculate bandwidth as in Stata 12
iqre = stats.scoreatpercentile(e, 75) - stats.scoreatpercentile(e, 25)
h = bandwidth(nobs, q)
h = min(np.std(endog),
iqre / 1.34) * (norm.ppf(q + h) - norm.ppf(q - h))
fhat0 = 1. / (nobs * h) * np.sum(kernel(e / h))
if vcov == 'robust':
d = np.where(e > 0, (q/fhat0)**2, ((1-q)/fhat0)**2)
xtxi = pinv(np.dot(exog.T, exog))
xtdx = np.dot(exog.T * d[np.newaxis, :], exog)
vcov = xtxi @ xtdx @ xtxi
elif vcov == 'iid':
vcov = (1. / fhat0)**2 * q * (1 - q) * pinv(np.dot(exog.T, exog))
else:
raise Exception("vcov must be 'robust' or 'iid'")
lfit = QuantRegResults(self, beta, normalized_cov_params=vcov)
lfit.q = q
lfit.iterations = n_iter
lfit.sparsity = 1. / fhat0
lfit.bandwidth = h
lfit.history = history
return RegressionResultsWrapper(lfit)
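# A minimal usage sketch, assuming the public statsmodels API
# (statsmodels.api.QuantReg); the synthetic data and names are illustrative.
def _example_quantreg_usage():
    import numpy as np
    import statsmodels.api as sm
    rng = np.random.default_rng(0)
    x = rng.normal(size=(200, 1))
    y = 1.0 + 2.0 * x[:, 0] + rng.normal(size=200)
    exog = sm.add_constant(x)      # prepend an intercept column
    res = sm.QuantReg(y, exog).fit(q=0.5, kernel='epa', bandwidth='hsheather')
    return res.params              # array([intercept, slope])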
def _parzen(u):
z = np.where(np.abs(u) <= .5, 4./3 - 8. * u**2 + 8. * np.abs(u)**3,
8. * (1 - np.abs(u))**3 / 3.)
z[np.abs(u) > 1] = 0
return z
kernels = {}
kernels['biw'] = lambda u: 15. / 16 * (1 - u**2)**2 * np.where(np.abs(u) <= 1, 1, 0)
kernels['cos'] = lambda u: np.where(np.abs(u) <= .5, 1 + np.cos(2 * np.pi * u), 0)
kernels['epa'] = lambda u: 3. / 4 * (1-u**2) * np.where(np.abs(u) <= 1, 1, 0)
kernels['gau'] = lambda u: norm.pdf(u)
kernels['par'] = _parzen
#kernels['bet'] = lambda u: np.where(np.abs(u) <= 1, .75 * (1 - u) * (1 + u), 0)
#kernels['log'] = lambda u: logistic.pdf(u) * (1 - logistic.pdf(u))
#kernels['tri'] = lambda u: np.where(np.abs(u) <= 1, 1 - np.abs(u), 0)
#kernels['trw'] = lambda u: 35. / 32 * (1 - u**2)**3 * np.where(np.abs(u) <= 1, 1, 0)
#kernels['uni'] = lambda u: 1. / 2 * np.where(np.abs(u) <= 1, 1, 0)
def hall_sheather(n, q, alpha=.05):
z = norm.ppf(q)
num = 1.5 * norm.pdf(z)**2.
den = 2. * z**2. + 1.
h = n**(-1. / 3) * norm.ppf(1. - alpha / 2.)**(2./3) * (num / den)**(1./3)
return h
def bofinger(n, q):
num = 9. / 2 * norm.pdf(2 * norm.ppf(q))**4
den = (2 * norm.ppf(q)**2 + 1)**2
h = n**(-1. / 5) * (num / den)**(1. / 5)
return h
def chamberlain(n, q, alpha=.05):
return norm.ppf(1 - alpha / 2) * np.sqrt(q*(1 - q) / n)
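# A minimal sketch comparing the three bandwidth rules above at the median;
# the sample size below is an arbitrary example value.
def _example_bandwidths(n=500, q=0.5):
    return {'hsheather': hall_sheather(n, q),
            'bofinger': bofinger(n, q),
            'chamberlain': chamberlain(n, q)}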
class QuantRegResults(RegressionResults):
'''Results instance for the QuantReg model'''
@cache_readonly
def prsquared(self):
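        # Pseudo R-squared in the spirit of Koenker & Machado (1999): one
        # minus the ratio of the weighted absolute-residual sum of the fit
        # to that of a model with only the unconditional q-th quantile.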
q = self.q
endog = self.model.endog
e = self.resid
e = np.where(e < 0, (1 - q) * e, q * e)
e = np.abs(e)
ered = endog - stats.scoreatpercentile(endog, q * 100)
ered = np.where(ered < 0, (1 - q) * ered, q * ered)
ered = np.abs(ered)
return 1 - np.sum(e) / np.sum(ered)
#@cache_readonly
def scale(self):
return 1.
@cache_readonly
def bic(self):
return np.nan
@cache_readonly
def aic(self):
return np.nan
@cache_readonly
def llf(self):
return np.nan
@cache_readonly
def rsquared(self):
return np.nan
@cache_readonly
def rsquared_adj(self):
return np.nan
@cache_readonly
def mse(self):
return np.nan
@cache_readonly
def mse_model(self):
return np.nan
@cache_readonly
def mse_total(self):
return np.nan
@cache_readonly
def centered_tss(self):
return np.nan
@cache_readonly
def uncentered_tss(self):
return np.nan
@cache_readonly
def HC0_se(self):
raise NotImplementedError
@cache_readonly
def HC1_se(self):
raise NotImplementedError
@cache_readonly
def HC2_se(self):
raise NotImplementedError
@cache_readonly
def HC3_se(self):
raise NotImplementedError
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""Summarize the Regression Results
Parameters
----------
yname : str, optional
Default is `y`
xname : list[str], optional
Names for the exogenous variables. Default is `var_##` for ## in
the number of regressors. Must match the number of parameters
in the model
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary results
"""
eigvals = self.eigenvals
condno = self.condition_number
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Least Squares']),
('Date:', None),
('Time:', None)
]
top_right = [('Pseudo R-squared:', ["%#8.4g" % self.prsquared]),
('Bandwidth:', ["%#8.4g" % self.bandwidth]),
('Sparsity:', ["%#8.4g" % self.sparsity]),
('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None)
]
if title is None:
title = self.model.__class__.__name__ + ' ' + "Regression Results"
# create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=self.use_t)
# add warnings/notes, added to text format only
etext = []
if eigvals[-1] < 1e-10:
wstr = "The smallest eigenvalue is %6.3g. This might indicate "
wstr += "that there are\n"
wstr += "strong multicollinearity problems or that the design "
wstr += "matrix is singular."
wstr = wstr % eigvals[-1]
etext.append(wstr)
elif condno > 1000: # TODO: what is recommended
wstr = "The condition number is large, %6.3g. This might "
wstr += "indicate that there are\n"
wstr += "strong multicollinearity or other numerical "
wstr += "problems."
wstr = wstr % condno
etext.append(wstr)
if etext:
smry.add_extra_txt(etext)
return smry
|
statsmodels/statsmodels
|
statsmodels/regression/quantile_regression.py
|
Python
|
bsd-3-clause
| 14,115
|
[
"Gaussian"
] |
9c442d418bad7f23de3d12a4c89022519d0f34b3a4790b974245001382cb11f3
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from edi.content import views as content_views
urlpatterns = [
url(r'^$', content_views.home_page, name='home'),
url(r'^about/(?P<town_slug>[\w-]+)/$', content_views.about_page, name='about'),
url(r'^analysis/(?P<town_slug>[\w-]+)/$', content_views.analysis_page, name='analysis'),
url(r'^map/(?P<town_slug>[\w-]+)/$', content_views.map_page, name='map'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
# User management
url(r'^users/', include('edi.users.urls', namespace='users')),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
url(r'^markdownx/', include('markdownx.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just
    # visit these urls in a browser to see how the error pages look.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
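# A minimal sketch (not part of the original config) showing how the named
# routes above can be reversed; 'hartford' is an assumed example slug.
def _example_reverse_about():
    from django.urls import reverse
    return reverse('about', kwargs={'town_slug': 'hartford'})  # '/about/hartford/'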
|
CT-Data-Collaborative/edi-v2
|
config/urls.py
|
Python
|
mit
| 1,773
|
[
"VisIt"
] |
e7aef10c0419f3e5012c0dd0bd41a94217aadaa699b296a1ac62b226b90e7a26
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to define everything related to band structures.
"""
import numpy as np
import re
import math
import itertools
import collections
import warnings
from monty.json import MSONable
from pymatgen.core.periodic_table import get_el_sp, Specie
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.electronic_structure.core import Spin, Orbital
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.coord import pbc_diff
__author__ = "Geoffroy Hautier, Shyue Ping Ong, Michael Kocher"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Geoffroy Hautier"
__email__ = "geoffroy@uclouvain.be"
__status__ = "Development"
__date__ = "March 14, 2012"
class Kpoint(MSONable):
"""
    Class to store kpoint objects. A kpoint is defined with a lattice and
    frac or cartesian coordinates, with a syntax similar to that of the Site
    object in pymatgen.core.structure.
"""
def __init__(self, coords, lattice, to_unit_cell=False,
coords_are_cartesian=False, label=None):
"""
Args:
coords: coordinate of the kpoint as a numpy array
lattice: A pymatgen.core.lattice.Lattice lattice object representing
the reciprocal lattice of the kpoint
to_unit_cell: Translates fractional coordinate to the basic unit
cell, i.e., all fractional coordinates satisfy 0 <= a < 1.
Defaults to False.
coords_are_cartesian: Boolean indicating if the coordinates given are
in cartesian or fractional coordinates (by default fractional)
label: the label of the kpoint if any (None by default)
"""
self._lattice = lattice
self._fcoords = lattice.get_fractional_coords(coords) \
if coords_are_cartesian else coords
self._label = label
if to_unit_cell:
for i in range(len(self._fcoords)):
self._fcoords[i] -= math.floor(self._fcoords[i])
self._ccoords = lattice.get_cartesian_coords(self._fcoords)
@property
def lattice(self):
"""
The lattice associated with the kpoint. It's a
pymatgen.core.lattice.Lattice object
"""
return self._lattice
@property
def label(self):
"""
The label associated with the kpoint
"""
return self._label
@property
def frac_coords(self):
"""
The fractional coordinates of the kpoint as a numpy array
"""
return np.copy(self._fcoords)
@property
def cart_coords(self):
"""
The cartesian coordinates of the kpoint as a numpy array
"""
return np.copy(self._ccoords)
@property
def a(self):
"""
Fractional a coordinate of the kpoint
"""
return self._fcoords[0]
@property
def b(self):
"""
Fractional b coordinate of the kpoint
"""
return self._fcoords[1]
@property
def c(self):
"""
Fractional c coordinate of the kpoint
"""
return self._fcoords[2]
def __str__(self):
"""
Returns a string with fractional, cartesian coordinates and label
"""
return "{} {} {}".format(self.frac_coords, self.cart_coords,
self.label)
def as_dict(self):
"""
Json-serializable dict representation of a kpoint
"""
return {"lattice": self.lattice.as_dict(),
"fcoords": list(self.frac_coords),
"ccoords": list(self.cart_coords), "label": self.label,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
class BandStructure:
"""
This is the most generic band structure data possible
it's defined by a list of kpoints + energies for each of them
.. attribute:: kpoints:
the list of kpoints (as Kpoint objects) in the band structure
.. attribute:: lattice_rec
the reciprocal lattice of the band structure.
.. attribute:: efermi
the fermi energy
.. attribute:: is_spin_polarized
True if the band structure is spin-polarized, False otherwise
.. attribute:: bands
The energy eigenvalues as a {spin: ndarray}. Note that the use of an
ndarray is necessary for computational as well as memory efficiency
due to the large amount of numerical data. The indices of the ndarray
are [band_index, kpoint_index].
.. attribute:: nb_bands
returns the number of bands in the band structure
.. attribute:: structure
returns the structure
.. attribute:: projections
The projections as a {spin: ndarray}. Note that the use of an
ndarray is necessary for computational as well as memory efficiency
due to the large amount of numerical data. The indices of the ndarray
are [band_index, kpoint_index, orbital_index, ion_index].
"""
def __init__(self, kpoints, eigenvals, lattice, efermi, labels_dict=None,
coords_are_cartesian=False, structure=None, projections=None):
"""
Args:
kpoints: list of kpoint as numpy arrays, in frac_coords of the
given lattice by default
eigenvals: dict of energies for spin up and spin down
{Spin.up:[][],Spin.down:[][]}, the first index of the array
[][] refers to the band and the second to the index of the
kpoint. The kpoints are ordered according to the order of the
kpoints array. If the band structure is not spin polarized, we
only store one data set under Spin.up
lattice: The reciprocal lattice as a pymatgen Lattice object.
Pymatgen uses the physics convention of reciprocal lattice vectors
WITH a 2*pi coefficient
efermi: fermi energy
labels_dict: (dict) of {} this links a kpoint (in frac coords or
cartesian coordinates depending on the coords) to a label.
coords_are_cartesian: Whether coordinates are cartesian.
structure: The crystal structure (as a pymatgen Structure object)
associated with the band structure. This is needed if we
provide projections to the band structure
            projections: dict of orbital projections as {spin: ndarray}. The
                indices of the ndarray are [band_index, kpoint_index,
                orbital_index, ion_index]. If the band structure is not spin
                polarized, we only store one data set under Spin.up.
"""
self.efermi = efermi
self.lattice_rec = lattice
self.kpoints = []
self.labels_dict = {}
self.structure = structure
self.projections = projections or {}
self.projections = {k: np.array(v) for k, v in self.projections.items()}
if labels_dict is None:
labels_dict = {}
if len(self.projections) != 0 and self.structure is None:
raise Exception("if projections are provided a structure object"
" needs also to be given")
for k in kpoints:
# let see if this kpoint has been assigned a label
label = None
for c in labels_dict:
if np.linalg.norm(k - np.array(labels_dict[c])) < 0.0001:
label = c
self.labels_dict[label] = Kpoint(
k, lattice, label=label,
coords_are_cartesian=coords_are_cartesian)
self.kpoints.append(
Kpoint(k, lattice, label=label,
coords_are_cartesian=coords_are_cartesian))
self.bands = {spin: np.array(v) for spin, v in eigenvals.items()}
self.nb_bands = len(eigenvals[Spin.up])
self.is_spin_polarized = len(self.bands) == 2
def get_projection_on_elements(self):
"""
Method returning a dictionary of projections on elements.
Returns:
a dictionary in the {Spin.up:[][{Element:values}],
Spin.down:[][{Element:values}]} format
            if there are no projections in the band structure,
            an empty dict is returned
"""
result = {}
structure = self.structure
for spin, v in self.projections.items():
result[spin] = [[collections.defaultdict(float)
for i in range(len(self.kpoints))]
for j in range(self.nb_bands)]
for i, j, k in itertools.product(range(self.nb_bands),
range(len(self.kpoints)),
range(structure.num_sites)):
result[spin][i][j][str(structure[k].specie)] += np.sum(
v[i, j, :, k])
return result
def get_projections_on_elements_and_orbitals(self, el_orb_spec):
"""
Method returning a dictionary of projections on elements and specific
orbitals
Args:
el_orb_spec: A dictionary of Elements and Orbitals for which we want
to have projections on. It is given as: {Element:[orbitals]},
e.g., {'Cu':['d','s']}
Returns:
A dictionary of projections on elements in the
{Spin.up:[][{Element:{orb:values}}],
Spin.down:[][{Element:{orb:values}}]} format
            if there are no projections in the band structure, an empty
            dict is returned.
"""
result = {}
structure = self.structure
el_orb_spec = {get_el_sp(el): orbs for el, orbs in el_orb_spec.items()}
for spin, v in self.projections.items():
result[spin] = [[{str(e): collections.defaultdict(float)
for e in el_orb_spec}
for i in range(len(self.kpoints))]
for j in range(self.nb_bands)]
for i, j, k in itertools.product(
range(self.nb_bands), range(len(self.kpoints)),
range(structure.num_sites)):
sp = structure[k].specie
for orb_i in range(len(v[i][j])):
o = Orbital(orb_i).name[0]
if sp in el_orb_spec:
if o in el_orb_spec[sp]:
result[spin][i][j][str(sp)][o] += v[i][j][
orb_i][k]
return result
def is_metal(self, efermi_tol=1e-4):
"""
Check if the band structure indicates a metal by looking if the fermi
level crosses a band.
Returns:
True if a metal, False if not
"""
for spin, values in self.bands.items():
for i in range(self.nb_bands):
if np.any(values[i, :] - self.efermi < -efermi_tol) and \
np.any(values[i, :] - self.efermi > efermi_tol):
return True
return False
def get_vbm(self):
"""
Returns data about the VBM.
Returns:
dict as {"band_index","kpoint_index","kpoint","energy"}
- "band_index": A dict with spin keys pointing to a list of the
indices of the band containing the VBM (please note that you
can have several bands sharing the VBM) {Spin.up:[],
Spin.down:[]}
- "kpoint_index": The list of indices in self.kpoints for the
kpoint vbm. Please note that there can be several
kpoint_indices relating to the same kpoint (e.g., Gamma can
occur at different spots in the band structure line plot)
- "kpoint": The kpoint (as a kpoint object)
- "energy": The energy of the VBM
- "projections": The projections along sites and orbitals of the
VBM if any projection data is available (else it is an empty
              dictionary). The format is similar to the projections field in
BandStructure: {spin:{'Orbital': [proj]}} where the array
[proj] is ordered according to the sites in structure
"""
if self.is_metal():
return {"band_index": [], "kpoint_index": [],
"kpoint": [], "energy": None, "projections": {}}
max_tmp = -float("inf")
index = None
kpointvbm = None
for spin, v in self.bands.items():
for i, j in zip(*np.where(v < self.efermi)):
if v[i, j] > max_tmp:
max_tmp = float(v[i, j])
index = j
kpointvbm = self.kpoints[j]
list_ind_kpts = []
if kpointvbm.label is not None:
for i in range(len(self.kpoints)):
if self.kpoints[i].label == kpointvbm.label:
list_ind_kpts.append(i)
else:
list_ind_kpts.append(index)
# get all other bands sharing the vbm
list_ind_band = collections.defaultdict(list)
for spin in self.bands:
for i in range(self.nb_bands):
if math.fabs(self.bands[spin][i][index] - max_tmp) < 0.001:
list_ind_band[spin].append(i)
proj = {}
for spin, v in self.projections.items():
if len(list_ind_band[spin]) == 0:
continue
proj[spin] = v[list_ind_band[spin][0]][list_ind_kpts[0]]
return {'band_index': list_ind_band,
'kpoint_index': list_ind_kpts,
'kpoint': kpointvbm, 'energy': max_tmp,
'projections': proj}
def get_cbm(self):
"""
Returns data about the CBM.
Returns:
{"band_index","kpoint_index","kpoint","energy"}
- "band_index": A dict with spin keys pointing to a list of the
indices of the band containing the VBM (please note that you
can have several bands sharing the VBM) {Spin.up:[],
Spin.down:[]}
- "kpoint_index": The list of indices in self.kpoints for the
kpoint vbm. Please note that there can be several
kpoint_indices relating to the same kpoint (e.g., Gamma can
occur at different spots in the band structure line plot)
- "kpoint": The kpoint (as a kpoint object)
- "energy": The energy of the VBM
- "projections": The projections along sites and orbitals of the
VBM if any projection data is available (else it is an empty
dictionnary). The format is similar to the projections field in
BandStructure: {spin:{'Orbital': [proj]}} where the array
[proj] is ordered according to the sites in structure
"""
if self.is_metal():
return {"band_index": [], "kpoint_index": [],
"kpoint": [], "energy": None, "projections": {}}
max_tmp = float("inf")
index = None
kpointcbm = None
for spin, v in self.bands.items():
for i, j in zip(*np.where(v >= self.efermi)):
if v[i, j] < max_tmp:
max_tmp = float(v[i, j])
index = j
kpointcbm = self.kpoints[j]
list_index_kpoints = []
if kpointcbm.label is not None:
for i in range(len(self.kpoints)):
if self.kpoints[i].label == kpointcbm.label:
list_index_kpoints.append(i)
else:
list_index_kpoints.append(index)
# get all other bands sharing the cbm
list_index_band = collections.defaultdict(list)
for spin in self.bands:
for i in range(self.nb_bands):
if math.fabs(self.bands[spin][i][index] - max_tmp) < 0.001:
list_index_band[spin].append(i)
proj = {}
for spin, v in self.projections.items():
if len(list_index_band[spin]) == 0:
continue
proj[spin] = v[list_index_band[spin][0]][list_index_kpoints[0]]
return {'band_index': list_index_band,
'kpoint_index': list_index_kpoints,
'kpoint': kpointcbm, 'energy': max_tmp,
'projections': proj}
def get_band_gap(self):
r"""
Returns band gap data.
Returns:
A dict {"energy","direct","transition"}:
"energy": band gap energy
"direct": A boolean telling if the gap is direct or not
"transition": kpoint labels of the transition (e.g., "\\Gamma-X")
"""
if self.is_metal():
return {"energy": 0.0, "direct": False, "transition": None}
cbm = self.get_cbm()
vbm = self.get_vbm()
result = dict(direct=False, energy=0.0, transition=None)
result["energy"] = cbm["energy"] - vbm["energy"]
if (cbm["kpoint"].label is not None and cbm["kpoint"].label == vbm[
"kpoint"].label) \
or np.linalg.norm(cbm["kpoint"].cart_coords
- vbm["kpoint"].cart_coords) < 0.01:
result["direct"] = True
result["transition"] = "-".join(
[str(c.label) if c.label is not None else
str("(") + ",".join(["{0:.3f}".format(c.frac_coords[i])
for i in range(3)])
+ str(")") for c in [vbm["kpoint"], cbm["kpoint"]]])
return result
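    # A minimal sketch (hypothetical helper, not part of the original class)
    # showing how the get_band_gap() payload is typically consumed:
    def _example_gap_report(self):
        gap = self.get_band_gap()
        kind = "direct" if gap["direct"] else "indirect"
        return "{} gap of {:.3f} eV (transition: {})".format(
            kind, gap["energy"], gap["transition"])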
def get_direct_band_gap_dict(self):
"""
Returns a dictionary of information about the direct
band gap
Returns:
a dictionary of the band gaps indexed by spin
along with their band indices and k-point index
"""
if self.is_metal():
            raise ValueError("get_direct_band_gap_dict should "
                             "only be used with non-metals")
direct_gap_dict = {}
for spin, v in self.bands.items():
above = v[np.all(v > self.efermi, axis=1)]
min_above = np.min(above, axis=0)
below = v[np.all(v < self.efermi, axis=1)]
max_below = np.max(below, axis=0)
diff = min_above - max_below
kpoint_index = np.argmin(diff)
band_indices = [np.argmax(below[:, kpoint_index]),
np.argmin(above[:, kpoint_index]) + len(below)]
direct_gap_dict[spin] = {"value": diff[kpoint_index],
"kpoint_index": kpoint_index,
"band_indices": band_indices}
return direct_gap_dict
def get_direct_band_gap(self):
"""
Returns the direct band gap.
Returns:
the value of the direct band gap
"""
if self.is_metal():
return 0.0
dg = self.get_direct_band_gap_dict()
return min(v['value'] for v in dg.values())
def get_sym_eq_kpoints(self, kpoint, cartesian=False, tol=1e-2):
"""
Returns a list of unique symmetrically equivalent k-points.
Args:
kpoint (1x3 array): coordinate of the k-point
cartesian (bool): kpoint is in cartesian or fractional coordinates
tol (float): tolerance below which coordinates are considered equal
Returns:
([1x3 array] or None): if structure is not available returns None
"""
if not self.structure:
return None
sg = SpacegroupAnalyzer(self.structure)
symmops = sg.get_point_group_operations(cartesian=cartesian)
points = np.dot(kpoint, [m.rotation_matrix for m in symmops])
rm_list = []
# identify and remove duplicates from the list of equivalent k-points:
for i in range(len(points) - 1):
for j in range(i + 1, len(points)):
if np.allclose(pbc_diff(points[i], points[j]), [0, 0, 0], tol):
rm_list.append(i)
break
return np.delete(points, rm_list, axis=0)
def get_kpoint_degeneracy(self, kpoint, cartesian=False, tol=1e-2):
"""
Returns degeneracy of a given k-point based on structure symmetry
Args:
kpoint (1x3 array): coordinate of the k-point
cartesian (bool): kpoint is in cartesian or fractional coordinates
tol (float): tolerance below which coordinates are considered equal
Returns:
(int or None): degeneracy or None if structure is not available
"""
all_kpts = self.get_sym_eq_kpoints(kpoint, cartesian, tol=tol)
if all_kpts is not None:
return len(all_kpts)
def as_dict(self):
"""
        Json-serializable dict representation of BandStructure.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"lattice_rec": self.lattice_rec.as_dict(), "efermi": self.efermi,
"kpoints": []}
        # kpoints are not stored as Kpoint object dicts but as frac coords
        # (this makes the dict smaller and avoids repeating the lattice)
for k in self.kpoints:
d["kpoints"].append(k.as_dict()["fcoords"])
d["bands"] = {str(int(spin)): self.bands[spin]
for spin in self.bands}
d["is_metal"] = self.is_metal()
vbm = self.get_vbm()
d["vbm"] = {"energy": vbm["energy"],
"kpoint_index": vbm["kpoint_index"],
"band_index": {str(int(spin)): vbm["band_index"][spin]
for spin in vbm["band_index"]},
'projections': {str(spin): v.tolist() for spin, v in vbm[
'projections'].items()}}
cbm = self.get_cbm()
d['cbm'] = {'energy': cbm['energy'],
'kpoint_index': cbm['kpoint_index'],
'band_index': {str(int(spin)): cbm['band_index'][spin]
for spin in cbm['band_index']},
'projections': {str(spin): v.tolist() for spin, v in cbm[
'projections'].items()}}
d['band_gap'] = self.get_band_gap()
d['labels_dict'] = {}
d['is_spin_polarized'] = self.is_spin_polarized
for c in self.labels_dict:
d['labels_dict'][c] = self.labels_dict[c].as_dict()['fcoords']
d['projections'] = {}
if len(self.projections) != 0:
d['structure'] = self.structure.as_dict()
d['projections'] = {str(int(spin)): np.array(v).tolist()
for spin, v in self.projections.items()}
return d
@classmethod
def from_dict(cls, d):
"""
Create from dict.
        Args:
            d (dict): A dict with all data for a band structure object.
Returns:
A BandStructure object
"""
labels_dict = d['labels_dict']
projections = {}
structure = None
if isinstance(list(d['bands'].values())[0], dict):
eigenvals = {Spin(int(k)): np.array(d['bands'][k]['data'])
for k in d['bands']}
else:
eigenvals = {Spin(int(k)): d['bands'][k] for k in d['bands']}
if 'structure' in d:
structure = Structure.from_dict(d['structure'])
if d.get('projections'):
projections = {Spin(int(spin)): np.array(v)
for spin, v in d["projections"].items()}
return BandStructure(
d['kpoints'], eigenvals,
Lattice(d['lattice_rec']['matrix']), d['efermi'],
labels_dict, structure=structure, projections=projections)
@classmethod
def from_old_dict(cls, d):
"""
Args:
            d (dict): A dict with all data for a band structure object
                (old serialization format).
        Returns:
            A BandStructure object
"""
# Strip the label to recover initial string (see trick used in as_dict to handle $ chars)
labels_dict = {k.strip(): v for k, v in d['labels_dict'].items()}
projections = {}
structure = None
if 'projections' in d and len(d['projections']) != 0:
structure = Structure.from_dict(d['structure'])
projections = {}
for spin in d['projections']:
dd = []
for i in range(len(d['projections'][spin])):
ddd = []
for j in range(len(d['projections'][spin][i])):
dddd = []
for k in range(len(d['projections'][spin][i][j])):
ddddd = []
orb = Orbital(k).name
for l in range(len(d['projections'][spin][i][j][
orb])):
ddddd.append(d['projections'][spin][i][j][
orb][l])
dddd.append(np.array(ddddd))
ddd.append(np.array(dddd))
dd.append(np.array(ddd))
projections[Spin(int(spin))] = np.array(dd)
return BandStructure(
d['kpoints'], {Spin(int(k)): d['bands'][k] for k in d['bands']},
Lattice(d['lattice_rec']['matrix']), d['efermi'],
labels_dict, structure=structure, projections=projections)
class BandStructureSymmLine(BandStructure, MSONable):
r"""
This object stores band structures along selected (symmetry) lines in the
Brillouin zone. We call the different symmetry lines (ex: \\Gamma to Z)
"branches".
"""
def __init__(self, kpoints, eigenvals, lattice, efermi, labels_dict,
coords_are_cartesian=False, structure=None,
projections=None):
"""
Args:
kpoints: list of kpoint as numpy arrays, in frac_coords of the
given lattice by default
eigenvals: dict of energies for spin up and spin down
{Spin.up:[][],Spin.down:[][]}, the first index of the array
[][] refers to the band and the second to the index of the
kpoint. The kpoints are ordered according to the order of the
kpoints array. If the band structure is not spin polarized, we
only store one data set under Spin.up.
lattice: The reciprocal lattice.
Pymatgen uses the physics convention of reciprocal lattice vectors
WITH a 2*pi coefficient
efermi: fermi energy
            labels_dict: (dict) of {} linking a kpoint (in frac coords or
                cartesian coordinates depending on the coords) to a label.
coords_are_cartesian: Whether coordinates are cartesian.
structure: The crystal structure (as a pymatgen Structure object)
associated with the band structure. This is needed if we
provide projections to the band structure.
            projections: dict of orbital projections as {spin: ndarray}. The
                indices of the ndarray are [band_index, kpoint_index,
                orbital_index, ion_index]. If the band structure is not spin
                polarized, we only store one data set under Spin.up.
"""
super().__init__(
kpoints, eigenvals, lattice, efermi, labels_dict,
coords_are_cartesian, structure, projections)
self.distance = []
self.branches = []
one_group = []
branches_tmp = []
# get labels and distance for each kpoint
previous_kpoint = self.kpoints[0]
previous_distance = 0.0
previous_label = self.kpoints[0].label
for i in range(len(self.kpoints)):
label = self.kpoints[i].label
if label is not None and previous_label is not None:
self.distance.append(previous_distance)
else:
self.distance.append(
np.linalg.norm(self.kpoints[i].cart_coords -
previous_kpoint.cart_coords) +
previous_distance)
previous_kpoint = self.kpoints[i]
previous_distance = self.distance[i]
if label:
if previous_label:
if len(one_group) != 0:
branches_tmp.append(one_group)
one_group = []
previous_label = label
one_group.append(i)
if len(one_group) != 0:
branches_tmp.append(one_group)
for b in branches_tmp:
self.branches.append(
{"start_index": b[0], "end_index": b[-1],
"name": str(self.kpoints[b[0]].label) + "-" + str(self.kpoints[b[-1]].label)})
self.is_spin_polarized = False
if len(self.bands) == 2:
self.is_spin_polarized = True
def get_equivalent_kpoints(self, index):
"""
Returns the list of kpoint indices equivalent (meaning they are the
same frac coords) to the given one.
Args:
index: the kpoint index
Returns:
a list of equivalent indices
        TODO: currently this uses the label; we might want to use
        coordinates instead (in case a kpoint was mislabelled)
"""
        # if the kpoint has no label it can't have a repetition along the
        # band structure line object
if self.kpoints[index].label is None:
return [index]
list_index_kpoints = []
for i in range(len(self.kpoints)):
if self.kpoints[i].label == self.kpoints[index].label:
list_index_kpoints.append(i)
return list_index_kpoints
def get_branch(self, index):
r"""
        Returns which branch(es) contain the kpoint. There can be several
        branches.
Args:
index: the kpoint index
Returns:
A list of dictionaries [{"name","start_index","end_index","index"}]
indicating all branches in which the k_point is. It takes into
account the fact that one kpoint (e.g., \\Gamma) can be in several
branches
"""
to_return = []
for i in self.get_equivalent_kpoints(index):
for b in self.branches:
if b["start_index"] <= i <= b["end_index"]:
to_return.append({"name": b["name"],
"start_index": b["start_index"],
"end_index": b["end_index"],
"index": i})
return to_return
def apply_scissor(self, new_band_gap):
"""
Apply a scissor operator (shift of the CBM) to fit the given band gap.
        If the material is a metal, we look for the band crossing the Fermi
        level and shift this one up. This will not work all the time for
        metals!
        Args:
            new_band_gap: the band gap the scissored band structure needs
                to have.
Returns:
a BandStructureSymmLine object with the applied scissor shift
"""
if self.is_metal():
# moves then the highest index band crossing the fermi level
# find this band...
max_index = -1000
# spin_index = None
for i in range(self.nb_bands):
below = False
above = False
for j in range(len(self.kpoints)):
if self.bands[Spin.up][i][j] < self.efermi:
below = True
if self.bands[Spin.up][i][j] > self.efermi:
above = True
if above and below:
if i > max_index:
max_index = i
# spin_index = Spin.up
if self.is_spin_polarized:
below = False
above = False
for j in range(len(self.kpoints)):
if self.bands[Spin.down][i][j] < self.efermi:
below = True
if self.bands[Spin.down][i][j] > self.efermi:
above = True
if above and below:
if i > max_index:
max_index = i
# spin_index = Spin.down
old_dict = self.as_dict()
shift = new_band_gap
for spin in old_dict['bands']:
for k in range(len(old_dict['bands'][spin])):
for v in range(len(old_dict['bands'][spin][k])):
if k >= max_index:
old_dict['bands'][spin][k][v] = \
old_dict['bands'][spin][k][v] + shift
else:
shift = new_band_gap - self.get_band_gap()['energy']
old_dict = self.as_dict()
for spin in old_dict['bands']:
for k in range(len(old_dict['bands'][spin])):
for v in range(len(old_dict['bands'][spin][k])):
if old_dict['bands'][spin][k][v] >= \
old_dict['cbm']['energy']:
old_dict['bands'][spin][k][v] = \
old_dict['bands'][spin][k][v] + shift
old_dict['efermi'] = old_dict['efermi'] + shift
return BandStructureSymmLine.from_dict(old_dict)
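    # A minimal sketch (hypothetical helper, not part of the original class):
    # opening the gap to an assumed target value and reading it back.
    def _example_apply_scissor(self, target_gap=1.17):
        shifted = self.apply_scissor(new_band_gap=target_gap)
        return shifted.get_band_gap()["energy"]   # ~ target_gap for non-metals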
def as_dict(self):
"""
Json-serializable dict representation of BandStructureSymmLine.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"lattice_rec": self.lattice_rec.as_dict(), "efermi": self.efermi,
"kpoints": []}
        # kpoints are not stored as Kpoint object dicts but as frac coords
        # (this makes the dict smaller and avoids repeating the lattice)
for k in self.kpoints:
d["kpoints"].append(k.as_dict()["fcoords"])
d["branches"] = self.branches
d["bands"] = {str(int(spin)): self.bands[spin].tolist()
for spin in self.bands}
d["is_metal"] = self.is_metal()
vbm = self.get_vbm()
d["vbm"] = {"energy": vbm["energy"],
"kpoint_index": vbm["kpoint_index"],
"band_index": {str(int(spin)): vbm["band_index"][spin]
for spin in vbm["band_index"]},
'projections': {str(spin): v.tolist() for spin, v in vbm[
'projections'].items()}}
cbm = self.get_cbm()
d['cbm'] = {'energy': cbm['energy'],
'kpoint_index': cbm['kpoint_index'],
'band_index': {str(int(spin)): cbm['band_index'][spin]
for spin in cbm['band_index']},
'projections': {str(spin): v.tolist() for spin, v in cbm[
'projections'].items()}}
d['band_gap'] = self.get_band_gap()
d['labels_dict'] = {}
d['is_spin_polarized'] = self.is_spin_polarized
        # MongoDB does not accept keys starting with $. Add a blank space to
        # work around the problem
for c in self.labels_dict:
mongo_key = c if not c.startswith("$") else " " + c
d['labels_dict'][mongo_key] = self.labels_dict[c].as_dict()[
'fcoords']
if len(self.projections) != 0:
d['structure'] = self.structure.as_dict()
d['projections'] = {str(int(spin)): np.array(v).tolist()
for spin, v in self.projections.items()}
return d
@classmethod
def from_dict(cls, d):
"""
Args:
d (dict): A dict with all data for a band structure symm line
object.
Returns:
A BandStructureSymmLine object
"""
try:
# Strip the label to recover initial string (see trick used in as_dict to handle $ chars)
labels_dict = {k.strip(): v for k, v in d['labels_dict'].items()}
projections = {}
structure = None
if d.get('projections'):
if isinstance(d["projections"]['1'][0][0], dict):
raise ValueError("Old band structure dict format detected!")
structure = Structure.from_dict(d['structure'])
projections = {Spin(int(spin)): np.array(v)
for spin, v in d["projections"].items()}
return BandStructureSymmLine(
d['kpoints'], {Spin(int(k)): d['bands'][k]
for k in d['bands']},
Lattice(d['lattice_rec']['matrix']), d['efermi'],
labels_dict, structure=structure, projections=projections)
except Exception:
warnings.warn("Trying from_dict failed. Now we are trying the old "
"format. Please convert your BS dicts to the new "
"format. The old format will be retired in pymatgen "
"5.0.")
return BandStructureSymmLine.from_old_dict(d)
@classmethod
def from_old_dict(cls, d):
"""
Args:
d (dict): A dict with all data for a band structure symm line
object.
Returns:
A BandStructureSymmLine object
"""
# Strip the label to recover initial string (see trick used in as_dict to handle $ chars)
labels_dict = {k.strip(): v for k, v in d['labels_dict'].items()}
projections = {}
structure = None
if 'projections' in d and len(d['projections']) != 0:
structure = Structure.from_dict(d['structure'])
projections = {}
for spin in d['projections']:
dd = []
for i in range(len(d['projections'][spin])):
ddd = []
for j in range(len(d['projections'][spin][i])):
dddd = []
for k in range(len(d['projections'][spin][i][j])):
ddddd = []
orb = Orbital(k).name
for l in range(len(d['projections'][spin][i][j][
orb])):
ddddd.append(d['projections'][spin][i][j][
orb][l])
dddd.append(np.array(ddddd))
ddd.append(np.array(dddd))
dd.append(np.array(ddd))
projections[Spin(int(spin))] = np.array(dd)
return BandStructureSymmLine(
d['kpoints'], {Spin(int(k)): d['bands'][k]
for k in d['bands']},
Lattice(d['lattice_rec']['matrix']), d['efermi'],
labels_dict, structure=structure, projections=projections)
class LobsterBandStructureSymmLine(BandStructureSymmLine):
"""
Lobster subclass of BandStructure with customized functions.
"""
def apply_scissor(self, new_band_gap):
"""
Apply a scissor operator (shift of the CBM) to fit the given band gap.
        If the material is a metal, we look for the band crossing the Fermi
        level and shift this one up. This will not work all the time for
        metals!
        Args:
            new_band_gap: the band gap the scissored band structure needs
                to have.
Returns:
a BandStructureSymmLine object with the applied scissor shift
"""
if self.is_metal():
# moves then the highest index band crossing the fermi level
# find this band...
max_index = -1000
# spin_index = None
for i in range(self.nb_bands):
below = False
above = False
for j in range(len(self.kpoints)):
if self.bands[Spin.up][i][j] < self.efermi:
below = True
if self.bands[Spin.up][i][j] > self.efermi:
above = True
if above and below:
if i > max_index:
max_index = i
# spin_index = Spin.up
if self.is_spin_polarized:
below = False
above = False
for j in range(len(self.kpoints)):
if self.bands[Spin.down][i][j] < self.efermi:
below = True
if self.bands[Spin.down][i][j] > self.efermi:
above = True
if above and below:
if i > max_index:
max_index = i
# spin_index = Spin.down
old_dict = self.as_dict()
shift = new_band_gap
for spin in old_dict['bands']:
for k in range(len(old_dict['bands'][spin])):
for v in range(len(old_dict['bands'][spin][k])):
if k >= max_index:
old_dict['bands'][spin][k][v] = \
old_dict['bands'][spin][k][v] + shift
else:
shift = new_band_gap - self.get_band_gap()['energy']
old_dict = self.as_dict()
for spin in old_dict['bands']:
for k in range(len(old_dict['bands'][spin])):
for v in range(len(old_dict['bands'][spin][k])):
if old_dict['bands'][spin][k][v] >= \
old_dict['cbm']['energy']:
old_dict['bands'][spin][k][v] = \
old_dict['bands'][spin][k][v] + shift
old_dict['efermi'] = old_dict['efermi'] + shift
return LobsterBandStructureSymmLine.from_dict(old_dict)
def as_dict(self):
"""
        Json-serializable dict representation of LobsterBandStructureSymmLine.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"lattice_rec": self.lattice_rec.as_dict(), "efermi": self.efermi,
"kpoints": []}
        # kpoints are not stored as Kpoint object dicts but as frac coords
        # (this makes the dict smaller and avoids repeating the lattice)
for k in self.kpoints:
d["kpoints"].append(k.as_dict()["fcoords"])
d["branches"] = self.branches
d["bands"] = {str(int(spin)): self.bands[spin].tolist()
for spin in self.bands}
d["is_metal"] = self.is_metal()
vbm = self.get_vbm()
d["vbm"] = {"energy": vbm["energy"],
"kpoint_index": [int(x) for x in vbm["kpoint_index"]],
"band_index": {str(int(spin)): vbm["band_index"][spin]
for spin in vbm["band_index"]},
'projections': {str(spin): v for spin, v in vbm[
'projections'].items()}}
cbm = self.get_cbm()
d['cbm'] = {'energy': cbm['energy'],
'kpoint_index': [int(x) for x in cbm["kpoint_index"]],
'band_index': {str(int(spin)): cbm['band_index'][spin]
for spin in cbm['band_index']},
'projections': {str(spin): v for spin, v in cbm[
'projections'].items()}}
d['band_gap'] = self.get_band_gap()
d['labels_dict'] = {}
d['is_spin_polarized'] = self.is_spin_polarized
        # MongoDB does not accept keys starting with $. Add a blank space to
        # work around the problem
for c in self.labels_dict:
mongo_key = c if not c.startswith("$") else " " + c
d['labels_dict'][mongo_key] = self.labels_dict[c].as_dict()[
'fcoords']
if len(self.projections) != 0:
d['structure'] = self.structure.as_dict()
d['projections'] = {str(int(spin)): np.array(v).tolist()
for spin, v in self.projections.items()}
return d
@classmethod
def from_dict(cls, d):
"""
Args:
d (dict): A dict with all data for a band structure symm line
object.
Returns:
            A LobsterBandStructureSymmLine object
"""
try:
# Strip the label to recover initial string (see trick used in as_dict to handle $ chars)
labels_dict = {k.strip(): v for k, v in d['labels_dict'].items()}
projections = {}
structure = None
if d.get('projections'):
if isinstance(d["projections"]['1'][0][0], dict):
raise ValueError("Old band structure dict format detected!")
structure = Structure.from_dict(d['structure'])
projections = {Spin(int(spin)): np.array(v)
for spin, v in d["projections"].items()}
return LobsterBandStructureSymmLine(
d['kpoints'], {Spin(int(k)): d['bands'][k]
for k in d['bands']},
Lattice(d['lattice_rec']['matrix']), d['efermi'],
labels_dict, structure=structure, projections=projections)
except Exception:
warnings.warn("Trying from_dict failed. Now we are trying the old "
"format. Please convert your BS dicts to the new "
"format. The old format will be retired in pymatgen "
"5.0.")
return LobsterBandStructureSymmLine.from_old_dict(d)
@classmethod
def from_old_dict(cls, d):
"""
Args:
d (dict): A dict with all data for a band structure symm line
object.
Returns:
            A LobsterBandStructureSymmLine object
"""
# Strip the label to recover initial string (see trick used in as_dict to handle $ chars)
labels_dict = {k.strip(): v for k, v in d['labels_dict'].items()}
projections = {}
structure = None
if 'projections' in d and len(d['projections']) != 0:
structure = Structure.from_dict(d['structure'])
projections = {}
for spin in d['projections']:
dd = []
for i in range(len(d['projections'][spin])):
ddd = []
for j in range(len(d['projections'][spin][i])):
ddd.append(d['projections'][spin][i][j])
dd.append(np.array(ddd))
projections[Spin(int(spin))] = np.array(dd)
return LobsterBandStructureSymmLine(
d['kpoints'], {Spin(int(k)): d['bands'][k]
for k in d['bands']},
Lattice(d['lattice_rec']['matrix']), d['efermi'],
labels_dict, structure=structure, projections=projections)
def get_projection_on_elements(self):
"""
Method returning a dictionary of projections on elements.
It sums over all available orbitals for each element.
Returns:
a dictionary in the {Spin.up:[][{Element:values}],
Spin.down:[][{Element:values}]} format
            if there are no projections in the band structure,
            an empty dict is returned
"""
result = {}
for spin, v in self.projections.items():
result[spin] = [[collections.defaultdict(float)
for i in range(len(self.kpoints))]
for j in range(self.nb_bands)]
for i, j in itertools.product(range(self.nb_bands),
range(len(self.kpoints))):
for key, item in v[i][j].items():
for key2, item2 in item.items():
specie = str(Specie(re.split(r"[0-9]+", key)[0]))
result[spin][i][j][specie] += item2
return result
def get_projections_on_elements_and_orbitals(self, el_orb_spec):
"""
Method returning a dictionary of projections on elements and specific
orbitals
Args:
el_orb_spec: A dictionary of Elements and Orbitals for which we want
to have projections on. It is given as: {Element:[orbitals]},
e.g., {'Si':['3s','3p']} or {'Si':['3s','3p_x', '3p_y', '3p_z']} depending on input files
Returns:
A dictionary of projections on elements in the
{Spin.up:[][{Element:{orb:values}}],
Spin.down:[][{Element:{orb:values}}]} format
            if there are no projections in the band structure, an empty
            dict is returned.
"""
result = {}
el_orb_spec = {get_el_sp(el): orbs for el, orbs in el_orb_spec.items()}
for spin, v in self.projections.items():
result[spin] = [[{str(e): collections.defaultdict(float)
for e in el_orb_spec}
for i in range(len(self.kpoints))]
for j in range(self.nb_bands)]
for i, j in itertools.product(range(self.nb_bands),
range(len(self.kpoints))):
for key, item in v[i][j].items():
for key2, item2 in item.items():
specie = str(Specie(re.split(r"[0-9]+", key)[0]))
if get_el_sp(str(specie)) in el_orb_spec:
if key2 in el_orb_spec[get_el_sp(str(specie))]:
result[spin][i][j][specie][key2] += item2
return result
def get_reconstructed_band_structure(list_bs, efermi=None):
"""
This method takes a list of band structures and reconstructs
one band structure object from all of them.
    This is typically very useful when you split non-self-consistent
    band structure runs into several independent jobs and want to merge
    the results back together.
Args:
list_bs: A list of BandStructure or BandStructureSymmLine objects.
        efermi: The Fermi energy of the reconstructed band structure. If
            None, the average of the Fermi energies of the objects in
            list_bs is used.
Returns:
A BandStructure or BandStructureSymmLine object (depending on
the type of the list_bs objects)
"""
if efermi is None:
efermi = sum([b.efermi for b in list_bs]) / len(list_bs)
kpoints = []
labels_dict = {}
rec_lattice = list_bs[0].lattice_rec
nb_bands = min([list_bs[i].nb_bands for i in range(len(list_bs))])
kpoints = np.concatenate([[k.frac_coords for k in bs.kpoints]
for bs in list_bs])
dicts = [bs.labels_dict for bs in list_bs]
labels_dict = {k: v.frac_coords for d in dicts for k, v in d.items()}
eigenvals = {}
eigenvals[Spin.up] = np.concatenate([bs.bands[Spin.up][:nb_bands]
for bs in list_bs], axis=1)
if list_bs[0].is_spin_polarized:
eigenvals[Spin.down] = np.concatenate([bs.bands[Spin.down][:nb_bands]
for bs in list_bs], axis=1)
projections = {}
if len(list_bs[0].projections) != 0:
projs = [bs.projections[Spin.up][:nb_bands] for bs in list_bs]
projections[Spin.up] = np.concatenate(projs, axis=1)
if list_bs[0].is_spin_polarized:
projs = [bs.projections[Spin.down][:nb_bands] for bs in list_bs]
projections[Spin.down] = np.concatenate(projs, axis=1)
if isinstance(list_bs[0], BandStructureSymmLine):
return BandStructureSymmLine(kpoints, eigenvals, rec_lattice,
efermi, labels_dict,
structure=list_bs[0].structure,
projections=projections)
else:
return BandStructure(kpoints, eigenvals, rec_lattice, efermi,
labels_dict, structure=list_bs[0].structure,
projections=projections)
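# A minimal sketch, assuming `segments` is a list of BandStructureSymmLine
# objects produced by independent non-self-consistent runs (one job per
# symmetry line); the name is illustrative.
def _example_merge_segments(segments):
    # efermi=None averages the Fermi levels of the individual segments
    return get_reconstructed_band_structure(segments, efermi=None)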
|
tschaume/pymatgen
|
pymatgen/electronic_structure/bandstructure.py
|
Python
|
mit
| 52,834
|
[
"CRYSTAL",
"pymatgen"
] |
5d7a60652d48de0ea98da5c234d48d3ad817485a0b1281fe078a28a2ebe4d4c8
|
from __future__ import print_function
import os
import pytest
from os.path import join
import sys
import unittest
import subprocess
if sys.platform == "win32":
GULP = "gulp.cmd"
else:
GULP = "gulp"
@pytest.mark.js
class TestBokehJS(unittest.TestCase):
def test_bokehjs(self):
os.chdir('bokehjs')
proc = subprocess.Popen([join('node_modules', '.bin', GULP), "test"],
stdout=subprocess.PIPE)
out, errs = proc.communicate()
msg = out.decode('utf-8', errors='ignore')
print(msg)
if proc.returncode != 0:
assert False
if __name__ == "__main__":
unittest.main()
|
phobson/bokeh
|
tests/test_bokehjs.py
|
Python
|
bsd-3-clause
| 669
|
[
"GULP"
] |
24efdbe3655256c8a0ff721edf0471a73f2ce3863ea0a8985fd065dfa164eca7
|
#
# @file TestXMLNode.py
# @brief XMLNode unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Michael Hucka <mhucka@caltech.edu>
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/xml/test/TestXMLNode.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
def wrapString(s):
return s
pass
class TestXMLNode(unittest.TestCase):
def test_XMLNode_attribute_add_remove(self):
triple = libsbml.XMLTriple("test","","")
attr = libsbml.XMLAttributes()
node = libsbml.XMLNode(triple,attr)
xt1 = libsbml.XMLTriple("name1", "http://name1.org/", "p1")
xt2 = libsbml.XMLTriple("name2", "http://name2.org/", "p2")
xt3 = libsbml.XMLTriple("name3", "http://name3.org/", "p3")
xt1a = libsbml.XMLTriple("name1", "http://name1a.org/", "p1a")
xt2a = libsbml.XMLTriple("name2", "http://name2a.org/", "p2a")
node.addAttr( "name1", "val1", "http://name1.org/", "p1")
node.addAttr(xt2, "val2")
self.assert_( node.getAttributesLength() == 2 )
self.assert_( node.isAttributesEmpty() == False )
self.assert_( ( "name1" != node.getAttrName(0) ) == False )
self.assert_( ( "val1" != node.getAttrValue(0) ) == False )
self.assert_( ( "http://name1.org/" != node.getAttrURI(0) ) == False )
self.assert_( ( "p1" != node.getAttrPrefix(0) ) == False )
self.assert_( ( "name2" != node.getAttrName(1) ) == False )
self.assert_( ( "val2" != node.getAttrValue(1) ) == False )
self.assert_( ( "http://name2.org/" != node.getAttrURI(1) ) == False )
self.assert_( ( "p2" != node.getAttrPrefix(1) ) == False )
self.assert_( node.getAttrValue( "name1") == "" )
self.assert_( node.getAttrValue( "name2") == "" )
self.assert_( ( "val1" != node.getAttrValue( "name1", "http://name1.org/") ) == False )
self.assert_( ( "val2" != node.getAttrValue( "name2", "http://name2.org/") ) == False )
self.assert_( ( "val1" != node.getAttrValue(xt1) ) == False )
self.assert_( ( "val2" != node.getAttrValue(xt2) ) == False )
self.assert_( node.hasAttr(-1) == False )
self.assert_( node.hasAttr(2) == False )
self.assert_( node.hasAttr(0) == True )
self.assert_( node.hasAttr( "name1", "http://name1.org/") == True )
self.assert_( node.hasAttr( "name2", "http://name2.org/") == True )
self.assert_( node.hasAttr( "name3", "http://name3.org/") == False )
self.assert_( node.hasAttr(xt1) == True )
self.assert_( node.hasAttr(xt2) == True )
self.assert_( node.hasAttr(xt3) == False )
node.addAttr( "noprefix", "val3")
self.assert_( node.getAttributesLength() == 3 )
self.assert_( node.isAttributesEmpty() == False )
self.assert_( ( "noprefix" != node.getAttrName(2) ) == False )
self.assert_( ( "val3" != node.getAttrValue(2) ) == False )
self.assert_( node.getAttrURI(2) == "" )
self.assert_( node.getAttrPrefix(2) == "" )
self.assert_( ( "val3" != node.getAttrValue( "noprefix") ) == False )
self.assert_( ( "val3" != node.getAttrValue( "noprefix", "") ) == False )
self.assert_( node.hasAttr( "noprefix" ) == True )
self.assert_( node.hasAttr( "noprefix", "") == True )
node.addAttr(xt1, "mval1")
node.addAttr( "name2", "mval2", "http://name2.org/", "p2")
self.assert_( node.getAttributesLength() == 3 )
self.assert_( node.isAttributesEmpty() == False )
self.assert_( ( "name1" != node.getAttrName(0) ) == False )
self.assert_( ( "mval1" != node.getAttrValue(0) ) == False )
self.assert_( ( "http://name1.org/" != node.getAttrURI(0) ) == False )
self.assert_( ( "p1" != node.getAttrPrefix(0) ) == False )
self.assert_( ( "name2" != node.getAttrName(1) ) == False )
self.assert_( ( "mval2" != node.getAttrValue(1) ) == False )
self.assert_( ( "http://name2.org/" != node.getAttrURI(1) ) == False )
self.assert_( ( "p2" != node.getAttrPrefix(1) ) == False )
self.assert_( node.hasAttr(xt1) == True )
self.assert_( node.hasAttr( "name1", "http://name1.org/") == True )
node.addAttr( "noprefix", "mval3")
self.assert_( node.getAttributesLength() == 3 )
self.assert_( node.isAttributesEmpty() == False )
self.assert_( ( "noprefix" != node.getAttrName(2) ) == False )
self.assert_( ( "mval3" != node.getAttrValue(2) ) == False )
self.assert_( node.getAttrURI(2) == "" )
self.assert_( node.getAttrPrefix(2) == "" )
self.assert_( node.hasAttr( "noprefix") == True )
self.assert_( node.hasAttr( "noprefix", "") == True )
node.addAttr(xt1a, "val1a")
node.addAttr(xt2a, "val2a")
self.assert_( node.getAttributesLength() == 5 )
self.assert_( ( "name1" != node.getAttrName(3) ) == False )
self.assert_( ( "val1a" != node.getAttrValue(3) ) == False )
self.assert_( ( "http://name1a.org/" != node.getAttrURI(3) ) == False )
self.assert_( ( "p1a" != node.getAttrPrefix(3) ) == False )
self.assert_( ( "name2" != node.getAttrName(4) ) == False )
self.assert_( ( "val2a" != node.getAttrValue(4) ) == False )
self.assert_( ( "http://name2a.org/" != node.getAttrURI(4) ) == False )
self.assert_( ( "p2a" != node.getAttrPrefix(4) ) == False )
self.assert_( ( "val1a" != node.getAttrValue( "name1", "http://name1a.org/") ) == False )
self.assert_( ( "val2a" != node.getAttrValue( "name2", "http://name2a.org/") ) == False )
self.assert_( ( "val1a" != node.getAttrValue(xt1a) ) == False )
self.assert_( ( "val2a" != node.getAttrValue(xt2a) ) == False )
node.removeAttr(xt1a)
node.removeAttr(xt2a)
self.assert_( node.getAttributesLength() == 3 )
node.removeAttr( "name1", "http://name1.org/")
self.assert_( node.getAttributesLength() == 2 )
self.assert_( node.isAttributesEmpty() == False )
self.assert_( ( "name2" != node.getAttrName(0) ) == False )
self.assert_( ( "mval2" != node.getAttrValue(0) ) == False )
self.assert_( ( "http://name2.org/" != node.getAttrURI(0) ) == False )
self.assert_( ( "p2" != node.getAttrPrefix(0) ) == False )
self.assert_( ( "noprefix" != node.getAttrName(1) ) == False )
self.assert_( ( "mval3" != node.getAttrValue(1) ) == False )
self.assert_( node.getAttrURI(1) == "" )
self.assert_( node.getAttrPrefix(1) == "" )
self.assert_( node.hasAttr( "name1", "http://name1.org/") == False )
node.removeAttr(xt2)
self.assert_( node.getAttributesLength() == 1 )
self.assert_( node.isAttributesEmpty() == False )
self.assert_( ( "noprefix" != node.getAttrName(0) ) == False )
self.assert_( ( "mval3" != node.getAttrValue(0) ) == False )
self.assert_( node.getAttrURI(0) == "" )
self.assert_( node.getAttrPrefix(0) == "" )
self.assert_( node.hasAttr(xt2) == False )
self.assert_( node.hasAttr( "name2", "http://name2.org/") == False )
node.removeAttr( "noprefix")
self.assert_( node.getAttributesLength() == 0 )
self.assert_( node.isAttributesEmpty() == True )
self.assert_( node.hasAttr( "noprefix" ) == False )
self.assert_( node.hasAttr( "noprefix", "") == False )
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ xt1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ xt2 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ xt3 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ xt1a ]; _dummyList[:] = []; del _dummyList
_dummyList = [ xt2a ]; _dummyList[:] = []; del _dummyList
_dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLNode_attribute_set_clear(self):
triple = libsbml.XMLTriple("test","","")
attr = libsbml.XMLAttributes()
node = libsbml.XMLNode(triple,attr)
nattr = libsbml.XMLAttributes()
xt1 = libsbml.XMLTriple("name1", "http://name1.org/", "p1")
xt2 = libsbml.XMLTriple("name2", "http://name2.org/", "p2")
xt3 = libsbml.XMLTriple("name3", "http://name3.org/", "p3")
xt4 = libsbml.XMLTriple("name4", "http://name4.org/", "p4")
xt5 = libsbml.XMLTriple("name5", "http://name5.org/", "p5")
nattr.add(xt1, "val1")
nattr.add(xt2, "val2")
nattr.add(xt3, "val3")
nattr.add(xt4, "val4")
nattr.add(xt5, "val5")
node.setAttributes(nattr)
self.assert_( node.getAttributesLength() == 5 )
self.assert_( node.isAttributesEmpty() == False )
self.assert_( ( "name1" != node.getAttrName(0) ) == False )
self.assert_( ( "val1" != node.getAttrValue(0) ) == False )
self.assert_( ( "http://name1.org/" != node.getAttrURI(0) ) == False )
self.assert_( ( "p1" != node.getAttrPrefix(0) ) == False )
self.assert_( ( "name2" != node.getAttrName(1) ) == False )
self.assert_( ( "val2" != node.getAttrValue(1) ) == False )
self.assert_( ( "http://name2.org/" != node.getAttrURI(1) ) == False )
self.assert_( ( "p2" != node.getAttrPrefix(1) ) == False )
self.assert_( ( "name3" != node.getAttrName(2) ) == False )
self.assert_( ( "val3" != node.getAttrValue(2) ) == False )
self.assert_( ( "http://name3.org/" != node.getAttrURI(2) ) == False )
self.assert_( ( "p3" != node.getAttrPrefix(2) ) == False )
self.assert_( ( "name4" != node.getAttrName(3) ) == False )
self.assert_( ( "val4" != node.getAttrValue(3) ) == False )
self.assert_( ( "http://name4.org/" != node.getAttrURI(3) ) == False )
self.assert_( ( "p4" != node.getAttrPrefix(3) ) == False )
self.assert_( ( "name5" != node.getAttrName(4) ) == False )
self.assert_( ( "val5" != node.getAttrValue(4) ) == False )
self.assert_( ( "http://name5.org/" != node.getAttrURI(4) ) == False )
self.assert_( ( "p5" != node.getAttrPrefix(4) ) == False )
ntriple = libsbml.XMLTriple("test2","http://test2.org/","p2")
node.setTriple(ntriple)
self.assert_( ( "test2" != node.getName() ) == False )
self.assert_( ( "http://test2.org/" != node.getURI() ) == False )
self.assert_( ( "p2" != node.getPrefix() ) == False )
node.clearAttributes()
self.assert_( node.getAttributesLength() == 0 )
self.assert_( node.isAttributesEmpty() != False )
_dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ntriple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
_dummyList = [ nattr ]; _dummyList[:] = []; del _dummyList
_dummyList = [ xt1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ xt2 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ xt3 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ xt4 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ xt5 ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLNode_convert(self):
xmlstr = wrapString("<annotation>\n" + " <test xmlns=\"http://test.org/\" id=\"test\">test</test>\n" + "</annotation>")
node = libsbml.XMLNode.convertStringToXMLNode(xmlstr,None)
child = node.getChild(0)
gchild = child.getChild(0)
attr = child.getAttributes()
ns = child.getNamespaces()
self.assert_( ( "annotation" != node.getName() ) == False )
self.assert_( ( "test" != child.getName() ) == False )
self.assert_( ( "test" != gchild.getCharacters() ) == False )
self.assert_( ( "id" != attr.getName(0) ) == False )
self.assert_( ( "test" != attr.getValue(0) ) == False )
self.assert_( ( "http://test.org/" != ns.getURI(0) ) == False )
self.assert_( ns.getPrefix(0) == "" )
toxmlstring = node.toXMLString()
self.assert_( ( xmlstr != toxmlstring ) == False )
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLNode_convert_dummyroot(self):
xmlstr_nodummy1 = wrapString("<notes>\n" + " <p>test</p>\n" + "</notes>")
xmlstr_nodummy2 = wrapString("<html>\n" + " <p>test</p>\n" + "</html>")
xmlstr_nodummy3 = wrapString("<body>\n" + " <p>test</p>\n" + "</body>")
xmlstr_nodummy4 = "<p>test</p>";
xmlstr_nodummy5 = wrapString("<test1>\n" + " <test2>test</test2>\n" + "</test1>")
xmlstr_dummy1 = "<p>test1</p><p>test2</p>";
xmlstr_dummy2 = "<test1>test1</test1><test2>test2</test2>";
rootnode = libsbml.XMLNode.convertStringToXMLNode(xmlstr_nodummy1,None)
self.assert_( rootnode.getNumChildren() == 1 )
child = rootnode.getChild(0)
gchild = child.getChild(0)
self.assert_( ( "notes" != rootnode.getName() ) == False )
self.assert_( ( "p" != child.getName() ) == False )
self.assert_( ( "test" != gchild.getCharacters() ) == False )
toxmlstring = rootnode.toXMLString()
self.assert_( ( xmlstr_nodummy1 != toxmlstring ) == False )
_dummyList = [ rootnode ]; _dummyList[:] = []; del _dummyList
rootnode = libsbml.XMLNode.convertStringToXMLNode(xmlstr_nodummy2,None)
self.assert_( rootnode.getNumChildren() == 1 )
child = rootnode.getChild(0)
gchild = child.getChild(0)
self.assert_( ( "html" != rootnode.getName() ) == False )
self.assert_( ( "p" != child.getName() ) == False )
self.assert_( ( "test" != gchild.getCharacters() ) == False )
toxmlstring = rootnode.toXMLString()
self.assert_( ( xmlstr_nodummy2 != toxmlstring ) == False )
_dummyList = [ rootnode ]; _dummyList[:] = []; del _dummyList
rootnode = libsbml.XMLNode.convertStringToXMLNode(xmlstr_nodummy3,None)
self.assert_( rootnode.getNumChildren() == 1 )
child = rootnode.getChild(0)
gchild = child.getChild(0)
self.assert_( ( "body" != rootnode.getName() ) == False )
self.assert_( ( "p" != child.getName() ) == False )
self.assert_( ( "test" != gchild.getCharacters() ) == False )
toxmlstring = rootnode.toXMLString()
self.assert_( ( xmlstr_nodummy3 != toxmlstring ) == False )
_dummyList = [ rootnode ]; _dummyList[:] = []; del _dummyList
rootnode = libsbml.XMLNode.convertStringToXMLNode(xmlstr_nodummy4,None)
self.assert_( rootnode.getNumChildren() == 1 )
child = rootnode.getChild(0)
self.assert_( ( "p" != rootnode.getName() ) == False )
self.assert_( ( "test" != child.getCharacters() ) == False )
toxmlstring = rootnode.toXMLString()
self.assert_( ( xmlstr_nodummy4 != toxmlstring ) == False )
_dummyList = [ rootnode ]; _dummyList[:] = []; del _dummyList
rootnode = libsbml.XMLNode.convertStringToXMLNode(xmlstr_nodummy5,None)
self.assert_( rootnode.getNumChildren() == 1 )
child = rootnode.getChild(0)
gchild = child.getChild(0)
self.assert_( ( "test1" != rootnode.getName() ) == False )
self.assert_( ( "test2" != child.getName() ) == False )
self.assert_( ( "test" != gchild.getCharacters() ) == False )
toxmlstring = rootnode.toXMLString()
self.assert_( ( xmlstr_nodummy5 != toxmlstring ) == False )
_dummyList = [ rootnode ]; _dummyList[:] = []; del _dummyList
rootnode = libsbml.XMLNode.convertStringToXMLNode(xmlstr_dummy1,None)
self.assert_( rootnode.isEOF() == True )
self.assert_( rootnode.getNumChildren() == 2 )
child = rootnode.getChild(0)
gchild = child.getChild(0)
self.assert_( ( "p" != child.getName() ) == False )
self.assert_( ( "test1" != gchild.getCharacters() ) == False )
child = rootnode.getChild(1)
gchild = child.getChild(0)
self.assert_( ( "p" != child.getName() ) == False )
self.assert_( ( "test2" != gchild.getCharacters() ) == False )
toxmlstring = rootnode.toXMLString()
self.assert_( ( xmlstr_dummy1 != toxmlstring ) == False )
_dummyList = [ rootnode ]; _dummyList[:] = []; del _dummyList
rootnode = libsbml.XMLNode.convertStringToXMLNode(xmlstr_dummy2,None)
self.assert_( rootnode.isEOF() == True )
self.assert_( rootnode.getNumChildren() == 2 )
child = rootnode.getChild(0)
gchild = child.getChild(0)
self.assert_( ( "test1" != child.getName() ) == False )
self.assert_( ( "test1" != gchild.getCharacters() ) == False )
child = rootnode.getChild(1)
gchild = child.getChild(0)
self.assert_( ( "test2" != child.getName() ) == False )
self.assert_( ( "test2" != gchild.getCharacters() ) == False )
toxmlstring = rootnode.toXMLString()
self.assert_( ( xmlstr_dummy2 != toxmlstring ) == False )
_dummyList = [ rootnode ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLNode_create(self):
node = libsbml.XMLNode()
self.assert_( node != None )
self.assert_( node.getNumChildren() == 0 )
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
node = libsbml.XMLNode()
self.assert_( node != None )
node2 = libsbml.XMLNode()
self.assert_( node2 != None )
node.addChild(node2)
self.assert_( node.getNumChildren() == 1 )
node3 = libsbml.XMLNode()
self.assert_( node3 != None )
node.addChild(node3)
self.assert_( node.getNumChildren() == 2 )
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ node2 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ node3 ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLNode_createElement(self):
name = "test";
uri = "http://test.org/";
prefix = "p";
text = "text node";
triple = libsbml.XMLTriple(name,uri,prefix)
ns = libsbml.XMLNamespaces()
attr = libsbml.XMLAttributes()
ns.add(uri,prefix)
attr.add("id", "value",uri,prefix)
snode = libsbml.XMLNode(triple,attr,ns)
self.assert_( snode != None )
self.assert_( snode.getNumChildren() == 0 )
self.assert_( ( name != snode.getName() ) == False )
self.assert_( ( prefix != snode.getPrefix() ) == False )
self.assert_( ( uri != snode.getURI() ) == False )
self.assert_( snode.isElement() == True )
self.assert_( snode.isStart() == True )
self.assert_( snode.isEnd() == False )
self.assert_( snode.isText() == False )
snode.setEnd()
self.assert_( snode.isEnd() == True )
snode.unsetEnd()
self.assert_( snode.isEnd() == False )
cattr = snode.getAttributes()
self.assert_( cattr != None )
self.assert_( ( "id" != cattr.getName(0) ) == False )
self.assert_( ( "value" != cattr.getValue(0) ) == False )
self.assert_( ( prefix != cattr.getPrefix(0) ) == False )
self.assert_( ( uri != cattr.getURI(0) ) == False )
_dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ snode ]; _dummyList[:] = []; del _dummyList
attr = libsbml.XMLAttributes()
attr.add("id", "value")
triple = libsbml.XMLTriple(name, "", "")
snode = libsbml.XMLNode(triple,attr)
self.assert_( snode != None )
self.assert_( snode.getNumChildren() == 0 )
self.assert_( ( "test" != snode.getName() ) == False )
self.assert_( snode.getPrefix() == "" )
self.assert_( snode.getURI() == "" )
self.assert_( snode.isElement() == True )
self.assert_( snode.isStart() == True )
self.assert_( snode.isEnd() == False )
self.assert_( snode.isText() == False )
cattr = snode.getAttributes()
self.assert_( cattr != None )
self.assert_( ( "id" != cattr.getName(0) ) == False )
self.assert_( ( "value" != cattr.getValue(0) ) == False )
self.assert_( cattr.getPrefix(0) == "" )
self.assert_( cattr.getURI(0) == "" )
enode = libsbml.XMLNode(triple)
self.assert_( enode != None )
self.assert_( enode.getNumChildren() == 0 )
self.assert_( ( "test" != enode.getName() ) == False )
self.assert_( enode.getPrefix() == "" )
self.assert_( enode.getURI() == "" )
self.assert_( enode.isElement() == True )
self.assert_( enode.isStart() == False )
self.assert_( enode.isEnd() == True )
self.assert_( enode.isText() == False )
tnode = libsbml.XMLNode(text)
self.assert_( tnode != None )
self.assert_( ( text != tnode.getCharacters() ) == False )
self.assert_( tnode.getNumChildren() == 0 )
self.assert_( tnode.getName() == "" )
self.assert_( tnode.getPrefix() == "" )
self.assert_( tnode.getURI() == "" )
self.assert_( tnode.isElement() == False )
self.assert_( tnode.isStart() == False )
self.assert_( tnode.isEnd() == False )
self.assert_( tnode.isText() == True )
_dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
_dummyList = [ snode ]; _dummyList[:] = []; del _dummyList
_dummyList = [ enode ]; _dummyList[:] = []; del _dummyList
_dummyList = [ tnode ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLNode_createFromToken(self):
triple = libsbml.XMLTriple("attr", "uri", "prefix")
token = libsbml.XMLToken(triple)
node = libsbml.XMLNode(token)
self.assert_( node != None )
self.assert_( node.getNumChildren() == 0 )
self.assert_( ( "attr" != node.getName() ) == False )
self.assert_( ( "prefix" != node.getPrefix() ) == False )
self.assert_( ( "uri" != node.getURI() ) == False )
self.assert_( node.getChild(1) != None )
_dummyList = [ token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLNode_getters(self):
NS = libsbml.XMLNamespaces()
NS.add( "http://test1.org/", "test1")
token = libsbml.XMLToken("This is a test")
node = libsbml.XMLNode(token)
self.assert_( node != None )
self.assert_( node.getNumChildren() == 0 )
self.assert_( ( "This is a test" != node.getCharacters() ) == False )
self.assert_( node.getChild(1) != None )
attr = libsbml.XMLAttributes()
self.assert_( attr != None )
attr.add( "attr2", "value")
triple = libsbml.XMLTriple("attr", "uri", "prefix")
token = libsbml.XMLToken(triple,attr)
self.assert_( token != None )
node = libsbml.XMLNode(token)
self.assert_( ( "attr" != node.getName() ) == False )
self.assert_( ( "uri" != node.getURI() ) == False )
self.assert_( ( "prefix" != node.getPrefix() ) == False )
returnattr = node.getAttributes()
self.assert_( ( "attr2" != returnattr.getName(0) ) == False )
self.assert_( ( "value" != returnattr.getValue(0) ) == False )
token = libsbml.XMLToken(triple,attr,NS)
node = libsbml.XMLNode(token)
returnNS = node.getNamespaces()
self.assert_( returnNS.getLength() == 1 )
self.assert_( returnNS.isEmpty() == False )
_dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLNode_insert(self):
attr = libsbml.XMLAttributes()
trp_p = libsbml.XMLTriple("parent","","")
trp_c1 = libsbml.XMLTriple("child1","","")
trp_c2 = libsbml.XMLTriple("child2","","")
trp_c3 = libsbml.XMLTriple("child3","","")
trp_c4 = libsbml.XMLTriple("child4","","")
trp_c5 = libsbml.XMLTriple("child5","","")
p = libsbml.XMLNode(trp_p,attr)
c1 = libsbml.XMLNode(trp_c1,attr)
c2 = libsbml.XMLNode(trp_c2,attr)
c3 = libsbml.XMLNode(trp_c3,attr)
c4 = libsbml.XMLNode(trp_c4,attr)
c5 = libsbml.XMLNode(trp_c5,attr)
p.addChild(c2)
p.addChild(c4)
p.insertChild(0,c1)
p.insertChild(2,c3)
p.insertChild(4,c5)
self.assert_( p.getNumChildren() == 5 )
self.assert_( ( "child1" != p.getChild(0).getName() ) == False )
self.assert_( ( "child2" != p.getChild(1).getName() ) == False )
self.assert_( ( "child3" != p.getChild(2).getName() ) == False )
self.assert_( ( "child4" != p.getChild(3).getName() ) == False )
self.assert_( ( "child5" != p.getChild(4).getName() ) == False )
p.removeChildren()
p.insertChild(0,c1)
p.insertChild(0,c2)
p.insertChild(0,c3)
p.insertChild(0,c4)
p.insertChild(0,c5)
self.assert_( p.getNumChildren() == 5 )
self.assert_( ( "child5" != p.getChild(0).getName() ) == False )
self.assert_( ( "child4" != p.getChild(1).getName() ) == False )
self.assert_( ( "child3" != p.getChild(2).getName() ) == False )
self.assert_( ( "child2" != p.getChild(3).getName() ) == False )
self.assert_( ( "child1" != p.getChild(4).getName() ) == False )
p.removeChildren()
p.insertChild(1,c1)
p.insertChild(2,c2)
p.insertChild(3,c3)
p.insertChild(4,c4)
p.insertChild(5,c5)
self.assert_( p.getNumChildren() == 5 )
self.assert_( ( "child1" != p.getChild(0).getName() ) == False )
self.assert_( ( "child2" != p.getChild(1).getName() ) == False )
self.assert_( ( "child3" != p.getChild(2).getName() ) == False )
self.assert_( ( "child4" != p.getChild(3).getName() ) == False )
self.assert_( ( "child5" != p.getChild(4).getName() ) == False )
p.removeChildren()
tmp = p.insertChild(0,c1)
self.assert_( ( "child1" != tmp.getName() ) == False )
tmp = p.insertChild(0,c2)
self.assert_( ( "child2" != tmp.getName() ) == False )
tmp = p.insertChild(0,c3)
self.assert_( ( "child3" != tmp.getName() ) == False )
tmp = p.insertChild(0,c4)
self.assert_( ( "child4" != tmp.getName() ) == False )
tmp = p.insertChild(0,c5)
self.assert_( ( "child5" != tmp.getName() ) == False )
p.removeChildren()
tmp = p.insertChild(1,c1)
self.assert_( ( "child1" != tmp.getName() ) == False )
tmp = p.insertChild(2,c2)
self.assert_( ( "child2" != tmp.getName() ) == False )
tmp = p.insertChild(3,c3)
self.assert_( ( "child3" != tmp.getName() ) == False )
tmp = p.insertChild(4,c4)
self.assert_( ( "child4" != tmp.getName() ) == False )
tmp = p.insertChild(5,c5)
self.assert_( ( "child5" != tmp.getName() ) == False )
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
_dummyList = [ c1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ c2 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ c3 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ c4 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ c5 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
_dummyList = [ trp_p ]; _dummyList[:] = []; del _dummyList
_dummyList = [ trp_c1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ trp_c2 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ trp_c3 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ trp_c4 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ trp_c5 ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLNode_namespace_add(self):
triple = libsbml.XMLTriple("test","","")
attr = libsbml.XMLAttributes()
node = libsbml.XMLNode(triple,attr)
self.assert_( node.getNamespacesLength() == 0 )
self.assert_( node.isNamespacesEmpty() == True )
node.addNamespace( "http://test1.org/", "test1")
self.assert_( node.getNamespacesLength() == 1 )
self.assert_( node.isNamespacesEmpty() == False )
node.addNamespace( "http://test2.org/", "test2")
self.assert_( node.getNamespacesLength() == 2 )
self.assert_( node.isNamespacesEmpty() == False )
node.addNamespace( "http://test1.org/", "test1a")
self.assert_( node.getNamespacesLength() == 3 )
self.assert_( node.isNamespacesEmpty() == False )
node.addNamespace( "http://test1.org/", "test1a")
self.assert_( node.getNamespacesLength() == 3 )
self.assert_( node.isNamespacesEmpty() == False )
self.assert_( (node.getNamespaceIndex( "http://test1.org/") == -1) == False )
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLNode_namespace_get(self):
triple = libsbml.XMLTriple("test","","")
attr = libsbml.XMLAttributes()
node = libsbml.XMLNode(triple,attr)
node.addNamespace( "http://test1.org/", "test1")
node.addNamespace( "http://test2.org/", "test2")
node.addNamespace( "http://test3.org/", "test3")
node.addNamespace( "http://test4.org/", "test4")
node.addNamespace( "http://test5.org/", "test5")
node.addNamespace( "http://test6.org/", "test6")
node.addNamespace( "http://test7.org/", "test7")
node.addNamespace( "http://test8.org/", "test8")
node.addNamespace( "http://test9.org/", "test9")
self.assert_( node.getNamespacesLength() == 9 )
self.assert_( node.getNamespaceIndex( "http://test1.org/") == 0 )
self.assert_( ( "test2" != node.getNamespacePrefix(1) ) == False )
self.assert_( ( "test1" != node.getNamespacePrefix( "http://test1.org/") ) == False )
self.assert_( ( "http://test2.org/" != node.getNamespaceURI(1) ) == False )
self.assert_( ( "http://test2.org/" != node.getNamespaceURI( "test2") ) == False )
self.assert_( node.getNamespaceIndex( "http://test1.org/") == 0 )
self.assert_( node.getNamespaceIndex( "http://test2.org/") == 1 )
self.assert_( node.getNamespaceIndex( "http://test5.org/") == 4 )
self.assert_( node.getNamespaceIndex( "http://test9.org/") == 8 )
self.assert_( node.getNamespaceIndex( "http://testX.org/") == -1 )
self.assert_( node.hasNamespaceURI( "http://test1.org/") != False )
self.assert_( node.hasNamespaceURI( "http://test2.org/") != False )
self.assert_( node.hasNamespaceURI( "http://test5.org/") != False )
self.assert_( node.hasNamespaceURI( "http://test9.org/") != False )
self.assert_( node.hasNamespaceURI( "http://testX.org/") == False )
self.assert_( node.getNamespaceIndexByPrefix( "test1") == 0 )
self.assert_( node.getNamespaceIndexByPrefix( "test5") == 4 )
self.assert_( node.getNamespaceIndexByPrefix( "test9") == 8 )
self.assert_( node.getNamespaceIndexByPrefix( "testX") == -1 )
self.assert_( node.hasNamespacePrefix( "test1") != False )
self.assert_( node.hasNamespacePrefix( "test5") != False )
self.assert_( node.hasNamespacePrefix( "test9") != False )
self.assert_( node.hasNamespacePrefix( "testX") == False )
self.assert_( node.hasNamespaceNS( "http://test1.org/", "test1") != False )
self.assert_( node.hasNamespaceNS( "http://test5.org/", "test5") != False )
self.assert_( node.hasNamespaceNS( "http://test9.org/", "test9") != False )
self.assert_( node.hasNamespaceNS( "http://testX.org/", "testX") == False )
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLNode_namespace_remove(self):
triple = libsbml.XMLTriple("test","","")
attr = libsbml.XMLAttributes()
node = libsbml.XMLNode(triple,attr)
node.addNamespace( "http://test1.org/", "test1")
node.addNamespace( "http://test2.org/", "test2")
node.addNamespace( "http://test3.org/", "test3")
node.addNamespace( "http://test4.org/", "test4")
node.addNamespace( "http://test5.org/", "test5")
self.assert_( node.getNamespacesLength() == 5 )
node.removeNamespace(4)
self.assert_( node.getNamespacesLength() == 4 )
node.removeNamespace(3)
self.assert_( node.getNamespacesLength() == 3 )
node.removeNamespace(2)
self.assert_( node.getNamespacesLength() == 2 )
node.removeNamespace(1)
self.assert_( node.getNamespacesLength() == 1 )
node.removeNamespace(0)
self.assert_( node.getNamespacesLength() == 0 )
node.addNamespace( "http://test1.org/", "test1")
node.addNamespace( "http://test2.org/", "test2")
node.addNamespace( "http://test3.org/", "test3")
node.addNamespace( "http://test4.org/", "test4")
node.addNamespace( "http://test5.org/", "test5")
self.assert_( node.getNamespacesLength() == 5 )
node.removeNamespace(0)
self.assert_( node.getNamespacesLength() == 4 )
node.removeNamespace(0)
self.assert_( node.getNamespacesLength() == 3 )
node.removeNamespace(0)
self.assert_( node.getNamespacesLength() == 2 )
node.removeNamespace(0)
self.assert_( node.getNamespacesLength() == 1 )
node.removeNamespace(0)
self.assert_( node.getNamespacesLength() == 0 )
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLNode_namespace_remove_by_prefix(self):
triple = libsbml.XMLTriple("test","","")
attr = libsbml.XMLAttributes()
node = libsbml.XMLNode(triple,attr)
node.addNamespace( "http://test1.org/", "test1")
node.addNamespace( "http://test2.org/", "test2")
node.addNamespace( "http://test3.org/", "test3")
node.addNamespace( "http://test4.org/", "test4")
node.addNamespace( "http://test5.org/", "test5")
self.assert_( node.getNamespacesLength() == 5 )
node.removeNamespace( "test1")
self.assert_( node.getNamespacesLength() == 4 )
node.removeNamespace( "test2")
self.assert_( node.getNamespacesLength() == 3 )
node.removeNamespace( "test3")
self.assert_( node.getNamespacesLength() == 2 )
node.removeNamespace( "test4")
self.assert_( node.getNamespacesLength() == 1 )
node.removeNamespace( "test5")
self.assert_( node.getNamespacesLength() == 0 )
node.addNamespace( "http://test1.org/", "test1")
node.addNamespace( "http://test2.org/", "test2")
node.addNamespace( "http://test3.org/", "test3")
node.addNamespace( "http://test4.org/", "test4")
node.addNamespace( "http://test5.org/", "test5")
self.assert_( node.getNamespacesLength() == 5 )
node.removeNamespace( "test5")
self.assert_( node.getNamespacesLength() == 4 )
node.removeNamespace( "test4")
self.assert_( node.getNamespacesLength() == 3 )
node.removeNamespace( "test3")
self.assert_( node.getNamespacesLength() == 2 )
node.removeNamespace( "test2")
self.assert_( node.getNamespacesLength() == 1 )
node.removeNamespace( "test1")
self.assert_( node.getNamespacesLength() == 0 )
node.addNamespace( "http://test1.org/", "test1")
node.addNamespace( "http://test2.org/", "test2")
node.addNamespace( "http://test3.org/", "test3")
node.addNamespace( "http://test4.org/", "test4")
node.addNamespace( "http://test5.org/", "test5")
self.assert_( node.getNamespacesLength() == 5 )
node.removeNamespace( "test3")
self.assert_( node.getNamespacesLength() == 4 )
node.removeNamespace( "test1")
self.assert_( node.getNamespacesLength() == 3 )
node.removeNamespace( "test4")
self.assert_( node.getNamespacesLength() == 2 )
node.removeNamespace( "test5")
self.assert_( node.getNamespacesLength() == 1 )
node.removeNamespace( "test2")
self.assert_( node.getNamespacesLength() == 0 )
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLNode_namespace_set_clear(self):
triple = libsbml.XMLTriple("test","","")
attr = libsbml.XMLAttributes()
node = libsbml.XMLNode(triple,attr)
ns = libsbml.XMLNamespaces()
self.assert_( node.getNamespacesLength() == 0 )
self.assert_( node.isNamespacesEmpty() == True )
ns.add( "http://test1.org/", "test1")
ns.add( "http://test2.org/", "test2")
ns.add( "http://test3.org/", "test3")
ns.add( "http://test4.org/", "test4")
ns.add( "http://test5.org/", "test5")
node.setNamespaces(ns)
self.assert_( node.getNamespacesLength() == 5 )
self.assert_( node.isNamespacesEmpty() == False )
self.assert_( ( "test1" != node.getNamespacePrefix(0) ) == False )
self.assert_( ( "test2" != node.getNamespacePrefix(1) ) == False )
self.assert_( ( "test3" != node.getNamespacePrefix(2) ) == False )
self.assert_( ( "test4" != node.getNamespacePrefix(3) ) == False )
self.assert_( ( "test5" != node.getNamespacePrefix(4) ) == False )
self.assert_( ( "http://test1.org/" != node.getNamespaceURI(0) ) == False )
self.assert_( ( "http://test2.org/" != node.getNamespaceURI(1) ) == False )
self.assert_( ( "http://test3.org/" != node.getNamespaceURI(2) ) == False )
self.assert_( ( "http://test4.org/" != node.getNamespaceURI(3) ) == False )
self.assert_( ( "http://test5.org/" != node.getNamespaceURI(4) ) == False )
node.clearNamespaces()
self.assert_( node.getNamespacesLength() == 0 )
self.assert_( node.isAttributesEmpty() != False )
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLNode_remove(self):
attr = libsbml.XMLAttributes()
trp_p = libsbml.XMLTriple("parent","","")
trp_c1 = libsbml.XMLTriple("child1","","")
trp_c2 = libsbml.XMLTriple("child2","","")
trp_c3 = libsbml.XMLTriple("child3","","")
trp_c4 = libsbml.XMLTriple("child4","","")
trp_c5 = libsbml.XMLTriple("child5","","")
p = libsbml.XMLNode(trp_p,attr)
c1 = libsbml.XMLNode(trp_c1,attr)
c2 = libsbml.XMLNode(trp_c2,attr)
c3 = libsbml.XMLNode(trp_c3,attr)
c4 = libsbml.XMLNode(trp_c4,attr)
c5 = libsbml.XMLNode(trp_c5,attr)
p.addChild(c1)
p.addChild(c2)
p.addChild(c3)
p.addChild(c4)
p.addChild(c5)
r = p.removeChild(5)
self.assert_( r == None )
r = p.removeChild(1)
self.assert_( p.getNumChildren() == 4 )
self.assert_( ( "child2" != r.getName() ) == False )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
r = p.removeChild(3)
self.assert_( p.getNumChildren() == 3 )
self.assert_( ( "child5" != r.getName() ) == False )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
r = p.removeChild(0)
self.assert_( p.getNumChildren() == 2 )
self.assert_( ( "child1" != r.getName() ) == False )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
r = p.removeChild(1)
self.assert_( p.getNumChildren() == 1 )
self.assert_( ( "child4" != r.getName() ) == False )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
r = p.removeChild(0)
self.assert_( p.getNumChildren() == 0 )
self.assert_( ( "child3" != r.getName() ) == False )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
p.addChild(c1)
p.addChild(c2)
p.addChild(c3)
p.addChild(c4)
p.addChild(c5)
r = p.removeChild(4)
self.assert_( p.getNumChildren() == 4 )
self.assert_( ( "child5" != r.getName() ) == False )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
r = p.removeChild(3)
self.assert_( p.getNumChildren() == 3 )
self.assert_( ( "child4" != r.getName() ) == False )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
r = p.removeChild(2)
self.assert_( p.getNumChildren() == 2 )
self.assert_( ( "child3" != r.getName() ) == False )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
r = p.removeChild(1)
self.assert_( p.getNumChildren() == 1 )
self.assert_( ( "child2" != r.getName() ) == False )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
r = p.removeChild(0)
self.assert_( p.getNumChildren() == 0 )
self.assert_( ( "child1" != r.getName() ) == False )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
p.addChild(c1)
p.addChild(c2)
p.addChild(c3)
p.addChild(c4)
p.addChild(c5)
r = p.removeChild(0)
self.assert_( p.getNumChildren() == 4 )
self.assert_( ( "child1" != r.getName() ) == False )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
r = p.removeChild(0)
self.assert_( p.getNumChildren() == 3 )
self.assert_( ( "child2" != r.getName() ) == False )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
r = p.removeChild(0)
self.assert_( p.getNumChildren() == 2 )
self.assert_( ( "child3" != r.getName() ) == False )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
r = p.removeChild(0)
self.assert_( p.getNumChildren() == 1 )
self.assert_( ( "child4" != r.getName() ) == False )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
r = p.removeChild(0)
self.assert_( p.getNumChildren() == 0 )
self.assert_( ( "child5" != r.getName() ) == False )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
p.addChild(c1)
p.addChild(c2)
p.addChild(c3)
p.addChild(c4)
p.addChild(c5)
r = p.removeChild(0)
self.assert_( ( "child1" != r.getName() ) == False )
p.insertChild(0,r)
self.assert_( p.getNumChildren() == 5 )
self.assert_( ( "child1" != p.getChild(0).getName() ) == False )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
r = p.removeChild(1)
self.assert_( ( "child2" != r.getName() ) == False )
p.insertChild(1,r)
self.assert_( p.getNumChildren() == 5 )
self.assert_( ( "child2" != p.getChild(1).getName() ) == False )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
r = p.removeChild(2)
self.assert_( ( "child3" != r.getName() ) == False )
p.insertChild(2,r)
self.assert_( p.getNumChildren() == 5 )
self.assert_( ( "child3" != p.getChild(2).getName() ) == False )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
r = p.removeChild(3)
self.assert_( ( "child4" != r.getName() ) == False )
p.insertChild(3,r)
self.assert_( p.getNumChildren() == 5 )
self.assert_( ( "child4" != p.getChild(3).getName() ) == False )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
r = p.removeChild(4)
self.assert_( ( "child5" != r.getName() ) == False )
p.insertChild(4,r)
self.assert_( p.getNumChildren() == 5 )
self.assert_( ( "child5" != p.getChild(4).getName() ) == False )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
_dummyList = [ c1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ c2 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ c3 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ c4 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ c5 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
_dummyList = [ trp_p ]; _dummyList[:] = []; del _dummyList
_dummyList = [ trp_c1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ trp_c2 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ trp_c3 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ trp_c4 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ trp_c5 ]; _dummyList[:] = []; del _dummyList
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestXMLNode))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
|
TheCoSMoCompany/biopredyn
|
Prototype/src/libsbml-5.10.0/src/bindings/python/test/xml/TestXMLNode.py
|
Python
|
bsd-3-clause
| 44,331
|
[
"VisIt"
] |
d97be3e2bf2005fb33985443bc0968020788ebe0c74bbbdaa2ce101b8ff4ae3b
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# beads.py
"""
Calculate bead densities.
Copyright (c) 2016, David Hoffman
"""
import click
from scipy.constants import Avogadro, pi
def good_N(ci, M, Nf=5e10):
"""
    This function converts a dye solution into the dilution factor giving a desired number of particles per mL.
Parameters
----------
ci : float
initial concentration in mg/mL
M : float
molar mass (g/mol)
Nf : float
the number of particles per mL in the final dilution
Returns
-------
Vf : float
        The dilution factor needed to reach Nf particles per mL
Example
-------
    >>> print('{:.3e}'.format(good_N(631/8*1e5)))
7.888e+06
"""
return ci / (M * 1e3) / Nf * Avogadro
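# Hedged worked example (editor addition): the units chain above is
# mg/mL -> mol/mL (divide by M*1e3) -> particles/mL (multiply by Avogadro)
# -> dilution factor (divide by Nf). With illustrative inputs ci = 631 mg/mL
# and M = 8e5 g/mol, good_N(631, 8e5) evaluates to ~9.500e+06, i.e. a
# 1:9.5e6 dilution for the default Nf of 5e10 particles per mL.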
def num_particles(concentration, diameter, rho=1.05):
"""
Equation from https://tools.thermofisher.com/content/sfs/manuals/mp05000.pdf
Parameters
----------
concentration : float
Concentration of solution in g/mL
diameter : float
diameter of the beads in microns
rho : float
density of beads in g/mL (default: 1.05 for polystyrene)
Returns
-------
N : float
number of microspheres/mL
Example
-------
>>> print('{:.3e}'.format(num_particles(0.02, 10)))
    3.638e+07
"""
return 6 * concentration * 1e12 / (rho * pi * diameter ** 3)
def calc_molarity(num_part):
"""Convert number of particles per mL to pico molarity"""
return num_part * 1000 / Avogadro / 1e-12
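# Hedged usage sketch (editor addition, not part of the original module): the
# helper below simply chains num_particles() and calc_molarity(); the
# 0.02 g/mL / 10 um inputs reuse the illustrative values from the docstrings.
def _example_bead_molarity():
    """Return the pico-molarity of a 0.02 g/mL suspension of 10 um beads.

    num_particles(0.02, 10) is ~3.638e+07 particles/mL, which corresponds
    to roughly 0.060 pM.
    """
    n = num_particles(0.02, 10)  # particles per mL
    return calc_molarity(n)      # pico-molarity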
@click.group("name")
def main():
"""Main
Concentration of solution in g/mL
"""
pass
def sub_molarity(diameter, concentration, rho):
"""Calculate the molarity of solution"""
num_part = num_particles(concentration, diameter, rho=rho)
mol = calc_molarity(num_part)
click.echo(
(
"For a bead diameter of {} µm and a concentration of {} "
"g/mL you have a {:.3f} pM solution"
).format(diameter, concentration, mol)
)
return mol
@main.command()
@click.argument("diameter", type=float)
@click.argument(
"concentration", type=float,
)
@click.option(
"--rho",
default=1.05,
type=float,
help="density of beads in g/mL (default: 1.05 for polystyrene)",
)
def molarity(diameter, concentration, rho):
sub_molarity(diameter, concentration, rho)
@main.command()
@click.argument("diameter", type=float)
@click.argument(
"concentration", type=float,
)
@click.argument("desired", type=float)
@click.option(
"--rho",
default=1.05,
type=float,
help="density of beads in g/mL (default: 1.05 for polystyrene)",
)
def dilution(diameter, concentration, desired, rho):
"""Calculate the molarity of solution everything in pM"""
mol = sub_molarity(diameter, concentration, rho=rho)
click.echo(
"To get a concentration of {} pM you'd need to do a 1:{:.0f} dilution".format(
desired, mol / desired
)
)
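# Hedged CLI sketch (editor addition): assuming this file is invoked directly
# as beads.py, the click subcommands above would be called like
#     python beads.py molarity 10 0.02
#     python beads.py dilution 10 0.02 0.001 --rho 1.05
# where the positional arguments are diameter (um), concentration (g/mL) and,
# for dilution, the desired final concentration in pM.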
molarity.__doc__ = num_particles.__doc__
if __name__ == "__main__":
main()
|
david-hoffman/scripts
|
beads.py
|
Python
|
apache-2.0
| 3,050
|
[
"Avogadro"
] |
e6b91df1c90389406ad9ac16aaf5b9dcad95c961d9119d5440f9c8774a295e94
|
# for LEGO Movie Module
LEGO_QUOTES = [
"He is coming... cover your butt.",
"You've hidden the Kragle well old man. Robots! Destroy him.",
"Your robots are no match for a Master Builder, for I see everything!",
"Ahh Haa Haa! Now my evil power will be unlimited. Can you feel me?!",
"One day a talented lass or fellow, a special one, with face of yellow, will make the Piece of Resistance found, from it's hiding refuge underground. And with a noble army at the helm, this Master Builder will thwart the Kragle and save the realm, and be the greatest, most interesting, most important person of all times. All this is true because it rhymes.",
"All this is true because it rhymes.",
"Ohh! Wow! That was a great inspiring legend.. that you made up.",
"What a load of hippie dippie baloney.",
"Goooood morning apartment. Good morning doorway, morning wall, morning ceiling, good morning floooorr. Ready to start the day!",
"Ahh, here it is. Instructions to fit in, have everybody like you and always be happy!",
"Step 1: Breathe.",
"Step 2: Greet the day, smile and say: \"Good morning, city!\"",
"Step 3: Exercise. Jumpin' Jacks, hit 'em!.. 1... 2... 3... Haha, I am so pumped up!",
"Step 4: Shower... and always be sure to keep the soap out of yo- AAHHHHH!",
"Wear clothes. Ooops! Almost forgot that one!",
"Hey Planty, what'd you wanna do this morning? Watch TV? Me too!",
"Hi, I'm President Business, president of the Octan corporation and the world. Let's take special care to follow the instructions, or you'll be put to sleep. And don't forget Taco Tuesday is coming up! That's the day everyone gets a free taco and my love! You have a great day everyone.",
"You have a great day too President Business. Man, he's such a cool guy, I always wanna hear more of- Wait, did he say put to sleep?!",
"Honey, where are my paaaaannts?",
"What was I just thinkin'? I don't care :D",
"Everything is awesome! Everything is cool when you're part of a team. Everything is awesooooomme when you're living a dreeeam.",
"Gooooooo sports team!",
"Hey guys, watch me drill this down!",
"I think I heard a \"Whoosh\".",
"Hey pal, I hate to tell you this but ahh, I don't think you're supposed to be down here...",
"Yeah, the rules specifically state: work site closes at six, its a hard hat area only, that's not official safety orange...",
"If you see anything weird report it immediately. Well! I guess I'm just going to have to report yoooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo",
"I dunno what to do... I don't have my instructions...",
"Touch the piece...",
"I feel like maybe I should touch that...",
"Playin' dumb Master Builder?",
"You're a LIAR!",
"I watch a lot of cop shows on TV.. isn't there supposed to-... isn't there supposed to be a good cop?",
"Hi buddy! I'm your friendly neighourhood police officer! Would you like a glass of water? TOO BAD!",
"Security cameras picked this up. You were found at the construction site, convulsing with a strange piece.",
"It's not my fault! I have no idea how this thing got on my back!",
"Of course buddy, we believe you.",
"I believe you too. You see the quotations I'm making with my claw hands? It means I DOn't believe you.",
"Predsident Business is going to end the world? But he's such a good guy.. and Octan, they make good stuff! Music, dairy products, coffee, TV shows, surveillance systems, all history books, voting machines... wait a minute.",
"That guy is not a criminal mastermind.",
"When you say {}, I go HA HA HAHA! When you say the other guy, I go... :|",
"I can't break him, take him to the melting chamber!",
"You're going to melt me?! Am I gonna die?!",
"Yes, we've told him he's going to live, but ahh.. we're lying to him.",
"Come with me if you wanna not die.",
"Hi everybody! How is the melting going- Hey, hey, heeeyyy!",
"Oh sir you're brilliant! We'll build a motorcycle out of the alley way.",
"It's brilliant sir that you've pretended to be a useless nobody, but you can drop the act with me its cool.",
"All units, cut him off on Elm, NOW!... or, whenever you can.",
"You found the Piece of Resistance and the prophecy states that you are the most important, most talented, most interesting and most extraordinary person in the universe. That's you isn't it?",
"That was incredible, you're even better than the prophecy said you'd be!",
"I can't do this! That is against the instructions!",
"Darn, darn, darn, darny, DARN!",
"You're not the special, you lied to me!",
"I know what a Master Builder is, why don't you tell me what it is, that way I could see if you're right.",
"Oh my G. O. S. H.",
"By the way I have a boyfriend. Its super serious and you do not want to mess with him. So don't get any ideas.",
"I never have any ideas.",
"Hey ahh listen do you think you could explain to me like why I'm dressed like this and what those big words in the sky were all about and where we are.. in.. time?",
"Blah, blah, blah proper name, place name, back story stuff. I'm so pretty, I like you but I'm angry with you for some reason.",
"Great! I think I got it. But just in case... tell me the whole thing again because I wasn't listening.",
"We're trying to find the fugitive, but his face is so generic it matches every other face in our database.",
"Diabolical.",
"I love everyone in this room!",
"Would you cancel my two o'clock? This next meeting could run a little bit.. deadly.",
"This one is the cloak of Band Ai'd. I hear its super painful to take off. You wanna try it on?",
"It makes me.. just wanna pickup whoever is standing closest to me.. and just throw them through this window.. out into the INFINITE. ABYSS. of NOTHINGNESS!... I wanna do it sooo baadd.",
"Robots! Bring me the fleece crested sceptre of Cu Tee'p and the Po'leesh remover of Na'eel.",
"Howdy I'm a cowboy! Bang bang, bang bang bang, shoot shoot shoot, bullet bullet gun, zap zap zap pow zap pow! What're they lookin' at?",
"You have a very weirdly decorated place.",
"The prophecy states that you are the special. The embodiment of good, foiler of evil, the most talented, most brilliant, most important person in the universe.",
"Just given what's around you, build something simple... like an awesome race car.",
"Wooooaah are we inside my brain right now? It's biiig I must be smart.",
"That is literally the dumbest thing I have ever heard.",
"That idea is just the worst.",
"{}, your mind is already so prodigiously empty that there is nothing in it to clear away in the first place. With proper training, you could become a great Master Builder.",
"All you have to do is to believe... then you will see everything.",
"Freeze, Turkey's!",
"We need to attach the wheel to something that spins around.",
"Rest in pieces.",
"Well, for what its worth, this has been about the greatest 15 minutes of my life.",
"BATMAN!!!",
"I'm Batman.",
"Batman huh? Where'd you guys meet?",
"Police to meet you Bad Cop.",
"Guess what you big dumb baby? Your car is a baby carriage.",
"Oh nooo your boyfriend's gone..",
"Hey babe. Let's hold hands.",
"Soo uhhh, hey guys, I think we're about to crash into the sun.",
"Yeah, but it's gonna look really cool.",
"Ahh, is this Cloud Cuckoo Land? I don't see any clouds or.. Cuckoo's..",
"This is Middle Zealand. A wonderous land full of knights, castles, mutten, torture weapons, poverty, leeches, illiteracy, and umm... DRAAAGGONNNNSS!!",
"Yeah, yeah anyway. You guys have gotta checkout these new subwoofers I got installed in the back. I call them \"The Dogs\". Listen to 'em bark!",
"This is a song I wrote for {}. Darkness! It's about how I'm an orphan. No parents!",
"This is real music {}. Batman's a true artist.. dark, brooding.",
"Yeah, well I can be dark and brooding too - Ohh, Look guys a rainbow!",
"I just need to give the secret knock...",
"I'm just gonna come right out, I.. have no idea what is going on, or what this place is.. at all.",
"Hiiiii! I am Princess {}, and I welcome you all to Cloud Cuckoo Land!",
"So there are no signs or anything.. how does anyone know what not to do?",
"Here in Cloud Cuckoo Land there are no rules! There's no government, no babysitters, no bedtimes, no frownie faces, no bushy mustaches and no negativity of any kind!",
"I hate this place..",
"Any idea is a good idea, except the not happy ones. Those get pushed down deep inside where you will never, ever, ever, EVER! Fiiind theeem :)",
"D-Didn't Krypton blow up?",
"The special will now give an eloquent speech... Go ahead man, you got this.",
"Really hard?! Wiping your butt with a hook for a hand is REALLY HARD!",
"I had to replace every part of my once strapping, virial pirate body with this useless hunk of junk ye see before ye.",
"Yes, it's true. I may not be a Master Builder.. I may not have a lot of experience, fighting or leading or coming up with plans.. or have ideas in general.",
"{}, you bring your space chair right back here!",
"Well, you were right about him being a ding dong.",
"Oh oh! It's the bad guys.",
"Ya'll ready for this?!... Oh no! They were ready for that!",
"I super hate you..",
"Hey! I'm Ben but you can call me Benny, and I can build a spaceship. Watch this.. spaceship, spaceship, spaaaceship, spaceship, spaceship!",
"That's okay. I didn't really wanna.. build a spaceship anyway.. that's cooool..",
"Bat Submarine! Patent pending.",
"If anybody has black parts I need them okay. I only work in black... and sometimes very very dark gray.",
"Eww. Get your retro space stuff out of my area.",
"Oh no.. I feel something inside.. it's like.. the opposite of HAPPINESS! I must.. stay positive. Ahhh, bubblegums.. Butterflies!.. Oh! Cotton candy!",
"You are so disappointing on so many levels.",
"Greetings all. Welcome to my THINK TANK!",
"Does anybody have some Kryptonite that they could get me?",
"It's kinda hard not to hear when you're yelling everything.",
"What's the last thing Lord Business would expect us to do? Build a spaceship? Kill a chicken? Marry a marshmellow? Why this! Ha-ha-how ya gonna keep 'em, down on the farm! down on the- No! It's follow the instructions.",
"She be a fine speech there laddy.",
"Somebody get me some markers!... Some construction paper!... And some glitter glue!",
"One of those sounds awesome to me.",
"So not.. special spaceship that I'm.. I'm building for all of you right now?",
"Oooo you're really letting the oxygen out of my tank here.",
"Hold on Han. This might be the right galaxy after all, because I see a heavenly body.",
"Well he's just as blind as.. a.. guy.. who's eyes.. just.. s-s-stopped working.",
"Is that last name butt, first name yourrr.. OH MY GOSH!",
"Pow! Wham! Ka-zab! Eh, eh, oh, eh... First try!",
"{}, that was awesome.",
"First law of the sea. Never place your rear end on a pirates face.",
"Cooool, talking computer!",
"There are no movies in your area with that title.",
"Bruce Wayne? Hahaha, who's that? Sounds like a cool guy.",
"Sure, sure, sure... let's go with that.",
"Untz, untz, untz, untz untz untz, untz, untz, untz untz untz, untz, untz, everything is awesome!",
"Business, business, business.. numbers. Is this working? YAAAAYYYY!",
"Don't worry Dad I read your dumb instructions, stop yelling at me.",
"Downloading latests episodes of \"Where are my pants?\"",
"We'll wing it... that's a bat pun...",
"Searching for Albanian restaurants...",
"Which phrase would you like me to undermine?",
"Acceptable work, {}.",
"You see {}, a corrupted spirit is no match for the purity of imaginat-",
"The prophecy... I made it up. It's not true.",
"You must listen... for what I am about to tell you.. will change the course of history...",
"As unspecial as I am, you are a thousand billion times more unspecial than me.",
"Must be weird... One minute you're the most special person in the universe... the next minute you're NOBODY.",
"It's not personal. It's just business... Lord Business. Ciao!",
"Don't worry about this big, black, monolith thing that's blocking out the sun. What you need to worry about is this question that I'm about to ask you... Who wants a TACO?!",
"Alright everyone, act normal... Now, FREEZE!",
"Commencing micro-management.",
"{}, you'll... you'll think of something won't you? Just like you always do.",
"The only thing anyone needs to be special is to believe that you can be. I know that sounds like a cat poster, but its true.",
"Nineteen eighties something technology? Now you're talking!",
"Hey everybody. You don't know me but I'm on TV so... you can trust me.",
"All of you have the ability inside you to be a ground breaker! And I mean literally, BREAK THE GROUND!",
"I'll hold these guys off, you go schtop 'em. Yay!",
"SPACESHIP!!!!",
"What is going on?! You stop building that stuff! Just STOP IT!",
"If only {} was here to see this. He'd say something adorable like...",
"Why is the dragon on top of the luxury condo development?",
"You accidentally... expertly, carefully took the top off that tower?",
"But the way I'm using it makes it an adult thing.",
"That's a suggestion. They have to put that on there.",
"He's not just a construction worker, {}. He's the hero!",
"If I could get the attention of the smaller creature...",
"Believe! I know it sounds like a cat poster, but its true.",
"Sorry, street.",
"I can see everything... I am a Master Builder!",
"Stay positive... Stay. Positive... Ahhh, forget it! AARRRGGGHHH!!! You all need to be more friendlllyyy!!!",
"Back from the dead, {}? Skeletrons, get him!",
"What is that? Is it super small? I don't see anything.",
"Okay. What I see are people, inspired by each other and by you. People taking what you made and making something new out of it.",
"You don't have to be the bad guy. You are the most talented, most interesting, and most extraordinary person in the universe. And you are capable of amazing things, because you are the special. And so am I, and so is everyone. The prophecy is made up but its also true, it's about all of us. Right now its about you, and you still.. can change everything.",
"Be careful. I have been told, it might explode.",
"No, wait, {}. He's the hero you deserve.",
"We are from the planet Duplo, and we are here to.. destroy you."
]
|
idooo/pancake-hipchat-bot
|
plugins/lego/lego_quotes.py
|
Python
|
mit
| 14,822
|
[
"Galaxy"
] |
661e12a64729f13bea3539320de55650d376c32e0a65fcfbfbca280d530727f1
|
# coding: utf-8
# Copyright (c) Materials Virtual Lab.
# Distributed under the terms of the BSD License.
"""
Implementation for `pmg query` CLI.
"""
from pymatgen.ext.matproj import MPRester
import json
from monty.serialization import dumpfn
import re
from tabulate import tabulate
def do_query(args):
"""
Perform query to the Materials Project
Args:
args (dict): Args from argparse.
"""
m = MPRester()
try:
criteria = json.loads(args.criteria)
except json.decoder.JSONDecodeError:
criteria = args.criteria
if args.structure:
count = 0
for d in m.query(criteria, properties=["structure", "task_id"]):
s = d["structure"]
formula = re.sub(r"\s+", "", s.formula)
if args.structure == "poscar":
fname = "POSCAR.%s_%s" % (d["task_id"], formula)
else:
fname = "%s-%s.%s" % (d["task_id"], formula, args.structure)
s.to(filename=fname)
count += 1
print("%d structures written!" % count)
elif args.entries:
entries = m.get_entries(criteria)
dumpfn(entries, args.entries)
print("%d entries written to %s!" % (len(entries), args.entries))
else:
props = ["e_above_hull", "spacegroup"]
props += args.data
entries = m.get_entries(criteria, property_data=props)
t = []
headers = ["mp-id", "Formula", "Spacegroup", "E/atom (eV)",
"E above hull (eV)"] + args.data
for e in entries:
row = [e.entry_id, e.composition.reduced_formula,
e.data["spacegroup"]["symbol"],
e.energy_per_atom, e.data["e_above_hull"]]
row += [e.data[s] for s in args.data]
t.append(row)
t = sorted(t, key=lambda x: x[headers.index("E above hull (eV)")])
print(tabulate(t, headers=headers, tablefmt="pipe", floatfmt=".3f"))
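# Hedged usage sketch (editor addition, not part of pymatgen): do_query() only
# reads four attributes off its argument (criteria, structure, entries, data),
# so any namespace-like object works. The criteria string below is an
# illustrative assumption, and MPRester() requires a configured API key.
def _example_do_query():
    """Illustrative only; never called by the CLI machinery."""
    from argparse import Namespace
    args = Namespace(criteria='{"pretty_formula": "Li2O"}',
                     structure=None, entries=None, data=[])
    do_query(args)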
|
gVallverdu/pymatgen
|
pymatgen/cli/pmg_query.py
|
Python
|
mit
| 1,959
|
[
"pymatgen"
] |
563e0fe941749d0e5057fce910ec24b04f3040996177b3114df7eaea532976e7
|
"""
A Python wrapper for the SkyNet neural network, which
can be downloaded here:
http://ccpforge.cse.rl.ac.uk/gf/project/skynet/
SkyNet is an efficient and robust neural network
training code for machine learning. It is able to
train large and deep feed-forward neural networks,
including autoencoders, for use in a wide range of
supervised and unsupervised learning applications,
such as regression, classification, density estimation,
clustering and dimensionality reduction. SkyNet is
implemented in C/C++ and fully parallelised using MPI.
SkyNet is written by Philip Graff, Farhan Feroz,
Michael P. Hobson and Anthony N. Lasenby
reference : http://xxx.lanl.gov/abs/1309.0790
=============================================
"""
# Authors: CHRISTOPHER BONNETT <c.bonnett@gmail.com>
# Licence: BSD 3 clause
__version__ = "0.1"
__author__ = "Christopher Bonnett"
__maintainer__ = "Christopher Bonnett"
__email__ = "c.bonnett@gmail.com"
import numpy as np
import os
import subprocess
import sys
import write_SkyNet_files as binning
__all__ = ["SkyNetClassifier", "SkyNetRegressor"]
try:
    SKYNET_PATH = os.environ["SKYNETPATH"]
except KeyError:
    SKYNET_PATH = '.'
class SkyNet():
"""
Skynet base class
"""
    def __init__(self):
        # The base class only provides the shared fit() machinery;
        # the concrete subclasses define their own __init__.
        pass
def fit(self, X_train, y_train, X_valid, y_valid):
"""Train a Neural Net using the training and validation data.
Parameters
----------
        X_train : array-like, shape = [n_samples, n_features]
            The training input samples.
        y_train : array-like, shape = [n_samples]
            The target values (class labels in classification).
        X_valid : array-like, shape = [n_samples, n_features]
            The validation input samples.
        y_valid : array-like, shape = [n_samples]
            The validation target values.
"""
        ### check that train and valid have the same number of features ###
_, self.n_features_ = X_train.shape
_, valid_features = X_valid.shape
if self.n_features_ != valid_features:
raise ValueError("Number of features in validation set must "
" match the training set. Train n_features is {} and "
" valid n_features is {} ".format(
self.n_features_, valid_features))
        ### for classification: check that the train and valid class sets match ###
if self.classification_network:
self.n_classes_ = len(np.unique(y_train))
self.classes_ = np.unique(y_train)
classes_valid = np.unique(y_valid)
if not np.array_equal(self.classes_, classes_valid):
raise ValueError("Training and validation must have the same "
"number of classes. Train has {} classes "
"and valid has {} classes".format(
self.n_classes_, len(classes_valid)))
### training/validation file names to be written ###
self.train_input_file = ''.join([self.input_root, self.id, '_train.txt'])
self.valid_input_file = ''.join([self.input_root, self.id, '_test.txt'])
### write training/validation files ###
if self.classification_network:
binning.write_SkyNet_cla_bin(self.train_input_file, X_train, y_train)
binning.write_SkyNet_cla_bin(self.valid_input_file, X_valid, y_valid)
self.SkyNet_config_file = ''.join([self.config_root, self.id, '_cla.inp'])
else:
binning.write_SkyNet_reg(self.train_input_file, X_train, y_train)
binning.write_SkyNet_reg(self.valid_input_file, X_valid, y_valid)
self.SkyNet_config_file = ''.join([self.config_root, self.id, '_reg.inp'])
output_root_file = ''.join([self.output_root, self.id, '_'])
self.network_file = ''.join([output_root_file, 'network.txt'])
### write config file ###
binning.write_SkyNet_config_file(self.SkyNet_config_file, self.train_input_file,
self.network_file, self.classification_network,
self.layers, self.activation,
self.prior, self.whitenin,
self.whitenout, self.noise_scaling,
self.set_whitened_noise, self.sigma,
self.confidence_rate,
self.confidence_rate_minimum,
self.iteration_print_frequency, self.fix_seed,
self.fixed_seed, self.calculate_evidence,
self.resume, self.historic_maxent,
self.recurrent, self.convergence_function,
self.validation_data, self.verbose,
self.pretrain, self.nepoch, self.max_iter,
self.line_search, self.mini_batch_fraction,
self.norbias, self.reset_alpha,
self.reset_sigma, self.randomise_weights)
### run SkyNet and catch std output ###
SkyNet_run_array = ''.join(['mpirun -np ',
str(self.n_jobs),
' SkyNet ',
self.SkyNet_config_file])
p = subprocess.Popen(SkyNet_run_array,
shell = True,
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT)
out, err = p.communicate()
### exit pySkyNet if SkyNet throws an error ###
        if err is not None:
            print("SkyNet error:")
            print(out)
            print("Exiting pySkyNet")
            sys.exit(1)
self.error_dataframe, self.corr_dataframe, self.class_dataframe = (
binning._parse_SkyNet_output(out,
self.iteration_print_frequency,
self.classification_network,
self.verbose,
self.validation_data))
### read the results from the files ###
self.train_pred_file = ''.join([output_root_file, 'train_pred.txt'])
self.valid_pred_file = ''.join([output_root_file, 'test_pred.txt'])
if self.classification_network: # pandas ? should be faster !
self.train_pred = np.loadtxt(self.train_pred_file,
usecols = range(self.n_classes_ + self.n_features_,
(2 * self.n_classes_) + self.n_features_))
self.valid_pred = np.loadtxt(self.valid_pred_file,
usecols = range(self.n_classes_ + self.n_features_,
(2 * self.n_classes_) + self.n_features_))
else:
self.train_pred = np.loadtxt(self.train_pred_file,
usecols = [self.n_features_ + 1, ])
            self.valid_pred = np.loadtxt(self.valid_pred_file,
                                         usecols = [self.n_features_ + 1, ])
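# Hedged usage sketch (editor addition, illustrative only): a typical training
# round-trip with the classifier defined below, assuming the SkyNet binary and
# mpirun are on PATH and the arrays are shaped as documented in SkyNet.fit.
def _example_skynet_fit(X_train, y_train, X_valid, y_valid):
    sn = SkyNetClassifier(id='demo')
    sn.fit(X_train, y_train, X_valid, y_valid)  # writes config files, runs SkyNet
    return sn.valid_pred                        # predictions on the validation set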
# =============================================================================
# Public estimators
# =============================================================================
class SkyNetClassifier(SkyNet):
"""A neural net classifier.
This class calls Skynet as a classifier.
Parameters
----------
id : string, compulsory
        This is a base id used as an identifier.
All files written by Skynet will contain
id in the file-name.
input_root : string, optional (default=custom)
        The folder where SkyNet-wrapper will write and SkyNet will look for the train
and validation files.
output_root : string, optional (default=custom)
The folder where SkyNet will write the network file
(i.e the trained weights)
result_root : string, optional (default=custom)
The folder where SkyNet will write prediction
files.
config_root : string, optional (default=custom)
The folder where SkyNet will write the
config file that it uses to train.
layers : tuple , optional (default=(10,10,10))
The amount of hidden layers and the amount
of nodes per hidden layer. Default is 3
hidden layers with 10 nodes in each layer.
    activation : tuple, optional (default=(2,2,2,0))
Which activation function to use per layer:
0 = linear
1 = sigmoid
2 = tanh
3 = rectified linear
        4 = softsign
Needs to have len(layers) + 1
as the activation of the final
layer needs to be set.
prior : boolean, optional (default =True)
Use L2 weight regularization.
Strongly advised.
mini_batch_fraction : float, optional (default=1.0)
What fraction of the training data to use in each batch.
validation_data : bool, optional (default = True)
Is there validation data to test against?
Strongly advise to use to prevent overfitting
confidence_rate : float, optional (default=0.3)
Initial learning rate.
Step size factor, higher values are more aggressive.
confidence_rate_minimum : float, optional (default=0.02)
minimum confidence rate allowed
iteration_print_frequency : int, optional (default=50)
Skynet feedback frequency
max_iter : int, optional (default=2000)
Maximum training epochs
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for 'fit'.
whitenin : integer, optional (default=1)
Which input transformation to use:
0 = none
1 = min-max
2 = normal.
whitenout : integer, optional (default=1)
Which output transformation to use:
0 = none
1 = min-max
2 = normal.
convergence_function : integer, optional (default=4)
Which minimization function to use for
convergence testing:
1 = log-posterior
2 = log-likelihood
3 = correlation
4 = error squared.
historic_maxent : bool, optional (default=False)
Experimental implementation of MemSys's historic maxent option.
line_search : int, optional (default = 0)
Perform line search for optimal distance:
0 = none
1 = golden section
2 = linbcg lnsrch.
noise_scaling : bool, optional (default = False)
If noise level (standard deviation of outputs) is to be estimated.
set_whitened_noise : bool, optional (default =False)
Whether the noise is to be set on whitened data.
sigma : float, optional (default = 0.035)
Initial noise level, set on (un-)whitened data.
fix_seed : bool, optional (default =False)
Use a fixed seed?
Useful for debugging and unit-tests.
fixed_seed : int, optional (default =0)
Seed to use if fix_seed == True.
resume : bool, optional (default = False)
Resume from a previous job.
reset_alpha : bool, optional (default = False)
Reset hyperparameter upon resume.
reset_sigma : bool, optional (default = False)
reset hyperparameters upon resume.
randomise_weights : float, optional (default = 0.1)
Random factor to add to saved weights upon resume.
verbose : int, optional (default=2)
Verbosity level of feedback sent to stdout
by SkyNet (0=min, 3=max).
pretrain : bool,
Perform pre-training using
restricted BM.
nepoch : int, optional (default=10)
Number of epochs to use in pre-training.
Attributes
----------
n_features : int
The number of features.
train_input_file : string
Filename of the written training file.
valid_input_file : string.
Filename of the written validation file.
SkyNet_config_file : string
Filename of SkyNet config file.
network_file : string
Filename of SkyNet network file.
This file contains the trained weights.
References
----------
.. [1] SKYNET: an efficient and robust neural network
training tool for machine learning in
astronomy http://arxiv.org/abs/1309.0790
See also
--------
SkyNetRegressor
"""
def __init__(self,
id,
classification_network = True,
input_root = ''.join([SKYNET_PATH, '/train_valid/']),
output_root = ''.join([SKYNET_PATH, '/network/']),
result_root = ''.join([SKYNET_PATH, '/predictions/']),
config_root = ''.join([SKYNET_PATH, '/config_files/']),
layers = (10, 10, 10),
activation = (2, 2, 2, 0),
prior = True,
confidence_rate = 0.3,
confidence_rate_minimum = 0.02,
iteration_print_frequency = 50,
max_iter = 2000,
whitenin = True,
whitenout = True,
noise_scaling = 0,
set_whitened_noise = False,
sigma = 0.035,
fix_seed = False,
fixed_seed = 0,
calculate_evidence = True,
historic_maxent = False,
recurrent = False,
convergence_function = 4,
validation_data = True,
verbose = 2,
pretrain = False,
nepoch = 10,
line_search = 0,
mini_batch_fraction = 1.0,
resume = False,
norbias = False,
reset_alpha = False,
reset_sigma = False,
randomise_weights = 0.1,
n_jobs = 1):
self.id = id
self.classification_network = classification_network
self.input_root = input_root
self.output_root = output_root
self.result_root = result_root
self.config_root = config_root
self.layers = layers
self.prior = prior
self.sigma = sigma
self.confidence_rate = confidence_rate
self.confidence_rate_minimum = confidence_rate_minimum
self.iteration_print_frequency = iteration_print_frequency
self.max_iter = max_iter
self.whitenin = whitenin
self.whitenout = whitenout
self.noise_scaling = noise_scaling
self.set_whitened_noise = set_whitened_noise
self.fix_seed = fix_seed
self.fixed_seed = fixed_seed
self.calculate_evidence = calculate_evidence
self.resume = resume
self.historic_maxent = historic_maxent
self.recurrent = recurrent
self.convergence_function = convergence_function
self.validation_data = validation_data
self.verbose = verbose
self.pretrain = pretrain
self.nepoch = nepoch
self.n_jobs = n_jobs
self.activation = activation
self.mini_batch_fraction = mini_batch_fraction
self.line_search = line_search
self.norbias = norbias
self.reset_alpha = reset_alpha
self.reset_sigma = reset_sigma
self.randomise_weights = randomise_weights
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed
by the trained neural network.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Attributes
----------
output_file : String
SkyNet writes to this file:
result_root + self.id + _predictions.txt.
Returns
-------
p : array of shape = [n_samples,n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
### set file names ###
self.pred_input_file = ''.join([self.input_root, self.id, '_to_predict.txt'])
self.output_file = ''.join([self.result_root, self.id, '_predictions.txt'])
### check feature length ###
n_samples_pred, pred_n_features_ = X.shape
if self.n_features_ != pred_n_features_:
raise ValueError("Number of features in prediction set must "
"match the training/validation set."
"Training set has {} features "
"Prediction set has {} features ".format(
self.n_features_, pred_n_features_))
dummy_classes = np.random.randint(0, high = self.n_classes_, size = n_samples_pred)
binning.write_SkyNet_cla_bin(self.pred_input_file, X, dummy_classes)
### check file existence ###
if not os.path.isfile(self.network_file):
raise IOError("Network file {} not found".format(self.network_file))
if not os.path.isfile(self.train_input_file):
raise IOError("Input file {} not found".format(self.train_input_file))
if not os.path.isfile(self.pred_input_file):
raise IOError("Prediction file {} not found".format(self.pred_input_file))
### calculate predictions ###
SkyNet_predictions_string = ''.join(['CalPred',
' 0 1 0 ',
self.network_file, ' ',
self.train_input_file, ' ',
self.pred_input_file, ' ',
self.output_file, ' 0 0 0'])
p = subprocess.Popen(SkyNet_predictions_string, shell = True, stdout = subprocess.PIPE,
stderr = subprocess.STDOUT)
out, err = p.communicate()
### read in prediction ###
predictions = np.loadtxt(self.output_file,
usecols = range(self.n_classes_ + self.n_features_,
(2 * self.n_classes_) + self.n_features_))
return predictions
class SkyNetRegressor(SkyNet):
"""A neural net regeressor.
Parameters
----------
id : string, compulsory
This is a base id used to as an identifier.
input_root : string, optional (default=custom)
The folder where SkyNet-wrapper will write and SkyNet will look for the train
and validation files.
output_root : string, optional (default=custom)
The folder where SkyNet will write the network file
(i.e the trained weights)
result_root : string, optional (default=custom)
The folder where SkyNet will write prediction
files.
config_root : string, optional (default=custom)
The folder where SkyNet will write the
config file that it uses to train.
This parameter is best adjusted
in SkyNet.py
layers : tuple , optional (default=(10,10,10))
The number of hidden layers and the number
of nodes per hidden layer. Default is 3
hidden layers with 10 nodes in each layer.
activation : tuple, optional (default=(2,2,2,0))
Which activation function to use per layer:
0 = linear
1 = sigmoid
2 = tanh
3 = rectified linear
4 = softsign
Needs to have len(layers) + 1
as the activation of the final
layer needs to be set.
prior : boolean, optional (default =True)
Use L2 weight regularization.
Strongly advised.
mini_batch_fraction : float, optional (default=1.0)
What fraction of the training data to use in each batch.
validation_data : bool, optional (default = True)
Is there validation data to test against?
Strongly advise to use to prevent overfitting
confidence_rate : float, optional (default=0.3)
Initial learning rate.
Step size factor, higher values are more aggressive.
confidence_rate_minimum : float, optional (default=0.02)
minimum confidence rate allowed
iteration_print_frequency : int, optional (default=50)
Skynet feedback frequency
max_iter : int, optional (default=2000)
Maximum training epochs
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for 'fit'.
whitenin : integer, optional (default=1)
Which input transformation to use:
0 = none
1 = min-max
2 = normal.
whitenout : integer, optional (default=1)
Which output transformation to use:
0 = none
1 = min-max
2 = normal.
convergence_function : integer, optional (default=4)
Which minimization function to use for
convergence testing:
1 = log-posterior
2 = log-likelihood
3 = correlation
4 = error squared.
historic_maxent : bool, optional (default=False)
Experimental implementation of MemSys's historic maxent option.
line_search : int, optional (default = 0)
Perform line search for optimal distance:
0 = none
1 = golden section
2 = linbcg lnsrch.
noise_scaling : bool, optional (default = False)
If noise level (standard deviation of outputs) is to be estimated.
set_whitened_noise : bool, optional (default =False)
Whether the noise is to be set on whitened data.
sigma : float, optional (default = 0.035)
Initial noise level, set on (un-)whitened data.
fix_seed : bool, optional (default =False)
Use a fixed seed?
Useful for debugging and unit-tests.
fixed_seed : int, optional (default =0)
Seed to use if fix_seed == True.
resume : bool, optional (default = False)
Resume from a previous job.
reset_alpha : bool, optional (default = False)
Reset hyperparameter upon resume.
reset_sigma : bool, optional (default = False)
reset hyperparameters upon resume.
randomise_weights : float, optional (default = 0.1)
Random factor to add to saved weights upon resume.
verbose : int, optional (default=2)
Verbosity level of feedback sent to stdout
by SkyNet (0=min, 3=max).
pretrain : bool,
Perform pre-training using
restricted BM.
nepoch : int, optional (default=10)
Number of epochs to use in pre-training.
Attributes
----------
n_features : int
The number of features.
train_input_file : string
Filename of the written training file.
valid_input_file : string.
Filename of the written validation file.
SkyNet_config_file : string
Filename of SkyNet config file.
network_file : string
Filename of SkyNet network file.
This file contains the trained weights.
References
----------
.. [1] SKYNET: an efficient and robust neural network
training tool for machine learning in
astronomy http://arxiv.org/abs/1309.0790
See also
--------
SkyNetClassifier
"""
def __init__(self,
id,
classification_network = False,
input_root = ''.join([SKYNET_PATH, '/train_valid/']),
output_root = ''.join([SKYNET_PATH, '/network/']),
result_root = ''.join([SKYNET_PATH, '/predictions/']),
config_root = ''.join([SKYNET_PATH, '/config_files/']),
layers = (10, 10, 10),
activation = (2, 2, 2, 0),
prior = True,
confidence_rate = 0.3,
confidence_rate_minimum = 0.02,
iteration_print_frequency = 50,
max_iter = 2000,
whitenin = True,
whitenout = True,
noise_scaling = 0,
set_whitened_noise = False,
sigma = 0.035,
fix_seed = False,
fixed_seed = 0,
calculate_evidence = True,
historic_maxent = False,
recurrent = False,
convergence_function = 4,
validation_data = True,
verbose = 2,
pretrain = False,
nepoch = 10,
line_search = 0,
mini_batch_fraction = 1.0,
resume = False,
norbias = False,
reset_alpha = False,
reset_sigma = False,
randomise_weights = 0.1,
n_jobs = 1):
self.id = id
self.classification_network = classification_network
self.input_root = input_root
self.output_root = output_root
self.result_root = result_root
self.config_root = config_root
self.layers = layers
self.prior = prior
self.sigma = sigma
self.confidence_rate = confidence_rate
self.confidence_rate_minimum = confidence_rate_minimum
self.iteration_print_frequency = iteration_print_frequency
self.max_iter = max_iter
self.whitenin = whitenin
self.whitenout = whitenout
self.noise_scaling = noise_scaling
self.set_whitened_noise = set_whitened_noise
self.fix_seed = fix_seed
self.fixed_seed = fixed_seed
self.calculate_evidence = calculate_evidence
self.resume = resume
self.historic_maxent = historic_maxent
self.recurrent = recurrent
self.convergence_function = convergence_function
self.validation_data = validation_data
self.verbose = verbose
self.pretrain = pretrain
self.nepoch = nepoch
self.n_jobs = n_jobs
self.activation = activation
self.mini_batch_fraction = mini_batch_fraction
self.line_search = line_search
self.norbias = norbias
self.reset_alpha = reset_alpha
self.reset_sigma = reset_sigma
self.randomise_weights = randomise_weights
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed by the
trained neural network.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Attributes
----------
output_file : String
SkyNet writes to this file:
result_root + self.id + _predictions.txt.
Returns
-------
y: array of shape = [n_samples]
The predicted values.
"""
### set file names ###
self.pred_input_file = ''.join([self.input_root, self.id, '_to_predict.txt'])
self.output_file = ''.join([self.result_root, self.id, '_predictions.txt'])
### check feature length ###
n_samples_pred, pred_n_features_ = X.shape
if self.n_features_ != pred_n_features_:
raise ValueError("Number of features in prediction set must "
"match the training/validation set."
"Training set has {} features "
"Prediction set has {} features ".format(
self.n_features_, pred_n_features_))
### write feature file ###
dummy_targets = np.zeros(n_samples_pred)
binning.write_SkyNet_reg(self.pred_input_file, X, dummy_targets)
### check file existence ###
if not os.path.isfile(self.network_file):
raise IOError("Network file {} not found".format(self.network_file))
if not os.path.isfile(self.train_input_file):
raise IOError("Input file {} not found".format(self.train_input_file))
if not os.path.isfile(self.pred_input_file):
raise IOError("Prediction file {} not found".format(self.pred_input_file))
### calculate predictions ###
SkyNet_predictions_string = ''.join(['CalPred',
' 0 0 0 ',
self.network_file, ' ',
self.train_input_file, ' ',
self.pred_input_file, ' ',
self.output_file,
' 0 0 0'])
p = subprocess.Popen(SkyNet_predictions_string,
shell = True,
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT)
out, err = p.communicate()
### read in predictions ###
predictions = np.loadtxt(self.output_file, usecols = [self.n_features_ + 1, ])
return predictions
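# ---------------------------------------------------------------------------
# Editor's usage sketch, not part of the original module: a minimal,
# hypothetical example of training and predicting with SkyNetRegressor.
# It assumes the SkyNet and CalPred binaries are on the PATH, that
# SKYNET_PATH points at writable folders, and that the base class exposes
# a fit(X_train, y_train, X_valid, y_valid) method (defined earlier in
# this module, outside the excerpt shown here).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    X_train = np.random.rand(100, 5)   # hypothetical training features
    y_train = np.random.rand(100)      # hypothetical training targets
    X_valid = np.random.rand(20, 5)    # hypothetical validation features
    y_valid = np.random.rand(20)
    sn_reg = SkyNetRegressor(id = 'demo', n_jobs = 2, max_iter = 500)
    sn_reg.fit(X_train, y_train, X_valid, y_valid)
    print sn_reg.predict(X_valid)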
|
cbonnett/SkyNet_wrapper
|
src/SkyNet.py
|
Python
|
gpl-3.0
| 28,963
|
[
"exciting"
] |
b55ac763ec9d3ee498d51c6209b2adbaec9937965c909403b9c3131baa6d1a5f
|
#!/usr/bin/env python
"""
LAMMPS Replica Exchange Molecular Dynamics (REMD) trajectories are arranged by
replica, i.e., each trajectory is a continuous replica that records all the
ups and downs in temperature. However, often the requirement is trajectories
that are continuous in temperature, which is achieved by this tool.
Author:
Tanmoy Sanyal, Shell lab, Chemical Engineering, UC Santa Barbara
Email: tanmoy dot 7989 at gmail dot com
Usage
-----
To get detailed information about the arguments, flags, etc use:
python reorder_remd_traj.py -h or
python reorder_remd_traj.py --help
Features of this script
-----------------------
a) reorder LAMMPS REMD trajectories by temperature keeping only desired frames.
Note: this only handles LAMMPS format trajectories (i.e. .lammpstrj format)
Trajectories can be gzipped or bz2-compressed. The trajectories are assumed to
be named as <prefix>.%d.lammpstrj[.gz or .bz2]
b) (optionally) calculate configurational weights for each frame at each
temperature if potential energies are supplied. But this is for the canonical
(NVT) ensemble only.
Dependencies
------------
mpi4py
pymbar (for getting configurational weights)
tqdm (for printing pretty progress bars)
StringIO (or io if in Python 3.x)
"""
import os, numpy as np, argparse, time, pickle
from scipy.special import logsumexp
from mpi4py import MPI
from tqdm import tqdm
import gzip, bz2
try:
# python-2
from StringIO import StringIO as IOBuffer
except ImportError:
# python-3
from io import BytesIO as IOBuffer
#### INITIALISE MPI ####
# (note that all output on screen will be printed only on the ROOT proc)
ROOT = 0
comm = MPI.COMM_WORLD
me = comm.rank # my proc id
nproc = comm.size
#### HELPER FUNCTIONS ####
def _get_nearest_temp(temps, query_temp):
"""
Helper function to get the nearest temp in a list
from a given query_temp
:param temps: list of temps.
:param query_temp: query temp
Returns:
out_temp: nearest temp from the list
"""
if isinstance(temps, list): temps = np.array(temps)
return temps[np.argmin(np.abs(temps-query_temp))]
def readwrite(trajfn, mode):
"""
Helper function for input/output LAMMPS traj files.
Trajectories may be plain text, .gz or .bz2 compressed.
:param trajfn: name of LAMMPS traj
:param mode: "r" ("w") and "rb" ("wb") depending on read or write
Returns: file pointer
"""
if trajfn.endswith(".gz"):
of = gzip.open(trajfn, mode)
#return gzip.GzipFile(trajfn, mode)
elif trajfn.endswith(".bz2"):
of = bz2.open(trajfn, mode)
#return bz2.BZ2File(trajfn, mode)
else:
of = open(trajfn, mode)
return of
def get_replica_frames(logfn, temps, nswap, writefreq):
"""
Get a list of frames from each replica that is
at a particular temp. Do this for all temps.
:param logfn: master LAMMPS log file that contains the temp
swap history of all replicas
:param temps: list of all temps used in the REMD simulation.
:param nswap: swap frequency of the REMD simulation
:param writefreq: traj dump frequency in LAMMPS
Returns: master_frametuple_dict:
dict containing a tuple (replica #, frame #) for each temp.
"""
n_rep = len(temps)
swap_history = np.loadtxt(logfn, skiprows = 3)
master_frametuple_dict = dict( (n, []) for n in range(n_rep) )
# walk through the replicas
print("Getting frames from all replicas at temperature:")
for n in range(n_rep):
print("%3.2f K" % temps[n])
rep_inds = [np.where(x[1:] == n)[0][0] for x in swap_history]
# case-1: when frames are dumped faster than temp. swaps
if writefreq <= nswap:
for ii, i in enumerate(rep_inds[:-1]):
start = int(ii * nswap / writefreq)
stop = int( (ii+1) * nswap / writefreq)
[master_frametuple_dict[n].append( (i,x) ) \
for x in range(start, stop)]
# case-2: when temps. are swapped faster than dumping frames
else:
nskip = int(writefreq / nswap)
[master_frametuple_dict[n].append( (i,ii) ) \
for ii, i in enumerate(rep_inds[0::nskip])]
return master_frametuple_dict
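# Editor's note, a worked example of the two cases above with hypothetical
# numbers: with nswap = 1000 and writefreq = 100 (case-1), swap record ii
# covers dumped frames ii*10 ... ii*10+9 of whichever replica held
# temperature n during that window. With nswap = 100 and writefreq = 1000
# (case-2), only every (writefreq / nswap) = 10th swap record coincides
# with a dumped frame, hence the rep_inds[0::nskip] stride.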
def get_byte_index(rep_inds, byteindfns, intrajfns):
"""
Get byte indices from (un-ordered) trajectories.
:param rep_inds: indices of replicas to process on this proc
:param byteindsfns: list of filenames that will contain the byte indices
:param intrajfns: list of (unordered) input traj filenames
"""
for n in rep_inds:
# check if the byte indices for this traj have already been computed
if os.path.isfile(byteindfns[n]): continue
# extract bytes
fobj = readwrite(intrajfns[n], "rb")
byteinds = [ [0,0] ]
# place file pointer at first line
nframe = 0
first_line = fobj.readline()
cur_pos = fobj.tell()
# status printed only for replica read on root proc
# this assumes that each proc takes roughly the same time
if me == ROOT:
pb = tqdm(desc = "Reading replicas", leave = True,
position = ROOT + 2*me,
unit = "B/replica", unit_scale = True,
unit_divisor = 1024)
# start crawling through the bytes
while True:
next_line = fobj.readline()
if len(next_line) == 0: break
# this will only work with lammpstrj traj format.
# this condition essentially checks periodic recurrences
# of the token TIMESTEP. Each time it is found,
# we have crawled through a frame (snapshot)
if next_line == first_line:
nframe += 1
byteinds.append( [nframe, cur_pos] )
if me == ROOT: pb.update()
cur_pos = fobj.tell()
if me == ROOT: pb.update(0)
if me == ROOT: pb.close()
# take care of the EOF
cur_pos = fobj.tell()
byteinds.append( [nframe+1, cur_pos] ) # dummy index for the EOF
# write to file
np.savetxt(byteindfns[n], np.array(byteinds), fmt = "%d")
# close the trajfile object
fobj.close()
return
def write_reordered_traj(temp_inds, byte_inds, outtemps, temps,
frametuple_dict, nprod, writefreq,
outtrajfns, infobjs):
"""
Reorders trajectories by temp. and writes them to disk
:param temp_inds: list index of temps (in the list of all temps) for which
reordered trajs will be produced on this proc.
:param byte_inds: dict containing the (previously stored) byte indices
for each replica file (key = replica number)
:param outtemps: list of all temps for which to produce reordered trajs.
:param temps: list of all temps used in the REMD simulation.
:param outtrajfns: list of filenames for output (ordered) trajs.
:param frametuple_dict: dict containing a tuple (replica #, frame #)
for each temp.
:param nprod: number of production timesteps.
Last (nprod / writefreq) frames
from the end will be written to disk.
:param writefreq: traj dump frequency in LAMMPS
:param infobjs: list of file pointers to input (unordered) trajs.
"""
nframes = int(nprod / writefreq)
for n in temp_inds:
# open string-buffer and file
buf = IOBuffer()
of = readwrite(outtrajfns[n], "wb")
# get frames
abs_temp_ind = np.argmin( abs(temps - outtemps[n]) )
frametuple = frametuple_dict[abs_temp_ind][-nframes:]
# write frames to buffer
if me == ROOT:
pb = tqdm(frametuple,
desc = ("Buffering trajectories for writing"),
leave = True, position = ROOT + 2*me,
unit = 'frame/replica', unit_scale = True)
iterable = pb
else:
iterable = frametuple
for i, (rep, frame) in enumerate(iterable):
infobj = infobjs[rep]
start_ptr = int(byte_inds[rep][frame,1])
stop_ptr = int(byte_inds[rep][frame+1,1])
byte_len = stop_ptr - start_ptr
infobj.seek(start_ptr)
buf.write(infobj.read(byte_len))
if me == ROOT: pb.close()
# write buffer to disk
if me == ROOT: print("Writing buffer to file")
of.write(buf.getvalue())
of.close()
buf.close()
for i in infobjs: i.close()
return
def get_canonical_logw(enefn, frametuple_dict, temps, nprod, writefreq,
kB):
"""
Gets configurational log-weights (logw) for each frame and at each temp.
from the REMD simulation. ONLY WRITTEN FOR THE CANONICAL (NVT) ensemble.
This weights can be used to calculate the
ensemble averaged value of any simulation observable X at a given temp. T :
<X> (T) = \sum_{k=1, ntemps} \sum_{n=1, nframes} w[idx][k,n] X[k,n]
where nframes is the number of frames to use from each *reordered* traj
:param enefn: ascii file (readable by numpy.loadtxt) containing an array
u[r,n] of *total* potential energy for the n-th frame for
the r-th replica.
:param frametuple_dict: dict containing a tuple (replica #, frame #)
for each temp.
:param temps: array of temps. used in the REMD simulation
:param nprod: number of production timesteps. Last (nprod / writefreq)
frames from the end will be written to disk.
:param writefreq: traj dump frequency in LAMMPS
:param kB : Boltzmann constant to set the energy scale.
Default is in kcal/mol
Returns: logw: dict, logw[l][k,n] gives the log weights from the
n-th frame of the k-th temp. *ordered* trajectory
to reweight to the l-th temp.
"""
try:
import pymbar
except ImportError:
print("""
Configurational log-weight calculation requires pymbar.
Here are some options to install it:
conda install -c omnia pymbar
pip install --user pymbar
sudo pip install pymbar
To install the dev. version directly from github, use:
pip install git+https://github.com/choderalab/pymbar.git
""")
u_rn = np.loadtxt(enefn)
ntemps = u_rn.shape[0] # number of temps.
nframes = int(nprod / writefreq) # number of frames at each temp.
# reorder the temps
u_kn = np.zeros([ntemps, nframes], float)
for k in range(ntemps):
frame_tuple = frametuple_dict[k][-nframes:]
for i, (rep, frame) in enumerate(frame_tuple):
u_kn[k, i] = u_rn[rep, frame]
# prep input for pymbar
#1) array of frames at each temp.
nframes_k = nframes * np.ones(ntemps, np.uint8)
#2) inverse temps. for chosen energy scale
beta_k = 1.0 / (kB * temps)
#3) get reduced energies (*ONLY FOR THE CANONICAL ENSEMBLE*)
u_kln = np.zeros([ntemps, ntemps, nframes], float)
for k in range(ntemps):
u_kln[k] = np.outer(beta_k, u_kn[k])
# run pymbar and extract the free energies
print("\nRunning pymbar...")
mbar = pymbar.mbar.MBAR(u_kln, nframes_k, verbose = True)
f_k = mbar.f_k # (1 x k array)
# calculate the log-weights
print("\nExtracting log-weights...")
log_nframes = np.log(nframes)
logw = dict( (k, np.zeros([ntemps, nframes], float)) for k in range(ntemps) )
# get log-weights to reweight to this temp.
for k in range(ntemps):
for n in range(nframes):
num = -beta_k[k] * u_kn[k,n]
denom = f_k - beta_k[k] * u_kn[k,n]
for l in range(ntemps):
logw[l][k,n] = num - logsumexp(denom) - log_nframes
return logw
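# Editor's sketch, a hypothetical use of the log-weights returned above:
# X_kn[k, n] would be any scalar observable measured on frame n of the
# temperature-ordered trajectory k; the reweighted average at temps[l] is
# then the weighted sum from the docstring:
#
#     logw = get_canonical_logw(enefn, frametuple_dict, temps, nprod,
#                               writefreq, kB)
#     l = 0  # index of the target temperature
#     X_avg = np.sum(np.exp(logw[l]) * X_kn)   # <X> (temps[l])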
#### MAIN WORKFLOW ####
if __name__ == "__main__":
# accept user inputs
parser = argparse.ArgumentParser(description = __doc__,
formatter_class = argparse.RawDescriptionHelpFormatter)
parser.add_argument("prefix",
help = "Prefix of REMD LAMMPS trajectories.\
Supply full path. Trajectories assumed to be named as \
<prefix>.%%d.lammpstrj. \
Can be in compressed (.gz or .bz2) format. \
This is a required argument")
parser.add_argument("-logfn", "--logfn", default = "log.lammps",
help = "LAMMPS log file that contains swap history \
of temperatures among replicas. \
Default = 'lammps.log'")
parser.add_argument("-tfn", "--tempfn", default = "temps.txt",
help = "ascii file (readable by numpy.loadtxt) with \
the temperatures used in the REMD simulation.")
parser.add_argument("-ns", "--nswap", type = int,
help = "Swap frequency used in LAMMPS temper command")
parser.add_argument("-nw", "--nwrite", type = int, default = 1,
help = "Trajectory writing frequency used \
in LAMMPS dump command")
parser.add_argument("-np", "--nprod", type = int, default = 0,
help = "Number of timesteps to save in the reordered\
trajectories.\
This should be in units of the LAMMPS timestep")
parser.add_argument("-logw", "--logw", action = 'store_true',
help = "Supplying this flag \
calculates *canonical* (NVT ensemble) log weights")
parser.add_argument("-e", "--enefn",
help = "File that has n_replica x n_frames array\
of total potential energies")
parser.add_argument("-kB", "--boltzmann_const",
type = float, default = 0.001987,
help = "Boltzmann constant in appropriate units. \
Default is kcal/mol")
parser.add_argument("-ot", "--out_temps", nargs = '+', type = np.float64,
help = "Reorder trajectories at these temperatures.\n \
Default is all temperatures used in the simulation")
parser.add_argument("-od", "--outdir", default = ".",
help = "All output will be saved to this directory")
# parse inputs
args = parser.parse_args()
traj_prefix = os.path.abspath(args.prefix)
logfn = os.path.abspath(args.logfn)
tempfn = os.path.abspath(args.tempfn)
nswap = args.nswap
writefreq = args.nwrite
nprod = args.nprod
enefn = args.enefn
if not enefn is None: enefn = os.path.abspath(enefn)
get_logw = args.logw
kB = args.boltzmann_const
out_temps = args.out_temps
outdir = os.path.abspath(args.outdir)
if not os.path.isdir(outdir):
if me == ROOT: os.mkdir(outdir)
# check that all input files are present (only on the ROOT proc)
if me == ROOT:
if not os.path.isfile(tempfn):
raise IOError("Temperature file %s not found." % tempfn)
elif not os.path.isfile(logfn):
raise IOError("LAMMPS log file %s not found." % logfn)
elif get_logw and not os.path.isfile(enefn):
raise IOError("Canonical log-weight calculation requested but\
energy file %s not found" % enefn)
# get (unordered) trajectories
temps = np.loadtxt(tempfn)
ntemps = len(temps)
intrajfns = ["%s.%d.lammpstrj" % (traj_prefix, k) for k in range(ntemps)]
# check if the trajs. (or their zipped versions are present)
for i in range(ntemps):
this_intrajfn = intrajfns[i]
x = this_intrajfn + ".gz"
if os.path.isfile(this_intrajfn): continue
elif os.path.isfile(this_intrajfn + ".gz"):
intrajfns[i] = this_intrajfn + ".gz"
elif os.path.isfile(this_intrajfn + ".bz2"):
intrajfns[i] = this_intrajfn + ".bz2"
else:
if me == ROOT:
raise IOError("Trajectory for replica # %d missing" % i)
# set output filenames
outprefix = os.path.join(outdir, traj_prefix.split('/')[-1])
outtrajfns = ["%s.%3.2f.lammpstrj.gz" % \
(outprefix, _get_nearest_temp(temps, t)) \
for t in out_temps]
byteindfns = [os.path.join(outdir, ".byteind_%d.gz" % k) \
for k in range(ntemps)]
frametuplefn = outprefix + '.frametuple.pickle'
if get_logw:
logwfn = outprefix + ".logw.pickle"
# get a list of all frames at a particular temp visited by each replica
# this is fast so run only on ROOT proc.
master_frametuple_dict = {}
if me == ROOT:
master_frametuple_dict = get_replica_frames(logfn = logfn,
temps = temps,
nswap = nswap,
writefreq = writefreq)
# save to a pickle from the ROOT proc
with open(frametuplefn, 'wb') as of:
pickle.dump(master_frametuple_dict, of)
# broadcast to all procs
master_frametuple_dict = comm.bcast(master_frametuple_dict, root = ROOT)
# define a chunk of replicas to process on each proc
CHUNKSIZE_1 = int(ntemps/nproc)
if me < nproc - 1:
my_rep_inds = range( (me*CHUNKSIZE_1), (me+1)*CHUNKSIZE_1 )
else:
my_rep_inds = range( (me*CHUNKSIZE_1), ntemps )
# get byte indices from replica (un-ordered) trajs. in parallel
get_byte_index(rep_inds = my_rep_inds,
byteindfns = byteindfns,
intrajfns = intrajfns)
# block until all procs have finished
comm.barrier()
# open all replica files for reading
infobjs = [readwrite(i, "rb") for i in intrajfns]
# open all byteindex files
byte_inds = dict( (i, np.loadtxt(fn)) for i, fn in enumerate(byteindfns) )
# define a chunk of output trajs. to process for each proc.
# # of reordered trajs. to write may be less than the total # of replicas
# which is usually equal to the requested nproc. If that is indeed the case,
# retire excess procs
n_out_temps = len(out_temps)
CHUNKSIZE_2 = int(n_out_temps / nproc)
if CHUNKSIZE_2 == 0:
nproc_active = n_out_temps
CHUNKSIZE_2 = 1
if me == ROOT:
print("\nReleasing %d excess procs" % (nproc - nproc_active))
else:
nproc_active = nproc
if me < nproc_active-1:
my_temp_inds = range( (me*CHUNKSIZE_2), (me+1)*CHUNKSIZE_2 )
else:
my_temp_inds = range( (me*CHUNKSIZE_2), n_out_temps)
# retire the excess procs
# don't forget to close any open file objects
if me >= nproc_active:
for fobj in infobjs: fobj.close()
exit()
# write reordered trajectories to disk from active procs in parallel
write_reordered_traj(temp_inds = my_temp_inds,
byte_inds = byte_inds,
outtemps = out_temps, temps = temps,
frametuple_dict = master_frametuple_dict,
nprod = nprod, writefreq = writefreq,
outtrajfns = outtrajfns,
infobjs = infobjs)
# calculate canonical log-weights if requested
# usually this is very fast so retire all but the ROOT proc
if not get_logw: exit()
if not me == ROOT: exit()
logw = get_canonical_logw(enefn = enefn, temps = temps,
frametuple_dict = master_frametuple_dict,
nprod = nprod, writefreq = writefreq,
kB = kB)
# save the logweights to a pickle
with open(logwfn, 'wb') as of:
pickle.dump(logw, of)
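# Editor's note, an example invocation (file names and numbers are
# hypothetical); the flags are those defined by the argparse block above:
#
#   mpirun -np 8 python reorder_remd_traj.py remd -logfn log.lammps \
#          -tfn temps.txt -ns 1000 -nw 100 --nprod 2000000 \
#          -ot 300.0 350.0 -od reordered -logw -e energies.txt
#
# This reorders remd.0.lammpstrj ... remd.<ntemps-1>.lammpstrj at 300 K and
# 350 K, writes the results under ./reordered, and also computes canonical
# log-weights from energies.txt.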
|
Pakketeretet2/lammps
|
tools/replica/reorder_remd_traj.py
|
Python
|
gpl-2.0
| 20,165
|
[
"LAMMPS"
] |
3de368d200a0a239160d86c0bf94d117e3bd860a66764523c2435d6f7ee4efcb
|
# -*- coding: utf-8 -*-
import subprocess
import random
from splinter import Browser
from splinter import exceptions
import time
import threading
from selenium.common.exceptions import ElementNotVisibleException
from splinter.exceptions import ElementDoesNotExist
from urllib2 import URLError
import string
import constants
import shutil, os
class SurfThread(threading.Thread):
def __init__(self, hoehe, breite, _format):
threading.Thread.__init__(self)
self.seiten = []
self.words = []
self.toWait = None
self.elemNo = None
self.wordNo = None
self.clickNo = None
self.clickX = None
self.clickY = None
self.back = None
self.changeTabs = None
self.__browser = Browser("firefox", profile=constants.profile)
time.sleep(5)
#self.__maximizeWindow()
#time.sleep(5)
SurfThread.timer = False
SurfThread.hoehe = hoehe
SurfThread.breite = breite
SurfThread._format = _format
def __readData(self):
# read homepages to visit
surfListe = open("/home/steffi/Dokumente/surfListe.txt", "rb")
for line in surfListe:
self.seiten.append(line)
surfListe.close()
# read words for search in google, wikipedia, amazon, youtube
keyWords = open("/home/steffi/Dokumente/keyWords.txt", "rb").readlines()
for line in keyWords:
self.words.append(line.decode("utf-8"))
#keyWords.close(),
print "data read"
def run(self):
self.__readData()
rand = random.randint(2,5)
for i in range(0, rand):
print "noch "+ str(i) +" mal"
print "TIMER:" +str(SurfThread.timer)
if SurfThread.timer == False :
self.__generateRandom()
print "visit: "+self.seiten[self.elemNo]
self.__visitHomepage( self.seiten[self.elemNo].strip())
print "clickNo: "+ str(self.clickNo)
print "towait = "+ str(self.toWait)
time.sleep(self.toWait)
for i in range(self.clickNo):
time.sleep(random.randrange(5,10))
if i % 2 == 0:
self.__generateRandomClick()
if i == 2:
self.__pageDown()
time.sleep(random.randrange(1,5))
if i == (self.clickNo-1):
self.__pageBottom()
time.sleep(random.randrange(2,10))
if i%2 == 0 and self.back == 1:
self.__goBack()
time.sleep(random.randrange(2,10))
path = self.__browser.driver.firefox_profile.profile_dir
print path
os.remove(constants.profile+'/places.sqlite')
shutil.copyfile(path+'/places.sqlite', constants.profile+'/places.sqlite')
self.__closeWindow()
shutil.rmtree(path)
#os.rmdir(path)
print "Firefox beendet"
def starte(self):
self.run()
def __generateRandom(self):
self.toWait = random.randrange(5,45)
self.elemNo = random.randrange(0,len(self.seiten))
self.clickNo = random.randrange(2,7)
self.back = random.randrange(0,10)
self.wordNo = random.randrange(0, len(self.words))
def __generateRandomClick(self):
self.clickX = random.randrange(100,constants.BREITE - 50) #1366
self.clickY = random.randrange(50,constants.HOEHE-50) #768
command = "mousemove "+ str(self.clickX) + " "+ str(self.clickY)
print command
subprocess.call(["xte", command])
subprocess.call(["xte", "mouseclick 1"])
def __followLink(self, text, index=0):
if index == None:
index = 0
try:
self.__browser.find_link_by_partial_text(text)[index].click()
except ElementDoesNotExist:
print "Element does not exist"
except TypeError:
print "Type Error"
except Exception as e:
print "nix passiert" + e
def __visitGooglePage(self, url):
print "google"
self.__browser.visit(url)
time.sleep(random.randrange(2,15))
searchWord = str(self.words[self.wordNo]).strip().decode("utf-8")
print searchWord
self.__fillInput('q', searchWord)
time.sleep(random.randrange(2,15))
self.__findElementAndClick("btnG", "name", None)
subprocess.call(["xte", "key Return"])
wordSplit = str(searchWord).split(" ")
time.sleep(random.randrange(10,30))
#baaaad practice
try:
self.__followLink(wordSplit[0], self.wordNo%10)
except Exception:
try:
self.__followLink(wordSplit[1], self.wordNo%10)
except Exception:
pass
def __visitHomepage(self, url):
clickNoMod4 = self.clickNo % 4
toWaitMod4 = self.toWait % 4
if "google" in url:
self.__visitGooglePage(url)
elif "wikipedia" in url:
self.__visitWikipediaPage(url)
elif "amazon" in url:
self.__visitAmazonPage(url)
elif "ebay" in url:
self.__visitEbayPage(url)
elif "youtube" in url:
print "youtube"
self.__watchYoutubeVideo(url)
elif "facebook" in url:
print "facebook"
self.__visitFacebook(url)
elif "twitter" in url:
print "twitter"
self.__twitterSomething(url)
else:
try:
self.__browser.visit(url)
except Exception as e:
print e
pass
def __goBack(self):
self.__browser.back()
def shutdown(self):
print "setze timer um und beende firefox"
changeTimer()
def __fillInput(self, _id, _input):
try:
self.__browser.fill(_id, _input)
except Exception as e:
print e.message
pass
def __findElementAndClick(self, name, identifier, index):
# check in case no index was passed
if index == None:
index = 0
# search for elements
try:
if identifier == "name":
button = self.__browser.find_by_name(name)[index]
elif identifier == "id":
button = self.__browser.find_by_id(name).first
button.click()
except (exceptions.ElementDoesNotExist, ElementNotVisibleException, URLError):
print "ElementDoesnotExist OR ElementNotVisible OR URLError"
pass
except Exception as e:
print e
pass
def __closeWindow(self):
time.sleep(3)
subprocess.call(["xte", "keydown Control_L"])
#subprocess.call(["xte", "keydown Shift_L"])
subprocess.call(["xte", "key q"])
#subprocess.call(["xte", "keyup Shift_L"])
subprocess.call(["xte", "keyup Control_L"])
print "Fenster geschlossen"
def __maximizeWindow(self):
time.sleep(2)
subprocess.call(["xte", "keydown Control_L"])
subprocess.call(["xte", "key F10"])
subprocess.call(["xte", "keyup Control_L"])
print "Fenster maximiert"
def __pageDown(self):
time.sleep(3)
subprocess.call(["xte", "key Page_Down"])
def __pageBottom(self):
subprocess.call(["xte", "key End"])
def __watchYoutubeVideo(self, url):
self.__browser.visit(url)
time.sleep(random.randrange(2,15))
searchWord = str(self.words[self.wordNo]).strip().decode("utf-8")
print searchWord
self.__fillInput('search_query', searchWord)
time.sleep(random.randrange(2,15))
subprocess.call(["xte", "key Return"])
time.sleep(random.randrange(2,15))
# only for 16:9 monitors
index = None
breite = 0
if SurfThread._format == "16:9":
index = [int(SurfThread.hoehe // 4.59),
int(SurfThread.hoehe // 3.04),
int(SurfThread.hoehe // 2.22),
int(SurfThread.hoehe // 1.77)]
breite = int(SurfThread.breite//4.74)
else:
index = [int(SurfThread.hoehe // 4.10),
int(SurfThread.hoehe // 2.19),
int(SurfThread.hoehe // 1.54),
int(SurfThread.hoehe // 1.28)]
breite = int(SurfThread.breite//2.15)
#self.__followLink(searchWord, None)
#235 1 - 355 2 - 4853
rand = random.randint(0, (len(index)-1))
subprocess.call(["xte", "mousemove "+ str(breite) + " " +str(index[rand])])
time.sleep(random.randrange(2,15))
subprocess.call(["xte", "mouseclick 1"])
time.sleep(5)
print "mousemove + anschauen"
#breite höhe von links oben
#subprocess.call(["xte", "mousemove "+ str(int(SurfThread.breite//3.17)) + " " + str(int(SurfThread.hoehe//3.2225))])
#time.sleep(2)
subprocess.call(["xte", "mouseclick 1"])
# todo: more time
time.sleep(random.randrange(2,45))
def __visitWikipediaPage(self, url):
print "wikipedia"
self.__browser.visit(url)
time.sleep(2)
searchWord = str(self.words[self.wordNo]).strip().decode("utf-8")
print searchWord
self.__fillInput('search', searchWord)
time.sleep(random.randrange(2,15))
subprocess.call(["xte", "key Return"])
wordSplit = str(searchWord).split(" ")
time.sleep(2)
#baaaad practice
try:
self.__followLink(wordSplit[0], self.wordNo%10)
except Exception:
try:
self.__followLink(wordSplit[1], self.wordNo%10)
except Exception:
pass
def __visitAmazonPage(self, url):
print "amazon"
self.__browser.visit(url)
time.sleep(random.randrange(2,15))
searchWord = str(self.words[self.wordNo]).strip().decode("utf-8")
print searchWord
self.__fillInput('field-keywords', searchWord+'\n')
time.sleep(2)
subprocess.call(["xte", "key Return"])
wordSplit = str(searchWord).split(" ")
time.sleep(random.randrange(2,15))
#baaaad practice
try:
self.__followLink(wordSplit[0], self.wordNo%10)
except Exception:
try:
self.__followLink(wordSplit[1], self.wordNo%10)
except Exception:
pass
def __visitEbayPage(self, url):
print "ebay"
self.__browser.visit(url)
time.sleep(random.randrange(2,15))
searchWord = str(self.words[self.wordNo]).strip().decode("utf-8")
print searchWord
self.__typeWord(searchWord)
time.sleep(random.randrange(2,15))
subprocess.call(["xte", "key Return"])
wordSplit = str(searchWord).split(" ")
time.sleep(random.randrange(2,15))
#baaaad practice
self.__followLink(wordSplit[0], self.wordNo%10)
def __visitFacebook(self, url):
print "facebook"
self.__browser.visit(url)
time.sleep(random.randrange(2,15))
# log in if necessary
if self.__browser.is_text_present(constants.FB_USER) == False:
print "noch nicht eingeloggt"
self.__fillInput('email', constants.FB_EMAIL)
time.sleep(2)
self.__fillInput('pass', constants.FB_PW)
time.sleep(2)
subprocess.call(["xte", "key Return"])
time.sleep(5)
def __twitterSomething(self, url):
print "twitter"
self.__browser.visit(url)
time.sleep(random.randrange(2,15))
# todo: if the start page is not visible, log in
if self.__browser.is_text_present('Startseite') == False:
print "noch nicht eingeloggt"
'''name = self.__browser.find_by_name('session[username_or_email]').first
if name != None:
print "name gefunden"
name.click()
time.sleep(3)
self.__typeWord('steffi_spam')
passW = self.__browser.find_by_id('signin-password').first
passW.click()
time.sleep(3)
self.__typeWord('steffispam')'''
#self.__fillInput("session[username_or_email]", "steffispam@anoome.at")
#time.sleep(2)
#self.__fillInput('signin-pass', "steffispam")
#self.__fillInput('signin-pass', "session[password]")
#time.sleep(2)
#subprocess.call(["xte", "key Return"])
#time.sleep(5)
# this works, 13.5.13
time.sleep(random.randrange(2,15))
subprocess.call(["xte", "key Tab"])
time.sleep(3)
subprocess.call(["xte", "key Tab"])
time.sleep(3)
subprocess.call(["xte", "key Tab"])
time.sleep(random.randrange(2,15))
self.__typeWord(constants.TWITTER_USER)
subprocess.call(["xte", "key Tab"])
time.sleep(2)
self.__typeWord(constants.TWITTER_PW)
time.sleep(2)
subprocess.call(["xte", "key Return"])
time.sleep(random.randrange(2,15))
''' self.__followLink("Kleine Zeitung")
# time.sleep(5)
# self.back()
# self.__followLink("ORF Sport")
# time.sleep(5)
# self.back()'''
self.__followLink("Startseite")
time.sleep(3)
print "input twitter"
field = self.__browser.find_by_id("tweet-box-mini-home-profile").first
field.click()
print "geklickt"
self.__typeWord(twittertext[random.randrange(0,len(twittertext)-1)])
time.sleep(random.randrange(2,15))
subprocess.call(["xte", "key Tab"])
time.sleep(2)
subprocess.call(["xte", "key Return"])
print "tweet gepostet"
def __typeWord(self, word):
spell = ""
for i in range(0, len(word)):
#special character
if spell == "/":
spell = "/"+word[i]
else:
spell = word[i]
# todo: algorithm that decides whether the special or the normal character comes first
if spell == "@":
subprocess.call(["xte", "keydown Control_L"])
subprocess.call(["xte", "key at"])
subprocess.call(["xte", "keyup Control_L"])
# special characters
elif spell not in string.ascii_letters:
spell = keySyms[spell]
# special characters typed with Shift
if spell in upKeys:
subprocess.call(["xte", "keydown Shift_L"])
subprocess.call(["xte", "key "+spell])
subprocess.call(["xte", "keyup Shift_L"])
# special characters typed with AltGr
elif spell in altGrKeys:
subprocess.call(["xte", "keydown Alt_R"])
subprocess.call(["xte", "key "+spell])
subprocess.call(["xte", "keyup Alt_R"])
else:
subprocess.call(["xte", "key "+spell])
elif spell == "ß":
spell = "question"
subprocess.call(["xte", "key "+spell])
else:
subprocess.call(["xte", "key "+spell])
twittertext = ['#weather sunshine :)', '#oebb zugfahren macht freude...']
upKeys = ['question', 'exclam', "percent",
"dollar", "ampersand", "quotedbl", "apostrophe",
"parenleft", "parenright", "asterisk", "equal",
"slash", "colon", "semicolon", "greater", "underscore"
]
altGrKeys = [ "at", "bracketleft", "bracketright", "backslash",
"asciicircum", "underscore", "grave", "braceleft", "bar",
"braceright", "asciitilde"]
keySyms = {
' ' : "space",
'\t' : "Tab",
'\n' : "Return", # for some reason this needs to be cr, not lf
'\r' : "Return",
'\e' : "Escape",
'!' : "exclam",
'#' : "numbersign",
'%' : "percent",
'$' : "dollar",
'&' : "ampersand",
'"' : "quotedbl",
'\'' : "apostrophe",
'(' : "parenleft",
')' : "parenright",
'*' : "asterisk",
'=' : "equal",
'+' : "plus",
',' : "comma",
'-' : "minus",
'.' : "period",
'/' : "slash",
':' : "colon",
';' : "semicolon",
'<' : "less",
'>' : "greater",
'?' : "question",
'@' : "at",
'[' : "bracketleft",
']' : "bracketright",
'\\' : "backslash",
'^' : "asciicircum",
'_' : "underscore",
'`' : "grave",
'{' : "braceleft",
'|' : "bar",
'}' : "braceright",
'~' : "asciitilde"
}
def changeTimer():
SurfThread.timer = True
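# Editor's usage sketch, not part of the original module: start a single
# surf thread for a 16:9 display using the geometry from the constants
# module (hypothetical values; requires a running X session, Firefox,
# splinter, and the xte tool).
if __name__ == "__main__":
    surfer = SurfThread(constants.HOEHE, constants.BREITE, "16:9")
    surfer.start()
    surfer.join()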
|
mmulazzani/alibiFramework
|
simulator/scheduler/surfThread.py
|
Python
|
gpl-3.0
| 17,461
|
[
"VisIt"
] |
1a6998dd2125b192a35f7ed778c114d0a758f626bfd47de438bd51d11a439a04
|
# Copyright 2001 by Gavin E. Crooks. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# Gavin E. Crooks 2001-10-10
"""ASTRAL RAF (Rapid Access Format) Sequence Maps.
The ASTRAL RAF Sequence Maps record the relationship between the PDB SEQRES
records (representing the sequence of the molecule used in an experiment) to
the ATOM records (representing the atoms experimentally observed).
This data is derived from the Protein Data Bank CIF files. Known errors in the
CIF files are corrected manually, with the original PDB file serving as the
final arbiter in case of discrepancies.
Residues are referenced by residue ID. This consists of a the PDB residue
sequence number (upto 4 digits) and an optional PDB insertion code (an
ascii alphabetic character, a-z, A-Z). e.g. "1", "10A", "1010b", "-1"
See "ASTRAL RAF Sequence Maps":http://astral.stanford.edu/raf.html
to_one_letter_code -- A mapping from the 3-letter amino acid codes found
in PDB files to 1-letter codes. The 3-letter codes
include chemically modified residues.
"""
from copy import copy
from Bio.SCOP.Residues import Residues
from three_to_one_dict import to_one_letter_code
def normalize_letters(one_letter_code):
"""Convert RAF one-letter amino acid codes into IUPAC standard codes.
Letters are uppercased, and "." ("Unknown") is converted to "X".
"""
if one_letter_code == '.':
return 'X'
else:
return one_letter_code.upper()
class SeqMapIndex(dict):
"""An RAF file index.
The RAF file itself is about 50 MB. This index provides rapid, random
access of RAF records without having to load the entire file into memory.
The index key is a concatenation of the PDB ID and chain ID, e.g.
"2drcA", "155c_". RAF uses an underscore to indicate blank
chain IDs.
"""
def __init__(self, filename):
"""
Arguments:
filename -- The file to index
"""
dict.__init__(self)
self.filename = filename
f = open(self.filename, "rU")
try:
position = 0
while True:
line = f.readline()
if not line: break
key = line[0:5]
if key:
self[key]=position
position = f.tell()
finally:
f.close()
def __getitem__(self, key):
""" Return an item from the indexed file. """
position = dict.__getitem__(self,key)
f = open(self.filename, "rU")
try:
f.seek(position)
line = f.readline()
record = SeqMap(line)
finally:
f.close()
return record
def getSeqMap(self, residues):
"""Get the sequence map for a collection of residues.
residues -- A Residues instance, or a string that can be converted into
a Residues instance.
"""
if isinstance(residues, basestring):
residues = Residues(residues)
pdbid = residues.pdbid
frags = residues.fragments
if not frags: frags =(('_','',''),) # All residues of unnamed chain
seqMap = None
for frag in frags:
chainid = frag[0]
if chainid=='' or chainid=='-' or chainid==' ' or chainid=='_':
chainid = '_'
id = pdbid + chainid
sm = self[id]
#Cut out fragment of interest
start = 0
end = len(sm.res)
if frag[1] : start = int(sm.index(frag[1], chainid))
if frag[2] : end = int(sm.index(frag[2], chainid)+1)
sm = sm[start:end]
if seqMap is None:
seqMap = sm
else:
seqMap += sm
return seqMap
class SeqMap(object):
"""An ASTRAL RAF (Rapid Access Format) Sequence Map.
This is a list like object; You can find the location of particular residues
with index(), slice this SeqMap into fragments, and glue fragments back
together with extend().
pdbid -- The PDB 4 character ID
pdb_datestamp -- From the PDB file
version -- The RAF format version. e.g. 0.01
flags -- RAF flags. (See release notes for more information.)
res -- A list of Res objects, one for each residue in this sequence map
"""
def __init__(self, line=None):
self.pdbid = ''
self.pdb_datestamp = ''
self.version = ''
self.flags = ''
self.res = []
if line:
self._process(line)
def _process(self, line):
"""Parses a RAF record into a SeqMap object.
"""
header_len = 38
line = line.rstrip() # no trailing whitespace
if len(line)<header_len:
raise ValueError("Incomplete header: "+line)
self.pdbid = line[0:4]
chainid = line[4:5]
self.version = line[6:10]
#Raf format versions 0.01 and 0.02 are identical for practical purposes
if(self.version != "0.01" and self.version !="0.02"):
raise ValueError("Incompatible RAF version: "+self.version)
self.pdb_datestamp = line[14:20]
self.flags = line[21:27]
for i in range(header_len, len(line), 7):
f = line[i : i+7]
if len(f)!=7:
raise ValueError("Corrupt Field: ("+f+")")
r = Res()
r.chainid = chainid
r.resid = f[0:5].strip()
r.atom = normalize_letters(f[5:6])
r.seqres = normalize_letters(f[6:7])
self.res.append(r)
def index(self, resid, chainid="_"):
for i in range(0, len(self.res)):
if self.res[i].resid == resid and self.res[i].chainid == chainid:
return i
raise KeyError("No such residue "+chainid+resid)
def __getitem__(self, index):
if not isinstance(index, slice):
raise NotImplementedError
s = copy(self)
s.res = s.res[index]
return s
def append(self, res):
"""Append another Res object onto the list of residue mappings."""
self.res.append(res)
def extend(self, other):
"""Append another SeqMap onto the end of self.
Both SeqMaps must have the same PDB ID, PDB datestamp and
RAF version. The RAF flags are erased if they are inconsistent. This
may happen when fragments are taken from different chains.
"""
if not isinstance(other, SeqMap):
raise TypeError("Can only extend a SeqMap with a SeqMap.")
if self.pdbid != other.pdbid:
raise TypeError("Cannot add fragments from different proteins")
if self.version != other.version:
raise TypeError("Incompatible rafs")
if self.pdb_datestamp != other.pdb_datestamp:
raise TypeError("Different pdb dates!")
if self.flags != other.flags:
self.flags = ''
self.res += other.res
def __iadd__(self, other):
self.extend(other)
return self
def __add__(self, other):
s = copy(self)
s.extend(other)
return s
def getAtoms(self, pdb_handle, out_handle):
"""Extract all relevant ATOM and HETATOM records from a PDB file.
The PDB file is scanned for ATOM and HETATOM records. If the
chain ID, residue ID (seqNum and iCode), and residue type match
a residue in this sequence map, then the record is echoed to the
output handle.
This is typically used to find the coordinates of a domain, or other
residue subset.
pdb_handle -- A handle to the relevant PDB file.
out_handle -- All output is written to this file like object.
"""
#This code should be refactored when (if?) biopython gets a PDB parser
#The set of residues that I have to find records for.
resSet = {}
for r in self.res:
if r.atom=='X' : #Unknown residue type
continue
chainid = r.chainid
if chainid == '_':
chainid = ' '
resid = r.resid
resSet[(chainid,resid)] = r
resFound = {}
for line in pdb_handle.xreadlines():
if line.startswith("ATOM ") or line.startswith("HETATM"):
chainid = line[21:22]
resid = line[22:27].strip()
key = (chainid, resid)
if key in resSet:
res = resSet[key]
atom_aa = res.atom
resName = line[17:20]
if resName in to_one_letter_code:
if to_one_letter_code[resName] == atom_aa:
out_handle.write(line)
resFound[key] = res
if len(resSet) != len(resFound):
#for k in resFound.keys():
# del resSet[k]
#print resSet
raise RuntimeError('I could not find at least one ATOM or HETATM' \
+' record for each and every residue in this sequence map.')
class Res(object):
""" A single residue mapping from a RAF record.
chainid -- A single character chain ID.
resid -- The residue ID.
atom -- amino acid one-letter code from ATOM records.
seqres -- amino acid one-letter code from SEQRES records.
"""
def __init__(self):
self.chainid = ''
self.resid = ''
self.atom = ''
self.seqres = ''
def parse(handle):
"""Iterates over a RAF file, returning a SeqMap object for each line
in the file.
Arguments:
handle -- file-like object.
"""
for line in handle:
yield SeqMap(line)
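if __name__ == "__main__":
    # Editor's usage sketch with hypothetical file names: index a local
    # copy of the ASTRAL RAF map, fetch the map for a chain, and echo the
    # matching ATOM/HETATM records from the corresponding PDB file.
    index = SeqMapIndex("astral.raf")
    seq_map = index.getSeqMap("1dan L:")     # all residues of chain L
    pdb_handle = open("pdb1dan.ent", "rU")
    out_handle = open("1dan_L_atoms.ent", "w")
    seq_map.getAtoms(pdb_handle, out_handle)
    pdb_handle.close()
    out_handle.close()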
|
bryback/quickseq
|
genescript/Bio/SCOP/Raf.py
|
Python
|
mit
| 10,101
|
[
"Biopython"
] |
fd30570ba9e120927286b8c5c1dafc93a02a2af4587c7a4324ceed9b5f49d314
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""Tests Spack's ability to parse the name and version of a package
based on its URL.
"""
import os
import pytest
from spack.url import parse_name_offset, parse_version_offset
from spack.url import parse_name_and_version, substitute_version
from spack.url import strip_name_suffixes, strip_version_suffixes
from spack.url import UndetectableVersionError
from spack.version import Version
@pytest.mark.parametrize('url,expected', [
# No suffix
('rgb-1.0.6', 'rgb-1.0.6'),
# Misleading prefix
('jpegsrc.v9b', 'jpegsrc.v9b'),
('turbolinux702', 'turbolinux702'),
('converge_install_2.3.16', 'converge_install_2.3.16'),
# Download type - src
('apache-ant-1.9.7-src', 'apache-ant-1.9.7'),
('go1.7.4.src', 'go1.7.4'),
# Download type - source
('bowtie2-2.2.5-source', 'bowtie2-2.2.5'),
('grib_api-1.17.0-Source', 'grib_api-1.17.0'),
# Download type - full
('julia-0.4.3-full', 'julia-0.4.3'),
# Download type - bin
('apache-maven-3.3.9-bin', 'apache-maven-3.3.9'),
# Download type - binary
('Jmol-14.8.0-binary', 'Jmol-14.8.0'),
# Download type - gem
('rubysl-date-2.0.9.gem', 'rubysl-date-2.0.9'),
# Download type - tar
('gromacs-4.6.1-tar', 'gromacs-4.6.1'),
# Download type - sh
('Miniconda2-4.3.11-Linux-x86_64.sh', 'Miniconda2-4.3.11'),
# Download version - release
('v1.0.4-release', 'v1.0.4'),
# Download version - stable
('libevent-2.0.21-stable', 'libevent-2.0.21'),
# Download version - final
('2.6.7-final', '2.6.7'),
# Download version - rel
('v1.9.5.1rel', 'v1.9.5.1'),
# Download version - orig
('dash_0.5.5.1.orig', 'dash_0.5.5.1'),
# Download version - plus
('ncbi-blast-2.6.0+-src', 'ncbi-blast-2.6.0'),
# License
('cppad-20170114.gpl', 'cppad-20170114'),
# OS - linux
('astyle_2.04_linux', 'astyle_2.04'),
# OS - unix
('install-tl-unx', 'install-tl'),
# OS - macos
('astyle_1.23_macosx', 'astyle_1.23'),
('haxe-2.08-osx', 'haxe-2.08'),
# PyPI - wheel
('entrypoints-0.2.2-py2.py3-none-any.whl', 'entrypoints-0.2.2'),
('numpy-1.12.0-cp27-cp27m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl', 'numpy-1.12.0'), # noqa
# PyPI - exe
('PyYAML-3.12.win-amd64-py3.5.exe', 'PyYAML-3.12'),
# Combinations of multiple patterns - all
('p7zip_9.04_src_all', 'p7zip_9.04'),
# Combinations of multiple patterns - run
('cuda_8.0.44_linux.run', 'cuda_8.0.44'),
# Combinations of multiple patterns - file
('ack-2.14-single-file', 'ack-2.14'),
# Combinations of multiple patterns - jar
('antlr-3.4-complete.jar', 'antlr-3.4'),
# Combinations of multiple patterns - oss
('tbb44_20160128oss_src_0', 'tbb44_20160128'),
# Combinations of multiple patterns - darwin
('ghc-7.0.4-x86_64-apple-darwin', 'ghc-7.0.4'),
('ghc-7.0.4-i386-apple-darwin', 'ghc-7.0.4'),
# Combinations of multiple patterns - arch
('VizGlow_v2.2alpha17-R21November2016-Linux-x86_64-Install',
'VizGlow_v2.2alpha17-R21November2016'),
('jdk-8u92-linux-x64', 'jdk-8u92'),
('cuda_6.5.14_linux_64.run', 'cuda_6.5.14'),
# Combinations of multiple patterns - with
('mafft-7.221-with-extensions-src', 'mafft-7.221'),
('spark-2.0.0-bin-without-hadoop', 'spark-2.0.0'),
# Combinations of multiple patterns - public
('dakota-6.3-public.src', 'dakota-6.3'),
# Combinations of multiple patterns - universal
('synergy-1.3.6p2-MacOSX-Universal', 'synergy-1.3.6p2'),
# Combinations of multiple patterns - dynamic
('snptest_v2.5.2_linux_x86_64_dynamic', 'snptest_v2.5.2'),
])
def test_url_strip_version_suffixes(url, expected):
stripped = strip_version_suffixes(url)
assert stripped == expected
@pytest.mark.parametrize('url,version,expected', [
# No suffix
('rgb-1.0.6', '1.0.6', 'rgb'),
('nauty26r7', '26r7', 'nauty'),
# Download type - install
('converge_install_2.3.16', '2.3.16', 'converge'),
# Download type - src
('jpegsrc.v9b', '9b', 'jpeg'),
# Download type - archive
('coinhsl-archive-2014.01.17', '2014.01.17', 'coinhsl'),
# Download type - std
('ghostscript-fonts-std-8.11', '8.11', 'ghostscript-fonts'),
# Download version - release
('cbench_release_1.3.0.tar.gz', '1.3.0', 'cbench'),
# Download version - snapshot
('gts-snapshot-121130', '121130', 'gts'),
# Download version - distrib
('zoltan_distrib_v3.83', '3.83', 'zoltan'),
# VCS - bazaar
('libvterm-0+bzr681', '681', 'libvterm'),
# License - gpl
('PyQt-x11-gpl-4.11.3', '4.11.3', 'PyQt-x11')
])
def test_url_strip_name_suffixes(url, version, expected):
stripped = strip_name_suffixes(url, version)
assert stripped == expected
@pytest.mark.parametrize('name,noffset,ver,voffset,path', [
# Name in path
('antlr', 25, '2.7.7', 40, 'https://github.com/antlr/antlr/tarball/v2.7.7'),
# Name in stem
('gmp', 32, '6.0.0a', 36, 'https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2'),
# Name in suffix
# Don't think I've ever seen one of these before
# We don't look for it, so it would probably fail anyway
# Version in path
('nextflow', 31, '0.20.1', 59, 'https://github.com/nextflow-io/nextflow/releases/download/v0.20.1/nextflow'),
# Version in stem
('zlib', 24, '1.2.10', 29, 'http://zlib.net/fossils/zlib-1.2.10.tar.gz'),
('slepc', 51, '3.6.2', 57, 'http://slepc.upv.es/download/download.php?filename=slepc-3.6.2.tar.gz'),
('cloog', 61, '0.18.1', 67, 'http://www.bastoul.net/cloog/pages/download/count.php3?url=./cloog-0.18.1.tar.gz'),
('libxc', 58, '2.2.2', 64, 'http://www.tddft.org/programs/octopus/down.php?file=libxc/libxc-2.2.2.tar.gz'),
# Version in suffix
('swiftsim', 36, '0.3.0', 76, 'http://gitlab.cosma.dur.ac.uk/swift/swiftsim/repository/archive.tar.gz?ref=v0.3.0'),
('sionlib', 30, '1.7.1', 59, 'http://apps.fz-juelich.de/jsc/sionlib/download.php?version=1.7.1'),
# Regex in name
('voro++', 40, '0.4.6', 47, 'http://math.lbl.gov/voro++/download/dir/voro++-0.4.6.tar.gz'),
# SourceForge download
('glew', 55, '2.0.0', 60, 'https://sourceforge.net/projects/glew/files/glew/2.0.0/glew-2.0.0.tgz/download'),
])
def test_url_parse_offset(name, noffset, ver, voffset, path):
"""Tests that the name, version and offsets are computed correctly.
Args:
name (str): expected name
noffset (int): name offset
ver (str): expected version
voffset (int): version offset
path (str): url to be parsed
"""
    # Make sure parse_version_offset and parse_name_offset are working
v, vstart, vlen, vi, vre = parse_version_offset(path)
n, nstart, nlen, ni, nre = parse_name_offset(path, v)
assert n == name
assert v == ver
assert nstart == noffset
assert vstart == voffset
@pytest.mark.parametrize('name,version,url', [
# Common Repositories - github downloads
('nco', '4.6.2', 'https://github.com/nco/nco/archive/4.6.2.tar.gz'),
# name/archive/vver.ver
('vim', '8.0.0134', 'https://github.com/vim/vim/archive/v8.0.0134.tar.gz'),
# name/archive/name-ver.ver
('oce', '0.18', 'https://github.com/tpaviot/oce/archive/OCE-0.18.tar.gz'),
# name/releases/download/vver/name-ver.ver
('libmesh', '1.0.0', 'https://github.com/libMesh/libmesh/releases/download/v1.0.0/libmesh-1.0.0.tar.bz2'),
# name/tarball/vver.ver
('git', '2.7.1', 'https://github.com/git/git/tarball/v2.7.1'),
# name/zipball/vver.ver
('git', '2.7.1', 'https://github.com/git/git/zipball/v2.7.1'),
# Common Repositories - gitlab downloads
# name/repository/archive.ext?ref=vver.ver
('swiftsim', '0.3.0',
'http://gitlab.cosma.dur.ac.uk/swift/swiftsim/repository/archive.tar.gz?ref=v0.3.0'),
# name/repository/archive.ext?ref=name-ver.ver
('icet', '1.2.3',
'https://gitlab.kitware.com/icet/icet/repository/archive.tar.gz?ref=IceT-1.2.3'),
# Common Repositories - bitbucket downloads
# name/get/ver.ver
('eigen', '3.2.7', 'https://bitbucket.org/eigen/eigen/get/3.2.7.tar.bz2'),
# name/get/vver.ver
('hoomd-blue', '1.3.3',
'https://bitbucket.org/glotzer/hoomd-blue/get/v1.3.3.tar.bz2'),
# name/downloads/name-ver.ver
('dolfin', '2016.1.0',
'https://bitbucket.org/fenics-project/dolfin/downloads/dolfin-2016.1.0.tar.gz'),
# Common Repositories - sourceforge downloads
# name-ver.ver
('libpng', '1.6.27',
'http://download.sourceforge.net/libpng/libpng-1.6.27.tar.gz'),
('lcms2', '2.6',
'http://downloads.sourceforge.net/project/lcms/lcms/2.6/lcms2-2.6.tar.gz'),
('modules', '3.2.10',
'http://prdownloads.sourceforge.net/modules/modules-3.2.10.tar.gz'),
# name-ver.ver.ext/download
('glew', '2.0.0',
'https://sourceforge.net/projects/glew/files/glew/2.0.0/glew-2.0.0.tgz/download'),
# Common Repositories - cran downloads
# name.name_ver.ver-ver.ver
('TH.data', '1.0-8', 'https://cran.r-project.org/src/contrib/TH.data_1.0-8.tar.gz'),
('knitr', '1.14', 'https://cran.rstudio.com/src/contrib/knitr_1.14.tar.gz'),
('devtools', '1.12.0', 'https://cloud.r-project.org/src/contrib/devtools_1.12.0.tar.gz'),
# Common Repositories - pypi downloads
# name.name_name-ver.ver
('3to2', '1.1.1', 'https://pypi.python.org/packages/source/3/3to2/3to2-1.1.1.zip'),
('mpmath', '0.19',
'https://pypi.python.org/packages/source/m/mpmath/mpmath-all-0.19.tar.gz'),
('pandas', '0.16.0',
'https://pypi.python.org/packages/source/p/pandas/pandas-0.16.0.tar.gz#md5=bfe311f05dc0c351f8955fbd1e296e73'),
('sphinx_rtd_theme', '0.1.10a0',
'https://pypi.python.org/packages/da/6b/1b75f13d8aa3333f19c6cdf1f0bc9f52ea739cae464fbee050307c121857/sphinx_rtd_theme-0.1.10a0.tar.gz'),
('backports.ssl_match_hostname', '3.5.0.1',
'https://pypi.io/packages/source/b/backports.ssl_match_hostname/backports.ssl_match_hostname-3.5.0.1.tar.gz'),
# Common Repositories - bazaar downloads
('libvterm', '681', 'http://www.leonerd.org.uk/code/libvterm/libvterm-0+bzr681.tar.gz'),
# Common Tarball Formats
# ver.ver
('eigen', '3.2.7', 'https://bitbucket.org/eigen/eigen/get/3.2.7.tar.bz2'),
# ver.ver-ver
('ImageMagick', '7.0.2-7', 'https://github.com/ImageMagick/ImageMagick/archive/7.0.2-7.tar.gz'),
# vver.ver
('CGNS', '3.3.0', 'https://github.com/CGNS/CGNS/archive/v3.3.0.tar.gz'),
# vver_ver
('luafilesystem', '1_6_3', 'https://github.com/keplerproject/luafilesystem/archive/v1_6_3.tar.gz'),
# No separators
('turbolinux', '702', 'file://{0}/turbolinux702.tar.gz'.format(os.getcwd())),
('nauty', '26r7', 'http://pallini.di.uniroma1.it/nauty26r7.tar.gz'),
# Dashes only
('Trilinos', '12-10-1',
'https://github.com/trilinos/Trilinos/archive/trilinos-release-12-10-1.tar.gz'),
('panda', '2016-03-07',
'http://comopt.ifi.uni-heidelberg.de/software/PANDA/downloads/panda-2016-03-07.tar'),
('gts', '121130',
'http://gts.sourceforge.net/tarballs/gts-snapshot-121130.tar.gz'),
('cdd', '061a',
'http://www.cs.mcgill.ca/~fukuda/download/cdd/cdd-061a.tar.gz'),
# Only underscores
('tinyxml', '2_6_2',
'https://sourceforge.net/projects/tinyxml/files/tinyxml/2.6.2/tinyxml_2_6_2.tar.gz'),
('boost', '1_55_0',
'http://downloads.sourceforge.net/project/boost/boost/1.55.0/boost_1_55_0.tar.bz2'),
('yorick', '2_2_04',
'https://github.com/dhmunro/yorick/archive/y_2_2_04.tar.gz'),
('tbb', '44_20160413',
'https://www.threadingbuildingblocks.org/sites/default/files/software_releases/source/tbb44_20160413oss_src.tgz'),
# Only dots
# name.name.ver.ver
('prank', '150803', 'http://wasabiapp.org/download/prank/prank.source.150803.tgz'),
('jpeg', '9b', 'http://www.ijg.org/files/jpegsrc.v9b.tar.gz'),
('openjpeg', '2.1',
'https://github.com/uclouvain/openjpeg/archive/version.2.1.tar.gz'),
# name.namever.ver
('atlas', '3.11.34',
'http://sourceforge.net/projects/math-atlas/files/Developer%20%28unstable%29/3.11.34/atlas3.11.34.tar.bz2'),
('visit', '2.10.1', 'http://portal.nersc.gov/project/visit/releases/2.10.1/visit2.10.1.tar.gz'),
('geant', '4.10.01.p03', 'http://geant4.cern.ch/support/source/geant4.10.01.p03.tar.gz'),
('tcl', '8.6.5', 'http://prdownloads.sourceforge.net/tcl/tcl8.6.5-src.tar.gz'),
# Dash and dots
# name-name-ver.ver
# digit in name
('m4', '1.4.17', 'https://ftp.gnu.org/gnu/m4/m4-1.4.17.tar.gz'),
# letter in version
('gmp', '6.0.0a', 'https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2'),
# version starts with 'v'
('LaunchMON', '1.0.2',
'https://github.com/LLNL/LaunchMON/releases/download/v1.0.2/launchmon-v1.0.2.tar.gz'),
# name-ver-ver.ver
('libedit', '20150325-3.1', 'http://thrysoee.dk/editline/libedit-20150325-3.1.tar.gz'),
    # Dash and underscores
# name-name-ver_ver
('icu4c', '57_1', 'http://download.icu-project.org/files/icu4c/57.1/icu4c-57_1-src.tgz'),
# Underscores and dots
# name_name_ver.ver
('superlu_dist', '4.1', 'http://crd-legacy.lbl.gov/~xiaoye/SuperLU/superlu_dist_4.1.tar.gz'),
('pexsi', '0.9.0', 'https://math.berkeley.edu/~linlin/pexsi/download/pexsi_v0.9.0.tar.gz'),
# name_name.ver.ver
('fer', '696', 'ftp://ftp.pmel.noaa.gov/ferret/pub/source/fer_source.v696.tar.gz'),
    # Dash dot dash dot
# name-name-ver.ver-ver.ver
('sowing', '1.1.23-p1', 'http://ftp.mcs.anl.gov/pub/petsc/externalpackages/sowing-1.1.23-p1.tar.gz'),
('bib2xhtml', '3.0-15-gf506', 'http://www.spinellis.gr/sw/textproc/bib2xhtml/bib2xhtml-v3.0-15-gf506.tar.gz'),
# namever.ver-ver.ver
('go', '1.4-bootstrap-20161024', 'https://storage.googleapis.com/golang/go1.4-bootstrap-20161024.tar.gz'),
# Underscore dash dot
# name_name-ver.ver
('the_silver_searcher', '0.32.0', 'http://geoff.greer.fm/ag/releases/the_silver_searcher-0.32.0.tar.gz'),
('sphinx_rtd_theme', '0.1.10a0',
'https://pypi.python.org/packages/source/s/sphinx_rtd_theme/sphinx_rtd_theme-0.1.10a0.tar.gz'),
# Dot underscore dot dash dot
# name.name_ver.ver-ver.ver
('TH.data', '1.0-8', 'https://cran.r-project.org/src/contrib/TH.data_1.0-8.tar.gz'),
('XML', '3.98-1.4', 'https://cran.r-project.org/src/contrib/XML_3.98-1.4.tar.gz'),
# Dash dot underscore dot
# name-name-ver.ver_ver.ver
('pypar', '2.1.5_108',
'https://storage.googleapis.com/google-code-archive-downloads/v2/code.google.com/pypar/pypar-2.1.5_108.tgz'),
# name-namever.ver_ver.ver
('STAR-CCM+', '11.06.010_02',
'file://{0}/STAR-CCM+11.06.010_02_linux-x86_64.tar.gz'.format(os.getcwd())),
# Weird URLS
# github.com/repo/name/releases/download/name-vver/name
('nextflow', '0.20.1', 'https://github.com/nextflow-io/nextflow/releases/download/v0.20.1/nextflow'),
# suffix queries
('swiftsim', '0.3.0', 'http://gitlab.cosma.dur.ac.uk/swift/swiftsim/repository/archive.tar.gz?ref=v0.3.0'),
('sionlib', '1.7.1', 'http://apps.fz-juelich.de/jsc/sionlib/download.php?version=1.7.1'),
# stem queries
('slepc', '3.6.2', 'http://slepc.upv.es/download/download.php?filename=slepc-3.6.2.tar.gz'),
('otf', '1.12.5salmon',
'http://wwwpub.zih.tu-dresden.de/%7Emlieber/dcount/dcount.php?package=otf&get=OTF-1.12.5salmon.tar.gz'),
# single character name
('R', '3.3.2', 'https://cloud.r-project.org/src/base/R-3/R-3.3.2.tar.gz'),
# name starts with digit
('3to2', '1.1.1', 'https://pypi.python.org/packages/source/3/3to2/3to2-1.1.1.zip'),
# plus in name
('gtk+', '2.24.31', 'http://ftp.gnome.org/pub/gnome/sources/gtk+/2.24/gtk+-2.24.31.tar.xz'),
('voro++', '0.4.6', 'http://math.lbl.gov/voro++/download/dir/voro++-0.4.6.tar.gz'),
# Name comes before download.php
('sionlib', '1.7.1', 'http://apps.fz-juelich.de/jsc/sionlib/download.php?version=1.7.1'),
# Ignore download.php
('slepc', '3.6.2', 'http://slepc.upv.es/download/download.php?filename=slepc-3.6.2.tar.gz'),
('ScientificPython', '2.8.1',
'https://sourcesup.renater.fr/frs/download.php/file/4411/ScientificPython-2.8.1.tar.gz'),
# gloox beta style
('gloox', '1.0-beta7', 'http://camaya.net/download/gloox-1.0-beta7.tar.bz2'),
# sphinx beta style
('sphinx', '1.10-beta', 'http://sphinxsearch.com/downloads/sphinx-1.10-beta.tar.gz'),
# ruby version style
('ruby', '1.9.1-p243', 'ftp://ftp.ruby-lang.org/pub/ruby/1.9/ruby-1.9.1-p243.tar.gz'),
# rc style
('libvorbis', '1.2.2rc1', 'http://downloads.xiph.org/releases/vorbis/libvorbis-1.2.2rc1.tar.bz2'),
# dash rc style
('js', '1.8.0-rc1', 'http://ftp.mozilla.org/pub/mozilla.org/js/js-1.8.0-rc1.tar.gz'),
# apache version style
('apache-cassandra', '1.2.0-rc2',
'http://www.apache.org/dyn/closer.cgi?path=/cassandra/1.2.0/apache-cassandra-1.2.0-rc2-bin.tar.gz'),
# xaw3d version
('Xaw3d', '1.5E', 'ftp://ftp.visi.com/users/hawkeyd/X/Xaw3d-1.5E.tar.gz'),
# fann version
('fann', '2.1.0beta', 'http://downloads.sourceforge.net/project/fann/fann/2.1.0beta/fann-2.1.0beta.zip'),
# imap version
('imap', '2007f', 'ftp://ftp.cac.washington.edu/imap/imap-2007f.tar.gz'),
# suite3270 version
('suite3270', '3.3.12ga7',
'http://sourceforge.net/projects/x3270/files/x3270/3.3.12ga7/suite3270-3.3.12ga7-src.tgz'),
# scalasca version
('cube', '4.2.3', 'http://apps.fz-juelich.de/scalasca/releases/cube/4.2/dist/cube-4.2.3.tar.gz'),
('cube', '4.3-TP1', 'http://apps.fz-juelich.de/scalasca/releases/cube/4.3/dist/cube-4.3-TP1.tar.gz'),
# github raw url
('CLAMR', '2.0.7', 'https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true'),
# luaposix version
('luaposix', '33.4.0', 'https://github.com/luaposix/luaposix/archive/release-v33.4.0.tar.gz'),
# nco version
('nco', '4.6.2-beta03', 'https://github.com/nco/nco/archive/4.6.2-beta03.tar.gz'),
('nco', '4.6.3-alpha04', 'https://github.com/nco/nco/archive/4.6.3-alpha04.tar.gz'),
])
def test_url_parse_name_and_version(name, version, url):
# Make sure correct name and version are extracted.
parsed_name, parsed_version = parse_name_and_version(url)
assert parsed_name == name
assert parsed_version == Version(version)
# Make sure Spack formulates the right URL when we try to
# build one with a specific version.
assert url == substitute_version(url, version)
@pytest.mark.parametrize('not_detectable_url', [
'http://www.netlib.org/blas/blast-forum/cblas.tgz',
'http://www.netlib.org/voronoi/triangle.zip',
])
def test_no_version(not_detectable_url):
with pytest.raises(UndetectableVersionError):
parse_name_and_version(not_detectable_url)
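# Illustrative sketch, not part of the original test suite: how the parsing
# helpers compose when driven by hand. The URL is one of the cases already
# exercised above.
def _demo_parse_by_hand():
    url = 'https://github.com/git/git/tarball/v2.7.1'
    name, version = parse_name_and_version(url)
    # substitute_version should rebuild the same URL for the detected version
    assert url == substitute_version(url, version)
    return name, version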
|
EmreAtes/spack
|
lib/spack/spack/test/url_parse.py
|
Python
|
lgpl-2.1
| 20,084
|
[
"BLAST",
"Gromacs",
"HOOMD-blue",
"Jmol",
"Octopus",
"VisIt"
] |
42d280b6d11bfff6f557db4c21a95259aeadb28b9291284cc4d8b7edd6cb6cfa
|
import os
import sys
from math import sin, cos, radians, atan2, degrees
import numpy as np
from ase.parallel import world
class DevNull:
def write(self, string):
pass
def flush(self):
pass
def seek(self, offset, whence=0):
return 0
def tell(self):
return 0
def close(self):
pass
devnull = DevNull()
def opencew(filename, my_world=world):
"""Create and open filename exclusively for writing.
    If the master CPU gets exclusive write access to filename, a file
    descriptor is returned (a dummy file descriptor is returned on the
    slaves). If the master CPU does not get write access, None is
    returned on all processors."""
if my_world.rank == 0:
try:
fd = os.open(filename, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
except OSError:
ok = 0
else:
ok = 1
fd = os.fdopen(fd, 'w')
else:
ok = 0
fd = devnull
    # Synchronize:
if my_world.sum(ok) == 0:
return None
else:
return fd
class Lock:
def __init__(self, name='lock'):
self.name = name
def acquire(self):
fd = None
while fd is None:
fd = opencew(self.name)
def release(self):
world.barrier()
if world.rank == 0:
os.remove(self.name)
def __enter__(self):
self.acquire()
def __exit__(self, type, value, tb):
self.release()
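# Illustrative sketch, not part of the original module: Lock is intended to
# be used as a context manager, so the lock file is removed even if the
# protected block raises. The 'results.lock' file name is hypothetical.
def _demo_lock():
    with Lock('results.lock'):
        pass  # only one MPI task at a time runs the protected block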
class OpenLock:
def acquire(self):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, type, value, tb):
pass
def prnt(*args, **kwargs):
"""Python 3 style print function."""
kwargs.pop('file', sys.stdout).write(
kwargs.pop('sep', ' ').join(str(arg) for arg in args) +
kwargs.pop('end', '\n'))
if kwargs:
raise TypeError('%r is an invalid keyword argument for this function' %
                        list(kwargs)[0])  # list() so this also works on Python 3
def gcd(a, b):
"""Greatest common divisor of a and b."""
while a != 0:
a, b = b % a, a
return b
def rotate(rotations, rotation=np.identity(3)):
"""Convert string of format '50x,-10y,120z' to a rotation matrix.
Note that the order of rotation matters, i.e. '50x,40z' is different
from '40z,50x'.
"""
if rotations == '':
return rotation.copy()
for i, a in [('xyz'.index(s[-1]), radians(float(s[:-1])))
for s in rotations.split(',')]:
s = sin(a)
c = cos(a)
if i == 0:
rotation = np.dot(rotation, [(1, 0, 0),
(0, c, s),
(0, -s, c)])
elif i == 1:
rotation = np.dot(rotation, [(c, 0, -s),
(0, 1, 0),
(s, 0, c)])
else:
rotation = np.dot(rotation, [(c, s, 0),
(-s, c, 0),
(0, 0, 1)])
return rotation
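# Illustrative sketch, not part of the original module: with this sign
# convention a 90 degree rotation about z maps the x axis onto +y when the
# matrix multiplies a row vector from the right.
def _demo_rotate():
    R = rotate('90z')
    return np.dot([1.0, 0.0, 0.0], R)  # approximately (0, 1, 0)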
def givens(a, b):
"""Solve the equation system::
[ c s] [a] [r]
[ ] . [ ] = [ ]
[-s c] [b] [0]
"""
    sgn = lambda x: (x > 0) - (x < 0)  # portable replacement for Python 2's cmp()
if b == 0:
c = sgn(a)
s = 0
r = abs(a)
elif abs(b) >= abs(a):
cot = a / b
u = sgn(b) * (1 + cot**2)**0.5
s = 1. / u
c = s * cot
r = b * u
else:
tan = b / a
u = sgn(a) * (1 + tan**2)**0.5
c = 1. / u
s = c * tan
r = a * u
return c, s, r
def irotate(rotation, initial=np.identity(3)):
"""Determine x, y, z rotation angles from rotation matrix."""
a = np.dot(initial, rotation)
cx, sx, rx = givens(a[2, 2], a[1, 2])
cy, sy, ry = givens(rx, a[0, 2])
cz, sz, rz = givens(cx * a[1, 1] - sx * a[2, 1],
cy * a[0, 1] - sy * (sx * a[1, 1] + cx * a[2, 1]))
x = degrees(atan2(sx, cx))
y = degrees(atan2(-sy, cy))
z = degrees(atan2(sz, cz))
return x, y, z
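# Illustrative sketch, not part of the original module: irotate decomposes a
# rotation matrix back into x, y, z angles, so recomposing them is expected
# to reproduce (approximately) the original matrix.
def _demo_irotate_roundtrip():
    R = rotate('50x,40z')
    x, y, z = irotate(R)
    return rotate('%.10fx,%.10fy,%.10fz' % (x, y, z))  # numerically close to R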
def hsv2rgb(h, s, v):
"""http://en.wikipedia.org/wiki/HSL_and_HSV
h (hue) in [0, 360[
s (saturation) in [0, 1]
v (value) in [0, 1]
return rgb in range [0, 1]
"""
if v == 0:
return 0, 0, 0
if s == 0:
return v, v, v
i, f = divmod(h / 60., 1)
p = v * (1 - s)
q = v * (1 - s * f)
t = v * (1 - s * (1 - f))
if i == 0:
return v, t, p
elif i == 1:
return q, v, p
elif i == 2:
return p, v, t
elif i == 3:
return p, q, v
elif i == 4:
return t, p, v
elif i == 5:
return v, p, q
else:
raise RuntimeError('h must be in [0, 360]')
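# Illustrative sketch, not part of the original module: pure hues at full
# saturation and value land on the primary colours.
def _demo_hsv2rgb():
    assert hsv2rgb(0, 1, 1) == (1, 0, 0)     # red
    assert hsv2rgb(120, 1, 1) == (0, 1, 0)   # green
    assert hsv2rgb(240, 1, 1) == (0, 0, 1)   # blue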
def hsv(array, s=.9, v=.9):
    # Shift so the minimum maps to hue 0 before scaling onto [0, 359].
    array = (array - array.min()) * 359. / (array.max() - array.min())
result = np.empty((len(array.flat), 3))
for rgb, h in zip(result, array.flat):
rgb[:] = hsv2rgb(h, s, v)
return np.reshape(result, array.shape + (3,))
## This code does the same, but requires pylab
## def cmap(array, name='hsv'):
## import pylab
## a = (array + array.min()) / array.ptp()
## rgba = getattr(pylab.cm, name)(a)
## return rgba[:-1] # return rgb only (not alpha)
ON_POSIX = 'posix' in sys.builtin_module_names
try:
from subprocess import Popen
except ImportError:
from os import popen3
else:
def popen3(cmd):
from subprocess import PIPE
p = Popen(cmd, shell=True, close_fds=ON_POSIX,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
return p.stdin, p.stdout, p.stderr
|
conwayje/ase-python
|
ase/utils/__init__.py
|
Python
|
gpl-2.0
| 5,632
|
[
"ASE"
] |
e0dc82394e2fb516d99438be6439538e92abd4225541d2194eb9b0409adfc11c
|
"""
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# Giorgio Patrini
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array
from ..exceptions import NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
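# Illustrative sketch, not part of the original module: row_norms matches the
# naive dense computation (and, unlike it, also accepts CSR input directly).
def _demo_row_norms():
    X = np.array([[3., 4.], [0., 0.]])
    assert np.allclose(row_norms(X), [5., 0.])
    assert np.allclose(row_norms(X, squared=True), [25., 0.])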
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Falling back to np.dot. '
'Data must be of same type of either '
'32 or 64 bit float for the BLAS function, gemm, to be '
'used for an efficient dot operation. ',
NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
        This function calls BLAS directly while ensuring Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.exceptions import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
A (size x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
    # generating random Gaussian vectors r with shape: (A.shape[1], size)
    R = random_state.normal(size=(A.shape[1], size))
    # sampling the range of A by linear projection of r
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tend to be a little faster in that
case).
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto':
transpose = n_samples < n_features
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
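# Illustrative sketch, not part of the original module: on a small dense
# matrix the leading singular values from randomized_svd track those of
# scipy's exact SVD.
def _demo_randomized_svd():
    rng = np.random.RandomState(0)
    M = rng.randn(30, 10)
    U, s, V = randomized_svd(M, n_components=3, n_iter=5, random_state=0)
    s_exact = linalg.svd(M, compute_uv=False)
    return np.allclose(s, s_exact[:3], rtol=1e-2)  # loose: method is randomized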
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
    # Use the max to normalize, as with the log this is what accumulates
    # the least error
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
        Singular values smaller than cond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping.
Otherwise, use the rows of v. The choice of which variable to base the
decision on is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
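# Illustrative sketch, not part of the original module: svd_flip only changes
# matched signs of u columns / v rows, so the reconstruction is unchanged.
def _demo_svd_flip():
    rng = np.random.RandomState(0)
    A = rng.randn(5, 4)
    U, s, V = linalg.svd(A, full_matrices=False)
    before = np.dot(U * s, V)
    U2, V2 = svd_flip(U.copy(), V.copy())  # copies: svd_flip works in place
    assert np.allclose(before, np.dot(U2 * s, V2))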
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N) or (M, )
Argument to the logistic function
out: array-like, shape: (M, N) or (M, ), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N) or (M, )
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = np.atleast_2d(X)
X = check_array(X, dtype=np.float64)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
def softmax(X, copy=True):
"""
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
copy: bool, optional
Copy X or not.
Returns
-------
out: array, shape (M, N)
Softmax function evaluated at every point in x
"""
if copy:
X = np.copy(X)
max_prob = np.max(X, axis=1).reshape((-1, 1))
X -= max_prob
np.exp(X, X)
sum_prob = np.sum(X, axis=1).reshape((-1, 1))
X /= sum_prob
return X
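# Illustrative sketch, not part of the original module: each output row is a
# probability distribution, and the max-shift makes huge inputs safe.
def _demo_softmax():
    X = np.array([[1., 2., 3.], [1001., 1002., 1003.]])
    P = softmax(X)
    assert np.allclose(P.sum(axis=1), 1.0)
    assert np.allclose(P[0], P[1])  # rows differing by a constant shift agree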
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _incremental_mean_and_var(X, last_mean=.0, last_variance=None,
last_sample_count=0):
"""Calculate mean update and a Youngs and Cramer variance update.
last_mean and last_variance are statistics computed at the last step by the
function. Both must be initialized to 0.0. In case no scaling is required
    last_variance can be None. The mean is always required and returned because
    it is necessary for the calculation of the variance. last_sample_count is
    the number of samples encountered until now.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
last_mean : array-like, shape: (n_features,)
last_variance : array-like, shape: (n_features,)
last_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
If None, only mean is computed
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
variance: recommendations, The American Statistician, Vol. 37, No. 3,
pp. 242-247
Also, see the sparse implementation of this in
`utils.sparsefuncs.incr_mean_variance_axis` and
`utils.sparsefuncs_fast.incr_mean_variance_axis0`
"""
# old = stats until now
# new = the current increment
# updated = the aggregated stats
last_sum = last_mean * last_sample_count
new_sum = X.sum(axis=0)
new_sample_count = X.shape[0]
updated_sample_count = last_sample_count + new_sample_count
updated_mean = (last_sum + new_sum) / updated_sample_count
if last_variance is None:
updated_variance = None
else:
new_unnormalized_variance = X.var(axis=0) * new_sample_count
if last_sample_count == 0: # Avoid division by 0
updated_unnormalized_variance = new_unnormalized_variance
else:
last_over_new_count = last_sample_count / new_sample_count
last_unnormalized_variance = last_variance * last_sample_count
updated_unnormalized_variance = (
last_unnormalized_variance +
new_unnormalized_variance +
last_over_new_count / updated_sample_count *
(last_sum / last_over_new_count - new_sum) ** 2)
updated_variance = updated_unnormalized_variance / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
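# Illustrative sketch, not part of the original module: feeding the data in
# two batches reproduces the plain batch mean and variance.
def _demo_incremental_mean_and_var():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    mean, var, n = _incremental_mean_and_var(X[:12], 0.0, 0.0, 0)
    mean, var, n = _incremental_mean_and_var(X[12:], mean, var, n)
    assert n == 20
    assert np.allclose(mean, X.mean(axis=0))
    assert np.allclose(var, X.var(axis=0))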
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
|
Djabbz/scikit-learn
|
sklearn/utils/extmath.py
|
Python
|
bsd-3-clause
| 23,268
|
[
"Gaussian"
] |
42ac3b70321f1c83a314ebe7ac7171d60dac3f2758db60fc3034bba7bf11f298
|
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import os
import glob
import warnings
import logging
import importlib
import numpy as np
from natsort import natsorted
from hyperspy.drawing.marker import markers_metadata_dict_to_markers
from hyperspy.misc.io.tools import ensure_directory
from hyperspy.misc.io.tools import overwrite as overwrite_method
from hyperspy.misc.utils import strlist2enumeration
from hyperspy.misc.utils import stack as stack_method
from hyperspy.io_plugins import io_plugins, default_write_ext
from hyperspy.exceptions import VisibleDeprecationWarning
from hyperspy.ui_registry import get_gui
from hyperspy.extensions import ALL_EXTENSIONS
_logger = logging.getLogger(__name__)
# Utility string:
f_error_fmt = (
"\tFile %d:\n"
"\t\t%d signals\n"
"\t\tPath: %s")
def load(filenames=None,
signal_type=None,
stack=False,
stack_axis=None,
new_axis_name="stack_element",
lazy=False,
convert_units=False,
**kwds):
"""
    Load one or more supported files into a HyperSpy structure.
Supported formats: hspy (HDF5), msa, Gatan dm3, Ripple (rpl+raw),
Bruker bcf and spx, FEI ser and emi, SEMPER unf, EMD, EDAX spd/spc,
tif, and a number of image formats.
Any extra keyword is passed to the corresponding reader. For
available options see their individual documentation.
Parameters
----------
filenames : None, str or list of strings
The filename to be loaded. If None, a window will open to select
a file to load. If a valid filename is passed in that single
file is loaded. If multiple file names are passed in
        a list, either a list of objects or a single object containing the
        data of the individual files stacked is returned. This behaviour is
        controlled by the `stack` parameter (see below). Multiple
        files can be loaded by using simple shell-style wildcards,
        e.g. 'my_file*.msa' loads all the files that start
        with 'my_file' and have the '.msa' extension.
signal_type : {None, "EELS", "EDS_SEM", "EDS_TEM", "", str}
The acronym that identifies the signal type.
The value provided may determine the Signal subclass assigned to the
data.
If None the value is read/guessed from the file. Any other value
overrides the value stored in the file if any.
For electron energy-loss spectroscopy use "EELS".
For energy dispersive x-rays use "EDS_TEM"
if acquired from an electron-transparent sample — as it is usually
the case in a transmission electron microscope (TEM) —,
"EDS_SEM" if acquired from a non electron-transparent sample
— as it is usually the case in a scanning electron microscope (SEM).
If "" (empty string) the value is not read from the file and is
considered undefined.
stack : bool
If True and multiple filenames are passed in, stacking all
the data into a single object is attempted. All files must match
in shape. If each file contains multiple (N) signals, N stacks will be
created, with the requirement that each file contains the same number
of signals.
stack_axis : {None, int, str}
If None, the signals are stacked over a new axis. The data must
have the same dimensions. Otherwise the
signals are stacked over the axis given by its integer index or
its name. The data must have the same shape, except in the dimension
corresponding to `axis`.
new_axis_name : string
The name of the new axis when `axis` is None.
If an axis with this name already
        exists it automatically appends '-i', where `i` is an integer,
until it finds a name that is not yet in use.
lazy : {None, bool}
Open the data lazily - i.e. without actually reading the data from the
disk until required. Allows opening arbitrary-sized datasets. The default
is `False`.
convert_units : {bool}
If True, convert the units using the `convert_to_units` method of
the `axes_manager`. If False, does nothing. The default is False.
print_info: bool
For SEMPER unf- and EMD (Berkeley)-files, if True (default is False)
additional information read during loading is printed for a quick
overview.
downsample : int (1–4095)
        For Bruker bcf files, if set to an integer >= 2 (default 1), the
        bcf is parsed into a down-sampled array by the given factor:
        values from multiple original bcf pixels are summed to form each
        downsampled pixel. This improves the signal and conserves memory
        at the cost of lower resolution.
cutoff_at_kV : {None, int, float}
        For Bruker bcf files, if set to a number (default is None), the
        bcf is parsed into an array with the depth cut off at the
        corresponding energy. This conserves memory by cutting off the
        unused tail of the spectra, or can force enlargement of the
        spectral size.
select_type : {'spectrum_image', 'image', 'single_spectrum', None}
If `None` (default), all data are loaded.
For Bruker bcf and Velox emd files: if one of 'spectrum_image', 'image'
        or 'single_spectrum', the loader returns, respectively, only
        the spectrum image, only the images (including the EDS map for Velox
        emd files), or only the single spectra (for Velox emd files).
first_frame : int (default 0)
Only for Velox emd files: load only the data acquired after the
        specified frame.
last_frame : None or int (default None)
        Only for Velox emd files: load only the data acquired up to the
        specified frame. If None, load the data up to the end.
sum_frames : bool (default is True)
Only for Velox emd files: if False, load each EDS frame individually.
sum_EDS_detectors : bool (default is True)
Only for Velox emd files: if True, the signal from the different
detector are summed. If False, a distinct signal is returned for each
EDS detectors.
    rebin_energy : int, a divisor of the length of the energy dimension (default 1)
Only for Velox emd files: rebin the energy axis by the integer provided
during loading in order to save memory space.
SI_dtype : numpy.dtype
Only for Velox emd files: set the dtype of the spectrum image data in
order to save memory space. If None, the default dtype from the Velox emd
file is used.
load_SI_image_stack : bool (default False)
Only for Velox emd files: if True, load the stack of STEM images
acquired simultaneously as the EDS spectrum image.
dataset_name : string or list, optional
For filetypes which support several datasets in the same file, this
will only load the specified dataset. Several datasets can be loaded
by using a list of strings. Only for EMD (NCEM) files.
only_valid_data : bool, optional
Only for FEI emi/ser file in case of series or linescan with the
acquisition stopped before the end: if True, load only the acquired
data. If False, fill empty data with zeros. Default is False and this
default value will change to True in version 2.0.
Returns
-------
Signal instance or list of signal instances
Examples
--------
Loading a single file providing the signal type:
>>> d = hs.load('file.dm3', signal_type="EDS_TEM")
Loading multiple files:
>>> d = hs.load('file1.dm3','file2.dm3')
Loading multiple files matching the pattern:
>>> d = hs.load('file*.dm3')
Loading (potentially larger than the available memory) files lazily and
stacking:
>>> s = hs.load('file*.blo', lazy=True, stack=True)
"""
deprecated = ['mmap_dir', 'load_to_memory']
warn_str = "'{}' argument is deprecated, please use 'lazy' instead"
for k in deprecated:
if k in kwds:
lazy = True
warnings.warn(warn_str.format(k), VisibleDeprecationWarning)
del kwds[k]
kwds['signal_type'] = signal_type
kwds['convert_units'] = convert_units
if filenames is None:
from hyperspy.signal_tools import Load
load_ui = Load()
get_gui(load_ui, toolkey="hyperspy.load")
if load_ui.filename:
filenames = load_ui.filename
lazy = load_ui.lazy
if filenames is None:
raise ValueError("No file provided to reader")
if isinstance(filenames, str):
filenames = natsorted([f for f in glob.glob(filenames)
if os.path.isfile(f)])
if not filenames:
raise ValueError('No file name matches this pattern')
elif not isinstance(filenames, (list, tuple)):
raise ValueError(
'The filenames parameter must be a list, tuple, string or None')
if not filenames:
raise ValueError('No file provided to reader.')
else:
if len(filenames) > 1:
_logger.info('Loading individual files')
if stack is True:
# We are loading a stack!
# Note that while each file might contain several signals, all
# files are required to contain the same number of signals. We
# therefore use the first file to determine the number of signals.
for i, filename in enumerate(filenames):
obj = load_single_file(filename, lazy=lazy,
**kwds)
if i == 0:
# First iteration, determine number of signals, if several:
if isinstance(obj, (list, tuple)):
n = len(obj)
else:
n = 1
# Initialize signal 2D list:
signals = [[] for j in range(n)]
else:
# Check that number of signals per file doesn't change
# for other files:
if isinstance(obj, (list, tuple)):
if n != len(obj):
raise ValueError(
"The number of sub-signals per file does not "
"match:\n" +
(f_error_fmt % (1, n, filenames[0])) +
(f_error_fmt % (i, len(obj), filename)))
elif n != 1:
raise ValueError(
"The number of sub-signals per file does not "
"match:\n" +
(f_error_fmt % (1, n, filenames[0])) +
(f_error_fmt % (i, len(obj), filename)))
# Append loaded signals to 2D list:
if n == 1:
signals[0].append(obj)
elif n > 1:
for j in range(n):
signals[j].append(obj[j])
# Next, merge the signals in the `stack_axis` direction:
# When each file had N signals, we create N stacks!
objects = []
for i in range(n):
signal = signals[i] # Sublist, with len = len(filenames)
signal = stack_method(
signal, axis=stack_axis, new_axis_name=new_axis_name,
lazy=lazy)
signal.metadata.General.title = os.path.split(
os.path.split(os.path.abspath(filenames[0]))[0])[1]
_logger.info('Individual files loaded correctly')
_logger.info(signal._summary())
objects.append(signal)
else:
# No stack, so simply we load all signals in all files separately
objects = [load_single_file(filename, lazy=lazy,
**kwds)
for filename in filenames]
if len(objects) == 1:
objects = objects[0]
return objects
def load_single_file(filename, **kwds):
"""
Load any supported file into an HyperSpy structure
Supported formats: netCDF, msa, Gatan dm3, Ripple (rpl+raw),
Bruker bcf, FEI ser and emi, EDAX spc and spd, hspy (HDF5), and SEMPER unf.
Parameters
----------
filename : string
File name (including the extension)
"""
extension = os.path.splitext(filename)[1][1:]
i = 0
while extension.lower() not in io_plugins[i].file_extensions and \
i < len(io_plugins) - 1:
i += 1
    if extension.lower() not in io_plugins[i].file_extensions:
        # No plugin claims this extension; fall back to the python
        # imaging library.
        try:
            from hyperspy.io_plugins import image
            reader = image
            return load_with_reader(filename, reader, **kwds)
        except BaseException:
            raise IOError('Could not read %s. If the file format is'
                          ' supported, please report this error' % filename)
else:
reader = io_plugins[i]
return load_with_reader(filename=filename, reader=reader, **kwds)
def load_with_reader(filename, reader, signal_type=None, convert_units=False,
**kwds):
lazy = kwds.get('lazy', False)
file_data_list = reader.file_reader(filename,
**kwds)
objects = []
for signal_dict in file_data_list:
if 'metadata' in signal_dict:
if "Signal" not in signal_dict["metadata"]:
signal_dict["metadata"]["Signal"] = {}
if signal_type is not None:
signal_dict['metadata']["Signal"]['signal_type'] = signal_type
objects.append(dict2signal(signal_dict, lazy=lazy))
folder, filename = os.path.split(os.path.abspath(filename))
filename, extension = os.path.splitext(filename)
objects[-1].tmp_parameters.folder = folder
objects[-1].tmp_parameters.filename = filename
objects[-1].tmp_parameters.extension = extension.replace('.', '')
if convert_units:
objects[-1].axes_manager.convert_units()
else:
# it's a standalone model
continue
if len(objects) == 1:
objects = objects[0]
return objects
def assign_signal_subclass(dtype,
signal_dimension,
signal_type="",
lazy=False):
"""Given record_by and signal_type return the matching Signal subclass.
Parameters
----------
dtype : :class:`~.numpy.dtype`
signal_dimension: int
signal_type : {"EELS", "EDS", "EDS_SEM", "EDS_TEM", "DielectricFunction", "", str}
lazy: bool
Returns
-------
Signal or subclass
"""
# Check if parameter values are allowed:
if np.issubdtype(dtype, np.complexfloating):
dtype = 'complex'
elif ('float' in dtype.name or 'int' in dtype.name or
'void' in dtype.name or 'bool' in dtype.name or
'object' in dtype.name):
dtype = 'real'
else:
raise ValueError('Data type "{}" not understood!'.format(dtype.name))
if not isinstance(signal_dimension, int) or signal_dimension < 0:
raise ValueError("signal_dimension must be a positive interger")
signals = {key: value for key, value in ALL_EXTENSIONS["signals"].items()
if value["lazy"] == lazy}
dtype_matches = {key: value for key, value in signals.items()
if value["dtype"] == dtype}
dtype_dim_matches = {key: value for key, value in dtype_matches.items()
if signal_dimension == value["signal_dimension"]}
dtype_dim_type_matches = {key: value for key, value in dtype_dim_matches.items()
if signal_type == value["signal_type"] or
"signal_type_aliases" in value and
signal_type in value["signal_type_aliases"]}
if dtype_dim_type_matches:
# Perfect match found
signal_dict = dtype_dim_type_matches
else:
# If the following dict is not empty, only signal_dimension and dtype match.
# The dict should contain a general class for the given signal
# dimension.
signal_dict = {key: value for key, value in dtype_dim_matches.items()
if value["signal_type"] == ""}
if not signal_dict:
# no signal_dimension match either, hence select the general subclass for
# correct dtype
signal_dict = {key: value for key, value in dtype_matches.items()
if value["signal_dimension"] == -1
and value["signal_type"] == ""}
# Sanity check
if len(signal_dict) > 1:
_logger.warning(
"There is more than one kind of signal that match the current specifications, "
"which is not expected."
"Please report this issue to the HyperSpy developers.")
# Regardless of the number of signals in the dict we assign one.
# The following should only raise an error if the base classes
# are not correctly registered.
for key, value in signal_dict.items():
signal_class = getattr(importlib.import_module(value["module"]), key)
return signal_class
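# Illustrative sketch, not part of the original module: requesting a plain
# one-dimensional real signal should yield HyperSpy's generic Signal1D class,
# assuming the built-in signals are registered in ALL_EXTENSIONS.
def _demo_assign_signal_subclass():
    cls = assign_signal_subclass(dtype=np.dtype('float64'),
                                 signal_dimension=1,
                                 signal_type="")
    return cls.__name__  # expected: 'Signal1D'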
def dict2signal(signal_dict, lazy=False):
"""Create a signal (or subclass) instance defined by a dictionary
Parameters
----------
signal_dict : dictionary
Returns
-------
s : Signal or subclass
"""
if "package" in signal_dict and signal_dict["package"]:
try:
importlib.import_module(signal_dict["package"])
except ImportError:
_logger.warning(
f"This file contains a signal provided by the " +
f'{signal_dict["package"]} Python package that is not ' +
f'currently installed. The signal will be loaded into a '
f'generic HyperSpy signal. Consider installing ' +
f'{signal_dict["package"]} to load this dataset into its '
f'original signal class.')
signal_dimension = -1 # undefined
signal_type = ""
if "metadata" in signal_dict:
mp = signal_dict["metadata"]
if "Signal" in mp and "record_by" in mp["Signal"]:
record_by = mp["Signal"]['record_by']
if record_by == "spectrum":
signal_dimension = 1
elif record_by == "image":
signal_dimension = 2
del mp["Signal"]['record_by']
if "Signal" in mp and "signal_type" in mp["Signal"]:
signal_type = mp["Signal"]['signal_type']
if "attributes" in signal_dict and "_lazy" in signal_dict["attributes"]:
lazy = signal_dict["attributes"]["_lazy"]
# "Estimate" signal_dimension from axes. It takes precedence over record_by
if ("axes" in signal_dict and
len(signal_dict["axes"]) == len(
[axis for axis in signal_dict["axes"] if "navigate" in axis])):
# If navigate is defined for all axes
signal_dimension = len(
[axis for axis in signal_dict["axes"] if not axis["navigate"]])
elif signal_dimension == -1:
# If not defined, all dimension are categorised as signal
signal_dimension = signal_dict["data"].ndim
signal = assign_signal_subclass(signal_dimension=signal_dimension,
signal_type=signal_type,
dtype=signal_dict['data'].dtype,
lazy=lazy)(**signal_dict)
if signal._lazy:
signal._make_lazy()
if signal.axes_manager.signal_dimension != signal_dimension:
# This may happen when the signal dimension couldn't be matched with
# any specialised subclass
signal.axes_manager.set_signal_dimension(signal_dimension)
if "post_process" in signal_dict:
for f in signal_dict['post_process']:
signal = f(signal)
if "mapping" in signal_dict:
for opattr, (mpattr, function) in signal_dict["mapping"].items():
if opattr in signal.original_metadata:
value = signal.original_metadata.get_item(opattr)
if function is not None:
value = function(value)
if value is not None:
signal.metadata.set_item(mpattr, value)
if "metadata" in signal_dict and "Markers" in mp:
markers_dict = markers_metadata_dict_to_markers(
mp['Markers'],
axes_manager=signal.axes_manager)
del signal.metadata.Markers
signal.metadata.Markers = markers_dict
return signal
def save(filename, signal, overwrite=None, **kwds):
extension = os.path.splitext(filename)[1][1:]
if extension == '':
extension = "hspy"
filename = filename + '.' + extension
writer = None
for plugin in io_plugins:
if extension.lower() in plugin.file_extensions:
writer = plugin
break
if writer is None:
raise ValueError(
('.%s does not correspond to any supported format. Supported ' +
'file extensions are: %s') %
(extension, strlist2enumeration(default_write_ext)))
else:
# Check if the writer can write
sd = signal.axes_manager.signal_dimension
nd = signal.axes_manager.navigation_dimension
if writer.writes is False:
raise ValueError('Writing to this format is not '
'supported, supported file extensions are: %s ' %
strlist2enumeration(default_write_ext))
if writer.writes is not True and (sd, nd) not in writer.writes:
yes_we_can = [plugin.format_name for plugin in io_plugins
if plugin.writes is True or
plugin.writes is not False and
(sd, nd) in plugin.writes]
raise IOError('This file format cannot write this data. '
'The following formats can: %s' %
strlist2enumeration(yes_we_can))
ensure_directory(filename)
is_file = os.path.isfile(filename)
if overwrite is None:
write = overwrite_method(filename) # Ask what to do
elif overwrite is True or (overwrite is False and not is_file):
write = True # Write the file
elif overwrite is False and is_file:
write = False # Don't write the file
else:
raise ValueError("`overwrite` parameter can only be None, True or "
"False.")
if write:
writer.file_writer(filename, signal, **kwds)
_logger.info('The %s file was created' % filename)
folder, filename = os.path.split(os.path.abspath(filename))
signal.tmp_parameters.set_item('folder', folder)
signal.tmp_parameters.set_item('filename',
os.path.splitext(filename)[0])
signal.tmp_parameters.set_item('extension', extension)
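# Illustrative sketch (not part of the original file): typical use of save,
# assuming `s` is a HyperSpy signal and a writer exists for the extension.
#
#   save("dataset", s)                       # extension defaults to ".hspy"
#   save("dataset.tif", s, overwrite=True)   # writer chosen by extension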
|
sem-geologist/hyperspy
|
hyperspy/io.py
|
Python
|
gpl-3.0
| 23,941
|
[
"NetCDF"
] |
941b412166dd548329f08505badc908e803591148b83a9b7b7e14bd2a83b9aab
|
# -*- coding:utf-8; python-indent:2; indent-tabs-mode:nil -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility classes for testing the PYTD parser."""
import os
import sys
import textwrap
from pytype.pytd import pytd
from pytype.pytd.parse import parser
from pytype.pytd.parse import visitors
import unittest
class ParserTest(unittest.TestCase):
"""Test utility class. Knows how to parse PYTD and compare source code."""
def setUp(self):
self.parser = parser.TypeDeclParser()
def Parse(self, src, version=None):
# TODO(kramm): Using self.parser here breaks tests. Why?
tree = parser.TypeDeclParser(version=version).Parse(textwrap.dedent(src))
tree.Visit(visitors.VerifyVisitor())
return tree
def ToAST(self, src_or_tree):
# TODO(pludemann): The callers are not consistent in how they use this
# and in most (all?) cases they know whether they're
# passing in a source string or parse tree. It would
    #                  be better if all the callers were consistent.
if isinstance(src_or_tree, basestring):
# Put into a canonical form (removes comments, standard indents):
return self.Parse(src_or_tree + "\n")
else: # isinstance(src_or_tree, tuple):
src_or_tree.Visit(visitors.VerifyVisitor())
return src_or_tree
def AssertSourceEquals(self, src_or_tree_1, src_or_tree_2):
# Strip leading "\n"s for convenience
ast1 = self.ToAST(src_or_tree_1)
ast2 = self.ToAST(src_or_tree_2)
src1 = pytd.Print(ast1).strip() + "\n"
src2 = pytd.Print(ast2).strip() + "\n"
# Verify printed versions are the same and ASTs are the same.
    # TODO(pludemann): Find out why some tests confuse NamedType and
# ClassType and fix the tests so that this conversion isn't
# needed.
ast1 = ast1.Visit(visitors.ClassTypeToNamedType())
ast2 = ast2.Visit(visitors.ClassTypeToNamedType())
if src1 != src2 or not ast1.ASTeq(ast2):
# Due to differing opinions on the form of debug output, allow an
# environment variable to control what output you want. Set
# PY_UNITTEST_DIFF to get diff output.
if os.getenv("PY_UNITTEST_DIFF"):
self.maxDiff = None # for better diff output (assertMultiLineEqual)
self.assertMultiLineEqual(src1, src2)
else:
sys.stdout.flush()
sys.stderr.flush()
print >>sys.stderr, "Source files or ASTs differ:"
print >>sys.stderr, "-" * 36, " Actual ", "-" * 36
print >>sys.stderr, textwrap.dedent(src1).strip()
print >>sys.stderr, "-" * 36, "Expected", "-" * 36
print >>sys.stderr, textwrap.dedent(src2).strip()
print >>sys.stderr, "-" * 80
        if ast1 != ast2:
print >>sys.stderr, "Actual AST:", ast1
print >>sys.stderr, "Expect AST:", ast2
self.fail("source files differ")
def ApplyVisitorToString(self, data, visitor):
tree = self.Parse(data)
new_tree = tree.Visit(visitor)
return pytd.Print(new_tree)
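  # Illustrative sketch (not part of the original file): a subclass might use
  # these helpers roughly as below; the PYTD snippet is hypothetical.
  #
  #   class ExampleTest(ParserTest):
  #     def testRoundTrip(self):
  #       src = "def f(x: int) -> str"
  #       self.AssertSourceEquals(src, self.Parse(src))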
|
pombredanne/pytype
|
pytype/pytd/parse/parser_test.py
|
Python
|
apache-2.0
| 3,604
|
[
"VisIt"
] |
1218f5f7ab02f40353fb87c03d90561add99c20aae87260e69db7cfb21abeee7
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements a Composition class to represent compositions,
and a ChemicalPotential class to represent potentials.
"""
import collections
import numbers
import os
import re
import string
import warnings
from functools import total_ordering
from itertools import combinations_with_replacement, product
from typing import Dict, Generator, List, Tuple, Union
from monty.fractions import gcd, gcd_float
from monty.json import MSONable
from monty.serialization import loadfn
from pymatgen.core.periodic_table import DummySpecies, Element, Species, get_el_sp
from pymatgen.core.units import Mass
from pymatgen.util.string import Stringify, formula_double_format
SpeciesLike = Union[str, Element, Species, DummySpecies]
@total_ordering
class Composition(collections.abc.Hashable, collections.abc.Mapping, MSONable, Stringify):
"""
Represents a Composition, which is essentially a {element:amount} mapping
type. Composition is written to be immutable and hashable,
unlike a standard Python dict.
Note that the key can be either an Element or a Species. Elements and Species
are treated differently. i.e., a Fe2+ is not the same as a Fe3+ Species and
would be put in separate keys. This differentiation is deliberate to
support using Composition to determine the fraction of a particular Species.
Works almost completely like a standard python dictionary, except that
__getitem__ is overridden to return 0 when an element is not found.
(somewhat like a defaultdict, except it is immutable).
Also adds more convenience methods relevant to compositions, e.g.,
get_fraction.
    It should also be noted that much Composition-related functionality takes
    in a standard string as a convenient input. For example,
even though the internal representation of a Fe2O3 composition is
{Element("Fe"): 2, Element("O"): 3}, you can obtain the amount of Fe
simply by comp["Fe"] instead of the more verbose comp[Element("Fe")].
>>> comp = Composition("LiFePO4")
>>> comp.get_atomic_fraction(Element("Li"))
0.14285714285714285
>>> comp.num_atoms
7.0
>>> comp.reduced_formula
'LiFePO4'
>>> comp.formula
'Li1 Fe1 P1 O4'
>>> comp.get_wt_fraction(Element("Li"))
0.04399794666951898
>>> comp.num_atoms
7.0
"""
# Tolerance in distinguishing different composition amounts.
# 1e-8 is fairly tight, but should cut out most floating point arithmetic
# errors.
amount_tolerance = 1e-8
# Special formula handling for peroxides and certain elements. This is so
# that formula output does not write LiO instead of Li2O2 for example.
special_formulas = {
"LiO": "Li2O2",
"NaO": "Na2O2",
"KO": "K2O2",
"HO": "H2O2",
"CsO": "Cs2O2",
"RbO": "Rb2O2",
"O": "O2",
"N": "N2",
"F": "F2",
"Cl": "Cl2",
"H": "H2",
}
oxi_prob = None # prior probability of oxidation used by oxi_state_guesses
def __init__(self, *args, strict: bool = False, **kwargs):
r"""
Very flexible Composition construction, similar to the built-in Python
dict(). Also extended to allow simple string init.
Args:
Any form supported by the Python built-in {} function.
1. A dict of either {Element/Species: amount},
{string symbol:amount}, or {atomic number:amount} or any mixture
of these. E.g., {Element("Li"):2 ,Element("O"):1},
{"Li":2, "O":1}, {3:2, 8:1} all result in a Li2O composition.
2. Keyword arg initialization, similar to a dict, e.g.,
Composition(Li = 2, O = 1)
In addition, the Composition constructor also allows a single
string as an input formula. E.g., Composition("Li2O").
strict: Only allow valid Elements and Species in the Composition.
allow_negative: Whether to allow negative compositions. This
argument must be popped from the **kwargs due to *args
ambiguity.
"""
self.allow_negative = kwargs.pop("allow_negative", False)
# it's much faster to recognize a composition and use the elmap than
# to pass the composition to {}
if len(args) == 1 and isinstance(args[0], Composition):
elmap = args[0]
elif len(args) == 1 and isinstance(args[0], str):
elmap = self._parse_formula(args[0]) # type: ignore
else:
elmap = dict(*args, **kwargs) # type: ignore
elamt = {}
self._natoms = 0
for k, v in elmap.items():
if v < -Composition.amount_tolerance and not self.allow_negative:
raise ValueError("Amounts in Composition cannot be negative!")
if abs(v) >= Composition.amount_tolerance:
elamt[get_el_sp(k)] = v
self._natoms += abs(v)
self._data = elamt
if strict and not self.valid:
raise ValueError("Composition is not valid, contains: {}".format(", ".join(map(str, self.elements))))
def __getitem__(self, item: SpeciesLike):
try:
sp = get_el_sp(item)
return self._data.get(sp, 0)
except ValueError as ex:
raise TypeError(f"Invalid key {item}, {type(item)} for Composition\nValueError exception:\n{ex}")
def __len__(self):
return len(self._data)
def __iter__(self):
return self._data.keys().__iter__()
def __contains__(self, item):
try:
sp = get_el_sp(item)
return sp in self._data
except ValueError as ex:
raise TypeError(f"Invalid key {item}, {type(item)} for Composition\nValueError exception:\n{ex}")
def __eq__(self, other):
# elements with amounts < Composition.amount_tolerance don't show up
# in the elmap, so checking len enables us to only check one
        # composition's elements
if len(self) != len(other):
return False
return all(abs(v - other[el]) <= Composition.amount_tolerance for el, v in self.items())
def __ge__(self, other):
"""
Defines >= for Compositions. Should ONLY be used for defining a sort
order (the behavior is probably not what you'd expect)
"""
for el in sorted(set(self.elements + other.elements)):
if other[el] - self[el] >= Composition.amount_tolerance:
return False
if self[el] - other[el] >= Composition.amount_tolerance:
return True
return True
def __ne__(self, other):
return not self.__eq__(other)
def __add__(self, other):
"""
Adds two compositions. For example, an Fe2O3 composition + an FeO
composition gives a Fe3O4 composition.
"""
new_el_map = collections.defaultdict(float)
new_el_map.update(self)
for k, v in other.items():
new_el_map[get_el_sp(k)] += v
return Composition(new_el_map, allow_negative=self.allow_negative)
def __sub__(self, other):
"""
Subtracts two compositions. For example, an Fe2O3 composition - an FeO
composition gives an FeO2 composition.
Raises:
ValueError if the subtracted composition is greater than the
original composition in any of its elements, unless allow_negative
is True
"""
new_el_map = collections.defaultdict(float)
new_el_map.update(self)
for k, v in other.items():
new_el_map[get_el_sp(k)] -= v
return Composition(new_el_map, allow_negative=self.allow_negative)
def __mul__(self, other):
"""
Multiply a Composition by an integer or a float.
Fe2O3 * 4 -> Fe8O12
"""
if not isinstance(other, numbers.Number):
return NotImplemented
return Composition({el: self[el] * other for el in self}, allow_negative=self.allow_negative)
__rmul__ = __mul__
def __truediv__(self, other):
if not isinstance(other, numbers.Number):
return NotImplemented
return Composition({el: self[el] / other for el in self}, allow_negative=self.allow_negative)
__div__ = __truediv__
def __hash__(self):
"""
hash based on the chemical system
"""
return hash(frozenset(self._data.keys()))
@property
def average_electroneg(self) -> float:
"""
:return: Average electronegativity of the composition.
"""
return sum((el.X * abs(amt) for el, amt in self.items())) / self.num_atoms
@property
def total_electrons(self) -> float:
"""
:return: Total number of electrons in composition.
"""
return sum((el.Z * abs(amt) for el, amt in self.items()))
def almost_equals(self, other: "Composition", rtol: float = 0.1, atol: float = 1e-8) -> bool:
"""
Returns true if compositions are equal within a tolerance.
Args:
other (Composition): Other composition to check
rtol (float): Relative tolerance
atol (float): Absolute tolerance
"""
sps = set(self.elements + other.elements)
for sp in sps:
a = self[sp]
b = other[sp]
tol = atol + rtol * (abs(a) + abs(b)) / 2
if abs(b - a) > tol:
return False
return True
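    # Illustrative sketch (not part of the original file): with the default
    # tolerances, Fe2O3 and Fe2.1O3 compare as almost equal because
    # |2.1 - 2.0| <= 1e-8 + 0.1 * (2.0 + 2.1) / 2.
    #
    #   Composition("Fe2O3").almost_equals(Composition("Fe2.1O3"))  # True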
@property
def is_element(self) -> bool:
"""
True if composition is for an element.
"""
return len(self) == 1
def copy(self) -> "Composition":
"""
:return: A copy of the composition.
"""
return Composition(self, allow_negative=self.allow_negative)
@property
def formula(self) -> str:
"""
Returns a formula string, with elements sorted by electronegativity,
e.g., Li4 Fe4 P4 O16.
"""
sym_amt = self.get_el_amt_dict()
syms = sorted(sym_amt.keys(), key=lambda sym: get_el_sp(sym).X)
formula = [s + formula_double_format(sym_amt[s], False) for s in syms]
return " ".join(formula)
@property
def alphabetical_formula(self) -> str:
"""
Returns a formula string, with elements sorted by alphabetically
e.g., Fe4 Li4 O16 P4.
"""
return " ".join(sorted(self.formula.split(" ")))
@property
def iupac_formula(self) -> str:
"""
Returns a formula string, with elements sorted by the iupac
electronegativity ordering defined in Table VI of "Nomenclature of
Inorganic Chemistry (IUPAC Recommendations 2005)". This ordering
effectively follows the groups and rows of the periodic table, except
        the Lanthanides, Actinides and hydrogen. Polyanions are still determined
based on the true electronegativity of the elements.
e.g. CH2(SO4)2
"""
sym_amt = self.get_el_amt_dict()
syms = sorted(sym_amt.keys(), key=lambda s: get_el_sp(s).iupac_ordering)
formula = [s + formula_double_format(sym_amt[s], False) for s in syms]
return " ".join(formula)
@property
def element_composition(self) -> "Composition":
"""
Returns the composition replacing any species by the corresponding
element.
"""
return Composition(self.get_el_amt_dict(), allow_negative=self.allow_negative)
@property
def fractional_composition(self) -> "Composition":
"""
        Returns the normalized composition in which the amounts of all
        species sum to 1.
        Returns:
            Normalized composition in which the amounts of all species sum to 1.
"""
return self / self._natoms
@property
def reduced_composition(self) -> "Composition":
"""
        Returns the reduced composition, i.e., amounts normalized by the
        greatest common divisor, e.g., Composition("FePO4") for
Composition("Fe4P4O16").
"""
return self.get_reduced_composition_and_factor()[0]
def get_reduced_composition_and_factor(self) -> Tuple["Composition", float]:
"""
Calculates a reduced composition and factor.
Returns:
A normalized composition and a multiplicative factor, i.e.,
Li4Fe4P4O16 returns (Composition("LiFePO4"), 4).
"""
factor = self.get_reduced_formula_and_factor()[1]
return self / factor, factor
def get_reduced_formula_and_factor(self, iupac_ordering: bool = False) -> Tuple[str, float]:
"""
Calculates a reduced formula and factor.
Args:
iupac_ordering (bool, optional): Whether to order the
formula by the iupac "electronegativity" series, defined in
Table VI of "Nomenclature of Inorganic Chemistry (IUPAC
Recommendations 2005)". This ordering effectively follows
the groups and rows of the periodic table, except the
                Lanthanides, Actinides and hydrogen. Note that polyanions
will still be determined based on the true electronegativity of
the elements.
Returns:
A pretty normalized formula and a multiplicative factor, i.e.,
Li4Fe4P4O16 returns (LiFePO4, 4).
"""
all_int = all(abs(x - round(x)) < Composition.amount_tolerance for x in self.values())
if not all_int:
return self.formula.replace(" ", ""), 1
d = {k: int(round(v)) for k, v in self.get_el_amt_dict().items()}
(formula, factor) = reduce_formula(d, iupac_ordering=iupac_ordering)
if formula in Composition.special_formulas:
formula = Composition.special_formulas[formula]
factor /= 2
return formula, factor
def get_integer_formula_and_factor(
self, max_denominator: int = 10000, iupac_ordering: bool = False
) -> Tuple[str, float]:
"""
Calculates an integer formula and factor.
Args:
max_denominator (int): all amounts in the el:amt dict are
first converted to a Fraction with this maximum denominator
iupac_ordering (bool, optional): Whether to order the
formula by the iupac "electronegativity" series, defined in
Table VI of "Nomenclature of Inorganic Chemistry (IUPAC
Recommendations 2005)". This ordering effectively follows
the groups and rows of the periodic table, except the
                Lanthanides, Actinides and hydrogen. Note that polyanions
will still be determined based on the true electronegativity of
the elements.
Returns:
A pretty normalized formula and a multiplicative factor, i.e.,
Li0.5O0.25 returns (Li2O, 0.25). O0.25 returns (O2, 0.125)
"""
el_amt = self.get_el_amt_dict()
g = gcd_float(list(el_amt.values()), 1 / max_denominator)
d = {k: round(v / g) for k, v in el_amt.items()}
(formula, factor) = reduce_formula(d, iupac_ordering=iupac_ordering)
if formula in Composition.special_formulas:
formula = Composition.special_formulas[formula]
factor /= 2
return formula, factor * g
@property
def reduced_formula(self) -> str:
"""
Returns a pretty normalized formula, i.e., LiFePO4 instead of
Li4Fe4P4O16.
"""
return self.get_reduced_formula_and_factor()[0]
@property
def hill_formula(self) -> str:
"""
:return: Hill formula. The Hill system (or Hill notation) is a system
of writing empirical chemical formulas, molecular chemical formulas and
components of a condensed formula such that the number of carbon atoms
in a molecule is indicated first, the number of hydrogen atoms next,
and then the number of all other chemical elements subsequently, in
alphabetical order of the chemical symbols. When the formula contains
no carbon, all the elements, including hydrogen, are listed
alphabetically.
"""
c = self.element_composition
elements = sorted(el.symbol for el in c.keys())
if "C" in elements:
elements = ["C"] + [el for el in elements if el != "C"]
formula = ["{}{}".format(el, formula_double_format(c[el]) if c[el] != 1 else "") for el in elements]
return " ".join(formula)
@property
def elements(self) -> List[Union[Element, Species, DummySpecies]]:
"""
Returns view of elements in Composition.
"""
return list(self.keys())
def __str__(self):
return " ".join([f"{k}{formula_double_format(v, ignore_ones=False)}" for k, v in self.as_dict().items()])
def to_pretty_string(self) -> str:
"""
Returns:
str: Same as output __str__() but without spaces.
"""
return re.sub(r"\s+", "", self.__str__())
@property
def num_atoms(self) -> float:
"""
Total number of atoms in Composition. For negative amounts, sum
of absolute values
"""
return self._natoms
@property
def weight(self) -> float:
"""
Total molecular weight of Composition
"""
return Mass(sum(amount * el.atomic_mass for el, amount in self.items()), "amu")
def get_atomic_fraction(self, el: SpeciesLike) -> float:
"""
Calculate atomic fraction of an Element or Species.
Args:
el (Element/Species): Element or Species to get fraction for.
Returns:
Atomic fraction for element el in Composition
"""
return abs(self[el]) / self._natoms
def get_wt_fraction(self, el: SpeciesLike) -> float:
"""
Calculate weight fraction of an Element or Species.
Args:
el (Element/Species): Element or Species to get fraction for.
Returns:
Weight fraction for element el in Composition
"""
return get_el_sp(el).atomic_mass * abs(self[el]) / self.weight
def contains_element_type(self, category: str) -> bool:
"""
Check if Composition contains any elements matching a given category.
Args:
category (str): one of "noble_gas", "transition_metal",
"post_transition_metal", "rare_earth_metal", "metal", "metalloid",
"alkali", "alkaline", "halogen", "chalcogen", "lanthanoid",
"actinoid", "quadrupolar", "s-block", "p-block", "d-block", "f-block"
Returns:
True if any elements in Composition match category, otherwise False
"""
allowed_categories = (
"noble_gas",
"transition_metal",
"post_transition_metal",
"rare_earth_metal",
"metal",
"metalloid",
"alkali",
"alkaline",
"halogen",
"chalcogen",
"lanthanoid",
"actinoid",
"quadrupolar",
"s-block",
"p-block",
"d-block",
"f-block",
)
if category not in allowed_categories:
raise ValueError("Please pick a category from: {}".format(", ".join(allowed_categories)))
if "block" in category:
return any(category[0] in el.block for el in self.elements)
return any(getattr(el, f"is_{category}") for el in self.elements)
def _parse_formula(self, formula: str) -> Dict[str, float]:
"""
Args:
formula (str): A string formula, e.g. Fe2O3, Li3Fe2(PO4)3
Returns:
Composition with that formula.
Notes:
In the case of Metallofullerene formula (e.g. Y3N@C80),
the @ mark will be dropped and passed to parser.
"""
# for Metallofullerene like "Y3N@C80"
formula = formula.replace("@", "")
def get_sym_dict(form: str, factor: Union[int, float]) -> Dict[str, float]:
sym_dict: Dict[str, float] = collections.defaultdict(float)
for m in re.finditer(r"([A-Z][a-z]*)\s*([-*\.e\d]*)", form):
el = m.group(1)
amt = 1.0
if m.group(2).strip() != "":
amt = float(m.group(2))
sym_dict[el] += amt * factor
form = form.replace(m.group(), "", 1)
if form.strip():
raise ValueError(f"{form} is an invalid formula!")
return sym_dict
m = re.search(r"\(([^\(\)]+)\)\s*([\.e\d]*)", formula)
if m:
factor = 1.0
if m.group(2) != "":
factor = float(m.group(2))
unit_sym_dict = get_sym_dict(m.group(1), factor)
expanded_sym = "".join([f"{el}{amt}" for el, amt in unit_sym_dict.items()])
expanded_formula = formula.replace(m.group(), expanded_sym)
return self._parse_formula(expanded_formula)
return get_sym_dict(formula, 1)
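    # Illustrative sketch (not part of the original file): parenthesised
    # groups are expanded recursively, e.g. "Li3Fe2(PO4)3" is first rewritten
    # as "Li3Fe2P3.0O12.0" and then parsed into
    # {"Li": 3.0, "Fe": 2.0, "P": 3.0, "O": 12.0}.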
@property
def anonymized_formula(self) -> str:
"""
An anonymized formula. Unique species are arranged in ordering of
increasing amounts and assigned ascending alphabets. Useful for
prototyping formulas. For example, all stoichiometric perovskites have
anonymized_formula ABC3.
"""
reduced = self.element_composition
if all(x == int(x) for x in self.values()):
reduced /= gcd(*(int(i) for i in self.values()))
anon = ""
for e, amt in zip(string.ascii_uppercase, sorted(reduced.values())):
if amt == 1:
amt_str = ""
elif abs(amt % 1) < 1e-8:
amt_str = str(int(amt))
else:
amt_str = str(amt)
anon += f"{e}{amt_str}"
return anon
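    # Illustrative sketch (not part of the original file): amounts are sorted
    # in ascending order and relabelled A, B, C, ..., so all stoichiometric
    # perovskites map onto the same prototype string.
    #
    #   Composition("CaTiO3").anonymized_formula  # 'ABC3'
    #   Composition("SrZrO3").anonymized_formula  # 'ABC3'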
@property
def chemical_system(self) -> str:
"""
Get the chemical system of a Composition, for example "O-Si" for
SiO2. Chemical system is a string of a list of elements
sorted alphabetically and joined by dashes, by convention for use
in database keys.
"""
return "-".join(sorted(el.symbol for el in self.elements))
@property
def valid(self) -> bool:
"""
Returns True if Composition contains valid elements or species and
False if the Composition contains any dummy species.
"""
return not any(isinstance(el, DummySpecies) for el in self.elements)
def __repr__(self) -> str:
return "Comp: " + self.formula
@classmethod
def from_dict(cls, d) -> "Composition":
"""
Creates a composition from a dict generated by as_dict(). Strictly not
necessary given that the standard constructor already takes in such an
input, but this method preserves the standard pymatgen API of having
from_dict methods to reconstitute objects generated by as_dict(). Allows
for easier introspection.
Args:
d (dict): {symbol: amount} dict.
"""
return cls(d)
def get_el_amt_dict(self) -> Dict[str, float]:
"""
Returns:
Dict with element symbol and (unreduced) amount e.g.,
{"Fe": 4.0, "O":6.0} or {"Fe3+": 4.0, "O2-":6.0}
"""
d: Dict[str, float] = collections.defaultdict(float)
for e, a in self.items():
d[e.symbol] += a
return d
def as_dict(self) -> Dict[str, float]:
"""
Returns:
dict with species symbol and (unreduced) amount e.g.,
{"Fe": 4.0, "O":6.0} or {"Fe3+": 4.0, "O2-":6.0}
"""
d: Dict[str, float] = collections.defaultdict(float)
for e, a in self.items():
d[str(e)] += a
return d
@property
def to_reduced_dict(self) -> dict:
"""
Returns:
Dict with element symbol and reduced amount e.g.,
{"Fe": 2.0, "O":3.0}
"""
return self.get_reduced_composition_and_factor()[0].as_dict()
@property
def to_data_dict(self) -> dict:
"""
Returns:
A dict with many keys and values relating to Composition/Formula,
including reduced_cell_composition, unit_cell_composition,
reduced_cell_formula, elements and nelements.
"""
return {
"reduced_cell_composition": self.get_reduced_composition_and_factor()[0],
"unit_cell_composition": self.as_dict(),
"reduced_cell_formula": self.reduced_formula,
"elements": list(self.as_dict().keys()),
"nelements": len(self.as_dict().keys()),
}
def oxi_state_guesses(
self,
oxi_states_override: dict = None,
target_charge: float = 0,
all_oxi_states: bool = False,
max_sites: int = None,
) -> List[Dict[str, float]]:
"""
Checks if the composition is charge-balanced and returns back all
charge-balanced oxidation state combinations. Composition must have
integer values. Note that more num_atoms in the composition gives
more degrees of freedom. e.g., if possible oxidation states of
element X are [2,4] and Y are [-3], then XY is not charge balanced
but X2Y2 is. Results are returned from most to least probable based
on ICSD statistics. Use max_sites to improve performance if needed.
Args:
oxi_states_override (dict): dict of str->list to override an
element's common oxidation states, e.g. {"V": [2,3,4,5]}
target_charge (int): the desired total charge on the structure.
Default is 0 signifying charge balance.
all_oxi_states (bool): if True, an element defaults to
all oxidation states in pymatgen Element.icsd_oxidation_states.
Otherwise, default is Element.common_oxidation_states. Note
that the full oxidation state list is *very* inclusive and
can produce nonsensical results.
max_sites (int): if possible, will reduce Compositions to at most
this many sites to speed up oxidation state guesses. If the
composition cannot be reduced to this many sites a ValueError
will be raised. Set to -1 to just reduce fully. If set to a
number less than -1, the formula will be fully reduced but a
ValueError will be thrown if the number of atoms in the reduced
formula is greater than abs(max_sites).
Returns:
A list of dicts - each dict reports an element symbol and average
oxidation state across all sites in that composition. If the
composition is not charge balanced, an empty list is returned.
"""
return self._get_oxid_state_guesses(all_oxi_states, max_sites, oxi_states_override, target_charge)[0]
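    # Illustrative sketch (not part of the original file): for a simple
    # charge-balanced oxide the highest-ranked assignment comes first.
    #
    #   Composition("Fe2O3").oxi_state_guesses()
    #   # -> [{'Fe': 3.0, 'O': -2.0}]  (typical ICSD-ranked output)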
def replace(self, elem_map: Dict[str, Union[str, Dict[str, Union[int, float]]]]) -> "Composition":
"""
Replace elements in a composition. Returns a new Composition, leaving the old one unchanged.
Args:
elem_map (dict[str, str | dict[str, int | float]]): dict of elements or species to swap. E.g.
{"Li": "Na"} performs a Li for Na substitution. The target can be a {species: factor} dict. For
example, in Fe2O3 you could map {"Fe": {"Mg": 0.5, "Cu":0.5}} to obtain MgCuO3.
Returns:
Composition: New object with elements remapped according to elem_map.
"""
# drop inapplicable substitutions
invalid_elems = [key for key in elem_map if key not in self]
if invalid_elems:
warnings.warn(
"Some elements to be substituted are not present in composition. Please check your input. "
f"Problematic element = {invalid_elems}; {self}"
)
for elem in invalid_elems:
elem_map.pop(elem)
new_comp = self.as_dict()
for old_elem, new_elem in elem_map.items():
amount = new_comp.pop(old_elem)
if isinstance(new_elem, dict):
for el, factor in new_elem.items():
new_comp[el] = factor * amount
else:
new_comp[new_elem] = amount
return Composition(new_comp)
def add_charges_from_oxi_state_guesses(
self,
oxi_states_override: dict = None,
target_charge: float = 0,
all_oxi_states: bool = False,
max_sites: int = None,
) -> "Composition":
"""
        Assign oxidation states based on guessed oxidation states.
See `oxi_state_guesses` for an explanation of how oxidation states are
guessed. This operation uses the set of oxidation states for each site
        that were determined to be most likely from the oxidation state guessing
routine.
Args:
oxi_states_override (dict): dict of str->list to override an
element's common oxidation states, e.g. {"V": [2,3,4,5]}
target_charge (int): the desired total charge on the structure.
Default is 0 signifying charge balance.
all_oxi_states (bool): if True, an element defaults to
all oxidation states in pymatgen Element.icsd_oxidation_states.
Otherwise, default is Element.common_oxidation_states. Note
that the full oxidation state list is *very* inclusive and
can produce nonsensical results.
max_sites (int): if possible, will reduce Compositions to at most
this many sites to speed up oxidation state guesses. If the
composition cannot be reduced to this many sites a ValueError
will be raised. Set to -1 to just reduce fully. If set to a
number less than -1, the formula will be fully reduced but a
ValueError will be thrown if the number of atoms in the reduced
formula is greater than abs(max_sites).
Returns:
Composition, where the elements are assigned oxidation states based
            on the results from guessing oxidation states. If no oxidation state
is possible, returns a Composition where all oxidation states are 0.
"""
_, oxidation_states = self._get_oxid_state_guesses(
all_oxi_states, max_sites, oxi_states_override, target_charge
)
# Special case: No charged compound is possible
if not oxidation_states:
return Composition({Species(e, 0): f for e, f in self.items()})
# Generate the species
species = []
for el, charges in oxidation_states[0].items():
species.extend([Species(el, c) for c in charges])
# Return the new object
return Composition(collections.Counter(species))
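    # Illustrative sketch (not part of the original file): the decorated
    # composition carries Species keys rather than plain Elements.
    #
    #   Composition("Fe2O3").add_charges_from_oxi_state_guesses()
    #   # -> composition equivalent to {Species("Fe3+"): 2, Species("O2-"): 3}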
def remove_charges(self) -> "Composition":
"""
Removes the charges from any species in a Composition object.
Returns:
Composition object without charge decoration, for example
{"Fe3+": 2.0, "O2-":3.0} becomes {"Fe": 2.0, "O":3.0}
"""
d: Dict[Element, float] = collections.defaultdict(float)
for e, a in self.items():
d[Element(e.symbol)] += a
return Composition(d)
def _get_oxid_state_guesses(self, all_oxi_states, max_sites, oxi_states_override, target_charge):
"""
Utility operation for guessing oxidation states.
See `oxi_state_guesses` for full details. This operation does the
calculation of the most likely oxidation states
Args:
oxi_states_override (dict): dict of str->list to override an
element's common oxidation states, e.g. {"V": [2,3,4,5]}
target_charge (int): the desired total charge on the structure.
Default is 0 signifying charge balance.
all_oxi_states (bool): if True, an element defaults to
all oxidation states in pymatgen Element.icsd_oxidation_states.
Otherwise, default is Element.common_oxidation_states. Note
that the full oxidation state list is *very* inclusive and
can produce nonsensical results.
max_sites (int): if possible, will reduce Compositions to at most
this many sites to speed up oxidation state guesses. If the
composition cannot be reduced to this many sites a ValueError
will be raised. Set to -1 to just reduce fully. If set to a
number less than -1, the formula will be fully reduced but a
ValueError will be thrown if the number of atoms in the reduced
formula is greater than abs(max_sites).
Returns:
A list of dicts - each dict reports an element symbol and average
oxidation state across all sites in that composition. If the
composition is not charge balanced, an empty list is returned.
            A list of dicts - each dict maps the element symbol to a list of
            oxidation states for each site of that element. For example, Fe3O4 could
            return a list of [2,2,2,3,3,3] for the oxidation states of the Fe sites.
            If the composition is not charge balanced, an empty list is returned.
"""
comp = self.copy()
# reduce Composition if necessary
if max_sites and max_sites < 0:
comp = self.reduced_composition
if max_sites < -1 and comp.num_atoms > abs(max_sites):
raise ValueError(f"Composition {comp} cannot accommodate max_sites setting!")
elif max_sites and comp.num_atoms > max_sites:
reduced_comp, reduced_factor = self.get_reduced_composition_and_factor()
if reduced_factor > 1:
reduced_comp *= max(1, int(max_sites / reduced_comp.num_atoms))
comp = reduced_comp # as close to max_sites as possible
if comp.num_atoms > max_sites:
raise ValueError(f"Composition {comp} cannot accommodate max_sites setting!")
# Load prior probabilities of oxidation states, used to rank solutions
if not Composition.oxi_prob:
module_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))
all_data = loadfn(os.path.join(module_dir, "..", "analysis", "icsd_bv.yaml"))
Composition.oxi_prob = {Species.from_string(sp): data for sp, data in all_data["occurrence"].items()}
oxi_states_override = oxi_states_override or {}
# assert: Composition only has integer amounts
if not all(amt == int(amt) for amt in comp.values()):
raise ValueError("Charge balance analysis requires integer values in Composition!")
# for each element, determine all possible sum of oxidations
# (taking into account nsites for that particular element)
el_amt = comp.get_el_amt_dict()
els = el_amt.keys()
el_sums = [] # matrix: dim1= el_idx, dim2=possible sums
el_sum_scores = collections.defaultdict(set) # dict of el_idx, sum -> score
el_best_oxid_combo = {} # dict of el_idx, sum -> oxid combo with best score
for idx, el in enumerate(els):
el_sum_scores[idx] = {}
el_best_oxid_combo[idx] = {}
el_sums.append([])
if oxi_states_override.get(el):
oxids = oxi_states_override[el]
elif all_oxi_states:
oxids = Element(el).oxidation_states
else:
oxids = Element(el).icsd_oxidation_states or Element(el).oxidation_states
# get all possible combinations of oxidation states
# and sum each combination
for oxid_combo in combinations_with_replacement(oxids, int(el_amt[el])):
# List this sum as a possible option
oxid_sum = sum(oxid_combo)
if oxid_sum not in el_sums[idx]:
el_sums[idx].append(oxid_sum)
# Determine how probable is this combo?
score = sum(Composition.oxi_prob.get(Species(el, o), 0) for o in oxid_combo)
# If it is the most probable combo for a certain sum,
# store the combination
if oxid_sum not in el_sum_scores[idx] or score > el_sum_scores[idx].get(oxid_sum, 0):
el_sum_scores[idx][oxid_sum] = score
el_best_oxid_combo[idx][oxid_sum] = oxid_combo
# Determine which combination of oxidation states for each element
# is the most probable
all_sols = [] # will contain all solutions
all_oxid_combo = [] # will contain the best combination of oxidation states for each site
all_scores = [] # will contain a score for each solution
for x in product(*el_sums):
# each x is a trial of one possible oxidation sum for each element
if sum(x) == target_charge: # charge balance condition
el_sum_sol = dict(zip(els, x)) # element->oxid_sum
# normalize oxid_sum by amount to get avg oxid state
sol = {el: v / el_amt[el] for el, v in el_sum_sol.items()}
# add the solution to the list of solutions
all_sols.append(sol)
# determine the score for this solution
score = 0
for idx, v in enumerate(x):
score += el_sum_scores[idx][v]
all_scores.append(score)
# collect the combination of oxidation states for each site
all_oxid_combo.append({e: el_best_oxid_combo[idx][v] for idx, (e, v) in enumerate(zip(els, x))})
# sort the solutions by highest to lowest score
if all_scores:
all_sols, all_oxid_combo = zip(
*[
(y, x)
for (z, y, x) in sorted(
zip(all_scores, all_sols, all_oxid_combo),
key=lambda pair: pair[0],
reverse=True,
)
]
)
return all_sols, all_oxid_combo
@staticmethod
def ranked_compositions_from_indeterminate_formula(
fuzzy_formula: str, lock_if_strict: bool = True
) -> List["Composition"]:
"""
Takes in a formula where capitalization might not be correctly entered,
and suggests a ranked list of potential Composition matches.
Author: Anubhav Jain
Args:
fuzzy_formula (str): A formula string, such as "co2o3" or "MN",
that may or may not have multiple interpretations
lock_if_strict (bool): If true, a properly entered formula will
only return the one correct interpretation. For example,
"Co1" will only return "Co1" if true, but will return both
"Co1" and "C1 O1" if false.
Returns:
A ranked list of potential Composition matches
"""
# if we have an exact match and the user specifies lock_if_strict, just
# return the exact match!
if lock_if_strict:
# the strict composition parsing might throw an error, we can ignore
# it and just get on with fuzzy matching
try:
comp = Composition(fuzzy_formula)
return [comp]
except ValueError:
pass
all_matches = Composition._comps_from_fuzzy_formula(fuzzy_formula)
# remove duplicates
uniq_matches = list(set(all_matches))
# sort matches by rank descending
ranked_matches = sorted(uniq_matches, key=lambda match: (match[1], match[0]), reverse=True)
return [m[0] for m in ranked_matches]
@staticmethod
def _comps_from_fuzzy_formula(
fuzzy_formula: str,
m_dict: Dict[str, float] = None,
m_points: int = 0,
factor: Union[int, float] = 1,
) -> Generator[Tuple["Composition", int], None, None]:
"""
A recursive helper method for formula parsing that helps in
interpreting and ranking indeterminate formulas.
Author: Anubhav Jain
Args:
fuzzy_formula (str): A formula string, such as "co2o3" or "MN",
that may or may not have multiple interpretations.
m_dict (dict): A symbol:amt dictionary from the previously parsed
formula.
m_points: Number of points gained from the previously parsed
formula.
factor: Coefficient for this parse, e.g. (PO4)2 will feed in PO4
as the fuzzy_formula with a coefficient of 2.
Returns:
list[tuple[Composition, int]]: A list of tuples, with the first element being a Composition
and the second element being the number of points awarded that Composition interpretation.
"""
m_dict = m_dict or {}
def _parse_chomp_and_rank(m, f, m_dict, m_points):
"""
A helper method for formula parsing that helps in interpreting and
ranking indeterminate formulas
Author: Anubhav Jain
Args:
m: A regex match, with the first group being the element and
the second group being the amount
f: The formula part containing the match
m_dict: A symbol:amt dictionary from the previously parsed
formula
m_points: Number of points gained from the previously parsed
formula
Returns:
A tuple of (f, m_dict, points) where m_dict now contains data
from the match and the match has been removed (chomped) from
the formula f. The "goodness" of the match determines the
number of points returned for chomping. Returns
(None, None, None) if no element could be found...
"""
points = 0
# Points awarded if the first element of the element is correctly
# specified as a capital
points_first_capital = 100
# Points awarded if the second letter of the element is correctly
# specified as lowercase
points_second_lowercase = 100
# get element and amount from regex match
el = m.group(1)
if len(el) > 2 or len(el) < 1:
raise ValueError("Invalid element symbol entered!")
amt = float(m.group(2)) if m.group(2).strip() != "" else 1
# convert the element string to proper [uppercase,lowercase] format
# and award points if it is already in that format
char1 = el[0]
char2 = el[1] if len(el) > 1 else ""
if char1 == char1.upper():
points += points_first_capital
if char2 and char2 == char2.lower():
points += points_second_lowercase
el = char1.upper() + char2.lower()
# if it's a valid element, chomp and add to the points
if Element.is_valid_symbol(el):
if el in m_dict:
m_dict[el] += amt * factor
else:
m_dict[el] = amt * factor
return f.replace(m.group(), "", 1), m_dict, m_points + points
# else return None
return None, None, None
fuzzy_formula = fuzzy_formula.strip()
if len(fuzzy_formula) == 0:
# The entire formula has been parsed into m_dict. Return the
# corresponding Composition and number of points
if m_dict:
yield (Composition.from_dict(m_dict), m_points)
else:
# if there is a parenthesis, remove it and match the remaining stuff
# with the appropriate factor
for mp in re.finditer(r"\(([^\(\)]+)\)([\.\d]*)", fuzzy_formula):
mp_points = m_points
mp_form = fuzzy_formula.replace(mp.group(), " ", 1)
mp_dict = dict(m_dict)
mp_factor = 1 if mp.group(2) == "" else float(mp.group(2))
# Match the stuff inside the parenthesis with the appropriate
# factor
for match in Composition._comps_from_fuzzy_formula(mp.group(1), mp_dict, mp_points, factor=mp_factor):
only_me = True
# Match the stuff outside the parentheses and return the
# sum.
for match2 in Composition._comps_from_fuzzy_formula(mp_form, mp_dict, mp_points, factor=1):
only_me = False
yield (match[0] + match2[0], match[1] + match2[1])
# if the stuff inside the parenthesis is nothing, then just
# return the stuff inside the parentheses
if only_me:
yield match
return
# try to match the single-letter elements
m1 = re.match(r"([A-z])([\.\d]*)", fuzzy_formula)
if m1:
m_points1 = m_points
m_form1 = fuzzy_formula
m_dict1 = dict(m_dict)
(m_form1, m_dict1, m_points1) = _parse_chomp_and_rank(m1, m_form1, m_dict1, m_points1)
if m_dict1:
# there was a real match
for match in Composition._comps_from_fuzzy_formula(m_form1, m_dict1, m_points1, factor):
yield match
# try to match two-letter elements
m2 = re.match(r"([A-z]{2})([\.\d]*)", fuzzy_formula)
if m2:
m_points2 = m_points
m_form2 = fuzzy_formula
m_dict2 = dict(m_dict)
(m_form2, m_dict2, m_points2) = _parse_chomp_and_rank(m2, m_form2, m_dict2, m_points2)
if m_dict2:
# there was a real match
for match in Composition._comps_from_fuzzy_formula(m_form2, m_dict2, m_points2, factor):
yield match
def reduce_formula(sym_amt, iupac_ordering: bool = False) -> Tuple[str, float]:
"""
Helper method to reduce a sym_amt dict to a reduced formula and factor.
Args:
sym_amt (dict): {symbol: amount}.
iupac_ordering (bool, optional): Whether to order the
formula by the iupac "electronegativity" series, defined in
Table VI of "Nomenclature of Inorganic Chemistry (IUPAC
Recommendations 2005)". This ordering effectively follows
the groups and rows of the periodic table, except the
Lanthanides, Actinides and hydrogen. Note that polyanions
will still be determined based on the true electronegativity of
the elements.
Returns:
(reduced_formula, factor).
"""
syms = sorted(sym_amt.keys(), key=lambda x: [get_el_sp(x).X, x])
syms = list(filter(lambda x: abs(sym_amt[x]) > Composition.amount_tolerance, syms))
factor = 1
# Enforce integers for doing gcd.
if all(int(i) == i for i in sym_amt.values()):
factor = abs(gcd(*(int(i) for i in sym_amt.values())))
polyanion = []
# if the composition contains a poly anion
if len(syms) >= 3 and get_el_sp(syms[-1]).X - get_el_sp(syms[-2]).X < 1.65:
poly_sym_amt = {syms[i]: sym_amt[syms[i]] / factor for i in [-2, -1]}
(poly_form, poly_factor) = reduce_formula(poly_sym_amt, iupac_ordering=iupac_ordering)
if poly_factor != 1:
polyanion.append(f"({poly_form}){int(poly_factor)}")
syms = syms[: len(syms) - 2 if polyanion else len(syms)]
if iupac_ordering:
syms = sorted(syms, key=lambda x: [get_el_sp(x).iupac_ordering, x])
reduced_form = []
for s in syms:
normamt = sym_amt[s] * 1.0 / factor
reduced_form.append(s)
reduced_form.append(formula_double_format(normamt))
reduced_form = "".join(reduced_form + polyanion) # type: ignore
return reduced_form, factor # type: ignore
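# Illustrative sketch (not part of the original file): reduce_formula divides
# all amounts by their greatest common divisor, e.g.
#
#   reduce_formula({"Li": 4, "Fe": 4, "P": 4, "O": 16})  # -> ('LiFePO4', 4)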
class ChemicalPotential(dict, MSONable):
"""
    Class to represent a set of chemical potentials. Can be: multiplied/divided by a Number;
    multiplied by a Composition (returns an energy); added/subtracted with other ChemicalPotentials.
"""
def __init__(self, *args, **kwargs):
"""
Args:
*args, **kwargs: any valid dict init arguments
"""
d = dict(*args, **kwargs)
super().__init__((get_el_sp(k), v) for k, v in d.items())
if len(d) != len(self):
raise ValueError("Duplicate potential specified")
def __mul__(self, other):
if isinstance(other, numbers.Number):
return ChemicalPotential({k: v * other for k, v in self.items()})
raise NotImplementedError()
__rmul__ = __mul__
def __truediv__(self, other):
if isinstance(other, numbers.Number):
return ChemicalPotential({k: v / other for k, v in self.items()})
raise NotImplementedError()
__div__ = __truediv__
def __sub__(self, other):
if isinstance(other, ChemicalPotential):
els = set(self.keys()).union(other.keys())
return ChemicalPotential({e: self.get(e, 0) - other.get(e, 0) for e in els})
raise NotImplementedError()
def __add__(self, other):
if isinstance(other, ChemicalPotential):
els = set(self.keys()).union(other.keys())
return ChemicalPotential({e: self.get(e, 0) + other.get(e, 0) for e in els})
raise NotImplementedError()
def get_energy(self, composition: Composition, strict: bool = True) -> float:
"""
Calculates the energy of a composition.
Args:
composition (Composition): input composition
strict (bool): Whether all potentials must be specified
"""
if strict and set(composition.keys()) > set(self.keys()):
s = set(composition.keys()) - set(self.keys())
raise ValueError(f"Potentials not specified for {s}")
return sum(self.get(k, 0) * v for k, v in composition.items())
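    # Illustrative sketch (not part of the original file): the energy is a
    # dot product of potentials and amounts.
    #
    #   pots = ChemicalPotential({"Fe": -1.0, "O": -2.0})
    #   pots.get_energy(Composition("Fe2O3"))  # 2*(-1.0) + 3*(-2.0) = -8.0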
def __repr__(self):
return "ChemPots: " + super().__repr__()
class CompositionError(Exception):
"""Exception class for composition errors"""
if __name__ == "__main__":
import doctest
doctest.testmod()
|
vorwerkc/pymatgen
|
pymatgen/core/composition.py
|
Python
|
mit
| 50,992
|
[
"pymatgen"
] |
b81b0e8806c6bc74f924c7d5104968f0b49fbafef8bca0bae631070de046e55c
|
from setuptools import setup, find_packages
setup(
name = 'octopus',
version = '1.0.0',
packages = find_packages(),
install_requires = [
"werkzeug==0.8.3",
"Flask==0.9",
"Flask-Login==0.1.3",
"requests",
"esprit",
"simplejson",
"lxml==3.4.4",
"Flask-WTF==0.8.3",
"nose",
"Flask-Mail==0.9.1",
"python-dateutil",
"unidecode"
],
url = 'http://cottagelabs.com/',
author = 'Cottage Labs',
author_email = 'us@cottagelabs.com',
description = 'Magnificent Octopus - Flask application helper library',
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: Copyheart',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
|
JiscPER/magnificent-octopus
|
setup.py
|
Python
|
apache-2.0
| 966
|
[
"Octopus"
] |
5eab7892b032c43b6fec385a968729d232e11d2917ce63ce4a6231fc1f76df5f
|
# -----------------------------------------------------------------------------------------------------
# CONDOR
# Simulator for diffractive single-particle imaging experiments with X-ray lasers
# http://xfel.icm.uu.se/condor/
# -----------------------------------------------------------------------------------------------------
# Copyright 2016 Max Hantke, Filipe R.N.C. Maia, Tomas Ekeberg
# Condor is distributed under the terms of the BSD 2-Clause License
# -----------------------------------------------------------------------------------------------------
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------------------------------
# General note:
# All variables are in SI units by default. Exceptions explicit by variable name.
# -----------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import # Compatibility with python 2 and 3
import numpy
import logging
logger = logging.getLogger(__name__)
import condor
import condor.utils.log
from condor.utils.log import log_and_raise_error,log_warning,log_info,log_debug
from .particle_abstract import AbstractContinuousParticle
from condor.utils.variation import Variation
class ParticleSpheroid(AbstractContinuousParticle):
"""
Class for a particle model
*Model:* Uniformly filled spheroid particle (continuum approximation)
:math:`a`: radius (*semi-diameter*) perpendicular to the rotation axis of the ellipsoid
:math:`c`: radius (*semi-diameter*) along the rotation axis of the ellipsoid
    Before applying rotations, the rotation axis is parallel to the *y*-axis
Args:
:diameter (float): Sphere diameter
Kwargs:
:diameter_variation (str): See :meth:`condor.particle.particle_abstract.AbstractContinuousParticle.set_diameter_variation` (default ``None``)
:diameter_spread (float): See :meth:`condor.particle.particle_abstract.AbstractContinuousParticle.set_diameter_variation` (default ``None``)
:diameter_variation_n (int): See :meth:`condor.particle.particle_abstract.AbstractContinuousParticle.set_diameter_variation` (default ``None``)
:flattening (float): (Mean) value of :math:`a/c` (default ``0.75``)
:flattening_variation (str): See :meth:`condor.particle.particle_spheroid.set_flattening_variation` (default ``None``)
:flattening_spread (float): See :meth:`condor.particle.particle_spheroid.set_flattening_variation` (default ``None``)
:flattening_variation_n (int): See :meth:`condor.particle.particle_spheroid.set_flattening_variation` (default ``None``)
:rotation_values (array): See :meth:`condor.particle.particle_abstract.AbstractParticle.set_alignment` (default ``None``)
:rotation_formalism (str): See :meth:`condor.particle.particle_abstract.AbstractParticle.set_alignment` (default ``None``)
:rotation_mode (str): See :meth:`condor.particle.particle_abstract.AbstractParticle.set_alignment` (default ``None``)
      :number (float): Expectation value for the number of particles in the interaction volume. (default ``1.``)
      :arrival (str): Arrival of particles at the interaction volume can be either ``'random'`` or ``'synchronised'``. If ``'synchronised'``, at every event the number of particles in the interaction volume equals the rounded value of ``number``. If ``'random'``, the number of particles is Poissonian and ``number`` is the expectation value. (default ``'synchronised'``)
:position (array): See :class:`condor.particle.particle_abstract.AbstractParticle` (default ``None``)
:position_variation (str): See :meth:`condor.particle.particle_abstract.AbstractParticle.set_position_variation` (default ``None``)
:position_spread (float): See :meth:`condor.particle.particle_abstract.AbstractParticle.set_position_variation` (default ``None``)
:position_variation_n (int): See :meth:`condor.particle.particle_abstract.AbstractParticle.set_position_variation` (default ``None``)
:material_type (str): See :meth:`condor.particle.particle_abstract.AbstractContinuousParticle.set_material` (default ``\'water\'``)
:massdensity (float): See :meth:`condor.particle.particle_abstract.AbstractContinuousParticle.set_material` (default ``None``)
:atomic_composition (dict): See :meth:`condor.particle.particle_abstract.AbstractContinuousParticle.set_material` (default ``None``)
:electron_density (float): See :meth:`condor.particle.particle_abstract.AbstractContinuousParticle.set_material` (default ``None``)
"""
def __init__(self,
diameter,
diameter_variation = None, diameter_spread = None, diameter_variation_n = None,
flattening = 0.75, flattening_variation = None, flattening_spread = None, flattening_variation_n = None,
rotation_values = None, rotation_formalism = None, rotation_mode = "extrinsic",
number = 1., arrival = "synchronised",
position = None, position_variation = None, position_spread = None, position_variation_n = None,
material_type = 'water', massdensity = None, atomic_composition = None, electron_density = None):
# Initialise base class
AbstractContinuousParticle.__init__(self,
diameter=diameter, diameter_variation=diameter_variation, diameter_spread=diameter_spread, diameter_variation_n=diameter_variation_n,
rotation_values=rotation_values, rotation_formalism=rotation_formalism, rotation_mode=rotation_mode,
number=number, arrival=arrival,
position=position, position_variation=position_variation, position_spread=position_spread, position_variation_n=position_variation_n,
material_type=material_type, massdensity=massdensity, atomic_composition=atomic_composition, electron_density=electron_density)
self.flattening_mean = flattening
self.set_flattening_variation(flattening_variation=flattening_variation, flattening_spread=flattening_spread, flattening_variation_n=flattening_variation_n)
def get_conf(self):
"""
        Get configuration in the form of a dictionary. Another identically configured ParticleSpheroid instance can be initialised by:
.. code-block:: python
conf = P0.get_conf() # P0: already existing ParticleSpheroid instance
P1 = condor.ParticleSpheroid(**conf) # P1: new ParticleSpheroid instance with the same configuration as P0
"""
conf = {}
conf.update(AbstractContinuousParticle.get_conf(self))
conf["flattening"] = self.flattening_mean
fvar = self._flattening_variation.get_conf()
conf["flattening_variation"] = fvar["mode"]
conf["flattening_spread"] = fvar["spread"]
conf["flattening_variation_n"] = fvar["n"]
return conf
def get_next(self):
"""
Iterate the parameters and return them as a dictionary
"""
O = AbstractContinuousParticle.get_next(self)
O["particle_model"] = "spheroid"
O["flattening"] = self._get_next_flattening()
return O
def set_flattening_variation(self, flattening_variation, flattening_spread, flattening_variation_n):
"""
Set the variation scheme of the flattening parameter
Args:
:flattening_variation (str): Variation of the particle flattening
*Choose one of the following options:*
- ``None`` - No variation
- ``\'normal\'`` - Normal (*Gaussian*) variation
- ``\'uniform\'`` - Uniformly distributed flattenings
- ``\'range\'`` - Equidistant sequence of particle-flattening samples within the spread limits. ``flattening_variation_n`` defines the number of samples within the range
:flattening_spread (float): Statistical spread of the parameter
:flattening_variation_n (int): Number of particle-flattening samples within the specified range
.. note:: The argument ``flattening_variation_n`` takes effect only if ``flattening_variation=\'range\'``
"""
self._flattening_variation = Variation(flattening_variation, flattening_spread, flattening_variation_n)
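    # Illustrative sketch (not part of the original file): assuming `p` is an
    # existing ParticleSpheroid, a Gaussian spread of flattenings around the
    # mean could be configured as
    #
    #   p.set_flattening_variation(flattening_variation="normal",
    #                              flattening_spread=0.05,
    #                              flattening_variation_n=None)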
def _get_next_flattening(self):
f = self._flattening_variation.get(self.flattening_mean)
# Non-random
if self._flattening_variation._mode in [None, "range"]:
if f <= 0:
            log_and_raise_error(logger, "Spheroid flattening is less than or equal to zero. Change your configuration.")
else:
return f
# Random
else:
if f <= 0.:
                log_warning(logger, "Spheroid flattening is less than or equal to zero. Try again.")
return self._get_next_flattening()
else:
return f
def get_dn(self, photon_wavelength):
if self.materials is None:
dn = 0.
else:
dn = numpy.array([m.get_dn(photon_wavelength) for m in self.materials]).sum()
return dn
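    # Illustrative sketch (not part of the original file): a minimal water
    # spheroid; the diameter and flattening values are arbitrary examples.
    #
    #   p = ParticleSpheroid(diameter=100e-9, flattening=0.8)
    #   state = p.get_next()  # dict with "diameter", "flattening", ...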
|
mhantke/condor
|
condor/particle/particle_spheroid.py
|
Python
|
bsd-2-clause
| 10,607
|
[
"Gaussian"
] |
279b6ae68d7b6e5fa83d0ef04e5eab69fca99984ea9048aa86cb9c8c975fcc35
|
from django.core.management.base import BaseCommand
from schools.models import School, BoundaryType
from stories.models import Question, Questiongroup, QuestionType, QuestiongroupQuestions, Source
class Command(BaseCommand):
args = ""
help = """Populate DB with GKA IVRS questions
./manage.py populategkaivrsdata"""
def handle(self, *args, **options):
s = Source.objects.get(name="ivrs")
q = Questiongroup.objects.get_or_create(version=2, source=s)[0]
b = BoundaryType.objects.get(name='Primary School')
qtype_checkbox = QuestionType.objects.get(name='checkbox')
qtype_numeric = QuestionType.objects.get(name='numeric')
q1 = Question.objects.get_or_create(
text="Was the school open?",
data_type=1,
question_type=qtype_checkbox,
options="{'Yes','No'}",
school_type=b
)[0]
q2 = Question.objects.get_or_create(
text="Class visited",
data_type=1,
question_type=qtype_numeric,
options="{4,5}",
school_type=b
)[0]
q3 = Question.objects.get_or_create(
text="Was Math class happening on the day of your visit?",
data_type=1,
question_type=qtype_checkbox,
options="{'Yes','No'}",
school_type=b
)[0]
q4 = Question.objects.get_or_create(
text="Which chapter of the textbook was taught?",
data_type=1,
question_type=qtype_numeric,
options="{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}",
school_type=b
)[0]
q5 = Question.objects.get_or_create(
text="Which Ganitha Kalika Andolana TLM was being used by teacher?",
data_type=1,
question_type=qtype_numeric,
options="{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21}",
school_type=b
)[0]
q6 = Question.objects.get_or_create(
text="Did you see children using the Ganitha Kalika Andolana TLM?",
data_type=1,
question_type=qtype_checkbox,
options="{'Yes','No'}",
school_type=b
)[0]
q7 = Question.objects.get_or_create(
text="Was group work happening in the class on the day of your visit?",
data_type=1,
question_type=qtype_checkbox,
options="{'Yes','No'}",
school_type=b
)[0]
q8 = Question.objects.get_or_create(
text="Were children using square line book during math class?",
data_type=1,
question_type=qtype_checkbox,
options="{'Yes','No'}",
school_type=b
)[0]
q9 = Question.objects.get_or_create(
text="Are all the toilets in the school functional?",
data_type=1,
question_type=qtype_checkbox,
options="{'Yes','No'}",
school_type=b
)[0]
q10 = Question.objects.get_or_create(
text="Does the school have a separate functional toilet for girls?",
data_type=1,
question_type=qtype_checkbox,
options="{'Yes','No'}",
school_type=b
)[0]
q11 = Question.objects.get_or_create(
text="Does the school have drinking water?",
data_type=1,
question_type=qtype_checkbox,
options="{'Yes','No'}",
school_type=b
)[0]
q12 = Question.objects.get_or_create(
text="Is a Mid Day Meal served in the school?",
data_type=1,
question_type=qtype_checkbox,
options="{'Yes','No'}",
school_type=b
)[0]
QuestiongroupQuestions.objects.get_or_create(
questiongroup=q, question=q1, sequence=1)
QuestiongroupQuestions.objects.get_or_create(
questiongroup=q, question=q2, sequence=2)
QuestiongroupQuestions.objects.get_or_create(
questiongroup=q, question=q3, sequence=3)
QuestiongroupQuestions.objects.get_or_create(
questiongroup=q, question=q4, sequence=4)
QuestiongroupQuestions.objects.get_or_create(
questiongroup=q, question=q5, sequence=5)
QuestiongroupQuestions.objects.get_or_create(
questiongroup=q, question=q6, sequence=6)
QuestiongroupQuestions.objects.get_or_create(
questiongroup=q, question=q7, sequence=7)
QuestiongroupQuestions.objects.get_or_create(
questiongroup=q, question=q8, sequence=8)
QuestiongroupQuestions.objects.get_or_create(
questiongroup=q, question=q9, sequence=9)
QuestiongroupQuestions.objects.get_or_create(
questiongroup=q, question=q10, sequence=10)
QuestiongroupQuestions.objects.get_or_create(
questiongroup=q, question=q11, sequence=11)
QuestiongroupQuestions.objects.get_or_create(
questiongroup=q, question=q12, sequence=12)
print "GKA questions populated"
|
klpdotorg/dubdubdub
|
apps/ivrs/management/commands/archived_commands/populategkaivrsdata.py
|
Python
|
mit
| 5,178
|
[
"VisIt"
] |
4396fb34b2a4a468daaf1ae3878fdeb19f942eb674a92306dc7aa437e6ec0875
|
# Copyright 2009, Mark Fassler
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
import vtk
class create2dRectangle:
    def __init__(self, xmin, ymin, xmax, ymax):
self.pts = vtk.vtkPoints()
self.pts.InsertPoint(0, xmin, ymin, 0)
self.pts.InsertPoint(1, xmax, ymin, 0)
self.pts.InsertPoint(2, xmax, ymax, 0)
self.pts.InsertPoint(3, xmin, ymax, 0)
self.rect = vtk.vtkCellArray()
self.rect.InsertNextCell(4)
self.rect.InsertCellPoint(0)
self.rect.InsertCellPoint(1)
self.rect.InsertCellPoint(2)
self.rect.InsertCellPoint(3)
self.selectRect = vtk.vtkPolyData()
self.selectRect.SetPoints(self.pts)
self.selectRect.SetPolys(self.rect)
self.mapper = vtk.vtkPolyDataMapper2D()
self.mapper.SetInputData(self.selectRect)
self.actor = vtk.vtkActor2D()
self.actor.SetMapper(self.mapper)
rprop = self.actor.GetProperty()
rprop.SetColor(1, 1, 1)
        rprop.SetOpacity(0.4)
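# --- Usage sketch (illustrative): attach the rectangle to a renderer ---
# ren = vtk.vtkRenderer()
# rect = create2dRectangle(10, 10, 200, 120)  # display (pixel) coordinates
# ren.AddActor2D(rect.actor)                  # AddActor2D is inherited from vtkViewport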
|
mfassler/jaivis
|
jv/primitives.py
|
Python
|
gpl-2.0
| 1,181
|
[
"VTK"
] |
d48002630a39c8cc1796ab4a6e5bd2135c966d2a530511cff1253ce73490cb3a
|
# Date: September 2017
# Author: Kutay B. Sezginel
"""
Initialize Lammps simulation using lammps_interface
"""
import os
import glob
from lammps_interface.lammps_main import LammpsSimulation
from lammps_interface.structure_data import from_CIF
from . import read_lines, write_lines
from thermof.sample import lammps_input
from thermof.parameters import Parameters
def write_lammps_files(simdir, parameters, verbose=True):
"""
Write Lammps files using lammps_interface.
Args:
- simdir (str): Directory to write Lammps simulation files
- parameters (Parameters): Lammps simulation parameters
Returns:
- None: Writes Lammps simulation files to simulation directory
"""
print('I. Writing Lammps input and data files...') if verbose else None
lammpspar = Parameters(parameters.lammps)
sim = LammpsSimulation(lammpspar)
cell, graph = from_CIF(lammpspar.cif_file)
sim.set_cell(cell)
sim.set_graph(graph)
sim.split_graph()
sim.assign_force_fields()
sim.compute_simulation_size()
sim.merge_graphs()
sim.write_lammps_files(simdir)
def write_lammps_input(simdir, parameters, lammps_input=lammps_input, verbose=True):
"""
Write Lammps simulation input file.
Args:
- simdir (str): Directory to write Lammps input file
- parameters (Parameters): Lammps simulation parameters
Returns:
- None: Rewrites Lammps simulation input file to simulation directory
"""
simpar = parameters.thermof
inp_file = glob.glob(os.path.join(simdir, 'in.*'))[0]
print('II. Updating Lammps input file -> %s' % inp_file) if verbose else None
input_lines = read_lines(inp_file)
data_file = glob.glob(os.path.join(simdir, 'data.*'))[0]
simpar['atom_list'] = get_atom_list(data_file)
simpar_lines = get_simpar_lines(simpar, simpar_file=lammps_input['simpar'])
input_lines += '\n'
input_lines += simpar_lines
print('Adding fixes: %s' % ' | '.join(simpar['fix'])) if verbose else None
for fix in simpar['fix']:
fix_lines = get_fix_lines(fix, simpar, lammps_input=lammps_input)
input_lines += '\n'
input_lines += fix_lines
write_lines(inp_file, input_lines)
print('Updating simulation parameters...') if verbose else None
parameters.thermof['kpar']['log_file'] = 'log.%s' % parameters.thermof['mof']['name']
parameters.thermof['kpar']['fix'] = None
parameters.thermof['kpar']['temp'] = parameters.thermof['temperature']
parameters.thermof['kpar']['thermo_style'] = parameters.thermof['thermo_style']
def get_fix_lines(fix, simpar, lammps_input=lammps_input):
"""
Get lines for selected fix.
"""
if fix == 'NPT':
fix_lines = get_npt_lines(simpar, npt_file=lammps_input['npt'])
elif fix == 'NVE':
fix_lines = get_nve_lines(simpar, nve_file=lammps_input['nve'])
elif fix == 'NVT':
fix_lines = get_nvt_lines(simpar, nvt_file=lammps_input['nvt'])
elif fix == 'MIN':
fix_lines = get_min_lines(simpar, min_file=lammps_input['min'])
elif fix == 'TC':
fix_lines = get_tc_lines(simpar, tc_file=lammps_input['thermal_conductivity'])
elif fix == 'THEXP':
fix_lines = get_thexp_lines(simpar, thexp_file=lammps_input['thermal_expansion'])
elif fix == 'NVE_ANGLE':
fix_lines = get_nve_improved_angle_lines(simpar, nve_file=lammps_input['nve_improved_angle'])
return fix_lines
def get_simpar_lines(simpar, simpar_file=lammps_input['simpar']):
"""
Get input lines for Lammps simulation parameters using thermof_parameters.
"""
simpar_lines = read_lines(simpar_file)
simpar_lines[1] = 'variable T equal %i\n' % simpar['temperature']
simpar_lines[2] = 'variable dt equal %.1f\n' % simpar['dt']
simpar_lines[3] = 'variable seed equal %i\n' % simpar['seed']
simpar_lines[4] = 'variable p equal %i\n' % simpar['correlation_length']
simpar_lines[5] = 'variable s equal %i\n' % simpar['sample_interval']
simpar_lines[12] = 'thermo %i\n' % simpar['thermo']
simpar_lines[13] = 'thermo_style custom %s\n' % ' '.join(simpar['thermo_style'])
if simpar['dump_xyz'] != 0:
simpar_lines[7] = 'variable txyz equal %i\n' % simpar['dump_xyz']
simpar_lines[9] = 'dump_modify 1 element %s\n' % ' '.join(simpar['atom_list'])
else:
del simpar_lines[7:10]
return simpar_lines
def get_npt_lines(simpar, npt_file=lammps_input['npt']):
"""
Get input lines for NPT simulation using thermof_parameters.
"""
npt_lines = read_lines(npt_file)
npt_lines[1] = 'variable pdamp equal %i*${dt}\n' % simpar['npt']['pdamp']
npt_lines[2] = 'variable tdamp equal %i*${dt}\n' % simpar['npt']['tdamp']
npt_lines[4] = 'run %i\n' % simpar['npt']['steps']
if simpar['npt']['restart']:
npt_lines.append('write_restart restart.npt\n')
return npt_lines
def get_nvt_lines(simpar, nvt_file=lammps_input['nvt']):
"""
Get input lines for NVT simulation using thermof_parameters.
"""
nvt_lines = read_lines(nvt_file)
nvt_lines[2] = 'run %i\n' % simpar['nvt']['steps']
if simpar['nvt']['restart']:
nvt_lines.append('write_restart restart.nvt\n')
return nvt_lines
def get_nve_lines(simpar, nve_file=lammps_input['nve']):
"""
Get input lines for NVE simulation (including thermal conductivity calc.) using thermof_parameters.
"""
nve_lines = read_lines(nve_file)
if simpar['nve']['equilibration'] >= 0:
nve_lines[2] = 'run %i\n' % simpar['nve']['equilibration']
else:
nve_lines = nve_lines[4:]
nve_lines[42] = 'run %i\n' % simpar['nve']['steps']
if simpar['nve']['restart']:
nve_lines.append('write_restart restart.nve\n')
return nve_lines
def get_nve_improved_angle_lines(simpar, nve_file=lammps_input['nve_improved_angle']):
"""
Get input lines for NVE simulation (including thermal conductivity calc.) using thermof_parameters.
"""
nve_lines = read_lines(nve_file)
if simpar['nve']['equilibration'] >= 0:
nve_lines[2] = 'run %i\n' % simpar['nve']['equilibration']
else:
nve_lines = nve_lines[4:]
nve_lines[29] = 'run %i\n' % simpar['nve']['steps']
if simpar['nve']['restart']:
nve_lines.append('write_restart restart.nve\n')
return nve_lines
def get_min_lines(simpar, min_file=lammps_input['min']):
"""
Get input lines for minimization using thermof_parameters.
"""
mof = simpar['mof']['name']
min_lines = read_lines(min_file)
min_lines[2] = 'print "MinStep,CellMinStep,AtomMinStep,FinalStep,Energy,EDiff" file %s.min.csv screen no\n' % mof
min_lines[3] = 'variable min_eval equal %.1e\n' % simpar['min']['edif']
min_lines[9] = 'minimize %.1e %.1e %i %i\n' % (simpar['min']['etol'], simpar['min']['ftol'], simpar['min']['maxiter'], simpar['min']['maxeval'])
min_lines[14] = 'minimize %.1e %.1e %i %i\n' % (simpar['min']['etol'], simpar['min']['ftol'], simpar['min']['maxiter'], simpar['min']['maxeval'])
min_lines[18] = 'print "${iter},${CellMinStep},${AtomMinStep},${AtomMinStep},$(pe),${min_E}" append %s.min.csv screen no\n' % mof
    if simpar['min']['restart']:
        min_lines.append('write_restart restart.min\n')
return min_lines
def get_tc_lines(simpar, tc_file=lammps_input['thermal_conductivity']):
"""
Get thermal conductivity calculation Lammps input lines
Args:
- parameters (Parameters): Lammps parameters (see thermof.parameters)
- tc_file (str): Sample thermal conductivity Lammps input file
Returns:
- list: List of Lammps input lines for thermal conductivity calculations
"""
tc_lines = read_lines(tc_file)
tc_lines[1] = 'variable T equal %.1f\n' % simpar['temperature']
tc_lines[2] = 'variable dt equal %.1f\n' % simpar['dt']
tc_lines[3] = 'variable seed equal %i\n' % simpar['seed']
return tc_lines
def get_thexp_lines(simpar, thexp_file=lammps_input['thermal_expansion']):
"""
Get thermal expansion calculation Lammps input lines
Args:
- parameters (Parameters): Lammps parameters (see thermof.parameters)
- thexp_file (str): Sample thermal expansion Lammps input file
Returns:
- list: List of Lammps input lines for thermal expansion calculation
"""
thexp_lines = read_lines(thexp_file)
thexp_lines[1] = 'variable pdamp equal %i*${dt}\n' % simpar['thexp']['pdamp']
thexp_lines[2] = 'variable tdamp equal %i*${dt}\n' % simpar['thexp']['tdamp']
thexp_lines[4] = 'fix thexp all print %i "$(step),$(vol),$(enthalpy)" file %s screen no title "Step,Volume,Enthalpy"\n' % (simpar['thexp']['print'], simpar['thexp']['file'])
thexp_lines[5] = 'run %i\n' % simpar['thexp']['steps']
return thexp_lines
def get_atom_list(data_file):
"""
Reads list of atoms from the data file created by lammps_interface for dump_modify command.
"""
with open(data_file, 'r') as ld:
ld_lines = ld.readlines()
atom_lines = ld_lines[ld_lines.index('Masses\n') + 2:ld_lines.index('Bond Coeffs\n') - 1]
atoms = [line.split()[3][:2].replace('_', '') for line in atom_lines]
return atoms
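# --- Usage sketch (illustrative; how the CIF file is configured is an
# assumption, since the Parameters API is not shown in this file) ---
# from thermof.parameters import Parameters
# par = Parameters()                 # hypothetical default construction
# write_lammps_files('simdir', par)  # writes in.* / data.* via lammps_interface
# write_lammps_input('simdir', par)  # patches the input file with the chosen fixes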
|
kbsezginel/tee_mof
|
thermof/initialize/lammps.py
|
Python
|
mit
| 9,487
|
[
"LAMMPS"
] |
8639766def96a6a2ed51abff11050e591b736c9e2b59509878d6a64cdd8f0889
|
################################################################################
# Copyright (C) 2013-2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
import numpy as np
import matplotlib.pyplot as plt
import functools
from bayespy.utils import misc
"""
This module contains a sketch of a new implementation of the framework.
"""
def message_sum_multiply(plates_parent, dims_parent, *arrays):
"""
Compute message to parent and sum over plates.
Divide by the plate multiplier.
"""
# The shape of the full message
shapes = [np.shape(array) for array in arrays]
shape_full = misc.broadcasted_shape(*shapes)
# Find axes that should be summed
shape_parent = plates_parent + dims_parent
sum_axes = misc.axes_to_collapse(shape_full, shape_parent)
# Compute the multiplier for cancelling the
# plate-multiplier. Because we are summing over the
# dimensions already in this function (for efficiency), we
# need to cancel the effect of the plate-multiplier
# applied in the message_to_parent function.
r = 1
for j in sum_axes:
if j >= 0 and j < len(plates_parent):
r *= shape_full[j]
elif j < 0 and j < -len(dims_parent):
r *= shape_full[j]
# Compute the sum-product
m = misc.sum_multiply(*arrays,
axis=sum_axes,
sumaxis=True,
keepdims=True) / r
# Remove extra axes
m = misc.squeeze_to_dim(m, len(shape_parent))
return m
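# --- Illustrative example (toy shapes, an assumption for demonstration) ---
# Two (4, 3) messages summed to parent plates (3,) with scalar dims ():
#   m = message_sum_multiply((3,), (), np.ones((4, 3)), np.ones((4, 3)))
# The size-4 plate is summed out (factor 4) and then divided by r == 4, so
# m == np.ones(3): the plate multiplier applied later is pre-cancelled here.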
class Moments():
"""
Base class for defining the expectation of the sufficient statistics.
The benefits:
* Write statistic-specific features in one place only. For instance,
covariance from Gaussian message.
* Different nodes may have identically defined statistic so you need to
implement related features only once. For instance, Gaussian and
GaussianARD differ on the prior but the moments are the same.
* General processing nodes which do not change the type of the moments may
"inherit" the features from the parent node. For instance, slicing
operator.
* Conversions can be done easily in both of the above cases if the message
conversion is defined in the moments class. For instance,
GaussianMarkovChain to Gaussian and VaryingGaussianMarkovChain to
Gaussian.
"""
_converters = {}
class NoConverterError(Exception):
pass
def get_instance_converter(self, **kwargs):
"""Default converter within a moments class is an identity.
Override this method when moment class instances are not identical if
they have different attributes.
"""
if len(kwargs) > 0:
raise NotImplementedError(
"get_instance_converter not implemented for class {0}"
.format(self.__class__.__name__)
)
return None
def get_instance_conversion_kwargs(self):
"""
Override this method when moment class instances are not identical if
they have different attributes.
"""
return {}
@classmethod
def add_converter(cls, moments_to, converter):
cls._converters = cls._converters.copy()
cls._converters[moments_to] = converter
return
def get_converter(self, moments_to):
"""
Finds conversion to another moments type if possible.
Note that a conversion from moments A to moments B may require
intermediate conversions. For instance: A->C->D->B. This method finds
the path which uses the least amount of conversions and returns that
path as a single conversion. If no conversion path is available, an
error is raised.
The search algorithm starts from the original moments class and applies
all possible converters to get a new list of moments classes. This list
is extended by adding recursively all parent classes because their
converters are applicable. Then, all possible converters are applied to
this list to get a new list of current moments classes. This is iterated
until the algorithm hits the target moments class or its subclass.
"""
# Check if there is no need for a conversion
#
# TODO/FIXME: This isn't sufficient. Moments can have attributes that
# make them incompatible (e.g., ndim in GaussianMoments).
if isinstance(self, moments_to):
return lambda X: X
# Initialize variables
visited = set()
visited.add(self.__class__)
converted_list = [(self.__class__, [])]
# Each iteration step consists of two parts:
# 1) form a set of the current classes and all their parent classes
# recursively
# 2) from the current set, apply possible conversions to get a new set
# of classes
# Repeat these two steps until in step (1) you hit the target class.
while len(converted_list) > 0:
# Go through all parents recursively so we can then use all
# converters that are available
current_list = []
for (moments_class, converter_path) in converted_list:
if issubclass(moments_class, moments_to):
# Shortest conversion path found, return the resulting total
# conversion function
return misc.composite_function(converter_path)
current_list.append((moments_class, converter_path))
parents = list(moments_class.__bases__)
for parent in parents:
# Recursively add parents
for p in parent.__bases__:
                        if issubclass(p, Moments):
parents.append(p)
# Add un-visited parents
if issubclass(parent, Moments) and parent not in visited:
visited.add(parent)
current_list.append((parent, converter_path))
# Find all converters and extend the converter paths
converted_list = []
for (moments_class, converter_path) in current_list:
for (conv_mom_cls, conv) in moments_class._converters.items():
if conv_mom_cls not in visited:
visited.add(conv_mom_cls)
converted_list.append((conv_mom_cls,
converter_path + [conv]))
raise self.NoConverterError("No conversion defined from %s to %s"
% (self.__class__.__name__,
moments_to.__name__))
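    # --- Illustrative sketch (a simplified, standalone analogue of the
    # search above; the real method additionally walks base classes) ---
    # from collections import deque
    # def find_conversion(start, target, converters):
    #     # converters: {moments_cls: {target_cls: converter_fn}}
    #     queue, seen = deque([(start, [])]), {start}
    #     while queue:
    #         cls, path = queue.popleft()
    #         if issubclass(cls, target):
    #             return misc.composite_function(path)
    #         for (dst, conv) in converters.get(cls, {}).items():
    #             if dst not in seen:
    #                 seen.add(dst)
    #                 queue.append((dst, path + [conv]))
    #     raise LookupError("no conversion path")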
def compute_fixed_moments(self, x):
# This method can't be static because the computation of the moments may
# depend on, for instance, ndim in Gaussian arrays.
raise NotImplementedError("compute_fixed_moments not implemented for "
"%s"
% (self.__class__.__name__))
@classmethod
def from_values(cls, x):
raise NotImplementedError("from_values not implemented "
"for %s"
% (cls.__name__))
def ensureparents(func):
@functools.wraps(func)
def wrapper(self, *parents, **kwargs):
# Convert parents to proper nodes
if self._parent_moments is None:
raise ValueError(
"Parent moments must be defined for {0}"
.format(self.__class__.__name__)
)
parents = [
Node._ensure_moments(
parent,
moments.__class__,
**moments.get_instance_conversion_kwargs()
)
for (parent, moments) in zip(parents, self._parent_moments)
]
# parents = list(parents)
# for (ind, parent) in enumerate(parents):
# parents[ind] = self._ensure_moments(parent,
# self._parent_moments[ind])
# Run the function
return func(self, *parents, **kwargs)
return wrapper
class Node():
"""
Base class for all nodes.
mask
dims
plates
parents
children
name
Sub-classes must implement:
1. For computing the message to children:
get_moments(self):
2. For computing the message to parents:
_get_message_and_mask_to_parent(self, index)
Sub-classes may need to re-implement:
1. If they manipulate plates:
_compute_weights_to_parent(index, weights)
_plates_to_parent(self, index)
_plates_from_parent(self, index)
"""
# These are objects of the _parent_moments_class. If the default way of
# creating them is not correct, write your own creation code.
_moments = None
_parent_moments = None
plates = None
_id_counter = 0
@ensureparents
def __init__(self, *parents, dims=None, plates=None, name="",
notify_parents=True, plotter=None, plates_multiplier=None,
allow_dependent_parents=False):
self.parents = parents
self.dims = dims
self.name = name
self._plotter = plotter
if not allow_dependent_parents:
parent_id_list = []
for parent in parents:
parent_id_list = parent_id_list + list(parent._get_id_list())
if len(parent_id_list) != len(set(parent_id_list)):
raise ValueError("Parent nodes are not independent")
# Inform parent nodes
if notify_parents:
for (index,parent) in enumerate(self.parents):
parent._add_child(self, index)
# Check plates
parent_plates = [self._plates_from_parent(index)
for index in range(len(self.parents))]
if any(p is None for p in parent_plates):
raise ValueError("Method _plates_from_parent returned None")
# Get and validate the plates for this node
plates = self._total_plates(plates, *parent_plates)
if self.plates is None:
self.plates = plates
# By default, ignore all plates
self.mask = np.array(False)
# Children
self.children = set()
# Get and validate the plate multiplier
parent_plates_multiplier = [self._plates_multiplier_from_parent(index)
for index in range(len(self.parents))]
#if plates_multiplier is None:
# plates_multiplier = parent_plates_multiplier
plates_multiplier = self._total_plates(plates_multiplier,
*parent_plates_multiplier)
self.plates_multiplier = plates_multiplier
def get_pdf_nodes(self):
return tuple(
node
for (child, _) in self.children
for node in child._get_pdf_nodes_conditioned_on_parents()
)
def _get_pdf_nodes_conditioned_on_parents(self):
return self.get_pdf_nodes()
def _get_id_list(self):
"""
Returns the stochastic ID list.
This method is used to check that same stochastic nodes are not direct
parents of a node several times. It is only valid if there are
intermediate stochastic nodes.
To put it another way: each ID corresponds to one factor q(..) in the
posterior approximation. Different IDs mean different factors, thus they
mean independence. The parents must have independent factors.
Stochastic nodes should return their unique ID. Deterministic nodes
should return the IDs of their parents. Constant nodes should return
empty list of IDs.
"""
raise NotImplementedError()
@classmethod
def _total_plates(cls, plates, *parent_plates):
if plates is None:
# By default, use the minimum number of plates determined
# from the parent nodes
try:
return misc.broadcasted_shape(*parent_plates)
except ValueError:
raise ValueError(
"The plates of the parents do not broadcast: {0}".format(
parent_plates
)
)
else:
# Check that the parent_plates are a subset of plates.
for (ind, p) in enumerate(parent_plates):
if not misc.is_shape_subset(p, plates):
raise ValueError("The plates %s of the parents "
"are not broadcastable to the given "
"plates %s."
% (p,
plates))
return plates
@staticmethod
def _ensure_moments(node, moments_class, **kwargs):
try:
converter = node._moments.get_converter(moments_class)
except AttributeError:
from .constant import Constant
return Constant(
moments_class.from_values(node, **kwargs),
node
)
else:
node = converter(node)
converter = node._moments.get_instance_converter(**kwargs)
if converter is not None:
from .converters import NodeConverter
return NodeConverter(converter, node)
return node
def _compute_plates_to_parent(self, index, plates):
# Sub-classes may want to overwrite this if they manipulate plates
return plates
def _compute_plates_from_parent(self, index, plates):
# Sub-classes may want to overwrite this if they manipulate plates
return plates
def _compute_plates_multiplier_from_parent(self, index, plates_multiplier):
# TODO/FIXME: How to handle this properly?
return plates_multiplier
def _plates_to_parent(self, index):
return self._compute_plates_to_parent(index, self.plates)
def _plates_from_parent(self, index):
return self._compute_plates_from_parent(index,
self.parents[index].plates)
def _plates_multiplier_from_parent(self, index):
return self._compute_plates_multiplier_from_parent(
index,
self.parents[index].plates_multiplier
)
@property
def plates_multiplier(self):
""" Plate multiplier is applied to messages to parents """
return self.__plates_multiplier
@plates_multiplier.setter
def plates_multiplier(self, value):
# TODO/FIXME: Check that multiplier is consistent with plates
self.__plates_multiplier = value
return
def get_shape(self, ind):
return self.plates + self.dims[ind]
def _add_child(self, child, index):
"""
Add a child node.
Parameters
----------
child : node
index : int
The parent index of this node for the child node.
The child node recognizes its parents by their index
number.
"""
self.children.add((child, index))
def _remove_child(self, child, index):
"""
Remove a child node.
"""
self.children.remove((child, index))
def get_mask(self):
return self.mask
## def _get_message_mask(self):
## return self.mask
def _set_mask(self, mask):
# Sub-classes may overwrite this method if they have some other masks to
# be combined (for instance, observation mask)
self.mask = mask
def _update_mask(self):
# Combine masks from children
mask = np.array(False)
for (child, index) in self.children:
mask = np.logical_or(mask, child._mask_to_parent(index))
# Set the mask of this node
self._set_mask(mask)
if not misc.is_shape_subset(np.shape(self.mask), self.plates):
raise ValueError("The mask of the node %s has updated "
"incorrectly. The plates in the mask %s are not a "
"subset of the plates of the node %s."
% (self.name,
np.shape(self.mask),
self.plates))
# Tell parents to update their masks
for parent in self.parents:
parent._update_mask()
def _compute_weights_to_parent(self, index, weights):
"""Compute the mask used for messages sent to parent[index].
The mask tells which plates in the messages are active. This method is
used for obtaining the mask which is used to set plates in the messages
to parent to zero.
Sub-classes may want to overwrite this method if they do something to
plates so that the mask is somehow altered.
"""
return weights
def _mask_to_parent(self, index):
"""
Get the mask with respect to parent[index].
The mask tells which plate connections are active. The mask is "summed"
(logical or) and reshaped into the plate shape of the parent. Thus, it
can't be used for masking messages, because some plates have been summed
already. This method is used for propagating the mask to parents.
"""
mask = self._compute_weights_to_parent(index, self.mask) != 0
# Check the shape of the mask
plates_to_parent = self._plates_to_parent(index)
if not misc.is_shape_subset(np.shape(mask), plates_to_parent):
raise ValueError("In node %s, the mask being sent to "
"parent[%d] (%s) has invalid shape: The shape of "
"the mask %s is not a sub-shape of the plates of "
"the node with respect to the parent %s. It could "
"be that this node (%s) is manipulating plates "
"but has not overwritten the method "
"_compute_weights_to_parent."
% (self.name,
index,
self.parents[index].name,
np.shape(mask),
plates_to_parent,
self.__class__.__name__))
# "Sum" (i.e., logical or) over the plates that have unit length in
# the parent node.
parent_plates = self.parents[index].plates
s = misc.axes_to_collapse(np.shape(mask), parent_plates)
mask = np.any(mask, axis=s, keepdims=True)
mask = misc.squeeze_to_dim(mask, len(parent_plates))
return mask
def _message_to_child(self):
u = self.get_moments()
# Debug: Check that the message has appropriate shape
for (ui, dim) in zip(u, self.dims):
ndim = len(dim)
if ndim > 0:
if np.shape(ui)[-ndim:] != dim:
raise RuntimeError(
"A bug found by _message_to_child for %s: "
"The variable axes of the moments %s are not equal to "
"the axes %s defined by the node %s. A possible reason "
"is that the plates of the node are inferred "
"incorrectly from the parents, and the method "
"_plates_from_parents should be implemented."
% (self.__class__.__name__,
np.shape(ui)[-ndim:],
dim,
self.name))
if not misc.is_shape_subset(np.shape(ui)[:-ndim],
self.plates):
raise RuntimeError(
"A bug found by _message_to_child for %s: "
"The plate axes of the moments %s are not a subset of "
"the plate axes %s defined by the node %s."
% (self.__class__.__name__,
np.shape(ui)[:-ndim],
self.plates,
self.name))
else:
if not misc.is_shape_subset(np.shape(ui), self.plates):
raise RuntimeError(
"A bug found by _message_to_child for %s: "
"The plate axes of the moments %s are not a subset of "
"the plate axes %s defined by the node %s."
% (self.__class__.__name__,
np.shape(ui),
self.plates,
self.name))
return u
def _message_to_parent(self, index, u_parent=None):
# Compute the message, check plates, apply mask and sum over some plates
if index >= len(self.parents):
raise ValueError("Parent index larger than the number of parents")
# Compute the message and mask
(m, mask) = self._get_message_and_mask_to_parent(index, u_parent=u_parent)
mask = misc.squeeze(mask)
# Plates in the mask
plates_mask = np.shape(mask)
# The parent we're sending the message to
parent = self.parents[index]
# Plates with respect to the parent
plates_self = self._plates_to_parent(index)
# Plate multiplier of the parent
multiplier_parent = self._plates_multiplier_from_parent(index)
# Check if m is a logpdf function (for black-box variational inference)
if callable(m):
            return m
            # NOTE: the code below this return is unreachable; m_function is
            # an unfinished sketch and references names (i, mask_i,
            # shape_parent) that are never defined in this scope.
def m_function(*args):
lpdf = m(*args)
# Log pdf only contains plate axes!
plates_m = np.shape(lpdf)
r = (self.broadcasting_multiplier(plates_self,
plates_m,
plates_mask,
parent.plates) *
self.broadcasting_multiplier(self.plates_multiplier,
multiplier_parent))
axes_msg = misc.axes_to_collapse(plates_m, parent.plates)
m[i] = misc.sum_multiply(mask_i, m[i], r,
axis=axes_msg,
keepdims=True)
# Remove leading singular plates if the parent does not have
# those plate axes.
m[i] = misc.squeeze_to_dim(m[i], len(shape_parent))
return m_function
raise NotImplementedError()
# Compact the message to a proper shape
for i in range(len(m)):
# Empty messages are given as None. We can ignore those.
if m[i] is not None:
try:
r = self.broadcasting_multiplier(self.plates_multiplier,
multiplier_parent)
except:
raise ValueError("The plate multipliers are incompatible. "
"This node (%s) has %s and parent[%d] "
"(%s) has %s"
% (self.name,
self.plates_multiplier,
index,
parent.name,
multiplier_parent))
ndim = len(parent.dims[i])
# Source and target shapes
if ndim > 0:
dims = misc.broadcasted_shape(np.shape(m[i])[-ndim:],
parent.dims[i])
from_shape = plates_self + dims
else:
from_shape = plates_self
to_shape = parent.get_shape(i)
# Add variable axes to the mask
mask_i = misc.add_trailing_axes(mask, ndim)
# Apply mask and sum plate axes as necessary (and apply plate
# multiplier)
m[i] = r * misc.sum_multiply_to_plates(np.where(mask_i, m[i], 0),
to_plates=to_shape,
from_plates=from_shape,
ndim=0)
return m
def _message_from_children(self, u_self=None):
msg = [np.zeros(shape) for shape in self.dims]
#msg = [np.array(0.0) for i in range(len(self.dims))]
isfunction = None
for (child,index) in self.children:
m = child._message_to_parent(index, u_parent=u_self)
if callable(m):
if isfunction is False:
raise NotImplementedError()
elif isfunction is None:
msg = m
else:
                    def join(m1, m2):
                        return (m1[0] + m2[0], m1[1] + m2[1])
                    # Bind m and the current msg via default arguments;
                    # "msg = lambda x: join(m(x), msg(x))" would call the new
                    # lambda recursively because of late name binding.
                    msg = lambda x, m=m, prev=msg: join(m(x), prev(x))
isfunction = True
else:
if isfunction is True:
raise NotImplementedError()
else:
isfunction = False
for i in range(len(self.dims)):
if m[i] is not None:
# Check broadcasting shapes
sh = misc.broadcasted_shape(self.get_shape(i), np.shape(m[i]))
try:
# Try exploiting broadcasting rules
msg[i] += m[i]
except ValueError:
msg[i] = msg[i] + m[i]
return msg
def _message_from_parents(self, exclude=None):
return [list(parent._message_to_child())
if ind != exclude else
None
for (ind,parent) in enumerate(self.parents)]
def get_moments(self):
raise NotImplementedError()
def delete(self):
"""
Delete this node and the children
"""
for (ind, parent) in enumerate(self.parents):
parent._remove_child(self, ind)
for (child, _) in self.children:
child.delete()
@staticmethod
def broadcasting_multiplier(plates, *args):
return misc.broadcasting_multiplier(plates, *args)
## """
## Compute the plate multiplier for given shapes.
## The first shape is compared to all other shapes (using NumPy
## broadcasting rules). All the elements which are non-unit in the first
## shape but 1 in all other shapes are multiplied together.
## This method is used, for instance, for computing a correction factor for
## messages to parents: If this node has non-unit plates that are unit
## plates in the parent, those plates are summed. However, if the message
## has unit axis for that plate, it should be first broadcasted to the
## plates of this node and then summed to the plates of the parent. In
## order to avoid this broadcasting and summing, it is more efficient to
## just multiply by the correct factor. This method computes that
## factor. The first argument is the full plate shape of this node (with
## respect to the parent). The other arguments are the shape of the message
## array and the plates of the parent (with respect to this node).
## """
## # Check broadcasting of the shapes
## for arg in args:
## misc.broadcasted_shape(plates, arg)
## # Check that each arg-plates are a subset of plates?
## for arg in args:
## if not misc.is_shape_subset(arg, plates):
## raise ValueError("The shapes in args are not a sub-shape of "
## "plates.")
## r = 1
## for j in range(-len(plates),0):
## mult = True
## for arg in args:
## # if -j <= len(arg) and arg[j] != 1:
## if not (-j > len(arg) or arg[j] == 1):
## mult = False
## if mult:
## r *= plates[j]
## return r
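    # Worked example (illustrative): plates=(4, 3), message shape (1, 3),
    # parent plates (3,). The size-4 plate is non-unit in plates but unit or
    # absent in the other shapes, so the multiplier is 4: summing the
    # broadcast message over that plate is replaced by multiplying by 4.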
def move_plates(self, from_plate, to_plate):
return _MovePlate(self,
from_plate,
to_plate,
name=self.name + ".move_plates")
def add_plate_axis(self, to_plate):
return AddPlateAxis(to_plate)(self,
name=self.name+".add_plate_axis")
def __getitem__(self, index):
return Slice(self, index,
name=(self.name+".__getitem__"))
def has_plotter(self):
"""
Return True if the node has a plotter
"""
return callable(self._plotter)
def set_plotter(self, plotter):
self._plotter = plotter
def plot(self, fig=None, **kwargs):
"""
Plot the node distribution using the plotter of the node
Because the distributions are in general very difficult to plot, the
user must specify some functions which performs the plotting as
wanted. See, for instance, bayespy.plot.plotting for available plotters,
that is, functions that perform plotting for a node.
"""
if fig is None:
fig = plt.gcf()
if callable(self._plotter):
ax = self._plotter(self, fig=fig, **kwargs)
fig.suptitle('q(%s)' % self.name)
return ax
else:
raise Exception("No plotter defined, can not plot")
@staticmethod
def _compute_message(*arrays, plates_from=(), plates_to=(), ndim=0):
"""
A general function for computing messages by sum-multiply
The function computes the product of the input arrays and then sums to
the requested plates.
"""
# Check that the plates broadcast properly
if not misc.is_shape_subset(plates_to, plates_from):
raise ValueError("plates_to must be broadcastable to plates_from")
# Compute the explicit shape of the product
shapes = [np.shape(array) for array in arrays]
arrays_shape = misc.broadcasted_shape(*shapes)
# Compute plates and dims that are present
if ndim == 0:
arrays_plates = arrays_shape
dims = ()
else:
arrays_plates = arrays_shape[:-ndim]
dims = arrays_shape[-ndim:]
# Compute the correction term. If some of the plates that should be
# summed are actually broadcasted, one must multiply by the size of the
# corresponding plate
r = Node.broadcasting_multiplier(plates_from, arrays_plates, plates_to)
# For simplicity, make the arrays equal ndim
arrays = misc.make_equal_ndim(*arrays)
# Keys for the input plates: (N-1, N-2, ..., 0)
nplates = len(arrays_plates)
in_plate_keys = list(range(nplates-1, -1, -1))
# Keys for the output plates
out_plate_keys = [key
for key in in_plate_keys
if key < len(plates_to) and plates_to[-key-1] != 1]
# Keys for the dims
dim_keys = list(range(nplates, nplates+ndim))
# Total input and output keys
in_keys = len(arrays) * [in_plate_keys + dim_keys]
out_keys = out_plate_keys + dim_keys
# Compute the sum-product with correction
einsum_args = misc.zipper_merge(arrays, in_keys) + [out_keys]
y = r * np.einsum(*einsum_args)
# Reshape the result and apply correction
nplates_result = min(len(plates_to), len(arrays_plates))
if nplates_result == 0:
plates_result = []
else:
plates_result = [min(plates_to[ind], arrays_plates[ind])
for ind in range(-nplates_result, 0)]
y = np.reshape(y, plates_result + list(dims))
return y
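# --- Illustrative example of _compute_message (toy shapes, an assumption) ---
# x = np.ones((1, 3, 2))  # broadcast plate of size 1, then plate 3, dims (2,)
# y = Node._compute_message(x, plates_from=(4, 3), plates_to=(3,), ndim=1)
# y.shape == (3, 2) and (y == 4).all(): the broadcast size-4 plate enters
# through the correction factor r instead of an explicit sum.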
from .deterministic import Deterministic
def slicelen(s, length=None):
if length is not None:
s = slice(*(s.indices(length)))
return max(0, misc.ceildiv(s.stop - s.start, s.step))
class Slice(Deterministic):
"""
Basic slicing for plates.
Slicing occurs when index is a slice object (constructed by start:stop:step
notation inside of brackets), an integer, or a tuple of slice objects and
integers.
Currently, accept slices, newaxis, ellipsis and integers. For instance, does
not accept lists/tuples to pick multiple indices of the same axis.
Ellipsis expand to the number of : objects needed to make a selection tuple
of the same length as x.ndim. Only the first ellipsis is expanded, any
others are interpreted as :.
Similar to:
http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#basic-slicing
"""
def __init__(self, X, slices, **kwargs):
self._moments = X._moments
self._parent_moments = (X._moments,)
# Force a list
if not isinstance(slices, tuple):
slices = [slices]
else:
slices = list(slices)
#
# Expand Ellipsis
#
# Compute the number of required axes and how Ellipsis is expanded
num_axis = 0
ellipsis_index = None
for (k, s) in enumerate(slices):
if misc.is_scalar_integer(s) or isinstance(s, slice):
num_axis += 1
elif s is None:
pass
elif s is Ellipsis:
# Index is an ellipsis, e.g., [...]
if ellipsis_index is None:
# Expand ...
ellipsis_index = k
else:
# Interpret ... as :
num_axis += 1
slices[k] = slice(None)
else:
raise TypeError("Invalid argument type: {0}".format(s.__class__))
if num_axis > len(X.plates):
raise IndexError("Too many indices")
# The number of plates that were not given explicit slicing (either
# Ellipsis was used or the number of slices was smaller than the number
# of plate axes)
expand_len = len(X.plates) - num_axis
if ellipsis_index is not None:
# Replace Ellipsis with correct number of :
k = ellipsis_index
del slices[k]
slices = slices[:k] + [slice(None)] * expand_len + slices[k:]
else:
# Add trailing : so that each plate has explicit slicing
slices = slices + [slice(None)] * expand_len
#
# Preprocess indexing:
# - integer indices to non-negative values
# - slice start/stop values to non-negative
# - slice start/stop values based on the size of the plate
#
# Index for parent plates
j = 0
for (k, s) in enumerate(slices):
if misc.is_scalar_integer(s):
# Index is an integer, e.g., [3]
if s < 0:
# Handle negative index
s += X.plates[j]
if s < 0 or s >= X.plates[j]:
raise IndexError("Index out of range")
# Store the preprocessed integer index
slices[k] = s
j += 1
elif isinstance(s, slice):
# Index is a slice, e.g., [2:6]
# Normalize the slice
s = slice(*(s.indices(X.plates[j])))
if slicelen(s) <= 0:
raise IndexError("Slicing leads to empty plates")
slices[k] = s
j += 1
self.slices = slices
super().__init__(X,
dims=X.dims,
**kwargs)
def _plates_to_parent(self, index):
return self.parents[index].plates
def _plates_from_parent(self, index):
plates = list(self.parents[index].plates)
# Compute the plates. Note that Ellipsis has already been preprocessed
# to a proper number of :
k = 0
for s in self.slices:
# Then, each case separately: slice, newaxis, integer
if isinstance(s, slice):
# Slice, e.g., [2:5]
N = slicelen(s)
if N <= 0:
raise IndexError("Slicing leads to empty plates")
plates[k] = N
k += 1
elif s is None:
# [np.newaxis]
plates = plates[:k] + [1] + plates[k:]
k += 1
elif misc.is_scalar_integer(s):
# Integer, e.g., [3]
del plates[k]
else:
raise RuntimeError("BUG: Unknown index type. Should capture earlier.")
return tuple(plates)
@staticmethod
def __reverse_indexing(slices, m_child, plates, dims):
"""
A helpful function for performing reverse indexing/slicing
"""
j = -1 # plate index for parent
i = -1 # plate index for child
child_slices = ()
parent_slices = ()
msg_plates = ()
# Compute plate axes in the message from children
ndim = len(dims)
if ndim > 0:
m_plates = np.shape(m_child)[:-ndim]
else:
m_plates = np.shape(m_child)
for s in reversed(slices):
if misc.is_scalar_integer(s):
# Case: integer
parent_slices = (s,) + parent_slices
msg_plates = (plates[j],) + msg_plates
j -= 1
elif s is None:
# Case: newaxis
if -i <= len(m_plates):
child_slices = (0,) + child_slices
i -= 1
elif isinstance(s, slice):
# Case: slice
if -i <= len(m_plates):
child_slices = (slice(None),) + child_slices
parent_slices = (s,) + parent_slices
if ((-i > len(m_plates) or m_plates[i] == 1)
and slicelen(s) == plates[j]):
# Broadcasting can be applied. The message does not need
# to be explicitly shaped to the full size
msg_plates = (1,) + msg_plates
else:
# No broadcasting. Must explicitly form the full size
# axis
msg_plates = (plates[j],) + msg_plates
j -= 1
i -= 1
else:
raise RuntimeError("BUG: Unknown index type. Should capture earlier.")
# Set the elements of the message
m_parent = np.zeros(msg_plates + dims)
if np.ndim(m_parent) == 0 and np.ndim(m_child) == 0:
m_parent = m_child
elif np.ndim(m_parent) == 0:
m_parent = m_child[child_slices]
elif np.ndim(m_child) == 0:
m_parent[parent_slices] = m_child
else:
m_parent[parent_slices] = m_child[child_slices]
return m_parent
def _compute_weights_to_parent(self, index, weights):
"""
Compute the mask to the parent node.
"""
if index != 0:
raise ValueError("Invalid index")
parent = self.parents[0]
return self.__reverse_indexing(self.slices,
weights,
parent.plates,
())
def _compute_message_to_parent(self, index, m, u):
"""
Compute the message to a parent node.
"""
if index != 0:
raise ValueError("Invalid index")
parent = self.parents[0]
# Apply reverse indexing for the message arrays
msg = [self.__reverse_indexing(self.slices,
m_child,
parent.plates,
dims)
for (m_child, dims) in zip(m, parent.dims)]
return msg
def _compute_moments(self, u):
"""
Get the moments with an added plate axis.
"""
# Process each moment
for n in range(len(u)):
# Compute the effective plates in the message/moment
ndim = len(self.dims[n])
if ndim > 0:
shape = np.shape(u[n])[:-ndim]
else:
shape = np.shape(u[n])
# Construct a list of slice objects
u_slices = []
# Index for the shape
j = -len(self.parents[0].plates)
for (k, s) in enumerate(self.slices):
if s is None:
# [np.newaxis]
if -j < len(shape):
# Only add newaxis if there are some axes before
# this. It does not make any difference if you added
# leading unit axes
u_slices.append(s)
else:
# slice or integer index
if -j <= len(shape):
# The moment has this axis, so it is not broadcasting it
if shape[j] != 1:
# Use the slice as it is
u_slices.append(s)
elif isinstance(s, slice):
# Slice.
# The moment is using broadcasting, just pick the
# first element but use slice in order to keep the
# axis
u_slices.append(slice(0,1,1))
else:
# Integer.
# The moment is using broadcasting, just pick the
# first element
u_slices.append(0)
j += 1
# Slice the message/moment
u[n] = u[n][tuple(u_slices)]
return u
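# --- Usage sketch (illustrative; X is a hypothetical node with plates
# (10, 5)) ---
# Y = X[2:6, 0]       # Node.__getitem__ builds a Slice node
# Y.plates == (4,)    # the slice keeps 4 entries, the integer drops an axis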
def AddPlateAxis(to_plate):
if to_plate >= 0:
raise Exception("Give negative value for axis index to_plate.")
class _AddPlateAxis(Deterministic):
def __init__(self, X, **kwargs):
nonlocal to_plate
N = len(X.plates) + 1
# Check the parameters
if to_plate >= 0 or to_plate < -N:
raise ValueError("Invalid plate position to add.")
# Use positive indexing only
## if to_plate < 0:
## to_plate += N
# Use negative indexing only
if to_plate >= 0:
to_plate -= N
#self.to_plate = to_plate
super().__init__(X,
dims=X.dims,
**kwargs)
def _plates_to_parent(self, index):
plates = list(self.plates)
plates.pop(to_plate)
return tuple(plates)
#return self.plates[:to_plate] + self.plates[(to_plate+1):]
def _plates_from_parent(self, index):
plates = list(self.parents[index].plates)
            # to_plate is normalized to a negative index above, so the new
            # axis position in the longer list is len(plates) + to_plate + 1
            plates.insert(len(plates) + to_plate + 1, 1)
return tuple(plates)
def _compute_weights_to_parent(self, index, weights):
# Remove the added mask plate
if abs(to_plate) <= np.ndim(weights):
                sh_weights = list(np.shape(weights))
                sh_weights.pop(to_plate)
weights = np.reshape(weights, sh_weights)
return weights
def _compute_message_to_parent(self, index, m, *u_parents):
"""
Compute the message to a parent node.
"""
# Remove the added message plate
for i in range(len(m)):
# Remove the axis
if np.ndim(m[i]) >= abs(to_plate) + len(self.dims[i]):
axis = to_plate - len(self.dims[i])
sh_m = list(np.shape(m[i]))
sh_m.pop(axis)
m[i] = np.reshape(m[i], sh_m)
return m
def _compute_moments(self, u):
"""
Get the moments with an added plate axis.
"""
# Get parents' moments
#u = self.parents[0].message_to_child()
# Move a plate axis
u = list(u)
for i in range(len(u)):
# Make sure the moments have all the axes
#diff = len(self.plates) + len(self.dims[i]) - np.ndim(u[i]) - 1
#u[i] = misc.add_leading_axes(u[i], diff)
# The location of the new axis/plate:
axis = np.ndim(u[i]) - abs(to_plate) - len(self.dims[i]) + 1
if axis > 0:
# Add one axes to the correct position
sh_u = list(np.shape(u[i]))
sh_u.insert(axis, 1)
u[i] = np.reshape(u[i], sh_u)
return u
return _AddPlateAxis
class NodeConstantScalar(Node):
@staticmethod
def compute_fixed_u_and_f(x):
""" Compute u(x) and f(x) for given x. """
return ([x], 0)
def __init__(self, a, **kwargs):
self.u = [a]
super().__init__(self,
plates=np.shape(a),
dims=[()],
**kwargs)
def start_optimization(self):
# FIXME: Set the plate sizes appropriately!!
x0 = self.u[0]
#self.gradient = np.zeros(np.shape(x0))
def transform(x):
# E.g., for positive scalars you could have exp here.
self.gradient = np.zeros(np.shape(x0))
self.u[0] = x
def gradient():
# This would need to apply the gradient of the
# transformation to the computed gradient
return self.gradient
return (x0, transform, gradient)
def add_to_gradient(self, d):
self.gradient += d
def message_to_child(self, gradient=False):
if gradient:
return (self.u, [ [np.ones(np.shape(self.u[0])),
#self.gradient] ])
self.add_to_gradient] ])
else:
return self.u
def stop_optimization(self):
#raise Exception("Not implemented for " + str(self.__class__))
pass
|
jluttine/bayespy
|
bayespy/inference/vmp/nodes/node.py
|
Python
|
mit
| 47,354
|
[
"Gaussian"
] |
ccea97391589ca39fb6a7f997ebc9d2a25ae2f02b9a5070909c1f0be80b04d81
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for longurl (URL shortener)
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse
from lib import unshortenit
SERVICES_SHORT = ["adf.ly","sh.st","bit.ly","ul.to"]
def expand_url(url):
    e = unshortenit.UnshortenIt()
    # Resolve repeatedly while the host is a known shortener; returning "url"
    # (not "long_url") avoids a NameError when the URL was never shortened.
    while Es_Corto(url):
        long_url, estado = e.unshorten(url)
        url = long_url
    return url
def Es_Corto(url):
server = urlparse.urlsplit(url).netloc
Corto = (server in SERVICES_SHORT)
return Corto
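# --- Usage sketch (illustrative) ---
# expand_url("http://bit.ly/abc123")  # hypothetical short link; resolved hop
#                                     # by hop until the host is no longer in
#                                     # SERVICES_SHORT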
|
ChopChopKodi/pelisalacarta
|
python/main-classic/servers/expurl.py
|
Python
|
gpl-3.0
| 725
|
[
"ADF"
] |
5e5d2e02ada13f6ab110d0dd55526acbd8ef89844a953d46516c5fc138e2cf87
|
#****************************************************************************
# This file has been modified from its original version. It has been
# formatted to fit your irc bot.
#
# The original version is a nifty PyQt application written by Douglas Bell,
# available at http://convertall.bellz.org/
#
# Below is the original copyright. Doug Bell rocks.
# The hijacker is Keith Jones, and he has no bomb in his shoe.
#
#****************************************************************************
import re, copy, sys, os.path, StringIO
import supybot.conf as conf
import supybot.registry as registry
unitData = \
"""
#*****************************************************************************
# units.dat, the units data file, version 0.3.1
#
# ConvertAll, a units conversion program
# Copyright (C) 2005, Douglas W. Bell
#
# This is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License, Version 2. This program is
# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY.
#*****************************************************************************
#
# Units are defined by an optional quantity and an equivalent unit or unit
# combination. A python expression may be used for the quantity, but is
# restricted to using only the following operators: *, /, +, -, **, (, ).
# Beware of integer division truncation: be sure to use a float for at
# least one of the values.
#
# The unit type must be placed in square brackets before a set of units.
# The first comment after the equivalent unit will be put in parenthesis after
# the unit name (usually used to give the full name of an abbreviated unit).
# The next comment will be used in the program list's comment column;
# later comments and full line comments are ignored.
#
# Non-linear units are indicated with an equivalent unit in square brackets,
# followed by either equations or equivalency lists for the definition.
# For equations, two are given, separated by a ';'. Both are functions of
# "x", the first going from the unit to the equivalent unit and the second
# one in reverse. Any valid Python expression returning a float (including
# the functions in the math module) should work. The equivalency list is a
# python list of tuples giving points for linear interpolation.
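# For example (both styles appear in the definitions below):
#   C = [K] x + 273.15 ; x - 273.15                 # equations: to Kelvin and back
#   zinc gauge = [in] [(1, .002), (10, .02), ...]   # interpolation points
#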
#
# All units must reduce to primitive units, which are indicated by an '!'
# as the equivalent unit. Circular refernces must also be avoided.
#
# Primitive units: kg, m, s, K, A, mol, cd, rad, sr, bit, unit
#
##############################################################################
#
# mass units
#
[mass]
kg = ! # kilogram
kilogram = kg
key = kg # # drug slang
hectogram = 100 gram
dekagram = 10 gram
gram = 0.001 kg
g = gram # gram
decigram = 0.1 gram
centigram = 0.01 gram
milligram = 0.001 gram
mg = milligram # milligram
microgram = 0.001 mg
tonne = 1000 kg # # metric
metric ton = tonne
megagram = tonne
kilotonne = 1000 tonne # # metric
gigagram = 1e9 gram
teragram = 1e12 gram
carat = 0.2 gram
ct = carat # carat
amu = 1.66053873e-27 kg # atomic mass
atomic mass unit = amu
pound = 0.45359237 kg
lb = pound # pound
lbm = pound # pound
ounce = 1/16.0 pound
oz = ounce # ounce
lid = ounce # # drug slang
pound troy = 5760 grain
lb troy = pound troy # pound troy
ounce troy = 1/12.0 lb troy
oz troy = ounce troy # ounce troy
ton = 2000 lb # # non-metric
kiloton = 1000 ton # # non-metric
slug = lbf*s^2/ft
stone = 14 lb
grain = 1/7000.0 lb
#
# length / distance units
#
[length]
m = ! # meter
meter = m
metre = m
decimeter = 0.1 m
cm = 0.01 m # centimeter
centimeter = cm
mm = 0.001 m # millimeter
millimeter = mm
micrometer = 1e-6 m
micron = micrometer
nanometer = 1e-9 m
nm = nanometer # nanometer
dekameter = 10 m
hectometer = 100 m
km = 1000 m # kilometer
kilometer = km
megameter = 1000 km
angstrom = 1e-10 m
fermi = 1e-15 m # # nuclear sizes
inch = 2.54 cm
in = inch # inch
inches = inch
mil = 0.001 inch
microinch = 1e-6 inch
microinches = microinch
foot = 12 inch
ft = foot # foot
feet = foot
yard = 3 ft
yd = yard # yard
mile = 5280 ft
mi = mile # mile
nautical mile = 1852 m
nmi = nautical mile # nautical mile
league = 3 mile
chain = 66 ft
fathom = 6 ft
rod = 5.5 yard
furlong = 40 rod
hand = 4 inch
cubit = 21.8 inch # # biblical unit
point = 1/72.27 inch
pica = 12 point
caliber = 0.01 inch # # bullet sizes
football field = 100 yd
marathon = 46145 yd
mil Swedish = 10 km
au = 1.49597870691e11 m # astronomical unit
astronomical unit = au
light year = 365.25 light speed * day
light minute = light speed * min
light second = light speed * s
parsec = 3.0856775813e16 m
kiloparsec = 1000 parsec
megaparsec = 1000 kiloparsec
screw size = [in] 0.013*x + 0.06 ; (x - 0.06) / 0.013 \
# # Unified diameters, non-linear
AWG = [in] 92.0**((36-x)/39.0)/200.0 ; \
36 - 39.0*log(200.0*x)/log(92.0) \
# American Wire Gauge \
# use -1, -2 for 00, 000; non-linear
American Wire Gauge = [in] 92.0**((36-x)/39.0)/200.0 ; \
36 - 39.0*log(200.0*x)/log(92.0) \
# # use -1, -2 for 00, 000; non-linear
standard gauge = [in] [(-5, .448350), (1, .269010), (14, .0747250), \
(16, .0597800), (17, .0538020), (20, .0358680), \
(26, .0179340), (31, .0104615), (36, .00672525), \
(38, .00597800)] # steel \
# Manufacturers Std. Gauge, non-linear
zinc gauge = [in] [(1, .002), (10, .02), (15, .04), (19, .06), \
(23, .1), (24, .125), (27, .5), (28, 1)] \
# # sheet metal thickness, non-linear
ring size = [in] 0.1018*x + 1.4216 ; (x - 1.4216) / 0.1018 \
# # US size, circum., non-linear
shoe size mens = [in] x/3.0 + 7 + 1/3.0 ; (x - 7 - 1/3.0) * 3 \
# # US sizes, non-linear
shoe size womens = [in] x/3.0 + 6 + 5/6.0 ; (x - 6 - 5/6.0) * 3 \
# # US sizes, non-linear
#
# time units
#
[time]
s = ! # second
sec = s # second
second = s
ms = 0.001 s # millisecond
millisecond = ms
microsecond = 1e-6 s
ns = 1e-9 s # nanosecond
nanosecond = ns
minute = 60 s
min = minute # minute
hour = 60 min
hr = hour # hour
bell = 30 min # # naval definition
watch = 4 hour
watches = watch
day = 24 hr
week = 7 day
wk = week # week
fortnight = 14 day
month = 1/12.0 year
year = 365.242198781 day
yr = year # year
calendar year = 365 day
decade = 10 year
century = 100 year
centuries = century
millennium = 1000 year
millennia = millennium
[scheduling]
man hour = 168/40.0 hour
man week = 40 man hour
man month = 1/12.0 man year
man year = 52 man week
#
# temperature
#
[temperature]
K = ! # Kelvin
Kelvin = K
deg K = K # Kelvin
degree Kelvin = K
C = [K] x + 273.15 ; x - 273.15 # Celsius # non-linear
Celsius = [K] x + 273.15 ; x - 273.15 # # non-linear
deg C = [K] x + 273.15 ; x - 273.15 # Celsius # non-linear
degree Celsius = [K] x + 273.15 ; x - 273.15 # # non-linear
R = 5/9.0 K # Rankine
Rankine = R
deg R = R # Rankine
F = [R] x + 459.67 ; x - 459.67 # Fahrenheit # non-linear
Fahrenheit = [R] x + 459.67 ; x - 459.67 # # non-linear
deg F = [R] x + 459.67 ; x - 459.67 # Fahrenheit # non-linear
degree Fahrenheit = [R] x + 459.67 ; x - 459.67 # # non-linear
[temp. diff.]
C deg = K # Celsius degree
Celsius degree = C deg
F deg = R # Fahrenheit deg.
Fahrenheit degree = F deg
#
# electrical units
#
[current]
A = ! # ampere
ampere = A
amp = A
milliampere = 0.001 A
milliamp = milliampere
mA = milliampere # milliampere
microampere = 0.001 mA
kiloampere = 1000 A
kA = kiloampere # kiloampere
[charge]
coulomb = A*s
amp hour = A*hr
mAh = 0.001 amp hour # milliamp hour
milliamp hour = mAh
[potential]
volt = W/A
V = volt # volt
millivolt = 0.001 volt
mV = millivolt # millivolt
kilovolt = 1000 volt
kV = kilovolt # kilovolt
[resistance]
ohm = V/A
milliohm = 0.001 ohm
microhm = 0.001 milliohm
kilohm = 1000 ohm
[conductance]
siemens = A/V
[capacitance]
farad = coulomb/V
millifarad = 0.001 farad
microfarad = 0.001 millifarad
nanofarad = 1e-9 farad
picofarad = 1e-12 farad
[magn. flux]
weber = V*s
Wb = weber # weber
[inductance]
henry = Wb/A
H = henry # henry
millihenry = 0.001 henry
mH = millihenry # millihenry
microhenry = 0.001 mH
[flux density]
tesla = Wb/m^2
T = tesla # tesla
#
# molecular units
#
[molecular qty]
mol = ! # mole # gram mole
mole = mol # # gram mole
gram mole = mol
kilomole = 1000 mol
kmol = kilomole # kilomole
pound mole = mol*lbm/gram
lbmol = pound mole # pound mole
[size of a mol]
avogadro = gram/amu*mol
#
# Illumination units
#
[lum. intens.]
cd = ! # candela
candela = cd
[luminous flux]
lumen = cd * sr
lm = lumen # lumen
[illuminance]
lux = lumen/m^2
footcandle = lumen/ft^2
metercandle = lumen/m^2
[luminance]
lambert = cd/pi*cm^2
millilambert = 0.001 lambert
footlambert = cd/pi*ft^2
#
# angular units
#
[angle]
radian = !
rad = radian # radian
circle = 2 pi*radian
turn = circle
revolution = circle
rev = revolution # revolution
degree = 1/360.0 circle
deg = degree # degree
arc min = 1/60.0 degree # minute
arc minute = arc min
min arc = arc min # minute
minute arc = arc min
arc sec = 1/60.0 arc min # second
arc second = arc sec
sec arc = arc sec # second
second arc = arc sec
quadrant = 1/4.0 circle
right angle = quadrant
gradian = 0.01 quadrant
#
# solid angle units
#
[solid angle]
sr = ! # steradian
steradian = sr
sphere = 4 pi*sr
hemisphere = 1/2.0 sphere
#
# information units
#
[data]
bit = !
kilobit = 1000 bit # # based on power of 10
megabit = 1000 kilobit # # based on power of 10
byte = 8 bit
B = byte # byte
kilobyte = 1024 byte # # based on power of 2
kB = kilobyte # kilobyte # based on power of 2
megabyte = 1024 kB # # based on power of 2
MB = megabyte # megabyte # based on power of 2
gigabyte = 1024 MB # # based on power of 2
GB = gigabyte # gigabyte # based on power of 2
terabyte = 1024 GB # # based on power of 2
TB = terabyte # terabyte # based on power of 2
petabyte = 1024 TB # # based on power of 2
PB = petabyte # petabyte # based on power of 2
kilobyte IEC std = 1000 byte # # based on power of 10
kB IEC std = kilobyte IEC std # kilobyte # based on power of 10
megabyte IEC std = 1000 kB IEC std # # based on power of 10
MB IEC std = megabyte IEC std # megabyte # based on power of 10
gigabyte IEC std = 1000 MB IEC std # # based on power of 10
GB IEC std = gigabyte IEC std # gigabyte # based on power of 10
terabyte IEC std = 1000 GB IEC std # # based on power of 10
TB IEC std = terabyte IEC std # terabyte # based on power of 10
petabyte IEC std = 1000 TB IEC std # # based on power of 10
PB IEC std = petabyte IEC std # petabyte # based on power of 10
kibibyte = 1024 byte
KiB = kibibyte # kibibyte
mebibyte = 1024 KiB
MiB = mebibyte # mebibyte
gibibyte = 1024 MiB
GiB = gibibyte # gibibyte
tebibyte = 1024 GiB
TiB = tebibyte # tebibyte
pebibyte = 1024 TiB
PiB = pebibyte # pebibyte
[data transfer]
bps = bit/sec # bits / second
kbps = 1000 bps # kilobits / sec. # based on power of 10
#
# Unitless numbers
#
[quantity]
unit = !
1 = unit # unit
pi = 3.14159265358979323846 unit
pair = 2 unit
hat trick = 3 unit # # sports
dozen = 12 unit
doz = dozen # dozen
bakers dozen = 13 unit
score = 20 unit
gross = 144 unit
great gross = 12 gross
ream = 500 unit
percent = 0.01 unit
% = percent
mill = 0.001 unit
[interest rate]
APR = [unit] log(1 + x/100) ; (exp(x) - 1)*100 \
# annual % rate # based on continuous compounding
[concentration]
proof = 1/200.0 unit # # alcohol content
ppm = 1e-6 unit # parts per million
parts per million = ppm
ppb = 1e-9 unit # parts per billion
parts per billion = ppb
ppt = 1e-12 unit # parts per trillion
parts per trillion = ppt
karat = 1/24.0 unit # # gold purity
carat gold = karat # # gold purity
#
# force units
#
[force]
newton = kg*m/s^2
N = newton # newton
dekanewton = 10 newton
kilonewton = 1000 N
kN = kilonewton # kilonewton
meganewton = 1000 kN
millinewton = 0.001 N
dyne = cm*g/s^2
kg force = kg * gravity # kilogram f
kgf = kg force # kilogram force
kilogram force = kg force
gram force = g * gravity
pound force = lbm * gravity
lbf = pound force # pound force
ton force = ton * gravity
ounce force = ounce * gravity
ozf = ounce force # ounce force
#
# area units
#
[area]
barn = 1e-28 m^2 # # particle physics
are = 100 m^2
decare = 10 are
dekare = 10 are
hectare = 100 are
acre = 10 chain^2
section = mile^2
township = 36 section
homestead = 160 acre
rai = 1600 m^2 # # Thai
ngaan = 400 m^2 # # Thai
circular inch = 1/4.0 pi*in^2 # # area of 1 inch circle
circular mil = 1/4.0 pi*mil^2 # # area of 1 mil circle
#
# volume units
#
[volume]
cc = cm^3 # cubic centimeter
cubic centimeter = cc
liter = 1000 cc
l = liter # liter
litre = liter
deciliter = 0.1 liter
centiliter = 0.01 liter
milliliter = cc
ml = milliliter # milliliter
dekaliter = 10 liter
hectoliter = 100 liter
kiloliter = 1000 liter
kl = kiloliter # kiloliter
megaliter = 1000 kiloliter
gallon = 231 in^3 # # US liquid
gal = gallon # gallon # US liquid
quart = 1/4.0 gallon # # US liquid
qt = quart # quart # US liquid
pint = 1/2.0 quart # # US liquid
pt = pint # pint # US liquid
fluid ounce = 1/16.0 pint # # US
fl oz = fluid ounce # fluid ounce # US
ounce fluid = fluid ounce # # US
imperial gallon = 4.54609 liter
imp gal = imperial gallon # imperial gallon
gallon imperial = imperial gallon
imperial quart = 1/4.0 imp gal
imp qt = imperial quart # imperial quart
quart imperial = imperial quart
imperial pint = 1/8.0 imp gal
imp pt = imperial pint # imperial pint
pint imperial = imperial pint
imperial fluid ounce = 1/160.0 imp gal
imp fl oz = imperial fluid ounce # imperial fluid ounce
cup = 8 fl oz
tablespoon = 1/16.0 cup
tbsp = tablespoon # tablespoon
teaspoon = 1/3.0 tbsp
tsp = teaspoon # teaspoon
barrel = 42 gallon
bbl = barrel # barrel
shot = 1.5 fl oz
fifth = 1/5.0 gallon # # alcohol
wine bottle = 750 ml
magnum = 1.5 liter # # alcohol
keg = 15.5 gallon # # beer
hogshead wine = 63 gal
hogshead beer = 54 gal
bushel = 2150.42 in^3
peck = 1/4.0 bushel
cord = 128 ft^3
board foot = ft^2*in
board feet = board foot
#
# velocity units
#
[velocity]
knot = nmi/hr
kt = knot # knot
light speed = 2.99792458e8 m/s
mph = mi/hr # miles/hour
kph = km/hr # kilometers/hour
mach = 340.29 m/s # # speed sound at STP
[rot. velocity]
rpm = rev/min # rev/min
rps = rev/sec # rev/sec
#
# flow rate units
#
[fluid flow]
gph = gal/hr # gallons/hour
gpm = gal/min # gallons/minute
cfs = ft^3/sec # cu ft/second
cfm = ft^3/min # cu ft/minute
lpm = l/min # liter/min
[gas flow]
sccm = atm*cc/min # std cc/min # pressure * flow
sccs = atm*cc/sec # std cc/sec # pressure * flow
slpm = atm*l/min # std liter/min # pressure * flow
slph = atm*l/hr # std liter/hour # pressure * flow
scfh = atm*ft^3/hour # std cu ft/hour # pressure * flow
scfm = atm*ft^3/min # std cu ft/min # pressure * flow
#
# pressure units
#
[pressure]
Pa = N/m^2 # pascal
pascal = Pa
hPa = 100 Pa # hectopascal
hectopascal = hPa
kPa = 1000 Pa # kilopascal
kilopascal = kPa
MPa = 1000 kPa # megapascal
megapascal = MPa
GPa = 1000 MPa # gigapascal
gigapascal = GPa
atm = 101325 Pa # atmosphere
atmosphere = atm
bar = 1e5 Pa
mbar = 0.001 bar # millibar
millibar = mbar
microbar = 0.001 mbar
decibar = 0.1 bar
kilobar = 1000 bar
megabar = 1000 kilobar
mm Hg = mm*density Hg*gravity
millimeter of Hg = mm Hg
torr = mm Hg
in Hg = in*density Hg*gravity # inch of Hg
inch of Hg = in Hg
m water = m*density water*gravity # meter of H2O
m H2O = m water # meter of H2O
meter of water = m water
in water = in*density water*gravity # inch of H2O
in H2O = in water # inch of H2O
inch of water = in water
ft water = ft*density water*gravity # feet of H2O
ft H2O = ft water # feet of H2O
feet of water = ft water
foot of head = ft water
ft hd = ft water # foot of head
psi = lbf/in^2 # pound / sq inch
pound per sq inch = psi
ksi = 1000 psi # 1000 lb / sq inch
#
# density units
#
[density]
density water = gram/cm^3
density sea water = 1.025 gram/cm^3
density Hg = 13.5950981 gram/cm^3
density air = 1.293 kg/m^3 # # at STP
density steel = 0.283 lb/in^3 # # carbon steel
density aluminum = 0.098 lb/in^3
density zinc = 0.230 lb/in^3
density brass = 0.310 lb/in^3 # # 80Cu-20Zn
density copper = 0.295 lb/in^3
density iron = 0.260 lb/in^3 # # cast iron
density nickel = 0.308 lb/in^3
density tin = 0.275 lb/in^3
density titanium = 0.170 lb/in^3
density silver = 0.379 lb/in^3
density nylon = 0.045 lb/in^3
density polycarbonate = 0.045 lb/in^3
#
# energy units
#
[energy]
joule = N*m
J = joule # joule
kilojoule = 1000 joule
kJ = kilojoule # kilojoule
megajoule = 1000 kilojoule
gigajoule = 1000 megajoule
millijoule = 0.001 joule
mJ = millijoule # millijoule
calorie = 4.1868 J
cal = calorie # calorie
kilocalorie = 1000 cal
kcal = kilocalorie # kilocalorie
calorie food = kilocalorie
Btu = cal*lb*R/g*K # British thermal unit
British thermal unit = Btu
erg = cm*dyne
electronvolt = 1.602176462e-19 J
eV = electronvolt # electronvolt
kWh = kW*hour # kilowatt-hour
kilowatt hour = kWh
ton TNT = 4.184e9 J
#
# power units
#
[power]
watt = J/s
W = watt # watt
kilowatt = 1000 W
kW = kilowatt # kilowatt
megawatt = 1000 kW
MW = megawatt # megawatt
gigawatt = 1000 MW
GW = gigawatt # gigawatt
milliwatt = 0.001 W
horsepower = 550 ft*lbf/sec
hp = horsepower # horsepower
metric horsepower = 75 kgf*m/s
#
# frequency
#
[frequency]
hertz = unit/sec
Hz = hertz # hertz
millihertz = 0.001 Hz
kilohertz = 1000 Hz
kHz = kilohertz # kilohertz
megahertz = 1000 kHz
MHz = megahertz # megahertz
gigahertz = 1000 MHz
GHz = gigahertz # gigahertz
#
# radioactivity
#
[radioactivity]
becquerel = unit/sec
Bq = becquerel # becquerel
curie = 3.7e10 Bq
millicurie = 0.001 curie
roentgen = 2.58e-4 coulomb/kg
[radiation dose]
gray = J/kg
Gy = gray # gray
rad. abs. dose = 0.001 Gy # # commonly rad
sievert = J/kg # # equiv. dose
millisievert = 0.001 sievert # # equiv. dose
Sv = sievert # sievert # equiv. dose
rem = 0.01 Sv # # roentgen equiv mammal
millirem = 0.001 rem # # roentgen equiv mammal
#
# viscosity
#
[dyn viscosity]
poise = g/cm*s
P = poise # poise
centipoise = 0.01 poise
cP = centipoise # centipoise
[kin viscosity]
stokes = cm^2/s
St = stokes # stokes
centistokes = 0.01 stokes
cSt = centistokes # centistokes
#
# misc. units
#
[acceleration]
gravity = 9.80665 m/s^2
[constant]
gravity constant = 6.673e-11 N*m^2/kg^2
gas constant = 8.314472 J/mol*K # R
[fuel consumpt.]
mpg = mi/gal # miles/gallon
liter per 100 km = [mpg] 3.785411784 / (x * 0.01609344) ; \
3.785411784 / (x * 0.01609344) # # non-linear
"""
class UnitGroup:
"Stores, updates and converts a group of units"
maxDecPlcs = 8
def __init__(self, unitData, option):
self.unitData = unitData
self.option = option
self.unitList = []
self.currentNum = 0
self.factor = 1.0
self.reducedList = []
self.linear = 1
def update(self, text, cursorPos=None):
"Decode user entered text into units"
self.unitList = self.parseGroup(text)
if cursorPos != None:
self.updateCurrentUnit(text, cursorPos)
else:
self.currentNum = len(self.unitList) - 1
def updateCurrentUnit(self, text, cursorPos):
"Set current unit number"
self.currentNum = len(re.findall('[\*/]', text[:cursorPos]))
def currentUnit(self):
"Return current unit if its a full match, o/w None"
if self.unitList and self.unitList[self.currentNum].equiv:
return self.unitList[self.currentNum]
return None
def currentPartialUnit(self):
"Return unit with at least a partial match, o/w None"
if not self.unitList:
return None
return self.unitData.findPartialMatch(self.unitList[self.currentNum]\
.name)
def currentSortPos(self):
"Return unit near current unit for sorting"
if not self.unitList:
return self.unitData[self.unitData.sortedKeys[0]]
return self.unitData.findSortPos(self.unitList[self.currentNum]\
.name)
def replaceCurrent(self, unit):
"Replace the current unit with unit"
if self.unitList:
exp = self.unitList[self.currentNum].exp
self.unitList[self.currentNum] = copy.copy(unit)
self.unitList[self.currentNum].exp = exp
else:
self.unitList.append(copy.copy(unit))
def completePartial(self):
"Replace a partial unit with a full one"
if self.unitList and not self.unitList[self.currentNum].equiv:
text = self.unitList[self.currentNum].name
unit = self.unitData.findPartialMatch(text)
if unit:
exp = self.unitList[self.currentNum].exp
self.unitList[self.currentNum] = copy.copy(unit)
self.unitList[self.currentNum].exp = exp
def moveToNext(self, upward):
"Replace unit with adjacent one based on match or sort position"
unit = self.currentSortPos()
num = self.unitData.sortedKeys.index(unit.name.\
replace(' ', '')) \
+ (upward and -1 or 1)
if 0 <= num < len(self.unitData.sortedKeys):
self.replaceCurrent(self.unitData[self.unitData.sortedKeys[num]])
def addOper(self, mult):
"Add new operator & blank unit after current, * if mult is true"
if self.unitList:
self.completePartial()
prevExp = self.unitList[self.currentNum].exp
self.currentNum += 1
self.unitList.insert(self.currentNum, Unit(''))
if (not mult and prevExp > 0) or (mult and prevExp < 0):
self.unitList[self.currentNum].exp = -1
def changeExp(self, newExp):
"Change the current unit's exponent"
if self.unitList:
self.completePartial()
if self.unitList[self.currentNum].exp > 0:
self.unitList[self.currentNum].exp = newExp
else:
self.unitList[self.currentNum].exp = -newExp
def clearUnit(self):
"Remove units"
self.unitList = []
def parseGroup(self, text):
"Return list of units from text string"
unitList = []
parts = [part.strip() for part in re.split('([\*/])', text)]
numerator = 1
while parts:
unit = self.parseUnit(parts.pop(0))
if not numerator:
unit.exp = -unit.exp
if parts and parts.pop(0) == '/':
numerator = not numerator
unitList.append(unit)
return unitList
def parseUnit(self, text):
"Return a valid or invalid unit with exponent from a text string"
parts = text.split('^', 1)
exp = 1
if len(parts) > 1: # has exponent
try:
exp = int(parts[1])
except ValueError:
if parts[1].lstrip().startswith('-'):
exp = -Unit.partialExp # tmp invalid exp
else:
exp = Unit.partialExp
unitText = parts[0].strip().replace(' ', '')
unit = copy.copy(self.unitData.get(unitText, None))
if not unit and unitText and unitText[-1] == 's' and not \
self.unitData.findPartialMatch(unitText): # check for plural
unit = copy.copy(self.unitData.get(unitText[:-1], None))
if not unit:
#unit = Unit(parts[0].strip()) # tmp invalid unit
raise UnitDataError('%s is not a valid unit.' % (unitText))
unit.exp = exp
return unit
def unitString(self, unitList=None):
"Return the full string for this group or a given group"
if unitList == None:
unitList = self.unitList[:]
fullText = ''
if unitList:
fullText = unitList[0].unitText(0)
numerator = 1
for unit in unitList[1:]:
if (numerator and unit.exp > 0) \
or (not numerator and unit.exp < 0):
fullText = '%s * %s' % (fullText, unit.unitText(1))
else:
fullText = '%s / %s' % (fullText, unit.unitText(1))
numerator = not numerator
return fullText
def groupValid(self):
"Return 1 if all unitself.reducedLists are valid"
if not self.unitList:
return 0
for unit in self.unitList:
if not unit.unitValid():
return 0
return 1
def reduceGroup(self):
"Update reduced list of units and factor"
self.linear = 1
self.reducedList = []
self.factor = 1.0
if not self.groupValid():
return
count = 0
tmpList = self.unitList[:]
while tmpList:
count += 1
if count > 5000:
raise UnitDataError, 'Circular unit definition'
unit = tmpList.pop(0)
if unit.equiv == '!':
self.reducedList.append(copy.copy(unit))
elif not unit.equiv:
raise UnitDataError, 'Invalid conversion for "%s"' % unit.name
else:
if unit.fromEqn:
self.linear = 0
newList = self.parseGroup(unit.equiv)
for newUnit in newList:
newUnit.exp *= unit.exp
tmpList.extend(newList)
self.factor *= unit.factor**unit.exp
self.reducedList.sort()
tmpList = self.reducedList[:]
self.reducedList = []
for unit in tmpList:
if self.reducedList and unit == self.reducedList[-1]:
self.reducedList[-1].exp += unit.exp
else:
self.reducedList.append(unit)
self.reducedList = [unit for unit in self.reducedList if \
unit.name != 'unit' and unit.exp != 0]
def categoryMatch(self, otherGroup):
"Return 1 if unit types are equivalent"
if not self.checkLinear() or not otherGroup.checkLinear():
return 0
return self.reducedList == otherGroup.reducedList and \
[unit.exp for unit in self.reducedList] \
== [unit.exp for unit in otherGroup.reducedList]
def checkLinear(self):
"Return 1 if linear or acceptable non-linear"
if not self.linear:
if len(self.unitList) > 1 or self.unitList[0].exp != 1:
return 0
return 1
def compatStr(self):
"Return string with reduced unit or linear compatability problem"
if self.checkLinear():
return self.unitString(self.reducedList)
return 'Cannot combine non-linear units'
def convert(self, num, toGroup):
"Return num of this group converted to toGroup"
if self.linear:
num *= self.factor
else:
num = self.nonLinearCalc(num, 1) * self.factor
n2 = -1
if toGroup.linear:
n2 = num / toGroup.factor
else:
n2 = toGroup.nonLinearCalc(num / toGroup.factor, 0)
return n2
def nonLinearCalc(self, num, isFrom):
"Return result of non-linear calculation"
x = num
try:
if self.unitList[0].toEqn: # regular equations
if isFrom:
temp = float(eval(self.unitList[0].fromEqn))
return temp
temp = float(eval(self.unitList[0].toEqn))
return temp
data = list(eval(self.unitList[0].fromEqn)) # extrapolation list
if isFrom:
data = [(float(group[0]), float(group[1])) for group in data]
else:
data = [(float(group[1]), float(group[0])) for group in data]
data.sort()
pos = len(data) - 1
for i in range(len(data)):
if num <= data[i][0]:
pos = i
break
if pos == 0:
pos = 1
y = (num-data[pos-1][0]) / float(data[pos][0]-data[pos-1][0]) \
* (data[pos][1]-data[pos-1][1]) + data[pos-1][1]
return y
except OverflowError:
return 1e9999
except:
raise UnitDataError, 'Bad equation for %s' % self.unitList[0].name
def convertStr(self, num, toGroup):
"Return formatted string of converted number"
return self.formatNumStr(self.convert(num, toGroup))
def formatNumStr(self, num):
"Return num string formatted per options"
decPlcs = self.option.intData('DecimalPlaces', 0, UnitGroup.maxDecPlcs)
if self.option.boolData('SciNotation'):
return ('%%0.%dE' % decPlcs) % num
if self.option.boolData('FixedDecimals'):
return ('%%0.%df' % decPlcs) % num
return ('%%0.%dG' % decPlcs) % num
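# Formatting sketch for the three option modes above (illustrative, assuming
# an option object with DecimalPlaces=4): significant digits
# '%0.4G' % 1609.344 -> '1609'; scientific '%0.4E' % 1609.344 -> '1.6093E+03';
# fixed decimals '%0.4f' % 1609.344 -> '1609.3440'.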
class UnitDataError(Exception):
pass
class UnitData(dict):
def __init__(self):
dict.__init__(self)
self.sortedKeys = []
def readData(self):
"Read all unit data from file"
types = []
typeUnits = {}
try:
f = StringIO.StringIO(unitData)
lines = f.readlines()
f.close()
except IOError:
raise UnitDataError, 'Can not read "units.dat" file'
for i in range(len(lines)): # join continuation lines
delta = 1
while lines[i].rstrip().endswith('\\'):
lines[i] = ''.join([lines[i].rstrip()[:-1], lines[i+delta]])
lines[i+delta] = ''
delta += 1
units = [Unit(line) for line in lines if \
line.split('#', 1)[0].strip()] # remove comment lines
typeText = ''
for unit in units: # find & set headings
if unit.name.startswith('['):
typeText = unit.name[1:-1].strip()
types.append(typeText)
typeUnits[typeText] = []
unit.typeName = typeText
units = [unit for unit in units if unit.equiv] # keep valid units
for unit in units:
self[unit.name.replace(' ', '')] = unit
typeUnits[unit.typeName].append(unit.name)
self.sortedKeys = self.keys()
self.sortedKeys.sort()
if len(self.sortedKeys) < len(units):
raise UnitDataError, 'Duplicate unit names found'
return (types, typeUnits)
def findPartialMatch(self, text):
"Return first partially matching unit or None"
text = text.replace(' ', '')
if not text:
return None
for name in self.sortedKeys:
if name.startswith(text):
return self[name]
return None
def findSortPos(self, text):
"Return unit whose abbrev comes immediately after text"
text = text.replace(' ', '')
for name in self.sortedKeys:
if text <= name:
return self[name]
return self[self.sortedKeys[-1]]
class Unit:
"Reads and stores a single unit conversion"
partialExp = 1000
def __init__(self, dataStr):
dataList = dataStr.split('#')
unitList = dataList.pop(0).split('=', 1)
self.name = unitList.pop(0).strip()
self.equiv = ''
self.factor = 1.0
self.fromEqn = '' # used only for non-linear units
self.toEqn = '' # used only for non-linear units
if unitList:
self.equiv = unitList[0].strip()
if self.equiv[0] == '[': # used only for non-linear units
try:
self.equiv, self.fromEqn = re.match('\[(.*?)\](.*)', \
self.equiv).groups()
if ';' in self.fromEqn:
self.fromEqn, self.toEqn = self.fromEqn.split(';', 1)
self.toEqn = self.toEqn.strip()
self.fromEqn = self.fromEqn.strip()
except AttributeError:
raise UnitDataError, 'Bad equation for "%s"' % self.name
else: # split factor and equiv unit for linear
parts = self.equiv.split(None, 1)
if len(parts) > 1 and re.search('[^\d\.eE\+\-\*/]', parts[0]) \
== None: # only allowed digits and operators
try:
self.factor = float(eval(parts[0]))
self.equiv = parts[1]
except:
pass
self.comments = [comm.strip() for comm in dataList]
self.comments.extend([''] * (2 - len(self.comments)))
self.exp = 1
self.viewLink = [None, None]
self.typeName = ''
def description(self):
"Return name and 1st comment (usu. full name) if applicable"
if self.comments[0]:
return '%s (%s)' % (self.name, self.comments[0])
return self.name
def unitValid(self):
"Return 1 if unit and exponent are valid"
if self.equiv and -Unit.partialExp < self.exp < Unit.partialExp:
return 1
return 0
def unitText(self, absExp=0):
"Return text for unit name with exponent or absolute value of exp"
exp = self.exp
if absExp:
exp = abs(self.exp)
if exp == 1:
return self.name
if -Unit.partialExp < exp < Unit.partialExp:
return '%s^%d' % (self.name, exp)
if exp > 1:
return '%s^' % self.name
else:
return '%s^-' % self.name
def __cmp__(self, other):
return cmp(self.name, other.name)
############################################################################
# Wrapper functionality
#
############################################################################
# Parse the data file, and set everything up for conversion
data = UnitData()
(types, unitsByType) = data.readData()
# At the moment, we're not handling options
option = None
# set up the objects for unit conversion
fromUnit = UnitGroup(data, option)
toUnit = UnitGroup(data, option)
def convert(num, unit1, unit2):
""" Convert from one unit to another
num is the factor for the first unit. Raises UnitDataError for
various errors.
"""
fromUnit.update(unit1)
toUnit.update(unit2)
fromUnit.reduceGroup()
toUnit.reduceGroup()
# Match up unit categories
if not fromUnit.categoryMatch(toUnit):
raise UnitDataError('unit categories did not match')
return fromUnit.convert(num, toUnit)
def units(type):
""" Return comma separated string list of units of given type, or
a list of types if the argument is not valid.
"""
if type in types:
return '%s units: %s' % (type, ', '.join(unitsByType[type]))
else:
return 'valid types: ' + ', '.join(types)
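# A minimal usage sketch (not part of the original module), exercising the
# wrapper API when the file is run directly; unit names come from the embedded
# data file above, and results are approximate:
if __name__ == '__main__':
    print convert(1, 'mi', 'km')     # linear conversion, ~1.609344
    print convert(100, 'C', 'F')     # non-linear (offset) conversion, ~212.0
    print units('pressure')          # comma separated list of pressure units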
|
tecan/xchat-rt
|
plugins/scripts/encryption/supybot-code-6361b1e856ebbc8e14d399019e2c53a35f4e0063/plugins/Math/local/convertcore.py
|
Python
|
gpl-2.0
| 42,339
|
[
"Avogadro"
] |
dbc286ee4777a1c89968675c515235b031089e12451356e07f89b5dd3a792ea3
|
#!/usr/bin/env python
# File created on 17 Feb 2010
from __future__ import division
from setuptools import setup
from stat import S_IEXEC
from os import (chdir, getcwd, listdir, chmod, walk, rename, remove,
                stat, devnull)
from os.path import join, abspath
from sys import platform, argv
from subprocess import call
from glob import glob
from urllib import FancyURLopener
import re
__author__ = "QIIME development team"
__copyright__ = "Copyright (c) 2011--, %s" % __author__
__credits__ = ["Greg Caporaso", "Kyle Bittinger", "Jai Ram Rideout",
"Yoshiki Vazquez Baeza", "Jose Antonio Navas Molina"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "qiime.help@gmail.com"
long_description = """QIIME: Quantitative Insights Into Microbial Ecology
http://www.qiime.org
QIIME Allows Integration and Analysis of High-Throughput Community Sequencing Data
J. Gregory Caporaso, Justin Kuczynski, Jesse Stombaugh, Kyle Bittinger, Frederic D. Bushman, Elizabeth K. Costello, Noah Fierer, Antonio Gonzalez Pena, Julia K. Goodrich, Jeffrey I. Gordon, Gavin A. Huttley, Scott T. Kelley, Dan Knights, Jeremy E. Koenig, Ruth E. Ley, Cathy A. Lozupone, Daniel McDonald, Brian D. Muegge, Meg Pirrung, Jens Reeder, Joel R. Sevinsky, Peter J. Turnbaugh, William van Treuren, William A. Walters, Jeremy Widmann, Tanya Yatsunenko, Jesse Zaneveld and Rob Knight.
Nature Methods, 2010.
"""
doc_imports_failed = False
try:
import sphinx
except ImportError:
doc_imports_failed = True
# if egg_info is passed as an argument do not build any of the dependencies
build_stack = 'egg_info' not in argv
def build_html():
""" Build the sphinx documentation
The code for building sphinx documentation is based on
PyCogent's setup.py.
"""
cwd = getcwd()
doc_dir = join(cwd, 'doc')
chdir(doc_dir)
call(["make", "html"])
chdir(cwd)
index_html_path = join(abspath(doc_dir), '_build', 'html', 'index.html')
print "Local documentation built with Sphinx. " +\
"Open to following path with a web browser:\n%s" %\
index_html_path
def build_denoiser():
""" Build the denoiser code binary """
cwd = getcwd()
denoiser_dir = join(cwd, 'qiime/support_files/denoiser/FlowgramAlignment')
chdir(denoiser_dir)
# make sure we compile the executable
call(["make", "clean"])
call(["make"])
chdir(cwd)
print "Denoiser built."
# heavily based on lib.util.download_file from github.com/qiime/qiime-deploy
class URLOpener(FancyURLopener):
def http_error_default(self, url, fp, errcode, errmsg, headers):
msg = 'ERROR: Could not download %s\nIs the URL valid?' % url
raise IOError(msg)
# heavily based on lib.util.download_file from github.com/qiime/qiime-deploy
def download_file(URL, dest_dir, local_file, num_retries=4):
"""General file downloader
Inputs:
URL: string to download the file from
dest_dir: directory where you want to download the file
local_file: output filename of the download
num_retries: number of times the function will try to download the file
Output:
return_code: exit status for the download 0 = success, 1 = fail
"""
url_opener = URLOpener()
localFP = join(dest_dir, local_file)
tmpDownloadFP = '%s.part' % localFP
return_code = 1
while num_retries > 0:
try:
tmpLocalFP, headers = url_opener.retrieve(URL, tmpDownloadFP)
rename(tmpDownloadFP, localFP)
return_code = 0
except IOError as msg:
if num_retries == 1:
print 'Download of %s failed.' % URL
else:
print 'Download failed. Trying again... %d tries remain.' % (
num_retries - 1)
num_retries -= 1
else:
num_retries = 0
print '%s downloaded successfully.' % local_file
return return_code
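# Usage sketch for download_file (hypothetical URL and file names, not from
# the original file); the '.part' temporary name ensures a partial download
# never shadows a complete file under the final name:
#
#     rc = download_file('http://example.org/FastTree.c', 'scripts/', 'FastTree.c')
#     if rc:
#         print 'download failed'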
def build_FastTree():
"""Download and build FastTree then copy it to the scripts directory"""
if download_file('http://www.microbesonline.org/fasttree/FastTree-2.1.3.c',
'scripts/', 'FastTree.c'):
print 'Could not download FastTree, not installing it.'
return
cwd = getcwd()
denoiser_dir = join(cwd, 'scripts')
chdir(denoiser_dir)
# as suggested by the docs in FastTree.c
call(['gcc', '-Wall', '-O3', '-finline-functions', '-funroll-loops', '-o',
'FastTree', 'FastTree.c', '-lm'])
# remove the source
remove('FastTree.c')
chdir(cwd)
print "FastTree built."
def download_UCLUST():
"""Download the UCLUST executable and set it to the scripts directory"""
if platform == 'darwin':
URL = 'http://www.drive5.com/uclust/uclustq1.2.22_i86darwin64'
elif platform == 'linux2':
URL = 'http://www.drive5.com/uclust/uclustq1.2.22_i86linux64'
else:
        raise SystemError("Platform not supported by UCLUST")
return_value = download_file(URL, 'scripts/', 'uclust')
# make the file an executable file
if not return_value:
chmod('scripts/uclust', stat('scripts/uclust').st_mode | S_IEXEC)
return return_value
def app_available(app_name):
"""Check if a binary is available and on the user Path
Inputs:
app_name: Name of the binary, i. e. 'ls', 'gcc' etc.
Output:
False if the binary is not found, True if the binary is found
"""
# redirect all output to /dev/null so nothing is seen on screen
devnull_fd = open(devnull, 'w')
output = True
try:
call([app_name], stdout=devnull_fd, stderr=devnull_fd)
except OSError:
output = False
finally:
devnull_fd.close()
return output
# do not compile and build any of these if running under pip's egg_info
if build_stack:
if app_available('ghc'):
build_denoiser()
else:
print "GHC not installed, so cannot build the Denoiser binary."
if app_available('gcc'):
build_FastTree()
else:
print "GCC not installed, so cannot build FastTree"
if download_UCLUST():
print "UCLUST could not be installed."
# taken from PyNAST
classes = """
Development Status :: 5 - Production/Stable
Environment :: Console
License :: OSI Approved :: GPL License
Topic :: Software Development :: Bioinformatics
Programming Language :: Python
Programming Language :: Python :: 2.7
Operating System :: UNIX
Operating System :: MacOS X
Operating System :: POSIX :: BSD
Operating System :: POSIX :: Linux
"""
classifiers = [s.strip() for s in classes.split('\n') if s]
# compile the list of all qiime_test_data files that need to be installed.
# these must be relative file paths, beginning after the qiime_test_data
# directory
qiime_test_data_files = []
for root, dnames, fnames in walk('qiime_test_data'):
try:
# strip 'qiime_test_data/' from the beginning of root
root = root.split('/', 1)[1]
except IndexError:
# if there is no '/', then we're in qiime_test_data itself
# so there is nothing to do
continue
else:
# if there is a slash, we're in a script test data directory,
# so compile all relative filepaths
for fname in fnames:
qiime_test_data_files.append(join(root, fname))
setup(name='qiime',
version=__version__,
description='Quantitative Insights Into Microbial Ecology',
long_description=long_description,
author=__author__,
classifiers=classifiers,
author_email=__email__,
maintainer=__author__,
maintainer_email=__email__,
url='http://www.qiime.org',
packages=['qiime', 'qiime/parallel', 'qiime/pycogent_backports',
'qiime/denoiser', 'qiime/workflow', 'qiime_test_data'],
scripts=glob('scripts/*py') + glob('scripts/ec2*') +
glob('scripts/FlowgramAli_4frame') + glob('scripts/FastTree') +
glob('scripts/uclust'),
package_data={'qiime':
['support_files/qiime_config',
'support_files/css/*css',
'support_files/html_templates/*html',
'support_files/images/*png',
'support_files/jar/*jar',
'support_files/js/*js',
'support_files/R/*r',
'support_files/denoiser/Data/*',
'support_files/denoiser/TestData/*',
'support_files/denoiser/FlowgramAlignment/*.lhs',
'support_files/denoiser/FlowgramAlignment/Makefile'],
'qiime_test_data': qiime_test_data_files},
license=__license__,
keywords=['bioinformatics', 'microbiome', 'microbiology', 'qiime'],
platforms=['MacOS', 'Linux'],
install_requires=['numpy >= 1.7.1',
'matplotlib >= 1.1.0, <= 1.3.1',
'pynast == 1.2.2', 'qcli', 'gdata',
'biom-format >= 2.1.1, < 2.2.0',
'emperor >= 0.9.5, < 1.0.0',
'scikit-bio >= 0.2.2, < 0.3.0',
'burrito-fillings >= 0.1.0, < 0.2.0',
'pandas >= 0.13.1', 'burrito < 1.0.0'],
extras_require={'all': ['ipython[all]', 'sphinx >= 0.3']}
)
if build_stack:
if doc_imports_failed:
print "Sphinx not installed, so cannot build local html documentation."
else:
build_html()
|
wasade/qiime
|
setup.py
|
Python
|
gpl-2.0
| 9,528
|
[
"Brian",
"scikit-bio"
] |
e708c4043a8982cc1161fff78b1a52ee445b24fd8c0885a76a80abbcaa4a15e4
|
""" Test class for Stalled Job Agent
"""
# imports
from __future__ import absolute_import
from mock import MagicMock
# DIRAC Components
from DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent import StalledJobAgent
from DIRAC import gLogger
# Mock Objects
mockAM = MagicMock()
mockNone = MagicMock()
mockNone.return_value = None
gLogger.setLevel('DEBUG')
def test__failSubmittingJobs(mocker):
""" Testing StalledJobAgent()._failSubmittingJobs()
"""
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent.AgentModule.__init__")
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent.AgentModule.am_getOption", side_effect=mockAM)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent.JobDB.__init__", side_effect=mockNone)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent.JobLoggingDB.__init__", side_effect=mockNone)
stalledJobAgent = StalledJobAgent()
stalledJobAgent._AgentModule__configDefaults = mockAM
stalledJobAgent.initialize()
stalledJobAgent.jobDB.log = gLogger
stalledJobAgent.log = gLogger
stalledJobAgent.log.setLevel('DEBUG')
result = stalledJobAgent._failSubmittingJobs()
assert not result['OK']
def test__failCompletedJobs(mocker):
""" Testing StalledJobAgent()._failCompletedJobs()
"""
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent.AgentModule.__init__")
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent.AgentModule.am_getOption", side_effect=mockAM)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent.JobDB.__init__", side_effect=mockNone)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent.JobLoggingDB.__init__", side_effect=mockNone)
stalledJobAgent = StalledJobAgent()
stalledJobAgent._AgentModule__configDefaults = mockAM
stalledJobAgent.initialize()
stalledJobAgent.jobDB.log = gLogger
stalledJobAgent.log = gLogger
stalledJobAgent.log.setLevel('DEBUG')
result = stalledJobAgent._failCompletedJobs()
assert not result['OK']
def test__kickStuckJobs(mocker):
""" Testing StalledJobAgent()._kickStuckJobs()
"""
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent.AgentModule.__init__")
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent.AgentModule.am_getOption", side_effect=mockAM)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent.JobDB.__init__", side_effect=mockNone)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent.JobLoggingDB.__init__", side_effect=mockNone)
stalledJobAgent = StalledJobAgent()
stalledJobAgent._AgentModule__configDefaults = mockAM
stalledJobAgent.initialize()
stalledJobAgent.jobDB.log = gLogger
stalledJobAgent.log = gLogger
stalledJobAgent.log.setLevel('DEBUG')
result = stalledJobAgent._kickStuckJobs()
assert not result['OK']
def test__failStalledJobs(mocker):
""" Testing StalledJobAgent()._failStalledJobs()
"""
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent.AgentModule.__init__")
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent.AgentModule.am_getOption", side_effect=mockAM)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent.JobDB.__init__", side_effect=mockNone)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent.JobLoggingDB.__init__", side_effect=mockNone)
stalledJobAgent = StalledJobAgent()
stalledJobAgent._AgentModule__configDefaults = mockAM
stalledJobAgent.initialize()
stalledJobAgent.jobDB.log = gLogger
stalledJobAgent.log = gLogger
stalledJobAgent.log.setLevel('DEBUG')
result = stalledJobAgent._failStalledJobs(0)
assert not result['OK']
def test__markStalledJobs(mocker):
""" Testing StalledJobAgent()._markStalledJobs()
"""
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent.AgentModule.__init__")
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent.AgentModule.am_getOption", side_effect=mockAM)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent.JobDB.__init__", side_effect=mockNone)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.StalledJobAgent.JobLoggingDB.__init__", side_effect=mockNone)
stalledJobAgent = StalledJobAgent()
stalledJobAgent._AgentModule__configDefaults = mockAM
stalledJobAgent.initialize()
stalledJobAgent.jobDB.log = gLogger
stalledJobAgent.log = gLogger
stalledJobAgent.log.setLevel('DEBUG')
result = stalledJobAgent._markStalledJobs(0)
assert not result['OK']
|
fstagni/DIRAC
|
WorkloadManagementSystem/Agent/test/Test_Agent_StalledJobAgent.py
|
Python
|
gpl-3.0
| 4,552
|
[
"DIRAC"
] |
5544f48a394849de8161ac24a22c676dd3d76fac65eb97ab18a9e8d9e7484aff
|
"""
Pandas dataframes for Census tables.
"""
from ambry.pands import AmbryDataFrame, AmbrySeries
import numpy as np
from six import string_types
def melt(df):
"""Melt a census dataframe into two value columns, for the estimate and margin"""
import pandas as pd
# Intial melt
melted = pd.melt(df, id_vars=list(df.columns[:9]), value_vars=list(df.columns[9:]))
melted = melted[['gvid', 'variable', 'value']]
    # Make two separate frames for estimates and margins.
estimates = melted[~melted.variable.str.contains('_m90')].set_index(['gvid', 'variable'])
margins = melted[melted.variable.str.contains('_m90')].copy()
margins.columns = ['gvid', 'ovariable', 'm90']
margins['variable'] = margins.ovariable.str.replace('_m90', '')
# Join the estimates to the margins.
final = estimates.join(margins.set_index(['gvid', 'variable']).drop('ovariable', 1))
return final
class CensusSeries(AmbrySeries):
ambry_column = None
def __init__(self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False):
super(CensusSeries, self).__init__(data, index, dtype, name, copy, fastpath)
def m90(self):
if self.name.endswith('_m90'):
return self
else:
return self._dataframe[self.name+'_m90'].astype('float')
def value(self):
"""Return the float value for an error column"""
if self.name.endswith('_m90'):
return self._dataframe[self.name.replace('_m90','')].astype('float')
else:
return self
def se(self):
"""Return a standard error series, computed from the 90% margins"""
return self.m90() / 1.645
def rse(self):
"""Return the relative standard error for a column"""
return self.se() / self.value() * 100
def m95(self):
"""Return a standard error series, computed from the 90% margins"""
return self.m90() / 1.645 * 1.96
def m99(self):
"""Return a standard error series, computed from the 90% margins"""
return self.m90() / 1.645 * 2.575
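# Worked example of the margin conversions above (values rounded; 1.645, 1.96
# and 2.575 are the standard normal quantiles for 90%, 95% and 99% confidence):
#   m90 = 10        ->  se  = 10 / 1.645         ~ 6.079
#                   ->  m95 = 10 / 1.645 * 1.96  ~ 11.915
#                   ->  m99 = 10 / 1.645 * 2.575 ~ 15.653
#   estimate = 200  ->  rse = 6.079 / 200 * 100  ~ 3.04 (percent)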
class CensusDataFrame(AmbryDataFrame):
def lookup(self, c):
from ambry.orm.exc import NotFoundError
if isinstance(c, string_types):
c = self[c]
if isinstance(c, int):
suffix = str(c).zfill(3)
full_col = [col for col in self.columns if col.endswith(suffix)][0]
c = self[full_col]
else:
pass
try:
c.ambry_column = self.partition.table.column(c.name)
except NotFoundError:
c.ambry_column = None
return c
def sum_m(self, *cols):
"""Sum a set of Dataframe series and return the summed series and margin. The series must have names"""
        # See the ACS General Handbook, Appendix A
        # (https://www.census.gov/content/dam/Census/library/publications/2008/acs/ACSGeneralHandbook.pdf)
        # for a guide to these calculations. For a sum of estimates, the
        # combined margin is the square root of the sum of the squared
        # component margins ("Calculating MOEs for Aggregated Count Data").
# Convert string column names to columns.
if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
cols = cols[0]
cols = [ self.lookup(c) for c in cols]
value = sum(cols)
m = np.sqrt(sum(c.m90()*c.m90() for c in cols))
return value, m
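    # Worked example of the aggregation rule above (illustrative numbers):
    # summing two estimates with margins 10 and 20 gives a combined margin
    # of sqrt(10**2 + 20**2) = sqrt(500) ~ 22.36, smaller than the naive
    # 10 + 20 = 30 because independent errors partially cancel.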
def add_sum_m(self, col_name, *cols):
"""
Add new columns for the sum, plus error margins, for 2 or more other columns
The routine will add two new columns, one named for col_name, and one for <col_name>_m90
:param col_name: The base name of the new column
:param cols:
:return:
"""
self[col_name], self[col_name+'_m90'] = self.sum_m(*cols)
def add_rse(self, *col_name):
"""
Create a new column, <col_name>_rse for Relative Standard Error, using <col_name> and <col_name>_m90
:param col_name:
:return:
"""
for cn in col_name:
self[cn + '_rse'] = self[cn].rse()
def sum_col_group(self, header, last):
"""Sum a contiguous group of columns, and return the sum and the new margins. """
cols = [self.lookup(i) for i in range(header, last+1)]
value = sum(cols)
        m = np.sqrt(sum(c.m90()**2 for c in cols))  # builtin sum; np.sum does not accept generators
return value, m
def ratio(self, n, d, subset=True):
"""
Compute a ratio of a numerator and denominator, propagating errors
Both arguments may be one of:
        * A Series, which must have a .name property for a column in the dataset
* A column name
* A tuple of two of either of the above.
In the tuple form, the first entry is the value and the second is the 90% margin
:param n: A series or tuple(Series, Series)
:param d: A series or tuple(Series, Series)
:return: Tuple(Series, Series)
"""
def normalize(x):
if isinstance(x, tuple):
x, m90 = self.lookup(x[0]), self.lookup(x[1])
elif isinstance(x, string_types):
x = self.lookup(x)
m90 = x.m90()
elif isinstance(x, AmbrySeries):
m90 = x.m90()
elif isinstance(x, int):
x = self.lookup(x)
m90 = x.m90()
return x, m90
n, n_m90 = normalize(n)
d, d_m90 = normalize(d)
rate = np.round(n / d, 3)
if subset:
try:
# From external_documentation.acs_handbook, Appendix A, "Calculating MOEs for
# Derived Proportions". This is for the case when the numerator is a subset of the
# denominator
rate_m = np.sqrt(n_m90 ** 2 - ((rate ** 2) * (d_m90 ** 2))) / d
except ValueError:
# In the case, of a neg arg to a square root, the acs_handbook recommends using the
# method for "Calculating MOEs for Derived Ratios", where the numerator
# is not a subset of the denominator. Since our numerator is a subset, the
# handbook says " use the formula for derived ratios in the next section which
# will provide a conservative estimate of the MOE."
# The handbook says this case should be rare, but for this calculation, it
# happens about 50% of the time.
rate_m = np.sqrt(n_m90 ** 2 + ((rate ** 2) * (d_m90 ** 2))) / d
else:
rate_m = np.sqrt(n_m90 ** 2 + ((rate ** 2) * (d_m90 ** 2))) / d
return rate, rate_m
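    # Usage sketch for ratio() (hypothetical column names, not from the
    # original file):
    #
    #     share, share_m90 = df.ratio('b01001002', 'b01001001')
    #     df['male_share'], df['male_share_m90'] = share, share_m90
    #
    # With subset=True the numerator is assumed to be contained in the
    # denominator, so the subtractive MOE formula is tried first.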
def dim_columns(self, pred):
"""
Return a list of columns that have a particular value for age,
sex and race_eth. The `pred` parameter is a string of python
code which is evaled, with the classification dict as the local
variable context, so the code string can access these variables:
- sex
- age
- race-eth
- col_num
Col_num is the number in the last three digits of the column name
Some examples of predicate strings:
- "sex == 'male' and age != 'na' "
:param pred: A string of python code that is executed to find column matches.
"""
from censuslib.dimensions import classify
out_cols = []
for i, c in enumerate(self.partition.table.columns):
if c.name.endswith('_m90'):
continue
if i < 9:
continue
cf = classify(c)
cf['col_num'] = int(c.name[-3:])
if eval(pred, {}, cf):
out_cols.append(c.name)
return out_cols
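    # Usage sketch for dim_columns() (hypothetical predicate):
    #
    #     cols = df.dim_columns("sex == 'female' and age != 'na'")
    #
    # The predicate is evaluated with eval() against each column's
    # classification dict, so it must only be built from trusted strings.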
def __getitem__(self, key):
"""
"""
from pandas import DataFrame, Series
from ambry.orm.exc import NotFoundError
result = super(CensusDataFrame, self).__getitem__(key)
if isinstance(result, DataFrame):
result.__class__ = CensusDataFrame
result._dataframe = self
elif isinstance(result, Series):
result.__class__ = CensusSeries
result._dataframe = self
try:
result.ambry_column = self.partition.table.column(result.name)
except NotFoundError:
result.ambry_column = None
return result
def copy(self, deep=True):
r = super(CensusDataFrame, self).copy(deep)
r.__class__ = CensusDataFrame
r.partition = self.partition
return r
|
CivicKnowledge/censuslib
|
censuslib/dataframe.py
|
Python
|
mit
| 8,628
|
[
"MOE"
] |
b0bd155bba7bb90c9e1a35a9cd8495bc58c33aa5082f7560db654b3fd5bbb809
|
# Copyright 1999 by Jeffrey Chang. All rights reserved.
# Copyright 2009 by Michiel de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
This module provides code to work with the enzyme.dat file from
Enzyme.
http://www.expasy.ch/enzyme/
Tested with the release of 03-Mar-2009.
Functions:
read Reads a file containing one ENZYME entry
parse Reads a file containing multiple ENZYME entries
Classes:
Record Holds ENZYME data.
"""
def parse(handle):
"""Parse ENZYME records.
This function is for parsing ENZYME files containing multiple
records.
handle - handle to the file."""
while True:
record = __read(handle)
if not record:
break
yield record
def read(handle):
"""Read one ENZYME record.
This function is for parsing ENZYME files containing
exactly one record.
handle - handle to the file."""
record = __read(handle)
# We should have reached the end of the record by now
remainder = handle.read()
if remainder:
raise ValueError("More than one ENZYME record found")
return record
class Record(dict):
"""\
Holds information from an ExPASy ENZYME record as a Python dictionary.
Each record contains the following keys:
ID: EC number
DE: Recommended name
AN: Alternative names (if any)
CA: Catalytic activity
CF: Cofactors (if any)
    PR: Pointers to the Prosite documentation entries that
correspond to the enzyme (if any)
    DR: Pointers to the Swiss-Prot protein sequence entries
that correspond to the enzyme (if any)
CC: Comments
"""
def __init__(self):
dict.__init__(self)
self["ID"] = ''
self["DE"] = ''
self["AN"] = []
self["CA"] = ''
self["CF"] = ''
self["CC"] = [] # one comment per line
self["PR"] = []
self["DR"] = []
def __repr__(self):
if self["ID"]:
if self["DE"]:
return "%s (%s, %s)" % (self.__class__.__name__,
self["ID"], self["DE"])
else:
return "%s (%s)" % (self.__class__.__name__,
self["ID"])
else:
return "%s ( )" % (self.__class__.__name__)
def __str__(self):
output = "ID: " + self["ID"]
output += " DE: " + self["DE"]
output += " AN: " + repr(self["AN"])
output += " CA: '" + self["CA"] + "'"
output += " CF: " + self["CF"]
output += " CC: " + repr(self["CC"])
output += " PR: " + repr(self["PR"])
output += " DR: %d Records" % len(self["DR"])
return output
# Everything below is private
def __read(handle):
record = None
for line in handle:
key, value = line[:2], line[5:].rstrip()
if key=="ID":
record = Record()
record["ID"] = value
elif key=="DE":
record["DE"]+=value
elif key=="AN":
if record["AN"] and not record["AN"][-1].endswith("."):
record["AN"][-1] += " " + value
else:
record["AN"].append(value)
elif key=="CA":
record["CA"] += value
elif key=="DR":
pair_data = value.rstrip(";").split(';')
for pair in pair_data:
t1, t2 = pair.split(',')
row = [t1.strip(), t2.strip()]
record["DR"].append(row)
elif key=="CF":
if record["CF"]:
record["CF"] += " " + value
else:
record["CF"] = value
elif key=="PR":
assert value.startswith("PROSITE; ")
value = value[9:].rstrip(";")
record["PR"].append(value)
elif key=='CC':
if value.startswith("-!- "):
record["CC"].append(value[4:])
elif value.startswith(" ") and record["CC"]:
record["CC"][-1] += value[3:]
# copyright notice is silently skipped
elif key=="//":
if record:
return record
else: # This was the copyright notice
continue
if record:
raise ValueError("Unexpected end of stream")
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/ExPASy/Enzyme.py
|
Python
|
gpl-2.0
| 4,440
|
[
"Biopython"
] |
96cd5f3e224edc427479e47e127d24ae8c9af3565254b96438aed6ec00e8fc89
|
#! /usr/bin/env python
"""
usage:
vk <command> [<args>...]
vk setup
vk -h | --help
vk --version
commands:
calc
call
filter
geno
genome
hmm
phylo
primer
rename
tajima
vcf2tsv
"""
from vcfkit import __version__
from .utils import lev, message
from docopt import docopt
from subprocess import call, check_output, CalledProcessError
from .utils.vcf import *
from clint.textui import colored, puts, indent
import sys
from . import vk
import os
import signal
signal.signal(signal.SIGINT, lambda x,y: sys.exit(0))
command_list = [x.strip() for x in filter(len, __doc__.splitlines()[8:])]
debug = None
if len(sys.argv) == 1:
debug = [""]
def getScriptPath():
return os.path.dirname(vk.__file__)
def main():
args = docopt(__doc__,
argv=debug,
options_first=True,
version=__version__)
argv = [args['<command>']] + args['<args>']
program_list = {"bwa": "bwa",
"samtools": "samtools",
"bcftools": "bcftools",
"blast": "blastn",
"muscle": "muscle"}
if args["<command>"] == "setup":
"""
Use Homebrew to install programs!
"""
program_installed = list(program_list.keys())
for install_name, program in list(program_list.items()):
try:
with indent(4):
puts(colored.blue("Installing " + install_name))
check_output(["brew", "install", install_name])
program_installed.remove(install_name)
except CalledProcessError:
try:
check_output(["which", program])
with indent(4):
puts(colored.blue(program + " previously installed"))
program_installed.remove(install_name)
except CalledProcessError:
with indent(4):
puts(colored.red("Error installing " + install_name))
if len(program_installed) == 0:
with indent(4):
puts(colored.blue("Programs successfully installed!"))
else:
with indent(4):
puts(colored.red("Error: Not all programs successfully installed: " + ", ".join(program_installed)))
elif args["<command>"] == "":
print(__doc__)
for prog in list(program_list.values()):
try:
check_output(["which", prog])
except CalledProcessError:
with indent(4):
puts(
colored.red(prog + " not installed. Use a package manager to install or try using 'vk setup'\n"))
elif args['<command>'] in command_list:
comm = ['python', getScriptPath() + '/' + args["<command>"] + ".py"] + argv
exit(call(comm))
else:
levs = [(x, lev(args['<command>'], x)) for x in command_list]
closest = min(levs, key = lambda x: x[1])[0]
command = args['<command>']
message("There is no command '{command}'. Did you mean 'vk {closest}'?".format(**locals()))
if __name__ == '__main__':
main()
|
AndersenLab/vcf-toolbox
|
vcfkit/vk.py
|
Python
|
mit
| 3,184
|
[
"BLAST",
"BWA"
] |
c9d94e658c3e1ae0e4df174a16afe5905ab32f3f47a94230b824dc481769a162
|
from datetime import datetime
from flask_babel import lazy_gettext
from c3bottles import db
from c3bottles.model import drop_point
class Report(db.Model):
"""
The report of a visitor that a drop point needs maintenance.
When visitors find a drop point needing maintenance, they may report
the drop point to the bottle collectors. A report is issued for a
given drop point which has a time and optionally some information
about the state of the drop point in question.
"""
state_weights = [
["DEFAULT", 5.0], # 0 should be the default/unknown state
["NEW", 1.0], # 1 should be the state of new drop points
["NO_CRATES", 5.0],
["SOME_BOTTLES", 1.0],
["REASONABLY_FULL", 2.0],
["FULL", 3.0],
["OVERFLOW", 5.0],
["EMPTY", 0.0] # -1 should be the EMPTY state
]
states = [e[0] for e in state_weights]
rep_id = db.Column(db.Integer, primary_key=True)
dp_id = db.Column(
db.Integer,
db.ForeignKey("drop_point.number"),
nullable=False
)
dp = db.relationship("DropPoint")
time = db.Column(db.DateTime, nullable=False)
state = db.Column(
db.Enum(*states, name="report_states"),
default=states[0]
)
def __init__(self, dp, time=None, state=None):
errors = []
self.dp = dp
if not isinstance(dp, drop_point.DropPoint):
errors.append({"Report": lazy_gettext("Not given a drop point object.")})
else:
if dp.removed:
errors.append({"Report": lazy_gettext("Drop point has been removed.")})
if time and not isinstance(time, datetime):
errors.append({"Report": lazy_gettext("Time not a datetime object.")})
if isinstance(time, datetime) and time > datetime.today():
errors.append({"Report": lazy_gettext("Start time in the future.")})
self.time = time if time else datetime.today()
if state in Report.states:
self.state = state
else:
errors.append({"Report": lazy_gettext("Invalid or missing reported state.")})
if errors:
raise ValueError(*errors)
db.session.add(self)
def get_weight(self):
"""Get the weight (i.e. significance) of a report.
The weight of a report determines how significant it is for the
calculation of the priority to visit the respective drop point
soon.
Most important for the weight of a report is the state of the
drop point as seen by the reporter. The report of an
overflowing drop point is certainly more important than one
of a drop point nearly empty.
If the reporter is a trusted user, that increases the weight.
Special users see special weights: The supervisor of the
bottle collectors is not focused on full drop points (that's
what they have a collector team for) but rather on solving
problems like overflows or missing crates reported by trusted
users.
The default weight under default conditions is 1 and all
influences should only multiply that default value with some
factor.
"""
# TODO:
# - weight should depend on the reporter (trusted > stranger)
# - weight should depend on the viewer (supervisor: problem-
# focused, collector: collection-focused)
return self.get_state_weight(self.state)
@classmethod
def get_state_weight(cls, state):
for elem in cls.state_weights:
if elem[0] == state:
return elem[1]
return float(cls.state_weights[0][1])
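    # Illustrative weights resolved by the lookup above:
    #     Report.get_state_weight("OVERFLOW")      # -> 5.0
    #     Report.get_state_weight("SOME_BOTTLES")  # -> 1.0
    #     Report.get_state_weight("nonsense")      # -> 5.0 (DEFAULT fallback)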
def __repr__(self):
return "Report %s of drop point %s (state %s at %s)" % (
self.rep_id, self.dp_id,
self.state, self.time
)
|
der-michik/c3bottles
|
c3bottles/model/report.py
|
Python
|
mit
| 3,885
|
[
"VisIt"
] |
df3014fe594304f17dab6a1728733433ea1f5f1d5c5bb1610f209f1f7473e01c
|
from ase.data import atomic_numbers, chemical_symbols, reference_states
from ase.units import *
import fcc
import au
lattice = {'fcc': fcc.data,
}
element = {79: au.data, #Au
}
|
auag92/n2dm
|
Asap-3.8.4/Python/asap3/nanoparticle_mc/data/__init__.py
|
Python
|
mit
| 200
|
[
"ASE"
] |
95a1d2fb8fccb07695416eb5810cd4b58943b7a94262fddb5170c913f6e56bce
|
from numpy import *
from asap3 import *
from ase.lattice.compounds import B2
from asap3.testtools import ReportTest
atoms = B2(size=(332,166,5), symbol=('Cu', 'Zr'), latticeconstant=2.7)
lj = LennardJones((29,40), 0.2*array([0.5, 1.0, 1.0, 0.5]),
array([1.236, 2.0, 2.0, 2.352]),
rCut=2.0*2.352)
atoms.set_calculator(lj)
# If the following line is included, the bug is not seen.
#atoms.get_potential_energy()
stress = atoms.get_stress()
exp_tr = 9.30356046e-02
for i in range(3):
ReportTest("Diagonal elements of stress tensor", stress[i], exp_tr, 1e-6)
for i in range(3,6):
ReportTest("Off-diagonal elements of stress tensor", stress[i], 0.0, 1e-8)
ReportTest.Summary()
|
auag92/n2dm
|
Asap-3.8.4/Test/LJ_Stress.py
|
Python
|
mit
| 722
|
[
"ASE"
] |
1c771a3aed3f36811a31bf3d375a48f4301c6960dcabc74e2f006805ee9b14b5
|
# Copyright (C) 2015 Henrique Pereira Coutada Miranda, Alejandro Molina-Sanchez
# All rights reserved.
#
# This file is part of yambopy
#
import xml.etree.ElementTree as ET
from qepy.auxiliary import *
from numpy import array
from lattice import *
HatoeV = 27.2107
class PwXML():
""" Class to read data from a Quantum espresso XML file
"""
_eig_xml = 'eigenval.xml'
def __init__(self,prefix,path='.'):
""" Initlize the structure with the path where the datafile.xml is
"""
self.prefix = prefix
self.path = path
datafiles = {'data-file.xml': self.read_datafile,
'data-file-schema.xml': self.read_datafile_schema}
done_reading = False
#check if the name is data-file.xml or data-file-schema.xml or whatever....
for filename,read in datafiles.items():
path_filename = "%s/%s.save/%s"%(path, prefix, filename)
if os.path.isfile(path_filename):
print "reading %s"%filename
done_reading = read(path_filename)
break
#trap errors
if not done_reading:
possible_files = " or ".join(datafiles.keys())
raise ValueError('Failed to read %s in %s/%s.save'%(possible_files,path,prefix))
def read_datafile(self,filename):
"""
Read some data from the xml file in the old format of quantum espresso
"""
self.datafile_xml = ET.parse( filename ).getroot()
#get acell
self.acell = [ float(x) for x in self.datafile_xml.findall("CELL/CELL_DIMENSIONS")[0].text.strip().split('\n') ]
#get cell
self.cell = []
for i in xrange(1,4):
cell_lat = self.datafile_xml.findall("CELL/DIRECT_LATTICE_VECTORS/a%d"%i)[0].text
self.cell.append([float(x) for x in cell_lat.strip().split()])
#get reciprocal cell
self.rcell = []
for i in xrange(1,4):
rcell_lat = self.datafile_xml.findall("CELL/RECIPROCAL_LATTICE_VECTORS/b%d"%i)[0].text
self.rcell.append([float(x) for x in rcell_lat.strip().split()])
#get atoms
self.natoms = int(self.datafile_xml.findall("IONS/NUMBER_OF_ATOMS")[0].text)
self.atoms = []
for i in xrange(1,self.natoms+1):
atom = self.datafile_xml.findall("IONS/ATOM.%d"%i)[0].get('tau')
self.atoms.append([float(x) for x in atom.strip().split()])
#get nkpoints
self.nkpoints = int(self.datafile_xml.findall("BRILLOUIN_ZONE/NUMBER_OF_K-POINTS")[0].text.strip())
# Read the number of BANDS
self.nbands = int(self.datafile_xml.find("BAND_STRUCTURE_INFO/NUMBER_OF_BANDS").text)
#get k-points
self.kpoints = []
for i in range(self.nkpoints):
k_aux = self.datafile_xml.findall('BRILLOUIN_ZONE/K-POINT.%d'%(i+1))[0].get('XYZ')
self.kpoints.append([float(x) for x in k_aux.strip().split()])
#get eigenvalues
eigen = []
for ik in xrange(self.nkpoints):
for EIGENVALUES in ET.parse( "%s/%s.save/K%05d/%s" % (self.path,self.prefix,(ik + 1),self._eig_xml) ).getroot().findall("EIGENVALUES"):
eigen.append(map(float, EIGENVALUES.text.split()))
self.eigen = eigen
#get fermi
self.fermi = float(self.datafile_xml.find("BAND_STRUCTURE_INFO/FERMI_ENERGY").text)
return True
def read_datafile_schema(self,filename):
"""
Read the data from the xml file in the new format of quantum espresso
"""
self.datafile_xml = ET.parse( filename ).getroot()
#get cell
self.cell = []
for i in xrange(1,4):
cell_lat = self.datafile_xml.findall("input/atomic_structure/cell/a%d"%i)[0].text
self.cell.append([float(x) for x in cell_lat.strip().split()])
#calculate acell
self.acell = [ np.linalg.norm(a) for a in self.cell ]
#get reciprocal cell
self.rcell = []
for i in xrange(1,4):
rcell_lat = self.datafile_xml.findall("output/basis_set/reciprocal_lattice/b%d"%i)[0].text
self.rcell.append([float(x) for x in rcell_lat.strip().split()])
#get atoms
self.natoms = int(self.datafile_xml.findall("output/atomic_structure")[0].get('nat'))
self.atoms = []
atoms = self.datafile_xml.findall("output/atomic_structure/atomic_positions/atom")
for i in xrange(self.natoms):
atom = atoms[i].text
self.atoms.append([float(x) for x in atom.strip().split()])
#get nkpoints
self.nkpoints = int(self.datafile_xml.findall("output/band_structure/nks")[0].text.strip())
# Read the number of BANDS
self.nbands = int(self.datafile_xml.findall("output/band_structure/nbnd")[0].text.strip())
#get ks states
kstates = self.datafile_xml.findall('output/band_structure/ks_energies')
#get k-points
self.kpoints = []
for i in range(self.nkpoints):
kpoint = [float(x) for x in kstates[i].findall('k_point')[0].text.strip().split()]
self.kpoints.append( kpoint )
#get eigenvalues
self.eigen = []
for k in range(self.nkpoints):
eigen = [float(x) for x in kstates[k].findall('eigenvalues')[0].text.strip().split()]
self.eigen.append( eigen )
self.eigen = np.array(self.eigen)
#get fermi
self.fermi = float(self.datafile_xml.find("output/band_structure/highestOccupiedLevel").text)
return True
def get_scaled_positions(self):
""" get the atomic positions in reduced coordinates
"""
return car_red(self.atoms,self.cell)
def __str__(self):
s = ""
s += "cell:\n"
for c in self.cell:
s += ("%12.8lf "*3)%tuple(c)+'\n'
s += "atoms:\n"
for a in self.atoms:
s += ("%12.8lf "*3)%tuple(a)+'\n'
s += "nkpoints: %d\n"%self.nkpoints
s += "nbands: %d\n"%self.nbands
return s
def plot_eigen(self,path=[],xlim=(),ylim=()):
""" plot the eigenvalues using matplotlib
"""
import matplotlib.pyplot as plt
if path:
if isinstance(path,Path):
path = path.get_indexes()
plt.xticks( *zip(*path) )
plt.ylabel('E (eV)')
#plot vertical line
for point in path:
x, label = point
plt.axvline(x)
plt.axhline(0)
#plot bands
eigen = array(self.eigen)
for ib in range(self.nbands):
plt.plot(xrange(self.nkpoints),eigen[:,ib]*HatoeV - self.fermi*HatoeV, 'r-', lw=2)
#plot options
if xlim:
plt.xlim(xlim)
if ylim:
plt.ylim(ylim)
plt.show()
def write_eigen(self,fmt='gnuplot'):
""" write eigenvalues to a text file
"""
if fmt=='gnuplot':
f = open('%s.dat'%self.prefix,'w')
for ib in xrange(self.nbands):
for ik in xrange(self.nkpoints):
f.write("%.1lf %.4lf \n " % (ik,self.eigen[ik][ib]*HatoeV) )
f.write("\n")
f.close()
else:
print 'fmt %s not implemented'%fmt
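# Usage sketch (hypothetical prefix and path, not part of the original
# module); eigenvalues are stored in Hartree and converted with HatoeV:
if __name__ == "__main__":
    xml = PwXML(prefix='si', path='./scf')  # reads ./scf/si.save/data-file*.xml
    print xml                               # cell, atoms, nkpoints, nbands
    xml.plot_eigen(ylim=(-5, 5))            # bands around the Fermi level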
|
henriquemiranda/yambopy
|
qepy/pwxml.py
|
Python
|
bsd-3-clause
| 7,356
|
[
"Quantum ESPRESSO"
] |
7f352e991453c8ad57a1c4bcd137194371fdd5ca43efa4f94b6bcc09e36b8e27
|